Diffstat (limited to 'lib')
-rw-r--r-- lib/Kconfig | 128
-rw-r--r-- lib/Kconfig.debug | 190
-rw-r--r-- lib/Kconfig.kgdb | 2
-rw-r--r-- lib/Kconfig.ubsan | 25
-rw-r--r-- lib/Makefile | 49
-rw-r--r-- lib/alloc_tag.c | 6
-rw-r--r-- lib/atomic64.c | 78
-rw-r--r-- lib/atomic64_test.c | 2
-rw-r--r-- lib/cpumask.c | 42
-rw-r--r-- lib/crc-t10dif.c | 156
-rw-r--r-- lib/crc16_kunit.c | 155
-rw-r--r-- lib/crc32.c | 250
-rw-r--r-- lib/crc32defs.h | 59
-rw-r--r-- lib/crc32test.c | 852
-rw-r--r-- lib/crc64-rocksoft.c | 126
-rw-r--r-- lib/crc64.c | 49
-rw-r--r-- lib/crc7.c | 6
-rw-r--r-- lib/crypto/aesgcm.c | 2
-rw-r--r-- lib/crypto/gf128mul.c | 75
-rw-r--r-- lib/dump_stack.c | 4
-rw-r--r-- lib/fault-inject.c | 28
-rw-r--r-- lib/fonts/Kconfig | 2
-rw-r--r-- lib/gen_crc32table.c | 113
-rw-r--r-- lib/gen_crc64table.c | 10
-rw-r--r-- lib/inflate.c | 2
-rw-r--r-- lib/iov_iter.c | 11
-rw-r--r-- lib/kobject.c | 24
-rw-r--r-- lib/kunit/Kconfig | 12
-rw-r--r-- lib/kunit/debugfs.c | 2
-rw-r--r-- lib/kunit/executor.c | 21
-rw-r--r-- lib/kunit/test.c | 6
-rw-r--r-- lib/libcrc32c.c | 74
-rw-r--r-- lib/list_debug.c | 22
-rw-r--r-- lib/list_sort.c | 7
-rw-r--r-- lib/lz4/lz4_compress.c | 1
-rw-r--r-- lib/lz4/lz4_decompress.c | 1
-rw-r--r-- lib/lz4/lz4defs.h | 4
-rw-r--r-- lib/lz4/lz4hc_compress.c | 1
-rw-r--r-- lib/maple_tree.c | 73
-rw-r--r-- lib/math/Makefile | 4
-rw-r--r-- lib/math/prime_numbers.c | 91
-rw-r--r-- lib/math/prime_numbers_private.h | 16
-rw-r--r-- lib/math/tests/Makefile | 7
-rw-r--r-- lib/math/tests/gcd_kunit.c | 56
-rw-r--r-- lib/math/tests/int_log_kunit.c | 74
-rw-r--r-- lib/math/tests/int_sqrt_kunit.c | 66
-rw-r--r-- lib/math/tests/prime_numbers_kunit.c | 59
-rw-r--r-- lib/math/tests/rational_kunit.c (renamed from lib/math/rational-test.c) | 0
-rw-r--r-- lib/packing.c | 293
-rw-r--r-- lib/packing_test.c | 61
-rw-r--r-- lib/parser.c | 5
-rw-r--r-- lib/pldmfw/pldmfw.c | 8
-rw-r--r-- lib/rcuref.c | 5
-rw-r--r-- lib/rhashtable.c | 14
-rw-r--r-- lib/sort.c | 7
-rw-r--r-- lib/test_bitmap.c | 28
-rw-r--r-- lib/test_bpf.c | 64
-rw-r--r-- lib/test_maple_tree.c | 56
-rw-r--r-- lib/test_min_heap.c | 30
-rw-r--r-- lib/test_objpool.c | 22
-rw-r--r-- lib/test_sysctl.c | 6
-rw-r--r-- lib/test_ubsan.c | 18
-rw-r--r-- lib/test_vmalloc.c | 2
-rw-r--r-- lib/test_xarray.c | 38
-rw-r--r-- lib/tests/Makefile | 43
-rw-r--r-- lib/tests/bitfield_kunit.c (renamed from lib/bitfield_kunit.c) | 0
-rw-r--r-- lib/tests/checksum_kunit.c (renamed from lib/checksum_kunit.c) | 0
-rw-r--r-- lib/tests/cmdline_kunit.c (renamed from lib/cmdline_kunit.c) | 0
-rw-r--r-- lib/tests/cpumask_kunit.c (renamed from lib/cpumask_kunit.c) | 0
-rw-r--r-- lib/tests/crc_kunit.c | 495
-rw-r--r-- lib/tests/fortify_kunit.c (renamed from lib/fortify_kunit.c) | 156
-rw-r--r-- lib/tests/hashtable_test.c (renamed from lib/hashtable_test.c) | 0
-rw-r--r-- lib/tests/is_signed_type_kunit.c (renamed from lib/is_signed_type_kunit.c) | 0
-rw-r--r-- lib/tests/kfifo_kunit.c | 224
-rw-r--r-- lib/tests/kunit_iov_iter.c (renamed from lib/kunit_iov_iter.c) | 5
-rw-r--r-- lib/tests/list-test.c (renamed from lib/list-test.c) | 0
-rw-r--r-- lib/tests/memcpy_kunit.c (renamed from lib/memcpy_kunit.c) | 0
-rw-r--r-- lib/tests/overflow_kunit.c (renamed from lib/overflow_kunit.c) | 38
-rw-r--r-- lib/tests/printf_kunit.c (renamed from lib/test_printf.c) | 442
-rw-r--r-- lib/tests/scanf_kunit.c (renamed from lib/test_scanf.c) | 295
-rw-r--r-- lib/tests/siphash_kunit.c (renamed from lib/siphash_kunit.c) | 0
-rw-r--r-- lib/tests/slub_kunit.c (renamed from lib/slub_kunit.c) | 59
-rw-r--r-- lib/tests/stackinit_kunit.c (renamed from lib/stackinit_kunit.c) | 138
-rw-r--r-- lib/tests/string_helpers_kunit.c (renamed from lib/string_helpers_kunit.c) | 0
-rw-r--r-- lib/tests/string_kunit.c (renamed from lib/string_kunit.c) | 4
-rw-r--r-- lib/tests/test_bits.c (renamed from lib/test_bits.c) | 0
-rw-r--r-- lib/tests/test_fprobe.c (renamed from lib/test_fprobe.c) | 51
-rw-r--r-- lib/tests/test_hash.c (renamed from lib/test_hash.c) | 0
-rw-r--r-- lib/tests/test_kprobes.c (renamed from lib/test_kprobes.c) | 0
-rw-r--r-- lib/tests/test_linear_ranges.c (renamed from lib/test_linear_ranges.c) | 0
-rw-r--r-- lib/tests/test_list_sort.c (renamed from lib/test_list_sort.c) | 0
-rw-r--r-- lib/tests/test_sort.c (renamed from lib/test_sort.c) | 0
-rw-r--r-- lib/tests/usercopy_kunit.c (renamed from lib/usercopy_kunit.c) | 0
-rw-r--r-- lib/tests/util_macros_kunit.c (renamed from lib/util_macros_kunit.c) | 0
-rw-r--r-- lib/ubsan.c | 28
-rw-r--r-- lib/ubsan.h | 8
-rw-r--r-- lib/vdso/Kconfig | 5
-rw-r--r-- lib/vdso/Makefile | 19
-rw-r--r-- lib/vdso/Makefile.include | 18
-rw-r--r-- lib/vdso/datastore.c | 129
-rw-r--r-- lib/vdso/getrandom.c | 8
-rw-r--r-- lib/vdso/gettimeofday.c | 196
-rw-r--r-- lib/vsprintf.c | 7
-rw-r--r-- lib/win_minmax.c | 1
-rw-r--r-- lib/xarray.c | 78
-rw-r--r-- lib/zstd/common/portability_macros.h | 2
106 files changed, 3059 insertions, 3090 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 5a318f753b2f..61cce0686b53 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -156,21 +156,17 @@ config CRC16
config CRC_T10DIF
tristate "CRC calculation for the T10 Data Integrity Field"
- select CRYPTO
- select CRYPTO_CRCT10DIF
help
This option is only needed if a module that's not in the
kernel tree needs to calculate CRC checks for use with the
SCSI data integrity subsystem.
-config CRC64_ROCKSOFT
- tristate "CRC calculation for the Rocksoft model CRC64"
- select CRC64
- select CRYPTO
- select CRYPTO_CRC64_ROCKSOFT
- help
- This option provides a CRC64 API to a registered crypto driver.
- This is used with the block layer's data integrity subsystem.
+config ARCH_HAS_CRC_T10DIF
+ bool
+
+config CRC_T10DIF_ARCH
+ tristate
+ default CRC_T10DIF if ARCH_HAS_CRC_T10DIF && CRC_OPTIMIZATIONS
config CRC_ITU_T
tristate "CRC ITU-T V.41 functions"
@@ -190,103 +186,49 @@ config CRC32
the kernel tree does. Such modules that use library CRC32/CRC32c
functions require M here.
-config CRC32_SELFTEST
- tristate "CRC32 perform self test on init"
- depends on CRC32
- help
- This option enables the CRC32 library functions to perform a
- self test on initialization. The self test computes crc32_le
- and crc32_be over byte strings with random alignment and length
- and computes the total elapsed time and number of bytes processed.
-
-choice
- prompt "CRC32 implementation"
- depends on CRC32
- default CRC32_SLICEBY8
- help
- This option allows a kernel builder to override the default choice
- of CRC32 algorithm. Choose the default ("slice by 8") unless you
- know that you need one of the others.
-
-config CRC32_SLICEBY8
- bool "Slice by 8 bytes"
- help
- Calculate checksum 8 bytes at a time with a clever slicing algorithm.
- This is the fastest algorithm, but comes with a 8KiB lookup table.
- Most modern processors have enough cache to hold this table without
- thrashing the cache.
-
- This is the default implementation choice. Choose this one unless
- you have a good reason not to.
-
-config CRC32_SLICEBY4
- bool "Slice by 4 bytes"
- help
- Calculate checksum 4 bytes at a time with a clever slicing algorithm.
- This is a bit slower than slice by 8, but has a smaller 4KiB lookup
- table.
-
- Only choose this option if you know what you are doing.
-
-config CRC32_SARWATE
- bool "Sarwate's Algorithm (one byte at a time)"
- help
- Calculate checksum a byte at a time using Sarwate's algorithm. This
- is not particularly fast, but has a small 256 byte lookup table.
-
- Only choose this option if you know what you are doing.
+config ARCH_HAS_CRC32
+ bool
-config CRC32_BIT
- bool "Classic Algorithm (one bit at a time)"
- help
- Calculate checksum one bit at a time. This is VERY slow, but has
- no lookup table. This is provided as a debugging option.
+config CRC32_ARCH
+ tristate
+ default CRC32 if ARCH_HAS_CRC32 && CRC_OPTIMIZATIONS
- Only choose this option if you are debugging crc32.
+config CRC64
+ tristate
-endchoice
+config ARCH_HAS_CRC64
+ bool
-config CRC64
- tristate "CRC64 functions"
- help
- This option is provided for the case where no in-kernel-tree
- modules require CRC64 functions, but a module built outside
- the kernel tree does. Such modules that use library CRC64
- functions require M here.
+config CRC64_ARCH
+ tristate
+ default CRC64 if ARCH_HAS_CRC64 && CRC_OPTIMIZATIONS
config CRC4
- tristate "CRC4 functions"
- help
- This option is provided for the case where no in-kernel-tree
- modules require CRC4 functions, but a module built outside
- the kernel tree does. Such modules that use library CRC4
- functions require M here.
+ tristate
config CRC7
- tristate "CRC7 functions"
- help
- This option is provided for the case where no in-kernel-tree
- modules require CRC7 functions, but a module built outside
- the kernel tree does. Such modules that use library CRC7
- functions require M here.
+ tristate
config LIBCRC32C
- tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
- select CRYPTO
- select CRYPTO_CRC32C
+ tristate
+ select CRC32
help
- This option is provided for the case where no in-kernel-tree
- modules require CRC32c functions, but a module built outside the
- kernel tree does. Such modules that use library CRC32c functions
- require M here. See Castagnoli93.
- Module will be libcrc32c.
+ This option just selects CRC32 and is provided for compatibility
+ purposes until the users are updated to select CRC32 directly.
config CRC8
- tristate "CRC8 function"
+ tristate
+
+config CRC_OPTIMIZATIONS
+ bool "Enable optimized CRC implementations" if EXPERT
+ default y
help
- This option provides CRC8 function. Drivers may select this
- when they need to do cyclic redundancy check according CRC8
- algorithm. Module will be called crc8.
+ Disabling this option reduces code size slightly by disabling the
+ architecture-optimized implementations of any CRC variants that are
+ enabled. CRC checksumming performance may get much slower.
+
+ Keep this enabled unless you're really trying to minimize the size of
+ the kernel.
config XXHASH
tristate
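Note on the new Kconfig shape above: an architecture advertises a fast implementation by selecting ARCH_HAS_CRC32 (or ARCH_HAS_CRC_T10DIF / ARCH_HAS_CRC64), and the hidden CRC32_ARCH symbol then inherits the tristate value of CRC32 whenever CRC_OPTIMIZATIONS is enabled. A minimal sketch of how a header can dispatch on such a symbol is shown below; crc32_le_arch() is an assumed name for the architecture hook and is not part of this diff, while crc32_le_base() is the generic routine exported by lib/crc32.c in this patch. This compile-time dispatch is consistent with dropping the crypto-layer indirection from lib/crc-t10dif.c later in the diff.

/* Sketch only, not the in-tree header. */
#include <linux/kconfig.h>
#include <linux/types.h>

u32 crc32_le_arch(u32 crc, const u8 *p, size_t len);	/* assumed arch hook */
u32 crc32_le_base(u32 crc, const u8 *p, size_t len);	/* generic, lib/crc32.c */

static inline u32 crc32_le_example(u32 crc, const u8 *p, size_t len)
{
	/* Use the arch-optimized routine when CONFIG_CRC32_ARCH is enabled. */
	if (IS_ENABLED(CONFIG_CRC32_ARCH))
		return crc32_le_arch(crc, p, len);
	return crc32_le_base(crc, p, len);
}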
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f3d723705879..c7086ed09cde 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -433,7 +433,7 @@ config GDB_SCRIPTS
build directory. If you load vmlinux into gdb, the helper
scripts will be automatically imported by gdb as well, and
additional functions are available to analyze a Linux kernel
- instance. See Documentation/dev-tools/gdb-kernel-debugging.rst
+ instance. See Documentation/process/debugging/gdb-kernel-debugging.rst
for further details.
endif # DEBUG_INFO
@@ -545,6 +545,17 @@ config FRAME_POINTER
config OBJTOOL
bool
+config OBJTOOL_WERROR
+ bool "Upgrade objtool warnings to errors"
+ depends on OBJTOOL && !COMPILE_TEST
+ help
+ Fail the build on objtool warnings.
+
+ Objtool warnings can indicate kernel instability, including boot
+ failures. This option is highly recommended.
+
+ If unsure, say Y.
+
config STACK_VALIDATION
bool "Compile-time stack metadata validation"
depends on HAVE_STACK_VALIDATION && UNWINDER_FRAME_POINTER
@@ -808,6 +819,15 @@ config ARCH_HAS_DEBUG_VM_PGTABLE
An architecture should select this when it can successfully
build and run DEBUG_VM_PGTABLE.
+config DEBUG_VFS
+ bool "Debug VFS"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on extended checks in the VFS layer that may impact
+ performance.
+
+ If unsure, say N.
+
config DEBUG_VM_IRQSOFF
def_bool DEBUG_VM && !PREEMPT_RT
@@ -1301,15 +1321,6 @@ endmenu # "Debug lockups and hangs"
menu "Scheduler Debugging"
-config SCHED_DEBUG
- bool "Collect scheduler debugging info"
- depends on DEBUG_KERNEL && DEBUG_FS
- default y
- help
- If you say Y here, the /sys/kernel/debug/sched file will be provided
- that can help debug the scheduler. The runtime overhead of this
- option is minimal.
-
config SCHED_INFO
bool
default n
@@ -1397,9 +1408,9 @@ config PROVE_LOCKING
For more details, see Documentation/locking/lockdep-design.rst.
config PROVE_RAW_LOCK_NESTING
- bool
+ bool "Enable raw_spinlock - spinlock nesting checks" if !ARCH_SUPPORTS_RT
depends on PROVE_LOCKING
- default y
+ default y if ARCH_SUPPORTS_RT
help
Enable the raw_spinlock vs. spinlock nesting checks which ensure
that the lock nesting rules for PREEMPT_RT enabled kernels are
@@ -1502,15 +1513,15 @@ config LOCKDEP_SMALL
bool
config LOCKDEP_BITS
- int "Bitsize for MAX_LOCKDEP_ENTRIES"
+ int "Size for MAX_LOCKDEP_ENTRIES (as Nth power of 2)"
depends on LOCKDEP && !LOCKDEP_SMALL
- range 10 30
+ range 10 24
default 15
help
Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message.
config LOCKDEP_CHAINS_BITS
- int "Bitsize for MAX_LOCKDEP_CHAINS"
+ int "Size for MAX_LOCKDEP_CHAINS (as Nth power of 2)"
depends on LOCKDEP && !LOCKDEP_SMALL
range 10 21
default 16
@@ -1518,25 +1529,25 @@ config LOCKDEP_CHAINS_BITS
Try increasing this value if you hit "BUG: MAX_LOCKDEP_CHAINS too low!" message.
config LOCKDEP_STACK_TRACE_BITS
- int "Bitsize for MAX_STACK_TRACE_ENTRIES"
+ int "Size for MAX_STACK_TRACE_ENTRIES (as Nth power of 2)"
depends on LOCKDEP && !LOCKDEP_SMALL
- range 10 30
+ range 10 26
default 19
help
Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message.
config LOCKDEP_STACK_TRACE_HASH_BITS
- int "Bitsize for STACK_TRACE_HASH_SIZE"
+ int "Size for STACK_TRACE_HASH_SIZE (as Nth power of 2)"
depends on LOCKDEP && !LOCKDEP_SMALL
- range 10 30
+ range 10 26
default 14
help
Try increasing this value if you need large STACK_TRACE_HASH_SIZE.
config LOCKDEP_CIRCULAR_QUEUE_BITS
- int "Bitsize for elements in circular_queue struct"
+ int "Size for elements in circular_queue struct (as Nth power of 2)"
depends on LOCKDEP
- range 10 30
+ range 10 26
default 12
help
Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure.
@@ -2103,7 +2114,7 @@ config FAIL_SKB_REALLOC
reallocated, catching possible invalid pointers to the skb.
For more information, check
- Documentation/dev-tools/fault-injection/fault-injection.rst
+ Documentation/fault-injection/fault-injection.rst
config FAULT_INJECTION_CONFIGFS
bool "Configfs interface for fault-injection capabilities"
@@ -2269,7 +2280,6 @@ config TEST_LIST_SORT
config TEST_MIN_HEAP
tristate "Min heap test"
depends on DEBUG_KERNEL || m
- select MIN_HEAP
help
Enable this to turn on min heap function tests. This test is
executed only once during system boot (so affects only boot time),
@@ -2428,6 +2438,24 @@ config ASYNC_RAID6_TEST
config TEST_HEXDUMP
tristate "Test functions located in the hexdump module at runtime"
+config PRINTF_KUNIT_TEST
+ tristate "KUnit test printf() family of functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the printf functions at runtime.
+
+ If unsure, say N.
+
+config SCANF_KUNIT_TEST
+ tristate "KUnit test scanf() family of functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the scanf functions at runtime.
+
+ If unsure, say N.
+
config STRING_KUNIT_TEST
tristate "KUnit test string functions at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2441,12 +2469,6 @@ config STRING_HELPERS_KUNIT_TEST
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
-config TEST_PRINTF
- tristate "Test printf() family of functions at runtime"
-
-config TEST_SCANF
- tristate "Test scanf() family of functions at runtime"
-
config TEST_BITMAP
tristate "Test bitmap_*() family of functions at runtime"
help
@@ -2479,6 +2501,17 @@ config TEST_RHASHTABLE
config TEST_IDA
tristate "Perform selftest on IDA functions"
+config TEST_MISC_MINOR
+ tristate "Basic misc minor Kunit test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+	  KUnit test for the misc minor.
+	  It tests the misc minor functions for dynamic and misc dynamic minors,
+	  including the misc_xxx functions.
+
+ If unsure, say N.
+
config TEST_PARMAN
tristate "Perform selftest on priority array manager"
depends on PARMAN
@@ -2681,6 +2714,20 @@ config SYSCTL_KUNIT_TEST
If unsure, say N.
+config KFIFO_KUNIT_TEST
+ tristate "KUnit Test for the generic kernel FIFO implementation" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the generic FIFO implementation KUnit test suite.
+	  It tests the API and basic functionality of the kfifo type
+	  and associated macros.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config LIST_KUNIT_TEST
tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2838,6 +2885,27 @@ config HW_BREAKPOINT_KUNIT_TEST
If unsure, say N.
+config CRC_KUNIT_TEST
+ tristate "KUnit tests for CRC functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ select CRC7
+ select CRC16
+ select CRC_T10DIF
+ select CRC32
+ select CRC64
+ help
+ Unit tests for the CRC library functions.
+
+ This is intended to help people writing architecture-specific
+ optimized versions. If unsure, say N.
+
+config CRC_BENCHMARK
+ bool "Benchmark for the CRC functions"
+ depends on CRC_KUNIT_TEST
+ help
+ Include benchmarks in the KUnit test suite for the CRC functions.
+
config SIPHASH_KUNIT_TEST
tristate "Perform selftest on siphash functions" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2858,15 +2926,6 @@ config USERCOPY_KUNIT_TEST
on the copy_to/from_user infrastructure, making sure basic
user/kernel boundary testing is working.
-config CRC16_KUNIT_TEST
- tristate "KUnit tests for CRC16"
- depends on KUNIT
- default KUNIT_ALL_TESTS
- select CRC16
- help
- Enable this option to run unit tests for the kernel's CRC16
- implementation (<linux/crc16.h>).
-
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -3145,7 +3204,7 @@ config TEST_OBJPOOL
If unsure, say N.
-config INT_POW_TEST
+config INT_POW_KUNIT_TEST
tristate "Integer exponentiation (int_pow) test" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
@@ -3161,6 +3220,59 @@ config INT_POW_TEST
If unsure, say N
+config INT_SQRT_KUNIT_TEST
+ tristate "Integer square root test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the int_sqrt() function,
+ which performs square root calculation. The test suite checks
+ various scenarios, including edge cases, to ensure correctness.
+
+ Enabling this option will include tests that check various scenarios
+ and edge cases to ensure the accuracy and reliability of the square root
+ function.
+
+ If unsure, say N
+
+config INT_LOG_KUNIT_TEST
+ tristate "Integer log (int_log) test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the int_log library, which
+ provides two functions to compute the integer logarithm in base 2 and
+	  base 10, called intlog2 and intlog10 respectively.
+
+ If unsure, say N
+
+config GCD_KUNIT_TEST
+ tristate "Greatest common divisor test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the gcd() function,
+ which computes the greatest common divisor of two numbers.
+
+ This test suite verifies the correctness of gcd() across various
+ scenarios, including edge cases.
+
+ If unsure, say N
+
+config PRIME_NUMBERS_KUNIT_TEST
+ tristate "Prime number generator test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ select PRIME_NUMBERS
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the {is,next}_prime_number
+ functions.
+
+ Enabling this option will include tests that compare the prime number
+ generator functions against a brute force implementation.
+
+ If unsure, say N
+
endif # RUNTIME_TESTING_MENU
config ARCH_USE_MEMTEST
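For orientation, each of the KUnit options added above (GCD_KUNIT_TEST, INT_SQRT_KUNIT_TEST, INT_LOG_KUNIT_TEST, PRIME_NUMBERS_KUNIT_TEST, ...) builds a suite out of the same few macros. The sketch below is an illustrative minimal gcd() case, not the contents of lib/math/tests/gcd_kunit.c.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative minimal KUnit suite; the real one is lib/math/tests/gcd_kunit.c. */
#include <kunit/test.h>
#include <linux/gcd.h>

static void gcd_example_case(struct kunit *test)
{
	/* gcd() returns the greatest common divisor of two unsigned longs. */
	KUNIT_EXPECT_EQ(test, gcd(12UL, 18UL), 6UL);
	KUNIT_EXPECT_EQ(test, gcd(7UL, 13UL), 1UL);
}

static struct kunit_case gcd_example_cases[] = {
	KUNIT_CASE(gcd_example_case),
	{}
};

static struct kunit_suite gcd_example_suite = {
	.name = "gcd-example",
	.test_cases = gcd_example_cases,
};
kunit_test_suite(gcd_example_suite);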
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 537e1b3f5734..8336b1a489a3 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -19,7 +19,7 @@ menuconfig KGDB
CONFIG_FRAME_POINTER to aid in producing more reliable stack
backtraces in the external debugger. Documentation of
kernel debugger is available at http://kgdb.sourceforge.net
- as well as in Documentation/dev-tools/kgdb.rst. If
+ as well as in Documentation/process/debugging/kgdb.rst. If
unsure, say N.
if KGDB
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 1d4aa7a83b3a..4216b3a4ff21 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -116,21 +116,22 @@ config UBSAN_UNREACHABLE
This option enables -fsanitize=unreachable which checks for control
flow reaching an expected-to-be-unreachable position.
-config UBSAN_SIGNED_WRAP
- bool "Perform checking for signed arithmetic wrap-around"
+config UBSAN_INTEGER_WRAP
+ bool "Perform checking for integer arithmetic wrap-around"
default UBSAN
depends on !COMPILE_TEST
- # The no_sanitize attribute was introduced in GCC with version 8.
- depends on !CC_IS_GCC || GCC_VERSION >= 80000
+ depends on $(cc-option,-fsanitize-undefined-ignore-overflow-pattern=all)
depends on $(cc-option,-fsanitize=signed-integer-overflow)
- help
- This option enables -fsanitize=signed-integer-overflow which checks
- for wrap-around of any arithmetic operations with signed integers.
- This currently performs nearly no instrumentation due to the
- kernel's use of -fno-strict-overflow which converts all would-be
- arithmetic undefined behavior into wrap-around arithmetic. Future
- sanitizer versions will allow for wrap-around checking (rather than
- exclusively undefined behavior).
+ depends on $(cc-option,-fsanitize=unsigned-integer-overflow)
+ depends on $(cc-option,-fsanitize=implicit-signed-integer-truncation)
+ depends on $(cc-option,-fsanitize=implicit-unsigned-integer-truncation)
+ depends on $(cc-option,-fsanitize-ignorelist=/dev/null)
+ help
+ This option enables all of the sanitizers involved in integer overflow
+ (wrap-around) mitigation: signed-integer-overflow, unsigned-integer-overflow,
+ implicit-signed-integer-truncation, and implicit-unsigned-integer-truncation.
+ This is currently limited only to the size_t type while testing and
+ compiler development continues.
config UBSAN_BOOL
bool "Perform checking for non-boolean values used as boolean"
diff --git a/lib/Makefile b/lib/Makefile
index a8155c972f02..0fdcd6d635ad 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,6 +5,11 @@
ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE)
+# Branch profiling isn't noinstr-safe
+ifdef CONFIG_TRACE_BRANCH_PROFILING
+CFLAGS_smp_processor_id.o += -DDISABLE_BRANCH_PROFILING
+endif
+
# These files are disabled because they produce lots of non-interesting and/or
# flaky coverage that is not a function of syscall inputs. For example,
# rbtree can be global and individual rotations don't correlate with inputs.
@@ -52,9 +57,7 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
percpu-refcount.o rhashtable.o base64.o \
once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
generic-radix-tree.o bitmap-str.o
-obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
obj-y += string_helpers.o
-obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
obj-y += hexdump.o
obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
@@ -65,27 +68,20 @@ obj-$(CONFIG_TEST_DHRY) += test_dhry.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
CFLAGS_test_bitops.o += -Werror
-obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
-obj-$(CONFIG_TEST_IOV_ITER) += kunit_iov_iter.o
-obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
CFLAGS_test_ubsan.o += $(call cc-disable-warning, unused-but-set-variable)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
-obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
-obj-$(CONFIG_TEST_SORT) += test_sort.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
-obj-$(CONFIG_TEST_PRINTF) += test_printf.o
-obj-$(CONFIG_TEST_SCANF) += test_scanf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_KASAN),yy)
@@ -98,7 +94,6 @@ obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
-obj-$(CONFIG_TEST_RUNTIME) += tests/
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
@@ -107,10 +102,7 @@ obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
obj-$(CONFIG_TEST_HMM) += test_hmm.o
obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
-obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
-CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
-obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
obj-$(CONFIG_TEST_OBJPOOL) += test_objpool.o
obj-$(CONFIG_TEST_FPU) += test_fpu.o
@@ -132,7 +124,7 @@ endif
obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
-obj-y += math/ crypto/
+obj-y += math/ crypto/ tests/ vdso/
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
@@ -164,12 +156,9 @@ obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_CRC64) += crc64.o
-obj-$(CONFIG_CRC32_SELFTEST) += crc32test.o
obj-$(CONFIG_CRC4) += crc4.o
obj-$(CONFIG_CRC7) += crc7.o
-obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_CRC8) += crc8.o
-obj-$(CONFIG_CRC64_ROCKSOFT) += crc64-rocksoft.o
obj-$(CONFIG_XXHASH) += xxhash.o
obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
@@ -370,32 +359,6 @@ obj-$(CONFIG_OBJAGG) += objagg.o
# pldmfw library
obj-$(CONFIG_PLDMFW) += pldmfw/
-# KUnit tests
-CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
-obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
-obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
-obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
-obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
-obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
-obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
-obj-$(CONFIG_BITS_TEST) += test_bits.o
-obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
-obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
-obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
-obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
-CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
-obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
-CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
-obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
-CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
-CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
-CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
-CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
-obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
-obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
-obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
-obj-$(CONFIG_CRC16_KUNIT_TEST) += crc16_kunit.o
-
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 65e706e1bc19..19b45617bdcf 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -29,6 +29,8 @@ EXPORT_SYMBOL(_shared_alloc_tag);
DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
mem_alloc_profiling_key);
+EXPORT_SYMBOL(mem_alloc_profiling_key);
+
DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);
struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
@@ -423,8 +425,8 @@ static int vm_module_tags_populate(void)
unsigned long nr;
more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
- nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
- NUMA_NO_NODE, more_pages, next_page);
+ nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
+ NUMA_NO_NODE, more_pages, next_page);
if (nr < more_pages ||
vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
next_page, PAGE_SHIFT) < 0) {
diff --git a/lib/atomic64.c b/lib/atomic64.c
index caf895789a1e..1a72bba36d24 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -25,15 +25,15 @@
* Ensure each lock is in a separate cacheline.
*/
static union {
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
[0 ... (NR_LOCKS - 1)] = {
- .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+ .lock = __ARCH_SPIN_LOCK_UNLOCKED,
},
};
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
{
unsigned long addr = (unsigned long) v;
@@ -45,12 +45,14 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
s64 generic_atomic64_read(const atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_read);
@@ -58,11 +60,13 @@ EXPORT_SYMBOL(generic_atomic64_read);
void generic_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
v->counter = i;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
}
EXPORT_SYMBOL(generic_atomic64_set);
@@ -70,11 +74,13 @@ EXPORT_SYMBOL(generic_atomic64_set);
void generic_atomic64_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ arch_spinlock_t *lock = lock_addr(v); \
\
- raw_spin_lock_irqsave(lock, flags); \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
v->counter c_op a; \
- raw_spin_unlock_irqrestore(lock, flags); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
} \
EXPORT_SYMBOL(generic_atomic64_##op);
@@ -82,12 +88,14 @@ EXPORT_SYMBOL(generic_atomic64_##op);
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ arch_spinlock_t *lock = lock_addr(v); \
s64 val; \
\
- raw_spin_lock_irqsave(lock, flags); \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
val = (v->counter c_op a); \
- raw_spin_unlock_irqrestore(lock, flags); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
return val; \
} \
EXPORT_SYMBOL(generic_atomic64_##op##_return);
@@ -96,13 +104,15 @@ EXPORT_SYMBOL(generic_atomic64_##op##_return);
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ arch_spinlock_t *lock = lock_addr(v); \
s64 val; \
\
- raw_spin_lock_irqsave(lock, flags); \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
val = v->counter; \
v->counter c_op a; \
- raw_spin_unlock_irqrestore(lock, flags); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
return val; \
} \
EXPORT_SYMBOL(generic_atomic64_fetch_##op);
@@ -131,14 +141,16 @@ ATOMIC64_OPS(xor, ^=)
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter - 1;
if (val >= 0)
v->counter = val;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
@@ -146,14 +158,16 @@ EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
if (val == o)
v->counter = n;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);
@@ -161,13 +175,15 @@ EXPORT_SYMBOL(generic_atomic64_cmpxchg);
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
v->counter = new;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);
@@ -175,14 +191,16 @@ EXPORT_SYMBOL(generic_atomic64_xchg);
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
if (val != u)
v->counter += a;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 759ea1783cc5..d726068358c7 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -254,7 +254,7 @@ static __init int test_atomics_init(void)
pr_info("passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
"x86-64",
-#elif defined(CONFIG_X86_CMPXCHG64)
+#elif defined(CONFIG_X86_CX8)
"i586+",
#else
"i386+",
diff --git a/lib/cpumask.c b/lib/cpumask.c
index e77ee9d46f71..5adb9874fbd0 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -7,38 +7,6 @@
#include <linux/memblock.h>
#include <linux/numa.h>
-/**
- * cpumask_next_wrap - helper to implement for_each_cpu_wrap
- * @n: the cpu prior to the place to search
- * @mask: the cpumask pointer
- * @start: the start point of the iteration
- * @wrap: assume @n crossing @start terminates the iteration
- *
- * Return: >= nr_cpu_ids on completion
- *
- * Note: the @wrap argument is required for the start condition when
- * we cannot assume @start is set in @mask.
- */
-unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
-{
- unsigned int next;
-
-again:
- next = cpumask_next(n, mask);
-
- if (wrap && n < start && next >= start) {
- return nr_cpumask_bits;
-
- } else if (next >= nr_cpumask_bits) {
- wrap = true;
- n = -1;
- goto again;
- }
-
- return next;
-}
-EXPORT_SYMBOL(cpumask_next_wrap);
-
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
@@ -83,10 +51,7 @@ EXPORT_SYMBOL(alloc_cpumask_var_node);
*/
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
- *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
- if (!*mask)
- panic("%s: Failed to allocate %u bytes\n", __func__,
- cpumask_size());
+ *mask = memblock_alloc_or_panic(cpumask_size(), SMP_CACHE_BYTES);
}
/**
@@ -174,8 +139,7 @@ unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);
- next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits, prev + 1);
+ next = cpumask_next_and_wrap(prev, src1p, src2p);
if (next < nr_cpu_ids)
__this_cpu_write(distribute_cpu_mask_prev, next);
@@ -195,7 +159,7 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp)
/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);
- next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
+ next = cpumask_next_wrap(prev, srcp);
if (next < nr_cpu_ids)
__this_cpu_write(distribute_cpu_mask_prev, next);
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 1ed2ed487097..311c2ab829f1 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -9,123 +9,57 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-t10dif.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <crypto/hash.h>
-#include <crypto/algapi.h>
-#include <linux/static_key.h>
-#include <linux/notifier.h>
-static struct crypto_shash __rcu *crct10dif_tfm;
-static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback);
-static DEFINE_MUTEX(crc_t10dif_mutex);
-static struct work_struct crct10dif_rehash_work;
-
-static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data)
-{
- struct crypto_alg *alg = data;
-
- if (val != CRYPTO_MSG_ALG_LOADED ||
- strcmp(alg->cra_name, CRC_T10DIF_STRING))
- return NOTIFY_DONE;
-
- schedule_work(&crct10dif_rehash_work);
- return NOTIFY_OK;
-}
-
-static void crc_t10dif_rehash(struct work_struct *work)
-{
- struct crypto_shash *new, *old;
-
- mutex_lock(&crc_t10dif_mutex);
- old = rcu_dereference_protected(crct10dif_tfm,
- lockdep_is_held(&crc_t10dif_mutex));
- new = crypto_alloc_shash(CRC_T10DIF_STRING, 0, 0);
- if (IS_ERR(new)) {
- mutex_unlock(&crc_t10dif_mutex);
- return;
- }
- rcu_assign_pointer(crct10dif_tfm, new);
- mutex_unlock(&crc_t10dif_mutex);
-
- if (old) {
- synchronize_rcu();
- crypto_free_shash(old);
- } else {
- static_branch_disable(&crct10dif_fallback);
- }
-}
-
-static struct notifier_block crc_t10dif_nb = {
- .notifier_call = crc_t10dif_notify,
+/*
+ * Table generated using the following polynomial:
+ * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
+ * gt: 0x8bb7
+ */
+static const u16 t10_dif_crc_table[256] = {
+ 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
+ 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
+ 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
+ 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
+ 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
+ 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
+ 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
+ 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
+ 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
+ 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
+ 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
+ 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
+ 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
+ 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
+ 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
+ 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
+ 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
+ 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
+ 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
+ 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
+ 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
+ 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
+ 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
+ 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
+ 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
+ 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
+ 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
+ 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
+ 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
+ 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
+ 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
+ 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
};
-__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
-{
- struct {
- struct shash_desc shash;
- __u16 crc;
- } desc;
- int err;
-
- if (static_branch_unlikely(&crct10dif_fallback))
- return crc_t10dif_generic(crc, buffer, len);
-
- rcu_read_lock();
- desc.shash.tfm = rcu_dereference(crct10dif_tfm);
- desc.crc = crc;
- err = crypto_shash_update(&desc.shash, buffer, len);
- rcu_read_unlock();
-
- BUG_ON(err);
-
- return desc.crc;
-}
-EXPORT_SYMBOL(crc_t10dif_update);
-
-__u16 crc_t10dif(const unsigned char *buffer, size_t len)
-{
- return crc_t10dif_update(0, buffer, len);
-}
-EXPORT_SYMBOL(crc_t10dif);
-
-static int __init crc_t10dif_mod_init(void)
-{
- INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash);
- crypto_register_notifier(&crc_t10dif_nb);
- crc_t10dif_rehash(&crct10dif_rehash_work);
- return 0;
-}
-
-static void __exit crc_t10dif_mod_fini(void)
-{
- crypto_unregister_notifier(&crc_t10dif_nb);
- cancel_work_sync(&crct10dif_rehash_work);
- crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1));
-}
-
-module_init(crc_t10dif_mod_init);
-module_exit(crc_t10dif_mod_fini);
-
-static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
+u16 crc_t10dif_generic(u16 crc, const u8 *p, size_t len)
{
- struct crypto_shash *tfm;
- int len;
+ size_t i;
- if (static_branch_unlikely(&crct10dif_fallback))
- return sprintf(buffer, "fallback\n");
+ for (i = 0; i < len; i++)
+ crc = (crc << 8) ^ t10_dif_crc_table[(crc >> 8) ^ p[i]];
- rcu_read_lock();
- tfm = rcu_dereference(crct10dif_tfm);
- len = snprintf(buffer, PAGE_SIZE, "%s\n",
- crypto_shash_driver_name(tfm));
- rcu_read_unlock();
-
- return len;
+ return crc;
}
+EXPORT_SYMBOL(crc_t10dif_generic);
-module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0444);
-
-MODULE_DESCRIPTION("T10 DIF CRC calculation (library API)");
+MODULE_DESCRIPTION("T10 DIF CRC calculation");
MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: crct10dif");
diff --git a/lib/crc16_kunit.c b/lib/crc16_kunit.c
deleted file mode 100644
index 0918c98a96d2..000000000000
--- a/lib/crc16_kunit.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * KUnits tests for CRC16.
- *
- * Copyright (C) 2024, LKCAMP
- * Author: Vinicius Peixoto <vpeixoto@lkcamp.dev>
- * Author: Fabricio Gasperin <fgasperin@lkcamp.dev>
- * Author: Enzo Bertoloti <ebertoloti@lkcamp.dev>
- */
-#include <kunit/test.h>
-#include <linux/crc16.h>
-#include <linux/prandom.h>
-
-#define CRC16_KUNIT_DATA_SIZE 4096
-#define CRC16_KUNIT_TEST_SIZE 100
-#define CRC16_KUNIT_SEED 0x12345678
-
-/**
- * struct crc16_test - CRC16 test data
- * @crc: initial input value to CRC16
- * @start: Start index within the data buffer
- * @length: Length of the data
- */
-static struct crc16_test {
- u16 crc;
- u16 start;
- u16 length;
-} tests[CRC16_KUNIT_TEST_SIZE];
-
-u8 data[CRC16_KUNIT_DATA_SIZE];
-
-
-/* Naive implementation of CRC16 for validation purposes */
-static inline u16 _crc16_naive_byte(u16 crc, u8 data)
-{
- u8 i = 0;
-
- crc ^= (u16) data;
- for (i = 0; i < 8; i++) {
- if (crc & 0x01)
- crc = (crc >> 1) ^ 0xa001;
- else
- crc = crc >> 1;
- }
-
- return crc;
-}
-
-
-static inline u16 _crc16_naive(u16 crc, u8 *buffer, size_t len)
-{
- while (len--)
- crc = _crc16_naive_byte(crc, *buffer++);
- return crc;
-}
-
-
-/* Small helper for generating pseudorandom 16-bit data */
-static inline u16 _rand16(void)
-{
- static u32 rand = CRC16_KUNIT_SEED;
-
- rand = next_pseudo_random32(rand);
- return rand & 0xFFFF;
-}
-
-
-static int crc16_init_test_data(struct kunit_suite *suite)
-{
- size_t i;
-
- /* Fill the data buffer with random bytes */
- for (i = 0; i < CRC16_KUNIT_DATA_SIZE; i++)
- data[i] = _rand16() & 0xFF;
-
- /* Generate random test data while ensuring the random
- * start + length values won't overflow the 4096-byte
- * buffer (0x7FF * 2 = 0xFFE < 0x1000)
- */
- for (size_t i = 0; i < CRC16_KUNIT_TEST_SIZE; i++) {
- tests[i].crc = _rand16();
- tests[i].start = _rand16() & 0x7FF;
- tests[i].length = _rand16() & 0x7FF;
- }
-
- return 0;
-}
-
-static void crc16_test_empty(struct kunit *test)
-{
- u16 crc;
-
- /* The result for empty data should be the same as the
- * initial crc
- */
- crc = crc16(0x00, data, 0);
- KUNIT_EXPECT_EQ(test, crc, 0);
- crc = crc16(0xFF, data, 0);
- KUNIT_EXPECT_EQ(test, crc, 0xFF);
-}
-
-static void crc16_test_correctness(struct kunit *test)
-{
- size_t i;
- u16 crc, crc_naive;
-
- for (i = 0; i < CRC16_KUNIT_TEST_SIZE; i++) {
- /* Compare results with the naive crc16 implementation */
- crc = crc16(tests[i].crc, data + tests[i].start,
- tests[i].length);
- crc_naive = _crc16_naive(tests[i].crc, data + tests[i].start,
- tests[i].length);
- KUNIT_EXPECT_EQ(test, crc, crc_naive);
- }
-}
-
-
-static void crc16_test_combine(struct kunit *test)
-{
- size_t i, j;
- u16 crc, crc_naive;
-
- /* Make sure that combining two consecutive crc16 calculations
- * yields the same result as calculating the crc16 for the whole thing
- */
- for (i = 0; i < CRC16_KUNIT_TEST_SIZE; i++) {
- crc_naive = crc16(tests[i].crc, data + tests[i].start, tests[i].length);
- for (j = 0; j < tests[i].length; j++) {
- crc = crc16(tests[i].crc, data + tests[i].start, j);
- crc = crc16(crc, data + tests[i].start + j, tests[i].length - j);
- KUNIT_EXPECT_EQ(test, crc, crc_naive);
- }
- }
-}
-
-
-static struct kunit_case crc16_test_cases[] = {
- KUNIT_CASE(crc16_test_empty),
- KUNIT_CASE(crc16_test_combine),
- KUNIT_CASE(crc16_test_correctness),
- {},
-};
-
-static struct kunit_suite crc16_test_suite = {
- .name = "crc16",
- .test_cases = crc16_test_cases,
- .suite_init = crc16_init_test_data,
-};
-kunit_test_suite(crc16_test_suite);
-
-MODULE_AUTHOR("Fabricio Gasperin <fgasperin@lkcamp.dev>");
-MODULE_AUTHOR("Vinicius Peixoto <vpeixoto@lkcamp.dev>");
-MODULE_AUTHOR("Enzo Bertoloti <ebertoloti@lkcamp.dev>");
-MODULE_DESCRIPTION("Unit tests for crc16");
-MODULE_LICENSE("GPL");
diff --git a/lib/crc32.c b/lib/crc32.c
index ff587fee3893..fddd424ff224 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -30,20 +30,6 @@
#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/sched.h>
-#include "crc32defs.h"
-
-#if CRC_LE_BITS > 8
-# define tole(x) ((__force u32) cpu_to_le32(x))
-#else
-# define tole(x) (x)
-#endif
-
-#if CRC_BE_BITS > 8
-# define tobe(x) ((__force u32) cpu_to_be32(x))
-#else
-# define tobe(x) (x)
-#endif
#include "crc32table.h"
@@ -51,173 +37,28 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Various CRC32 calculations");
MODULE_LICENSE("GPL");
-#if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
-
-/* implements slicing-by-4 or slicing-by-8 algorithm */
-static inline u32 __pure
-crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
+u32 crc32_le_base(u32 crc, const u8 *p, size_t len)
{
-# ifdef __LITTLE_ENDIAN
-# define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8)
-# define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \
- t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255])
-# define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \
- t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255])
-# else
-# define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
-# define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \
- t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255])
-# define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \
- t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255])
-# endif
- const u32 *b;
- size_t rem_len;
-# ifdef CONFIG_X86
- size_t i;
-# endif
- const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3];
-# if CRC_LE_BITS != 32
- const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7];
-# endif
- u32 q;
-
- /* Align it */
- if (unlikely((long)buf & 3 && len)) {
- do {
- DO_CRC(*buf++);
- } while ((--len) && ((long)buf)&3);
- }
-
-# if CRC_LE_BITS == 32
- rem_len = len & 3;
- len = len >> 2;
-# else
- rem_len = len & 7;
- len = len >> 3;
-# endif
-
- b = (const u32 *)buf;
-# ifdef CONFIG_X86
- --b;
- for (i = 0; i < len; i++) {
-# else
- for (--b; len; --len) {
-# endif
- q = crc ^ *++b; /* use pre increment for speed */
-# if CRC_LE_BITS == 32
- crc = DO_CRC4;
-# else
- crc = DO_CRC8;
- q = *++b;
- crc ^= DO_CRC4;
-# endif
- }
- len = rem_len;
- /* And the last few bytes */
- if (len) {
- u8 *p = (u8 *)(b + 1) - 1;
-# ifdef CONFIG_X86
- for (i = 0; i < len; i++)
- DO_CRC(*++p); /* use pre increment for speed */
-# else
- do {
- DO_CRC(*++p); /* use pre increment for speed */
- } while (--len);
-# endif
- }
+ while (len--)
+ crc = (crc >> 8) ^ crc32table_le[(crc & 255) ^ *p++];
return crc;
-#undef DO_CRC
-#undef DO_CRC4
-#undef DO_CRC8
}
-#endif
-
+EXPORT_SYMBOL(crc32_le_base);
-/**
- * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
- * CRC32/CRC32C
- * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for other
- * uses, or the previous crc32/crc32c value if computing incrementally.
- * @p: pointer to buffer over which CRC32/CRC32C is run
- * @len: length of buffer @p
- * @tab: little-endian Ethernet table
- * @polynomial: CRC32/CRC32c LE polynomial
- */
-static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
- size_t len, const u32 (*tab)[256],
- u32 polynomial)
+u32 crc32c_base(u32 crc, const u8 *p, size_t len)
{
-#if CRC_LE_BITS == 1
- int i;
- while (len--) {
- crc ^= *p++;
- for (i = 0; i < 8; i++)
- crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
- }
-# elif CRC_LE_BITS == 2
- while (len--) {
- crc ^= *p++;
- crc = (crc >> 2) ^ tab[0][crc & 3];
- crc = (crc >> 2) ^ tab[0][crc & 3];
- crc = (crc >> 2) ^ tab[0][crc & 3];
- crc = (crc >> 2) ^ tab[0][crc & 3];
- }
-# elif CRC_LE_BITS == 4
- while (len--) {
- crc ^= *p++;
- crc = (crc >> 4) ^ tab[0][crc & 15];
- crc = (crc >> 4) ^ tab[0][crc & 15];
- }
-# elif CRC_LE_BITS == 8
- /* aka Sarwate algorithm */
- while (len--) {
- crc ^= *p++;
- crc = (crc >> 8) ^ tab[0][crc & 255];
- }
-# else
- crc = (__force u32) __cpu_to_le32(crc);
- crc = crc32_body(crc, p, len, tab);
- crc = __le32_to_cpu((__force __le32)crc);
-#endif
+ while (len--)
+ crc = (crc >> 8) ^ crc32ctable_le[(crc & 255) ^ *p++];
return crc;
}
-
-#if CRC_LE_BITS == 1
-u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
-}
-u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE);
-}
-#else
-u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, crc32table_le, CRC32_POLY_LE);
-}
-u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
-}
-#endif
-EXPORT_SYMBOL(crc32_le);
-EXPORT_SYMBOL(__crc32c_le);
-
-u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
-EXPORT_SYMBOL(crc32_le_base);
-
-u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
-EXPORT_SYMBOL(__crc32c_le_base);
-
-u32 __pure crc32_be_base(u32, unsigned char const *, size_t) __alias(crc32_be);
+EXPORT_SYMBOL(crc32c_base);
/*
* This multiplies the polynomials x and y modulo the given modulus.
* This follows the "little-endian" CRC convention that the lsbit
* represents the highest power of x, and the msbit represents x^0.
*/
-static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
+static u32 gf2_multiply(u32 x, u32 y, u32 modulus)
{
u32 product = x & 1 ? y : 0;
int i;
@@ -243,8 +84,7 @@ static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
* as appending len bytes of zero to the data), in time proportional
* to log(len).
*/
-static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
- u32 polynomial)
+static u32 crc32_generic_shift(u32 crc, size_t len, u32 polynomial)
{
u32 power = polynomial; /* CRC of x^32 */
int i;
@@ -273,76 +113,22 @@ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
return crc;
}
-u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
+u32 crc32_le_shift(u32 crc, size_t len)
{
return crc32_generic_shift(crc, len, CRC32_POLY_LE);
}
+EXPORT_SYMBOL(crc32_le_shift);
-u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
+u32 crc32c_shift(u32 crc, size_t len)
{
return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
}
-EXPORT_SYMBOL(crc32_le_shift);
-EXPORT_SYMBOL(__crc32c_le_shift);
+EXPORT_SYMBOL(crc32c_shift);
-/**
- * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
- * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p: pointer to buffer over which CRC32 is run
- * @len: length of buffer @p
- * @tab: big-endian Ethernet table
- * @polynomial: CRC32 BE polynomial
- */
-static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
- size_t len, const u32 (*tab)[256],
- u32 polynomial)
+u32 crc32_be_base(u32 crc, const u8 *p, size_t len)
{
-#if CRC_BE_BITS == 1
- int i;
- while (len--) {
- crc ^= *p++ << 24;
- for (i = 0; i < 8; i++)
- crc =
- (crc << 1) ^ ((crc & 0x80000000) ? polynomial :
- 0);
- }
-# elif CRC_BE_BITS == 2
- while (len--) {
- crc ^= *p++ << 24;
- crc = (crc << 2) ^ tab[0][crc >> 30];
- crc = (crc << 2) ^ tab[0][crc >> 30];
- crc = (crc << 2) ^ tab[0][crc >> 30];
- crc = (crc << 2) ^ tab[0][crc >> 30];
- }
-# elif CRC_BE_BITS == 4
- while (len--) {
- crc ^= *p++ << 24;
- crc = (crc << 4) ^ tab[0][crc >> 28];
- crc = (crc << 4) ^ tab[0][crc >> 28];
- }
-# elif CRC_BE_BITS == 8
- while (len--) {
- crc ^= *p++ << 24;
- crc = (crc << 8) ^ tab[0][crc >> 24];
- }
-# else
- crc = (__force u32) __cpu_to_be32(crc);
- crc = crc32_body(crc, p, len, tab);
- crc = __be32_to_cpu((__force __be32)crc);
-# endif
+ while (len--)
+ crc = (crc << 8) ^ crc32table_be[(crc >> 24) ^ *p++];
return crc;
}
-
-#if CRC_BE_BITS == 1
-u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len)
-{
- return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
-}
-#else
-u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len)
-{
- return crc32_be_generic(crc, p, len, crc32table_be, CRC32_POLY_BE);
-}
-#endif
-EXPORT_SYMBOL(crc32_be);
+EXPORT_SYMBOL(crc32_be_base);
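crc32_le_shift() and crc32c_shift(), kept and exported above, extend a CRC as if len zero bytes had been appended, which is what makes CRCs of concatenated buffers combinable without re-reading the data. A sketch of that identity follows; the wrapper name is illustrative and not from this diff.

#include <linux/crc32.h>

/*
 * crc32_le(seed, A || B) == crc32_le_shift(crc32_le(seed, A), len_B)
 *                           ^ crc32_le(0, B)
 * provided the CRC of B was computed with a zero seed.
 */
static u32 crc32_le_concat_example(u32 crc_a, u32 crc_b_seed0, size_t len_b)
{
	return crc32_le_shift(crc_a, len_b) ^ crc_b_seed0;
}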
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
deleted file mode 100644
index 0c8fb5923e7e..000000000000
--- a/lib/crc32defs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* Try to choose an implementation variant via Kconfig */
-#ifdef CONFIG_CRC32_SLICEBY8
-# define CRC_LE_BITS 64
-# define CRC_BE_BITS 64
-#endif
-#ifdef CONFIG_CRC32_SLICEBY4
-# define CRC_LE_BITS 32
-# define CRC_BE_BITS 32
-#endif
-#ifdef CONFIG_CRC32_SARWATE
-# define CRC_LE_BITS 8
-# define CRC_BE_BITS 8
-#endif
-#ifdef CONFIG_CRC32_BIT
-# define CRC_LE_BITS 1
-# define CRC_BE_BITS 1
-#endif
-
-/*
- * How many bits at a time to use. Valid values are 1, 2, 4, 8, 32 and 64.
- * For less performance-sensitive, use 4 or 8 to save table size.
- * For larger systems choose same as CPU architecture as default.
- * This works well on X86_64, SPARC64 systems. This may require some
- * elaboration after experiments with other architectures.
- */
-#ifndef CRC_LE_BITS
-# ifdef CONFIG_64BIT
-# define CRC_LE_BITS 64
-# else
-# define CRC_LE_BITS 32
-# endif
-#endif
-#ifndef CRC_BE_BITS
-# ifdef CONFIG_64BIT
-# define CRC_BE_BITS 64
-# else
-# define CRC_BE_BITS 32
-# endif
-#endif
-
-/*
- * Little-endian CRC computation. Used with serial bit streams sent
- * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC.
- */
-#if CRC_LE_BITS > 64 || CRC_LE_BITS < 1 || CRC_LE_BITS == 16 || \
- CRC_LE_BITS & CRC_LE_BITS-1
-# error "CRC_LE_BITS must be one of {1, 2, 4, 8, 32, 64}"
-#endif
-
-/*
- * Big-endian CRC computation. Used with serial bit streams sent
- * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC.
- */
-#if CRC_BE_BITS > 64 || CRC_BE_BITS < 1 || CRC_BE_BITS == 16 || \
- CRC_BE_BITS & CRC_BE_BITS-1
-# error "CRC_BE_BITS must be one of {1, 2, 4, 8, 32, 64}"
-#endif
diff --git a/lib/crc32test.c b/lib/crc32test.c
deleted file mode 100644
index 9b4af79412c4..000000000000
--- a/lib/crc32test.c
+++ /dev/null
@@ -1,852 +0,0 @@
-/*
- * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
- * cleaned up code to current version of sparse and added the slicing-by-8
- * algorithm to the closely similar existing slicing-by-4 algorithm.
- *
- * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
- * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
- * Code was from the public domain, copyright abandoned. Code was
- * subsequently included in the kernel, thus was re-licensed under the
- * GNU GPL v2.
- *
- * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
- * Same crc32 function was used in 5 other places in the kernel.
- * I made one version, and deleted the others.
- * There are various incantations of crc32(). Some use a seed of 0 or ~0.
- * Some xor at the end with ~0. The generic crc32() function takes
- * seed as an argument, and doesn't xor at the end. Then individual
- * users can do whatever they need.
- * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
- * fs/jffs2 uses seed 0, doesn't xor with ~0.
- * fs/partitions/efi.c uses seed ~0, xor's with ~0.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
- */
-
-#include <linux/crc32.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-
-#include "crc32defs.h"
-
-/* 4096 random bytes */
-static u8 const __aligned(8) test_buf[] __initconst =
-{
- 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
- 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
- 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
- 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
- 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
- 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
- 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
- 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
- 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
- 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
- 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
- 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
- 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
- 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
- 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
- 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
- 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
- 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
- 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
- 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
- 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
- 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
- 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
- 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
- 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
- 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
- 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
- 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
- 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
- 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
- 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
- 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
- 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
- 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
- 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
- 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
- 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
- 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
- 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
- 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
- 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
- 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
- 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
- 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
- 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
- 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
- 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
- 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
- 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
- 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
- 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
- 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
- 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
- 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
- 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
- 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
- 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
- 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
- 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
- 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
- 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
- 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
- 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
- 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
- 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
- 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
- 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
- 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
- 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
- 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
- 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
- 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
- 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
- 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
- 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
- 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
- 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
- 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
- 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
- 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
- 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
- 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
- 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
- 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
- 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
- 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
- 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
- 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
- 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
- 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
- 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
- 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
- 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
- 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
- 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
- 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
- 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
- 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
- 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
- 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
- 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
- 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
- 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
- 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
- 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
- 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
- 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
- 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
- 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
- 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
- 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
- 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
- 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
- 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
- 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
- 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
- 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
- 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
- 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
- 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
- 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
- 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
- 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
- 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
- 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
- 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
- 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
- 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
- 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
- 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
- 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
- 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
- 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
- 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
- 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
- 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
- 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
- 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
- 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
- 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
- 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
- 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
- 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
- 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
- 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
- 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
- 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
- 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
- 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
- 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
- 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
- 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
- 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
- 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
- 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
- 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
- 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
- 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
- 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
- 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
- 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
- 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
- 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
- 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
- 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
- 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
- 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
- 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
- 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
- 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
- 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
- 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
- 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
- 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
- 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
- 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
- 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
- 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
- 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
- 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
- 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
- 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
- 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
- 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
- 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
- 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
- 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
- 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
- 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
- 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
- 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
- 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
- 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
- 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
- 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
- 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
- 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
- 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
- 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
- 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
- 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
- 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
- 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
- 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
- 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
- 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
- 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
- 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
- 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
- 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
- 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
- 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
- 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
- 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
- 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
- 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
- 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
- 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
- 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
- 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
- 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
- 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
- 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
- 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
- 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
- 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
- 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
- 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
- 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
- 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
- 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
- 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
- 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
- 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
- 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
- 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
- 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
- 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
- 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
- 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
- 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
- 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
- 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
- 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
- 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
- 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
- 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
- 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
- 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
- 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
- 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
- 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
- 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
- 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
- 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
- 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
- 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
- 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
- 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
- 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
- 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
- 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
- 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
- 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
- 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
- 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
- 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
- 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
- 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
- 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
- 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
- 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
- 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
- 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
- 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
- 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
- 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
- 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
- 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
- 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
- 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
- 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
- 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
- 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
- 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
- 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
- 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
- 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
- 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
- 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
- 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
- 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
- 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
- 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
- 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
- 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
- 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
- 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
- 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
- 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
- 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
- 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
- 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
- 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
- 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
- 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
- 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
- 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
- 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
- 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
- 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
- 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
- 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
- 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
- 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
- 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
- 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
- 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
- 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
- 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
- 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
- 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
- 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
- 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
- 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
- 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
- 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
- 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
- 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
- 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
- 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
- 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
- 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
- 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
- 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
- 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
- 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
- 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
- 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
- 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
- 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
- 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
- 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
- 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
- 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
- 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
- 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
- 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
- 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
- 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
- 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
- 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
- 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
- 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
- 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
- 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
- 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
- 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
- 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
- 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
- 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
- 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
- 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
- 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
- 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
- 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
- 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
- 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
- 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
- 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
- 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
- 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
- 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
- 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
- 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
- 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
- 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
- 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
- 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
- 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
- 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
- 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
- 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
- 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
- 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
- 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
- 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
- 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
- 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
- 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
- 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
- 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
- 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
- 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
- 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
- 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
- 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
- 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
- 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
- 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
- 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
- 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
- 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
- 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
- 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
- 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
- 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
- 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
- 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
- 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
- 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
- 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
- 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
- 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
- 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
- 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
- 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
- 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
- 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
- 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
- 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
- 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
- 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
- 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
- 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
- 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
- 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
- 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
- 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
- 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
- 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
- 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
- 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
- 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
- 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
- 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
- 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
- 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
- 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
- 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
- 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
- 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
- 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
- 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
- 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
- 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
- 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
- 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
- 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
- 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
- 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
- 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
- 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
- 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
- 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
- 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
- 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
- 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
- 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
- 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
- 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
- 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
- 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
- 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
- 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
- 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
- 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
- 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
- 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
- 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
- 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
- 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
- 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
- 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
- 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
- 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
- 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
- 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
- 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
- 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
- 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
- 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
- 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
- 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
- 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
- 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
- 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
- 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
- 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
- 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
- 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
- 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
- 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
- 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
- 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
- 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
- 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
- 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
- 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
- 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
- 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
- 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
- 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
- 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
- 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
- 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
- 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
- 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
- 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
- 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
- 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
- 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
-};
-
-/* 100 test cases */
-static struct crc_test {
- u32 crc; /* random starting crc */
- u32 start; /* random 6 bit offset in buf */
- u32 length; /* random 11 bit length of test */
- u32 crc_le; /* expected crc32_le result */
- u32 crc_be; /* expected crc32_be result */
- u32 crc32c_le; /* expected crc32c_le result */
-} const test[] __initconst =
-{
- {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
- {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
- {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8},
- {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a},
- {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152},
- {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7},
- {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc},
- {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2},
- {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d},
- {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5},
- {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f},
- {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a},
- {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8},
- {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa},
- {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801},
- {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597},
- {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b},
- {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a},
- {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d},
- {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982},
- {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18},
- {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7},
- {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3},
- {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5},
- {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59},
- {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e},
- {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603},
- {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060},
- {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072},
- {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59},
- {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213},
- {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41},
- {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5},
- {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2},
- {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a},
- {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2},
- {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b},
- {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1},
- {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba},
- {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62},
- {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe},
- {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988},
- {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be},
- {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546},
- {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc},
- {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69},
- {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a},
- {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2},
- {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd},
- {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb},
- {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b},
- {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76},
- {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339},
- {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9},
- {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548},
- {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de},
- {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59},
- {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b},
- {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73},
- {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11},
- {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c},
- {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b},
- {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb},
- {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc},
- {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196},
- {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a},
- {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de},
- {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9},
- {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0},
- {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60},
- {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6},
- {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c},
- {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73},
- {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7},
- {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf},
- {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83},
- {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867},
- {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211},
- {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2},
- {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874},
- {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f},
- {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff},
- {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95},
- {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd},
- {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06},
- {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784},
- {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616},
- {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c},
- {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c},
- {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d},
- {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d},
- {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272},
- {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb},
- {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b},
- {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e},
- {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23},
- {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672},
- {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86},
- {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd},
- {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48},
-};
-
-#include <linux/time.h>
-
-static int __init crc32c_test(void)
-{
- int i;
- int errors = 0;
- int bytes = 0;
- u64 nsec;
- unsigned long flags;
-
- /* keep static to prevent cache warming code from
- * getting eliminated by the compiler */
- static u32 crc;
-
- /* pre-warm the cache */
- for (i = 0; i < 100; i++) {
- bytes += test[i].length;
-
- crc ^= __crc32c_le(test[i].crc, test_buf +
- test[i].start, test[i].length);
- }
-
- /* reduce OS noise */
- local_irq_save(flags);
-
- nsec = ktime_get_ns();
- for (i = 0; i < 100; i++) {
- if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
- test[i].start, test[i].length))
- errors++;
- }
- nsec = ktime_get_ns() - nsec;
-
- local_irq_restore(flags);
-
- pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
-
- if (errors)
- pr_warn("crc32c: %d self tests failed\n", errors);
- else {
- pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n",
- bytes, nsec);
- }
-
- return 0;
-}
-
-static int __init crc32c_combine_test(void)
-{
- int i, j;
- int errors = 0, runs = 0;
-
- for (i = 0; i < 10; i++) {
- u32 crc_full;
-
- crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start,
- test[i].length);
- for (j = 0; j <= test[i].length; ++j) {
- u32 crc1, crc2;
- u32 len1 = j, len2 = test[i].length - j;
-
- crc1 = __crc32c_le(test[i].crc, test_buf +
- test[i].start, len1);
- crc2 = __crc32c_le(0, test_buf + test[i].start +
- len1, len2);
-
- if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) &&
- crc_full == test[i].crc32c_le))
- errors++;
- runs++;
- cond_resched();
- }
- }
-
- if (errors)
- pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs);
- else
- pr_info("crc32c_combine: %d self tests passed\n", runs);
-
- return 0;
-}
-
-static int __init crc32_test(void)
-{
- int i;
- int errors = 0;
- int bytes = 0;
- u64 nsec;
- unsigned long flags;
-
- /* keep static to prevent cache warming code from
- * getting eliminated by the compiler */
- static u32 crc;
-
- /* pre-warm the cache */
- for (i = 0; i < 100; i++) {
- bytes += 2*test[i].length;
-
- crc ^= crc32_le(test[i].crc, test_buf +
- test[i].start, test[i].length);
-
- crc ^= crc32_be(test[i].crc, test_buf +
- test[i].start, test[i].length);
- }
-
- /* reduce OS noise */
- local_irq_save(flags);
-
- nsec = ktime_get_ns();
- for (i = 0; i < 100; i++) {
- if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
- test[i].start, test[i].length))
- errors++;
-
- if (test[i].crc_be != crc32_be(test[i].crc, test_buf +
- test[i].start, test[i].length))
- errors++;
- }
- nsec = ktime_get_ns() - nsec;
-
- local_irq_restore(flags);
-
- pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
- CRC_LE_BITS, CRC_BE_BITS);
-
- if (errors)
- pr_warn("crc32: %d self tests failed\n", errors);
- else {
- pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n",
- bytes, nsec);
- }
-
- return 0;
-}
-
-static int __init crc32_combine_test(void)
-{
- int i, j;
- int errors = 0, runs = 0;
-
- for (i = 0; i < 10; i++) {
- u32 crc_full;
-
- crc_full = crc32_le(test[i].crc, test_buf + test[i].start,
- test[i].length);
- for (j = 0; j <= test[i].length; ++j) {
- u32 crc1, crc2;
- u32 len1 = j, len2 = test[i].length - j;
-
- crc1 = crc32_le(test[i].crc, test_buf +
- test[i].start, len1);
- crc2 = crc32_le(0, test_buf + test[i].start +
- len1, len2);
-
- if (!(crc_full == crc32_le_combine(crc1, crc2, len2) &&
- crc_full == test[i].crc_le))
- errors++;
- runs++;
- cond_resched();
- }
- }
-
- if (errors)
- pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs);
- else
- pr_info("crc32_combine: %d self tests passed\n", runs);
-
- return 0;
-}
-
-static int __init crc32test_init(void)
-{
- crc32_test();
- crc32c_test();
-
- crc32_combine_test();
- crc32c_combine_test();
-
- return 0;
-}
-
-static void __exit crc32_exit(void)
-{
-}
-
-module_init(crc32test_init);
-module_exit(crc32_exit);
-
-MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
-MODULE_DESCRIPTION("CRC32 selftest");
-MODULE_LICENSE("GPL");
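The combine tests deleted above exercised one specific identity: the CRC of a concatenation equals the combination of the CRCs of its halves. A minimal sketch of that property, using the same crc32_le()/crc32_le_combine() calls the removed test used, with seed and length handling as in that test:

#include <linux/crc32.h>

/* Returns true iff combining the CRCs of the two halves reproduces the CRC
 * of the whole buffer, which is what the deleted crc32_combine_test checked
 * for every possible split point. */
static bool crc32_combine_holds(u32 seed, const u8 *buf, size_t len1, size_t len2)
{
        u32 crc1 = crc32_le(seed, buf, len1);
        u32 crc2 = crc32_le(0, buf + len1, len2);

        return crc32_le_combine(crc1, crc2, len2) ==
               crc32_le(seed, buf, len1 + len2);
}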
diff --git a/lib/crc64-rocksoft.c b/lib/crc64-rocksoft.c
deleted file mode 100644
index fc9ae0da5df7..000000000000
--- a/lib/crc64-rocksoft.c
+++ /dev/null
@@ -1,126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/crc64.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <crypto/hash.h>
-#include <crypto/algapi.h>
-#include <linux/static_key.h>
-#include <linux/notifier.h>
-
-static struct crypto_shash __rcu *crc64_rocksoft_tfm;
-static DEFINE_STATIC_KEY_TRUE(crc64_rocksoft_fallback);
-static DEFINE_MUTEX(crc64_rocksoft_mutex);
-static struct work_struct crc64_rocksoft_rehash_work;
-
-static int crc64_rocksoft_notify(struct notifier_block *self, unsigned long val, void *data)
-{
- struct crypto_alg *alg = data;
-
- if (val != CRYPTO_MSG_ALG_LOADED ||
- strcmp(alg->cra_name, CRC64_ROCKSOFT_STRING))
- return NOTIFY_DONE;
-
- schedule_work(&crc64_rocksoft_rehash_work);
- return NOTIFY_OK;
-}
-
-static void crc64_rocksoft_rehash(struct work_struct *work)
-{
- struct crypto_shash *new, *old;
-
- mutex_lock(&crc64_rocksoft_mutex);
- old = rcu_dereference_protected(crc64_rocksoft_tfm,
- lockdep_is_held(&crc64_rocksoft_mutex));
- new = crypto_alloc_shash(CRC64_ROCKSOFT_STRING, 0, 0);
- if (IS_ERR(new)) {
- mutex_unlock(&crc64_rocksoft_mutex);
- return;
- }
- rcu_assign_pointer(crc64_rocksoft_tfm, new);
- mutex_unlock(&crc64_rocksoft_mutex);
-
- if (old) {
- synchronize_rcu();
- crypto_free_shash(old);
- } else {
- static_branch_disable(&crc64_rocksoft_fallback);
- }
-}
-
-static struct notifier_block crc64_rocksoft_nb = {
- .notifier_call = crc64_rocksoft_notify,
-};
-
-u64 crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len)
-{
- struct {
- struct shash_desc shash;
- u64 crc;
- } desc;
- int err;
-
- if (static_branch_unlikely(&crc64_rocksoft_fallback))
- return crc64_rocksoft_generic(crc, buffer, len);
-
- rcu_read_lock();
- desc.shash.tfm = rcu_dereference(crc64_rocksoft_tfm);
- desc.crc = crc;
- err = crypto_shash_update(&desc.shash, buffer, len);
- rcu_read_unlock();
-
- BUG_ON(err);
-
- return desc.crc;
-}
-EXPORT_SYMBOL_GPL(crc64_rocksoft_update);
-
-u64 crc64_rocksoft(const unsigned char *buffer, size_t len)
-{
- return crc64_rocksoft_update(0, buffer, len);
-}
-EXPORT_SYMBOL_GPL(crc64_rocksoft);
-
-static int __init crc64_rocksoft_mod_init(void)
-{
- INIT_WORK(&crc64_rocksoft_rehash_work, crc64_rocksoft_rehash);
- crypto_register_notifier(&crc64_rocksoft_nb);
- crc64_rocksoft_rehash(&crc64_rocksoft_rehash_work);
- return 0;
-}
-
-static void __exit crc64_rocksoft_mod_fini(void)
-{
- crypto_unregister_notifier(&crc64_rocksoft_nb);
- cancel_work_sync(&crc64_rocksoft_rehash_work);
- crypto_free_shash(rcu_dereference_protected(crc64_rocksoft_tfm, 1));
-}
-
-module_init(crc64_rocksoft_mod_init);
-module_exit(crc64_rocksoft_mod_fini);
-
-static int crc64_rocksoft_transform_show(char *buffer, const struct kernel_param *kp)
-{
- struct crypto_shash *tfm;
- int len;
-
- if (static_branch_unlikely(&crc64_rocksoft_fallback))
- return sprintf(buffer, "fallback\n");
-
- rcu_read_lock();
- tfm = rcu_dereference(crc64_rocksoft_tfm);
- len = snprintf(buffer, PAGE_SIZE, "%s\n",
- crypto_shash_driver_name(tfm));
- rcu_read_unlock();
-
- return len;
-}
-
-module_param_call(transform, NULL, crc64_rocksoft_transform_show, NULL, 0444);
-
-MODULE_AUTHOR("Keith Busch <kbusch@kernel.org>");
-MODULE_DESCRIPTION("Rocksoft model CRC64 calculation (library API)");
-MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: crc64");
diff --git a/lib/crc64.c b/lib/crc64.c
index 61ae8dfb6a1c..5b1b17057f0a 100644
--- a/lib/crc64.c
+++ b/lib/crc64.c
@@ -22,8 +22,8 @@
* x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
* x^7 + x^4 + x + 1
*
- * crc64rocksoft[256] table is from the Rocksoft specification polynomial
- * defined as,
+ * crc64nvmetable[256] uses the CRC64 polynomial from the NVME NVM Command Set
+ * Specification and uses least-significant-bit first bit order:
*
* x^64 + x^63 + x^61 + x^59 + x^58 + x^56 + x^55 + x^52 + x^49 + x^48 + x^47 +
* x^46 + x^44 + x^41 + x^37 + x^36 + x^34 + x^32 + x^31 + x^28 + x^26 + x^23 +
@@ -41,45 +41,18 @@
MODULE_DESCRIPTION("CRC64 calculations");
MODULE_LICENSE("GPL v2");
-/**
- * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
- * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
- * or the previous crc64 value if computing incrementally.
- * @p: pointer to buffer over which CRC64 is run
- * @len: length of buffer @p
- */
-u64 __pure crc64_be(u64 crc, const void *p, size_t len)
+u64 crc64_be_generic(u64 crc, const u8 *p, size_t len)
{
- size_t i, t;
-
- const unsigned char *_p = p;
-
- for (i = 0; i < len; i++) {
- t = ((crc >> 56) ^ (*_p++)) & 0xFF;
- crc = crc64table[t] ^ (crc << 8);
- }
-
+ while (len--)
+ crc = (crc << 8) ^ crc64table[(crc >> 56) ^ *p++];
return crc;
}
-EXPORT_SYMBOL_GPL(crc64_be);
+EXPORT_SYMBOL_GPL(crc64_be_generic);
-/**
- * crc64_rocksoft_generic - Calculate bitwise Rocksoft CRC64
- * @crc: seed value for computation. 0 for a new CRC calculation, or the
- * previous crc64 value if computing incrementally.
- * @p: pointer to buffer over which CRC64 is run
- * @len: length of buffer @p
- */
-u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len)
+u64 crc64_nvme_generic(u64 crc, const u8 *p, size_t len)
{
- const unsigned char *_p = p;
- size_t i;
-
- crc = ~crc;
-
- for (i = 0; i < len; i++)
- crc = (crc >> 8) ^ crc64rocksofttable[(crc & 0xff) ^ *_p++];
-
- return ~crc;
+ while (len--)
+ crc = (crc >> 8) ^ crc64nvmetable[(crc & 0xff) ^ *p++];
+ return crc;
}
-EXPORT_SYMBOL_GPL(crc64_rocksoft_generic);
+EXPORT_SYMBOL_GPL(crc64_nvme_generic);
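Note that the old crc64_rocksoft_generic() inverted the CRC on entry and exit, while crc64_nvme_generic() above is the raw table loop, so the inversion presumably now happens in whatever wrapper calls it. A hedged sketch of such a wrapper (the name is hypothetical, not taken from this patch) that reproduces the value the removed function returned:

/* Hypothetical wrapper: applies the pre/post inversion that
 * crc64_rocksoft_generic() used to do internally, so the result matches the
 * old behaviour for the same seed and data. */
static u64 crc64_nvme_inverted(u64 crc, const u8 *p, size_t len)
{
        return ~crc64_nvme_generic(~crc, p, len);
}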
diff --git a/lib/crc7.c b/lib/crc7.c
index 3848e313b722..8dd991cc6114 100644
--- a/lib/crc7.c
+++ b/lib/crc7.c
@@ -7,14 +7,13 @@
#include <linux/module.h>
#include <linux/crc7.h>
-
/*
* Table for CRC-7 (polynomial x^7 + x^3 + 1).
* This is a big-endian CRC (msbit is highest power of x),
* aligned so the msbit of the byte is the x^6 coefficient
* and the lsbit is not used.
*/
-const u8 crc7_be_syndrome_table[256] = {
+static const u8 crc7_be_syndrome_table[256] = {
0x00, 0x12, 0x24, 0x36, 0x48, 0x5a, 0x6c, 0x7e,
0x90, 0x82, 0xb4, 0xa6, 0xd8, 0xca, 0xfc, 0xee,
0x32, 0x20, 0x16, 0x04, 0x7a, 0x68, 0x5e, 0x4c,
@@ -48,7 +47,6 @@ const u8 crc7_be_syndrome_table[256] = {
0x1c, 0x0e, 0x38, 0x2a, 0x54, 0x46, 0x70, 0x62,
0x8c, 0x9e, 0xa8, 0xba, 0xc4, 0xd6, 0xe0, 0xf2
};
-EXPORT_SYMBOL(crc7_be_syndrome_table);
/**
* crc7_be - update the CRC7 for the data buffer
@@ -65,7 +63,7 @@ EXPORT_SYMBOL(crc7_be_syndrome_table);
u8 crc7_be(u8 crc, const u8 *buffer, size_t len)
{
while (len--)
- crc = crc7_be_byte(crc, *buffer++);
+ crc = crc7_be_syndrome_table[crc ^ *buffer++];
return crc;
}
EXPORT_SYMBOL(crc7_be);
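As the table comment notes, crc7_be() keeps the 7-bit remainder in bits 7..1 of the returned byte, with bit 0 unused. Two small illustrative helpers (not part of this patch) show how a caller would extract the plain 7-bit value, or keep the shifted form and set the low bit the way SD/MMC command framing conventionally does:

#include <linux/crc7.h>

/* Plain 7-bit CRC value (illustrative helper, not from this patch). */
static u8 crc7_value(const u8 *buf, size_t len)
{
        return crc7_be(0, buf, len) >> 1;
}

/* CRC byte with a trailing '1' end bit, as when framing a command. */
static u8 crc7_with_end_bit(const u8 *buf, size_t len)
{
        return crc7_be(0, buf, len) | 0x01;
}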
diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c
index 6bba6473fdf3..902e49410aaf 100644
--- a/lib/crypto/aesgcm.c
+++ b/lib/crypto/aesgcm.c
@@ -697,7 +697,7 @@ static int __init libaesgcm_init(void)
u8 tagbuf[AES_BLOCK_SIZE];
int plen = aesgcm_tv[i].plen;
struct aesgcm_ctx ctx;
- u8 buf[sizeof(ptext12)];
+ static u8 buf[sizeof(ptext12)];
if (aesgcm_expandkey(&ctx, aesgcm_tv[i].key, aesgcm_tv[i].klen,
aesgcm_tv[i].clen - plen)) {
diff --git a/lib/crypto/gf128mul.c b/lib/crypto/gf128mul.c
index 8f8c45e0cdcf..fbe72cb3453a 100644
--- a/lib/crypto/gf128mul.c
+++ b/lib/crypto/gf128mul.c
@@ -225,44 +225,6 @@ void gf128mul_lle(be128 *r, const be128 *b)
}
EXPORT_SYMBOL(gf128mul_lle);
-void gf128mul_bbe(be128 *r, const be128 *b)
-{
- be128 p[8];
- int i;
-
- p[0] = *r;
- for (i = 0; i < 7; ++i)
- gf128mul_x_bbe(&p[i + 1], &p[i]);
-
- memset(r, 0, sizeof(*r));
- for (i = 0;;) {
- u8 ch = ((u8 *)b)[i];
-
- if (ch & 0x80)
- be128_xor(r, r, &p[7]);
- if (ch & 0x40)
- be128_xor(r, r, &p[6]);
- if (ch & 0x20)
- be128_xor(r, r, &p[5]);
- if (ch & 0x10)
- be128_xor(r, r, &p[4]);
- if (ch & 0x08)
- be128_xor(r, r, &p[3]);
- if (ch & 0x04)
- be128_xor(r, r, &p[2]);
- if (ch & 0x02)
- be128_xor(r, r, &p[1]);
- if (ch & 0x01)
- be128_xor(r, r, &p[0]);
-
- if (++i >= 16)
- break;
-
- gf128mul_x8_bbe(r);
- }
-}
-EXPORT_SYMBOL(gf128mul_bbe);
-
/* This version uses 64k bytes of table space.
A 16 byte buffer has to be multiplied by a 16 byte key
value in GF(2^128). If we consider a GF(2^128) value in
@@ -380,28 +342,6 @@ out:
}
EXPORT_SYMBOL(gf128mul_init_4k_lle);
-struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
-{
- struct gf128mul_4k *t;
- int j, k;
-
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- goto out;
-
- t->t[1] = *g;
- for (j = 1; j <= 64; j <<= 1)
- gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
-
- for (j = 2; j < 256; j += j)
- for (k = 1; k < j; ++k)
- be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
-
-out:
- return t;
-}
-EXPORT_SYMBOL(gf128mul_init_4k_bbe);
-
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
{
u8 *ap = (u8 *)a;
@@ -417,20 +357,5 @@ void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
}
EXPORT_SYMBOL(gf128mul_4k_lle);
-void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
-{
- u8 *ap = (u8 *)a;
- be128 r[1];
- int i = 0;
-
- *r = t->t[ap[0]];
- while (++i < 16) {
- gf128mul_x8_bbe(r);
- be128_xor(r, r, &t->t[ap[i]]);
- }
- *a = *r;
-}
-EXPORT_SYMBOL(gf128mul_4k_bbe);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 388da1aea14a..b3a85fe8b673 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -54,7 +54,7 @@ void __init dump_stack_set_arch_desc(const char *fmt, ...)
*/
void dump_stack_print_info(const char *log_lvl)
{
- printk("%sCPU: %d UID: %u PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n",
+ printk("%sCPU: %d UID: %u PID: %d Comm: %.20s %s%s %s %.*s %s " BUILD_ID_FMT "\n",
log_lvl, raw_smp_processor_id(),
__kuid_val(current_real_cred()->euid),
current->pid, current->comm,
@@ -62,7 +62,7 @@ void dump_stack_print_info(const char *log_lvl)
print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
- init_utsname()->version, BUILD_ID_VAL);
+ init_utsname()->version, preempt_model_str(), BUILD_ID_VAL);
if (get_taint())
printk("%s%s\n", log_lvl, print_tainted_verbose());
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 52eb6ba29698..999053fa133e 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/stat.h>
@@ -13,6 +13,24 @@
#include <linux/fault-inject.h>
/*
+ * The should_fail() functions use prandom instead of the normal Linux RNG
+ * since they don't need cryptographically secure random numbers.
+ */
+static DEFINE_PER_CPU(struct rnd_state, fault_rnd_state);
+
+static u32 fault_prandom_u32_below_100(void)
+{
+ struct rnd_state *state;
+ u32 res;
+
+ state = &get_cpu_var(fault_rnd_state);
+ res = prandom_u32_state(state);
+ put_cpu_var(fault_rnd_state);
+
+ return res % 100;
+}
+
+/*
* setup_fault_attr() is a helper function for various __setup handlers, so it
* returns 0 on error, because that is what __setup handlers do.
*/
@@ -31,6 +49,8 @@ int setup_fault_attr(struct fault_attr *attr, char *str)
return 0;
}
+ prandom_init_once(&fault_rnd_state);
+
attr->probability = probability;
attr->interval = interval;
atomic_set(&attr->times, times);
@@ -146,7 +166,7 @@ bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
return false;
}
- if (attr->probability <= get_random_u32_below(100))
+ if (attr->probability <= fault_prandom_u32_below_100())
return false;
fail:
@@ -219,6 +239,8 @@ struct dentry *fault_create_debugfs_attr(const char *name,
if (IS_ERR(dir))
return dir;
+ prandom_init_once(&fault_rnd_state);
+
debugfs_create_ul("probability", mode, dir, &attr->probability);
debugfs_create_ul("interval", mode, dir, &attr->interval);
debugfs_create_atomic_t("times", mode, dir, &attr->times);
@@ -431,6 +453,8 @@ static const struct config_item_type fault_config_type = {
void fault_config_init(struct fault_config *config, const char *name)
{
+ prandom_init_once(&fault_rnd_state);
+
config_group_init_type_name(&config->group, name, &fault_config_type);
}
EXPORT_SYMBOL_GPL(fault_config_init);
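For context, the per-CPU prandom state added above is only consumed through should_fail()/should_fail_ex(); a typical call site looks roughly like the sketch below (the attr and function names here are illustrative, not from this patch):

#include <linux/fault-inject.h>
#include <linux/slab.h>

static DECLARE_FAULT_ATTR(fail_example_alloc);

/* Each call consults the fault attr; when an injection is due, the decision
 * now comes from fault_prandom_u32_below_100() rather than the crypto-grade
 * get_random_u32_below(). */
static void *example_alloc(size_t size)
{
        if (should_fail(&fail_example_alloc, size))
                return NULL;            /* injected failure */

        return kmalloc(size, GFP_KERNEL);
}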
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 3ac26bdbc3ff..ae59b5b4e225 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -10,7 +10,7 @@ if FONT_SUPPORT
config FONTS
bool "Select compiled-in fonts"
- depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE || DRM_PANIC
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE || DRM_PANIC || DRM_CLIENT_LOG
help
Say Y here if you would like to use fonts other than the default
your frame buffer console usually use.
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index f755b997b967..6d03425b849e 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -2,30 +2,11 @@
#include <stdio.h>
#include "../include/linux/crc32poly.h"
#include "../include/generated/autoconf.h"
-#include "crc32defs.h"
#include <inttypes.h>
-#define ENTRIES_PER_LINE 4
-
-#if CRC_LE_BITS > 8
-# define LE_TABLE_ROWS (CRC_LE_BITS/8)
-# define LE_TABLE_SIZE 256
-#else
-# define LE_TABLE_ROWS 1
-# define LE_TABLE_SIZE (1 << CRC_LE_BITS)
-#endif
-
-#if CRC_BE_BITS > 8
-# define BE_TABLE_ROWS (CRC_BE_BITS/8)
-# define BE_TABLE_SIZE 256
-#else
-# define BE_TABLE_ROWS 1
-# define BE_TABLE_SIZE (1 << CRC_BE_BITS)
-#endif
-
-static uint32_t crc32table_le[LE_TABLE_ROWS][256];
-static uint32_t crc32table_be[BE_TABLE_ROWS][256];
-static uint32_t crc32ctable_le[LE_TABLE_ROWS][256];
+static uint32_t crc32table_le[256];
+static uint32_t crc32table_be[256];
+static uint32_t crc32ctable_le[256];
/**
* crc32init_le() - allocate and initialize LE table data
@@ -34,25 +15,17 @@ static uint32_t crc32ctable_le[LE_TABLE_ROWS][256];
* fact that crctable[i^j] = crctable[i] ^ crctable[j].
*
*/
-static void crc32init_le_generic(const uint32_t polynomial,
- uint32_t (*tab)[256])
+static void crc32init_le_generic(const uint32_t polynomial, uint32_t tab[256])
{
unsigned i, j;
uint32_t crc = 1;
- tab[0][0] = 0;
+ tab[0] = 0;
- for (i = LE_TABLE_SIZE >> 1; i; i >>= 1) {
+ for (i = 128; i; i >>= 1) {
crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
- for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
- tab[0][i + j] = crc ^ tab[0][j];
- }
- for (i = 0; i < LE_TABLE_SIZE; i++) {
- crc = tab[0][i];
- for (j = 1; j < LE_TABLE_ROWS; j++) {
- crc = tab[0][crc & 0xff] ^ (crc >> 8);
- tab[j][i] = crc;
- }
+ for (j = 0; j < 256; j += 2 * i)
+ tab[i + j] = crc ^ tab[j];
}
}
@@ -74,34 +47,22 @@ static void crc32init_be(void)
unsigned i, j;
uint32_t crc = 0x80000000;
- crc32table_be[0][0] = 0;
+ crc32table_be[0] = 0;
- for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
+ for (i = 1; i < 256; i <<= 1) {
crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
for (j = 0; j < i; j++)
- crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
- }
- for (i = 0; i < BE_TABLE_SIZE; i++) {
- crc = crc32table_be[0][i];
- for (j = 1; j < BE_TABLE_ROWS; j++) {
- crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
- crc32table_be[j][i] = crc;
- }
+ crc32table_be[i + j] = crc ^ crc32table_be[j];
}
}
-static void output_table(uint32_t (*table)[256], int rows, int len, char *trans)
+static void output_table(const uint32_t table[256])
{
- int i, j;
-
- for (j = 0 ; j < rows; j++) {
- printf("{");
- for (i = 0; i < len - 1; i++) {
- if (i % ENTRIES_PER_LINE == 0)
- printf("\n");
- printf("%s(0x%8.8xL), ", trans, table[j][i]);
- }
- printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]);
+ int i;
+
+ for (i = 0; i < 256; i += 4) {
+ printf("\t0x%08x, 0x%08x, 0x%08x, 0x%08x,\n",
+ table[i], table[i + 1], table[i + 2], table[i + 3]);
}
}
@@ -109,34 +70,20 @@ int main(int argc, char** argv)
{
printf("/* this file is generated - do not edit */\n\n");
- if (CRC_LE_BITS > 1) {
- crc32init_le();
- printf("static const u32 ____cacheline_aligned "
- "crc32table_le[%d][%d] = {",
- LE_TABLE_ROWS, LE_TABLE_SIZE);
- output_table(crc32table_le, LE_TABLE_ROWS,
- LE_TABLE_SIZE, "tole");
- printf("};\n");
- }
+ crc32init_le();
+ printf("static const u32 ____cacheline_aligned crc32table_le[256] = {\n");
+ output_table(crc32table_le);
+ printf("};\n");
- if (CRC_BE_BITS > 1) {
- crc32init_be();
- printf("static const u32 ____cacheline_aligned "
- "crc32table_be[%d][%d] = {",
- BE_TABLE_ROWS, BE_TABLE_SIZE);
- output_table(crc32table_be, LE_TABLE_ROWS,
- BE_TABLE_SIZE, "tobe");
- printf("};\n");
- }
- if (CRC_LE_BITS > 1) {
- crc32cinit_le();
- printf("static const u32 ____cacheline_aligned "
- "crc32ctable_le[%d][%d] = {",
- LE_TABLE_ROWS, LE_TABLE_SIZE);
- output_table(crc32ctable_le, LE_TABLE_ROWS,
- LE_TABLE_SIZE, "tole");
- printf("};\n");
- }
+ crc32init_be();
+ printf("static const u32 ____cacheline_aligned crc32table_be[256] = {\n");
+ output_table(crc32table_be);
+ printf("};\n");
+
+ crc32cinit_le();
+ printf("static const u32 ____cacheline_aligned crc32ctable_le[256] = {\n");
+ output_table(crc32ctable_le);
+ printf("};\n");
return 0;
}
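The generator now emits flat 256-entry tables, which are meant to feed a plain byte-at-a-time loop on the kernel side. A generic sketch of the little-endian consumer, written against any such reflected table rather than copied from a specific kernel function:

#include <stddef.h>
#include <stdint.h>

/* Byte-at-a-time update using a 256-entry reflected (little-endian) table
 * such as the crc32table_le[] printed by this generator. */
static uint32_t crc32_le_bytewise(uint32_t crc, const uint8_t *p, size_t len,
                                  const uint32_t table[256])
{
        while (len--)
                crc = (crc >> 8) ^ table[(crc ^ *p++) & 0xff];
        return crc;
}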
diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c
index 55e222acd0b8..e05a4230a0a0 100644
--- a/lib/gen_crc64table.c
+++ b/lib/gen_crc64table.c
@@ -17,10 +17,10 @@
#include <stdio.h>
#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL
-#define CRC64_ROCKSOFT_POLY 0x9A6C9329AC4BC9B5ULL
+#define CRC64_NVME_POLY 0x9A6C9329AC4BC9B5ULL
static uint64_t crc64_table[256] = {0};
-static uint64_t crc64_rocksoft_table[256] = {0};
+static uint64_t crc64_nvme_table[256] = {0};
static void generate_reflected_crc64_table(uint64_t table[256], uint64_t poly)
{
@@ -82,14 +82,14 @@ static void print_crc64_tables(void)
printf("static const u64 ____cacheline_aligned crc64table[256] = {\n");
output_table(crc64_table);
- printf("\nstatic const u64 ____cacheline_aligned crc64rocksofttable[256] = {\n");
- output_table(crc64_rocksoft_table);
+ printf("\nstatic const u64 ____cacheline_aligned crc64nvmetable[256] = {\n");
+ output_table(crc64_nvme_table);
}
int main(int argc, char *argv[])
{
generate_crc64_table(crc64_table, CRC64_ECMA182_POLY);
- generate_reflected_crc64_table(crc64_rocksoft_table, CRC64_ROCKSOFT_POLY);
+ generate_reflected_crc64_table(crc64_nvme_table, CRC64_NVME_POLY);
print_crc64_tables();
return 0;
}
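generate_reflected_crc64_table() itself lies outside the hunk above, so for orientation here is a hedged sketch of how a reflected (LSB-first) CRC64 table such as crc64_nvme_table[] is typically built from the reflected polynomial; the real function in this file may differ in details:

#include <stdint.h>

static void gen_reflected_crc64(uint64_t table[256], uint64_t poly)
{
        for (int i = 0; i < 256; i++) {
                uint64_t crc = i;

                /* One polynomial step per bit, LSB first. */
                for (int bit = 0; bit < 8; bit++)
                        crc = (crc & 1) ? (crc >> 1) ^ poly : crc >> 1;

                table[i] = crc;
        }
}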
diff --git a/lib/inflate.c b/lib/inflate.c
index fbaf03c1748d..eab886baa1b4 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -1257,8 +1257,6 @@ static int INIT gunzip(void)
/* Decompress */
if ((res = inflate())) {
switch (res) {
- case 0:
- break;
case 1:
error("invalid compressed format (err=1)");
break;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 9ec806f989f2..8c7fdb7d8c8f 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1190,8 +1190,12 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
if (!n)
return -ENOMEM;
p = *pages;
- for (int k = 0; k < n; k++)
- get_page(p[k] = page + k);
+ for (int k = 0; k < n; k++) {
+ struct folio *folio = page_folio(page);
+ p[k] = page + k;
+ if (!folio_test_slab(folio))
+ folio_get(folio);
+ }
maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
i->count -= maxsize;
i->iov_offset += maxsize;
@@ -1428,6 +1432,8 @@ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
struct iovec *iov = *iovp;
ssize_t ret;
+ *iovp = NULL;
+
if (compat)
ret = copy_compat_iovec_from_user(iov, uvec, 1);
else
@@ -1438,7 +1444,6 @@ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
if (unlikely(ret))
return ret;
- *iovp = NULL;
return i->count;
}
diff --git a/lib/kobject.c b/lib/kobject.c
index 72fa20f405f1..abe5f5b856ce 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -1096,30 +1096,6 @@ void *kobj_ns_grab_current(enum kobj_ns_type type)
}
EXPORT_SYMBOL_GPL(kobj_ns_grab_current);
-const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
-{
- const void *ns = NULL;
-
- spin_lock(&kobj_ns_type_lock);
- if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
- ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
- spin_unlock(&kobj_ns_type_lock);
-
- return ns;
-}
-
-const void *kobj_ns_initial(enum kobj_ns_type type)
-{
- const void *ns = NULL;
-
- spin_lock(&kobj_ns_type_lock);
- if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
- ns = kobj_ns_ops_tbl[type]->initial_ns();
- spin_unlock(&kobj_ns_type_lock);
-
- return ns;
-}
-
void kobj_ns_drop(enum kobj_ns_type type, void *ns)
{
spin_lock(&kobj_ns_type_lock);
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 34d7242d526d..a97897edd964 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -81,4 +81,16 @@ config KUNIT_DEFAULT_ENABLED
In most cases this should be left as Y. Only if additional opt-in
behavior is needed should this be set to N.
+config KUNIT_AUTORUN_ENABLED
+ bool "Default value of kunit.autorun"
+ default y
+ help
+	  Sets the default value of kunit.autorun. If set to N, KUnit
+	  tests will not run after initialization unless kunit.autorun=1 is
+	  passed on the kernel command line. Tests can still be run manually
+	  via the debugfs interface.
+
+ In most cases this should be left as Y. Only if additional opt-in
+ behavior is needed should this be set to N.
+
endif # KUNIT
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index af71911f4a07..9c326f1837bd 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -145,7 +145,7 @@ static ssize_t debugfs_run(struct file *file,
struct inode *f_inode = file->f_inode;
struct kunit_suite *suite = (struct kunit_suite *) f_inode->i_private;
- __kunit_test_suites_init(&suite, 1);
+ __kunit_test_suites_init(&suite, 1, true);
return count;
}
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 34b7b6833df3..3f39955cb0f1 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -29,6 +29,22 @@ const char *kunit_action(void)
return action_param;
}
+/*
+ * Run KUnit tests after initialization
+ */
+#ifdef CONFIG_KUNIT_AUTORUN_ENABLED
+static bool autorun_param = true;
+#else
+static bool autorun_param;
+#endif
+module_param_named(autorun, autorun_param, bool, 0);
+MODULE_PARM_DESC(autorun, "Run KUnit tests after initialization");
+
+bool kunit_autorun(void)
+{
+ return autorun_param;
+}
+
static char *filter_glob_param;
static char *filter_param;
static char *filter_action_param;
@@ -260,13 +276,14 @@ free_copy:
void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin)
{
size_t num_suites = suite_set->end - suite_set->start;
+ bool autorun = kunit_autorun();
- if (builtin || num_suites) {
+ if (autorun && (builtin || num_suites)) {
pr_info("KTAP version 1\n");
pr_info("1..%zu\n", num_suites);
}
- __kunit_test_suites_init(suite_set->start, num_suites);
+ __kunit_test_suites_init(suite_set->start, num_suites, autorun);
}
void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr)
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 089c832e3cdb..146d1b48a096 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -708,7 +708,8 @@ bool kunit_enabled(void)
return enable_param;
}
-int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites)
+int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites,
+ bool run_tests)
{
unsigned int i;
@@ -731,7 +732,8 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
for (i = 0; i < num_suites; i++) {
kunit_init_suite(suites[i]);
- kunit_run_tests(suites[i]);
+ if (run_tests)
+ kunit_run_tests(suites[i]);
}
static_branch_dec(&kunit_running);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
deleted file mode 100644
index 649e687413a0..000000000000
--- a/lib/libcrc32c.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CRC32C
- *@Article{castagnoli-crc,
- * author = { Guy Castagnoli and Stefan Braeuer and Martin Herrman},
- * title = {{Optimization of Cyclic Redundancy-Check Codes with 24
- * and 32 Parity Bits}},
- * journal = IEEE Transactions on Communication,
- * year = {1993},
- * volume = {41},
- * number = {6},
- * pages = {},
- * month = {June},
- *}
- * Used by the iSCSI driver, possibly others, and derived from
- * the iscsi-crc.c module of the linux-iscsi driver at
- * http://linux-iscsi.sourceforge.net.
- *
- * Following the example of lib/crc32, this function is intended to be
- * flexible and useful for all users. Modules that currently have their
- * own crc32c, but hopefully may be able to use this one are:
- * net/sctp (please add all your doco to here if you change to
- * use this one!)
- * <endoflist>
- *
- * Copyright (c) 2004 Cisco Systems, Inc.
- */
-
-#include <crypto/hash.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/crc32c.h>
-
-static struct crypto_shash *tfm;
-
-u32 crc32c(u32 crc, const void *address, unsigned int length)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
- u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
- int err;
-
- shash->tfm = tfm;
- *ctx = crc;
-
- err = crypto_shash_update(shash, address, length);
- BUG_ON(err);
-
- ret = *ctx;
- barrier_data(ctx);
- return ret;
-}
-
-EXPORT_SYMBOL(crc32c);
-
-static int __init libcrc32c_mod_init(void)
-{
- tfm = crypto_alloc_shash("crc32c", 0, 0);
- return PTR_ERR_OR_ZERO(tfm);
-}
-
-static void __exit libcrc32c_mod_fini(void)
-{
- crypto_free_shash(tfm);
-}
-
-module_init(libcrc32c_mod_init);
-module_exit(libcrc32c_mod_fini);
-
-MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
-MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
-MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: crc32c");
diff --git a/lib/list_debug.c b/lib/list_debug.c
index db602417febf..ee7eeeb8f92c 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -22,17 +22,17 @@ __list_valid_slowpath
bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev,
struct list_head *next)
{
- if (CHECK_DATA_CORRUPTION(prev == NULL,
+ if (CHECK_DATA_CORRUPTION(prev == NULL, NULL,
"list_add corruption. prev is NULL.\n") ||
- CHECK_DATA_CORRUPTION(next == NULL,
+ CHECK_DATA_CORRUPTION(next == NULL, NULL,
"list_add corruption. next is NULL.\n") ||
- CHECK_DATA_CORRUPTION(next->prev != prev,
+ CHECK_DATA_CORRUPTION(next->prev != prev, next,
"list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
prev, next->prev, next) ||
- CHECK_DATA_CORRUPTION(prev->next != next,
+ CHECK_DATA_CORRUPTION(prev->next != next, prev,
"list_add corruption. prev->next should be next (%px), but was %px. (prev=%px).\n",
next, prev->next, prev) ||
- CHECK_DATA_CORRUPTION(new == prev || new == next,
+ CHECK_DATA_CORRUPTION(new == prev || new == next, NULL,
"list_add double add: new=%px, prev=%px, next=%px.\n",
new, prev, next))
return false;
@@ -49,20 +49,20 @@ bool __list_del_entry_valid_or_report(struct list_head *entry)
prev = entry->prev;
next = entry->next;
- if (CHECK_DATA_CORRUPTION(next == NULL,
+ if (CHECK_DATA_CORRUPTION(next == NULL, NULL,
"list_del corruption, %px->next is NULL\n", entry) ||
- CHECK_DATA_CORRUPTION(prev == NULL,
+ CHECK_DATA_CORRUPTION(prev == NULL, NULL,
"list_del corruption, %px->prev is NULL\n", entry) ||
- CHECK_DATA_CORRUPTION(next == LIST_POISON1,
+ CHECK_DATA_CORRUPTION(next == LIST_POISON1, next,
"list_del corruption, %px->next is LIST_POISON1 (%px)\n",
entry, LIST_POISON1) ||
- CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
+ CHECK_DATA_CORRUPTION(prev == LIST_POISON2, prev,
"list_del corruption, %px->prev is LIST_POISON2 (%px)\n",
entry, LIST_POISON2) ||
- CHECK_DATA_CORRUPTION(prev->next != entry,
+ CHECK_DATA_CORRUPTION(prev->next != entry, prev,
"list_del corruption. prev->next should be %px, but was %px. (prev=%px)\n",
entry, prev->next, prev) ||
- CHECK_DATA_CORRUPTION(next->prev != entry,
+ CHECK_DATA_CORRUPTION(next->prev != entry, next,
"list_del corruption. next->prev should be %px, but was %px. (next=%px)\n",
entry, next->prev, next))
return false;
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 8d3f623536fe..a310ecb7ccc0 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -108,6 +108,13 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
* and list_sort is a stable sort, so it is not necessary to distinguish
* the @a < @b and @a == @b cases.
*
+ * The comparison function must adhere to specific mathematical properties
+ * to ensure correct and stable sorting:
+ * - Antisymmetry: cmp(@a, @b) must return the opposite sign of
+ * cmp(@b, @a).
+ * - Transitivity: if cmp(@a, @b) <= 0 and cmp(@b, @c) <= 0, then
+ * cmp(@a, @c) <= 0.
+ *
* This is compatible with two styles of @cmp function:
* - The traditional style which returns <0 / =0 / >0, or
* - Returning a boolean 0/1.
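
To make the requirements concrete, here is an illustrative cmp callback that satisfies both properties; the element type and its key member are assumptions made for this sketch, not part of the patch:

struct item {
	struct list_head list;
	int key;
};

static int item_cmp(void *priv, const struct list_head *a,
		    const struct list_head *b)
{
	const struct item *ia = list_entry(a, struct item, list);
	const struct item *ib = list_entry(b, struct item, list);

	/* <0 / =0 / >0; the sign flips when the arguments are swapped. */
	return ia->key < ib->key ? -1 : ia->key > ib->key;
}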
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index b0bbeeb74b9e..2a397bb2c661 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -33,7 +33,6 @@
/*-************************************
* Dependencies
**************************************/
-#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 0e31e6da5ce7..3a2cd9acada4 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -33,7 +33,6 @@
/*-************************************
* Dependencies
**************************************/
-#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/init.h>
#include <linux/module.h>
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index cb358d6bde5a..17277ec16919 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -39,6 +39,7 @@
#include <linux/bitops.h>
#include <linux/string.h> /* memset, memcpy */
+#include <linux/lz4.h>
#define FORCE_INLINE __always_inline
@@ -92,8 +93,7 @@ typedef uintptr_t uptrval;
#define MB (1 << 20)
#define GB (1U << 30)
-#define MAXD_LOG 16
-#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+#define MAX_DISTANCE LZ4_DISTANCE_MAX
#define STEPSIZE sizeof(size_t)
#define ML_BITS 4
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index bc45594ad2a8..91936dc3d14b 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -34,7 +34,6 @@
/*-************************************
* Dependencies
**************************************/
-#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 047397136f15..f7153ade1be5 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1863,11 +1863,11 @@ static inline int mab_no_null_split(struct maple_big_node *b_node,
* Return: The first split location. The middle split is set in @mid_split.
*/
static inline int mab_calc_split(struct ma_state *mas,
- struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
+ struct maple_big_node *bn, unsigned char *mid_split)
{
unsigned char b_end = bn->b_end;
int split = b_end / 2; /* Assume equal split. */
- unsigned char slot_min, slot_count = mt_slots[bn->type];
+ unsigned char slot_count = mt_slots[bn->type];
/*
* To support gap tracking, all NULL entries are kept together and a node cannot
@@ -1900,18 +1900,7 @@ static inline int mab_calc_split(struct ma_state *mas,
split = b_end / 3;
*mid_split = split * 2;
} else {
- slot_min = mt_min_slots[bn->type];
-
*mid_split = 0;
- /*
- * Avoid having a range less than the slot count unless it
- * causes one node to be deficient.
- * NOTE: mt_min_slots is 1 based, b_end and split are zero.
- */
- while ((split < slot_count - 1) &&
- ((bn->pivot[split] - min) < slot_count - 1) &&
- (b_end - split > slot_min))
- split++;
}
/* Avoid ending a node on a NULL entry */
@@ -2377,7 +2366,7 @@ static inline struct maple_enode
static inline unsigned char mas_mab_to_node(struct ma_state *mas,
struct maple_big_node *b_node, struct maple_enode **left,
struct maple_enode **right, struct maple_enode **middle,
- unsigned char *mid_split, unsigned long min)
+ unsigned char *mid_split)
{
unsigned char split = 0;
unsigned char slot_count = mt_slots[b_node->type];
@@ -2390,7 +2379,7 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas,
if (b_node->b_end < slot_count) {
split = b_node->b_end;
} else {
- split = mab_calc_split(mas, b_node, mid_split, min);
+ split = mab_calc_split(mas, b_node, mid_split);
*right = mas_new_ma_node(mas, b_node);
}
@@ -2877,7 +2866,7 @@ static void mas_spanning_rebalance(struct ma_state *mas,
mast->bn->b_end--;
mast->bn->type = mte_node_type(mast->orig_l->node);
split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
- &mid_split, mast->orig_l->min);
+ &mid_split);
mast_set_split_parents(mast, left, middle, right, split,
mid_split);
mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
@@ -3365,7 +3354,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
if (mas_push_data(mas, height, &mast, false))
break;
- split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
+ split = mab_calc_split(mas, b_node, &mid_split);
mast_split_data(&mast, mas, split);
/*
* Usually correct, mab_mas_cp in the above call overwrites
@@ -4746,29 +4735,6 @@ again:
}
/*
- * mas_next_entry() - Internal function to get the next entry.
- * @mas: The maple state
- * @limit: The maximum range start.
- *
- * Set the @mas->node to the next entry and the range_start to
- * the beginning value for the entry. Does not check beyond @limit.
- * Sets @mas->index and @mas->last to the range, Does not update @mas->index and
- * @mas->last on overflow.
- * Restarts on dead nodes.
- *
- * Return: the next entry or %NULL.
- */
-static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
-{
- if (mas->last >= limit) {
- mas->status = ma_overflow;
- return NULL;
- }
-
- return mas_next_slot(mas, limit, false);
-}
-
-/*
* mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
* highest gap address of a given size in a given node and descend.
* @mas: The maple state
@@ -4903,15 +4869,14 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
if (gap >= size) {
if (ma_is_leaf(type)) {
found = true;
- goto done;
- }
- if (mas->index <= pivot) {
- mas->node = mas_slot(mas, slots, offset);
- mas->min = min;
- mas->max = pivot;
- offset = 0;
break;
}
+
+ mas->node = mas_slot(mas, slots, offset);
+ mas->min = min;
+ mas->max = pivot;
+ offset = 0;
+ break;
}
next_slot:
min = pivot + 1;
@@ -4921,9 +4886,6 @@ next_slot:
}
}
- if (mte_is_root(mas->node))
- found = true;
-done:
mas->offset = offset;
return found;
}
@@ -5027,8 +4989,8 @@ static inline void mas_awalk(struct ma_state *mas, unsigned long size)
* There are 4 options:
* go to child (descend)
* go back to parent (ascend)
- * no gap found. (return, slot == MAPLE_NODE_SLOTS)
- * found the gap. (return, slot != MAPLE_NODE_SLOTS)
+ * no gap found. (return, error == -EBUSY)
+ * found the gap. (return)
*/
while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
if (last == mas->node)
@@ -5113,9 +5075,6 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
return xa_err(mas->node);
offset = mas->offset;
- if (unlikely(offset == MAPLE_NODE_SLOTS))
- return -EBUSY;
-
node = mas_mn(mas);
mt = mte_node_type(mas->node);
pivots = ma_pivots(node, mt);
@@ -6938,7 +6897,7 @@ retry:
goto unlock;
while (mas_is_active(&mas) && (mas.last < max)) {
- entry = mas_next_entry(&mas, max);
+ entry = mas_next_slot(&mas, max, false);
if (likely(entry && !xa_is_zero(entry)))
break;
}
@@ -7597,7 +7556,7 @@ void mt_validate(struct maple_tree *mt)
MAS_WARN_ON(&mas, mte_dead_node(mas.node));
end = mas_data_end(&mas);
if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
- (mas.max != ULONG_MAX))) {
+ (!mte_is_root(mas.node)))) {
pr_err("Invalid size %u of " PTR_FMT "\n",
end, mas_mn(&mas));
}
diff --git a/lib/math/Makefile b/lib/math/Makefile
index 3ef11305f8d2..d1caba23baa0 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
obj-$(CONFIG_RATIONAL) += rational.o
-obj-$(CONFIG_INT_POW_TEST) += tests/int_pow_kunit.o
obj-$(CONFIG_TEST_DIV64) += test_div64.o
obj-$(CONFIG_TEST_MULDIV64) += test_mul_u64_u64_div_u64.o
-obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
+
+obj-y += tests/
diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
index 9a17ee9af93a..95a6f7960db9 100644
--- a/lib/math/prime_numbers.c
+++ b/lib/math/prime_numbers.c
@@ -1,16 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "prime numbers: " fmt
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/prime_numbers.h>
#include <linux/slab.h>
-struct primes {
- struct rcu_head rcu;
- unsigned long last, sz;
- unsigned long primes[];
-};
+#include "prime_numbers_private.h"
#if BITS_PER_LONG == 64
static const struct primes small_primes = {
@@ -62,9 +57,25 @@ static const struct primes small_primes = {
static DEFINE_MUTEX(lock);
static const struct primes __rcu *primes = RCU_INITIALIZER(&small_primes);
-static unsigned long selftest_max;
+#if IS_ENABLED(CONFIG_PRIME_NUMBERS_KUNIT_TEST)
+/*
+ * Calls the callback under the RCU read lock. The callback must not
+ * retain the primes pointer.
+ */
+void with_primes(void *ctx, primes_fn fn)
+{
+ rcu_read_lock();
+ fn(ctx, rcu_dereference(primes));
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(with_primes);
+
+EXPORT_SYMBOL(slow_is_prime_number);
-static bool slow_is_prime_number(unsigned long x)
+#else
+static
+#endif
+bool slow_is_prime_number(unsigned long x)
{
unsigned long y = int_sqrt(x);
@@ -239,77 +250,13 @@ bool is_prime_number(unsigned long x)
}
EXPORT_SYMBOL(is_prime_number);
-static void dump_primes(void)
-{
- const struct primes *p;
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-
- rcu_read_lock();
- p = rcu_dereference(primes);
-
- if (buf)
- bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
- pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s\n",
- p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
-
- rcu_read_unlock();
-
- kfree(buf);
-}
-
-static int selftest(unsigned long max)
-{
- unsigned long x, last;
-
- if (!max)
- return 0;
-
- for (last = 0, x = 2; x < max; x++) {
- bool slow = slow_is_prime_number(x);
- bool fast = is_prime_number(x);
-
- if (slow != fast) {
- pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!\n",
- x, slow ? "yes" : "no", fast ? "yes" : "no");
- goto err;
- }
-
- if (!slow)
- continue;
-
- if (next_prime_number(last) != x) {
- pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu\n",
- last, x, next_prime_number(last));
- goto err;
- }
- last = x;
- }
-
- pr_info("%s(%lu) passed, last prime was %lu\n", __func__, x, last);
- return 0;
-
-err:
- dump_primes();
- return -EINVAL;
-}
-
-static int __init primes_init(void)
-{
- return selftest(selftest_max);
-}
-
static void __exit primes_exit(void)
{
free_primes();
}
-module_init(primes_init);
module_exit(primes_exit);
-module_param_named(selftest, selftest_max, ulong, 0400);
-
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Prime number library");
MODULE_LICENSE("GPL");
diff --git a/lib/math/prime_numbers_private.h b/lib/math/prime_numbers_private.h
new file mode 100644
index 000000000000..f3ebf5386e6b
--- /dev/null
+++ b/lib/math/prime_numbers_private.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/types.h>
+
+struct primes {
+ struct rcu_head rcu;
+ unsigned long last, sz;
+ unsigned long primes[];
+};
+
+#if IS_ENABLED(CONFIG_PRIME_NUMBERS_KUNIT_TEST)
+typedef void (*primes_fn)(void *, const struct primes *);
+
+void with_primes(void *ctx, primes_fn fn);
+bool slow_is_prime_number(unsigned long x);
+#endif
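
A hedged usage sketch (all names here are invented for illustration): the callback runs under rcu_read_lock() inside with_primes() and must not stash the primes pointer for use after it returns:

static void count_cached_primes(void *ctx, const struct primes *p)
{
	unsigned long *count = ctx;

	*count = bitmap_weight(p->primes, p->sz);
}

static unsigned long cached_prime_count(void)
{
	unsigned long count = 0;

	with_primes(&count, count_cached_primes);
	return count;
}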
diff --git a/lib/math/tests/Makefile b/lib/math/tests/Makefile
index 6a169123320a..13dc96e48408 100644
--- a/lib/math/tests/Makefile
+++ b/lib/math/tests/Makefile
@@ -1,3 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_INT_POW_TEST) += int_pow_kunit.o
+obj-$(CONFIG_GCD_KUNIT_TEST) += gcd_kunit.o
+obj-$(CONFIG_INT_LOG_KUNIT_TEST) += int_log_kunit.o
+obj-$(CONFIG_INT_POW_KUNIT_TEST) += int_pow_kunit.o
+obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += int_sqrt_kunit.o
+obj-$(CONFIG_PRIME_NUMBERS_KUNIT_TEST) += prime_numbers_kunit.o
+obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational_kunit.o
diff --git a/lib/math/tests/gcd_kunit.c b/lib/math/tests/gcd_kunit.c
new file mode 100644
index 000000000000..ede1883583b1
--- /dev/null
+++ b/lib/math/tests/gcd_kunit.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+#include <linux/gcd.h>
+#include <linux/limits.h>
+
+struct test_case_params {
+ unsigned long val1;
+ unsigned long val2;
+ unsigned long expected_result;
+ const char *name;
+};
+
+static const struct test_case_params params[] = {
+ { 48, 18, 6, "GCD of 48 and 18" },
+ { 18, 48, 6, "GCD of 18 and 48" },
+ { 56, 98, 14, "GCD of 56 and 98" },
+ { 17, 13, 1, "Coprime numbers" },
+ { 101, 103, 1, "Coprime numbers" },
+ { 270, 192, 6, "GCD of 270 and 192" },
+ { 0, 5, 5, "GCD with zero" },
+ { 7, 0, 7, "GCD with zero reversed" },
+ { 36, 36, 36, "GCD of identical numbers" },
+ { ULONG_MAX, 1, 1, "GCD of max ulong and 1" },
+ { ULONG_MAX, ULONG_MAX, ULONG_MAX, "GCD of max ulong values" },
+};
+
+static void get_desc(const struct test_case_params *tc, char *desc)
+{
+ strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(gcd, params, get_desc);
+
+static void gcd_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, gcd(tc->val1, tc->val2));
+}
+
+static struct kunit_case math_gcd_test_cases[] = {
+ KUNIT_CASE_PARAM(gcd_test, gcd_gen_params),
+ {}
+};
+
+static struct kunit_suite gcd_test_suite = {
+ .name = "math-gcd",
+ .test_cases = math_gcd_test_cases,
+};
+
+kunit_test_suite(gcd_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("math.gcd KUnit test suite");
+MODULE_AUTHOR("Yu-Chun Lin <eleanor15x@gmail.com>");
diff --git a/lib/math/tests/int_log_kunit.c b/lib/math/tests/int_log_kunit.c
new file mode 100644
index 000000000000..14e854146cb4
--- /dev/null
+++ b/lib/math/tests/int_log_kunit.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <kunit/test.h>
+#include <linux/int_log.h>
+
+struct test_case_params {
+ u32 value;
+ unsigned int expected_result;
+ const char *name;
+};
+
+
+/* The expected result takes into account the log error */
+static const struct test_case_params intlog2_params[] = {
+ {0, 0, "Log base 2 of 0"},
+ {1, 0, "Log base 2 of 1"},
+ {2, 16777216, "Log base 2 of 2"},
+ {3, 26591232, "Log base 2 of 3"},
+ {4, 33554432, "Log base 2 of 4"},
+ {8, 50331648, "Log base 2 of 8"},
+ {16, 67108864, "Log base 2 of 16"},
+ {32, 83886080, "Log base 2 of 32"},
+ {U32_MAX, 536870911, "Log base 2 of MAX"},
+};
+
+static const struct test_case_params intlog10_params[] = {
+ {0, 0, "Log base 10 of 0"},
+ {1, 0, "Log base 10 of 1"},
+ {6, 13055203, "Log base 10 of 6"},
+ {10, 16777225, "Log base 10 of 10"},
+ {100, 33554450, "Log base 10 of 100"},
+ {1000, 50331675, "Log base 10 of 1000"},
+ {10000, 67108862, "Log base 10 of 10000"},
+ {U32_MAX, 161614247, "Log base 10 of MAX"}
+};
+
+static void get_desc(const struct test_case_params *tc, char *desc)
+{
+ strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+
+KUNIT_ARRAY_PARAM(intlog2, intlog2_params, get_desc);
+
+static void intlog2_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, intlog2(tc->value));
+}
+
+KUNIT_ARRAY_PARAM(intlog10, intlog10_params, get_desc);
+
+static void intlog10_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, intlog10(tc->value));
+}
+
+static struct kunit_case math_int_log_test_cases[] = {
+ KUNIT_CASE_PARAM(intlog2_test, intlog2_gen_params),
+ KUNIT_CASE_PARAM(intlog10_test, intlog10_gen_params),
+ {}
+};
+
+static struct kunit_suite int_log_test_suite = {
+ .name = "math-int_log",
+ .test_cases = math_int_log_test_cases,
+};
+
+kunit_test_suites(&int_log_test_suite);
+
+MODULE_DESCRIPTION("math.int_log KUnit test suite");
+MODULE_LICENSE("GPL");
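
Editorial note on the expected values above (hedged): intlog2() and intlog10() return the logarithm with 24 fractional bits, i.e. scaled by 2^24, and the table entries absorb the implementation's small rounding error mentioned in the comment. Roughly:

\operatorname{intlog2}(x) \approx 2^{24}\log_2 x, \qquad
\operatorname{intlog10}(x) \approx 2^{24}\log_{10} x

For example, 2^24 * log2(3) is about 26591258, which the table records as 26591232 after the implementation's truncation.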
diff --git a/lib/math/tests/int_sqrt_kunit.c b/lib/math/tests/int_sqrt_kunit.c
new file mode 100644
index 000000000000..1798e1312eb7
--- /dev/null
+++ b/lib/math/tests/int_sqrt_kunit.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+struct test_case_params {
+ unsigned long x;
+ unsigned long expected_result;
+ const char *name;
+};
+
+static const struct test_case_params params[] = {
+ { 0, 0, "edge case: square root of 0" },
+ { 1, 1, "perfect square: square root of 1" },
+ { 2, 1, "non-perfect square: square root of 2" },
+ { 3, 1, "non-perfect square: square root of 3" },
+ { 4, 2, "perfect square: square root of 4" },
+ { 5, 2, "non-perfect square: square root of 5" },
+ { 6, 2, "non-perfect square: square root of 6" },
+ { 7, 2, "non-perfect square: square root of 7" },
+ { 8, 2, "non-perfect square: square root of 8" },
+ { 9, 3, "perfect square: square root of 9" },
+ { 15, 3, "non-perfect square: square root of 15 (N-1 from 16)" },
+ { 16, 4, "perfect square: square root of 16" },
+ { 17, 4, "non-perfect square: square root of 17 (N+1 from 16)" },
+ { 80, 8, "non-perfect square: square root of 80 (N-1 from 81)" },
+ { 81, 9, "perfect square: square root of 81" },
+ { 82, 9, "non-perfect square: square root of 82 (N+1 from 81)" },
+ { 255, 15, "non-perfect square: square root of 255 (N-1 from 256)" },
+ { 256, 16, "perfect square: square root of 256" },
+ { 257, 16, "non-perfect square: square root of 257 (N+1 from 256)" },
+ { 2147483648, 46340, "large input: square root of 2147483648" },
+ { 4294967295, 65535, "edge case: ULONG_MAX for 32-bit" },
+};
+
+static void get_desc(const struct test_case_params *tc, char *desc)
+{
+ strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(int_sqrt, params, get_desc);
+
+static void int_sqrt_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, int_sqrt(tc->x));
+}
+
+static struct kunit_case math_int_sqrt_test_cases[] = {
+ KUNIT_CASE_PARAM(int_sqrt_test, int_sqrt_gen_params),
+ {}
+};
+
+static struct kunit_suite int_sqrt_test_suite = {
+ .name = "math-int_sqrt",
+ .test_cases = math_int_sqrt_test_cases,
+};
+
+kunit_test_suites(&int_sqrt_test_suite);
+
+MODULE_DESCRIPTION("math.int_sqrt KUnit test suite");
+MODULE_LICENSE("GPL");
diff --git a/lib/math/tests/prime_numbers_kunit.c b/lib/math/tests/prime_numbers_kunit.c
new file mode 100644
index 000000000000..2f1643208c66
--- /dev/null
+++ b/lib/math/tests/prime_numbers_kunit.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+#include <linux/module.h>
+#include <linux/prime_numbers.h>
+
+#include "../prime_numbers_private.h"
+
+static void dump_primes(void *ctx, const struct primes *p)
+{
+ static char buf[PAGE_SIZE];
+ struct kunit_suite *suite = ctx;
+
+ bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
+ kunit_info(suite, "primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s",
+ p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
+}
+
+static void prime_numbers_test(struct kunit *test)
+{
+ const unsigned long max = 65536;
+ unsigned long x, last, next;
+
+ for (last = 0, x = 2; x < max; x++) {
+ const bool slow = slow_is_prime_number(x);
+ const bool fast = is_prime_number(x);
+
+ KUNIT_ASSERT_EQ_MSG(test, slow, fast, "is-prime(%lu)", x);
+
+ if (!slow)
+ continue;
+
+ next = next_prime_number(last);
+ KUNIT_ASSERT_EQ_MSG(test, next, x, "next-prime(%lu)", last);
+ last = next;
+ }
+}
+
+static void kunit_suite_exit(struct kunit_suite *suite)
+{
+ with_primes(suite, dump_primes);
+}
+
+static struct kunit_case prime_numbers_cases[] = {
+ KUNIT_CASE(prime_numbers_test),
+ {},
+};
+
+static struct kunit_suite prime_numbers_suite = {
+ .name = "math-prime_numbers",
+ .suite_exit = kunit_suite_exit,
+ .test_cases = prime_numbers_cases,
+};
+
+kunit_test_suite(prime_numbers_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Prime number library");
+MODULE_LICENSE("GPL");
diff --git a/lib/math/rational-test.c b/lib/math/tests/rational_kunit.c
index 47486a95f088..47486a95f088 100644
--- a/lib/math/rational-test.c
+++ b/lib/math/tests/rational_kunit.c
diff --git a/lib/packing.c b/lib/packing.c
index 793942745e34..bb1643d9e64d 100644
--- a/lib/packing.c
+++ b/lib/packing.c
@@ -5,10 +5,37 @@
#include <linux/packing.h>
#include <linux/module.h>
#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/bitrev.h>
+#define __pack_fields(pbuf, pbuflen, ustruct, fields, num_fields, quirks) \
+ ({ \
+ for (size_t i = 0; i < (num_fields); i++) { \
+ typeof(&(fields)[0]) field = &(fields)[i]; \
+ u64 uval; \
+ \
+ uval = ustruct_field_to_u64(ustruct, field->offset, field->size); \
+ \
+ __pack(pbuf, uval, field->startbit, field->endbit, \
+ pbuflen, quirks); \
+ } \
+ })
+
+#define __unpack_fields(pbuf, pbuflen, ustruct, fields, num_fields, quirks) \
+ ({ \
+ for (size_t i = 0; i < (num_fields); i++) { \
+ typeof(&(fields)[0]) field = &fields[i]; \
+ u64 uval; \
+ \
+ __unpack(pbuf, &uval, field->startbit, field->endbit, \
+ pbuflen, quirks); \
+ \
+ u64_to_ustruct_field(ustruct, field->offset, field->size, uval); \
+ } \
+ })
+
/**
* calculate_box_addr - Determine physical location of byte in buffer
* @box: Index of byte within buffer seen as a logical big-endian big number
@@ -51,64 +78,29 @@ static size_t calculate_box_addr(size_t box, size_t len, u8 quirks)
return offset_of_group + offset_in_group;
}
-/**
- * pack - Pack u64 number into bitfield of buffer.
- *
- * @pbuf: Pointer to a buffer holding the packed value.
- * @uval: CPU-readable unpacked value to pack.
- * @startbit: The index (in logical notation, compensated for quirks) where
- * the packed value starts within pbuf. Must be larger than, or
- * equal to, endbit.
- * @endbit: The index (in logical notation, compensated for quirks) where
- * the packed value ends within pbuf. Must be smaller than, or equal
- * to, startbit.
- * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
- * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
- * QUIRK_MSB_ON_THE_RIGHT.
- *
- * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming
- * correct usage, return code may be discarded. The @pbuf memory will
- * be modified on success.
- */
-int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen,
- u8 quirks)
+static void __pack(void *pbuf, u64 uval, size_t startbit, size_t endbit,
+ size_t pbuflen, u8 quirks)
{
/* Logical byte indices corresponding to the
* start and end of the field.
*/
- int plogical_first_u8, plogical_last_u8, box;
- /* width of the field to access in the pbuf */
- u64 value_width;
-
- /* startbit is expected to be larger than endbit, and both are
- * expected to be within the logically addressable range of the buffer.
- */
- if (unlikely(startbit < endbit || startbit >= BITS_PER_BYTE * pbuflen))
- /* Invalid function call */
- return -EINVAL;
-
- value_width = startbit - endbit + 1;
- if (unlikely(value_width > 64))
- return -ERANGE;
+ int plogical_first_u8 = startbit / BITS_PER_BYTE;
+ int plogical_last_u8 = endbit / BITS_PER_BYTE;
+ int value_width = startbit - endbit + 1;
+ int box;
/* Check if "uval" fits in "value_width" bits.
- * If value_width is 64, the check will fail, but any
- * 64-bit uval will surely fit.
+	 * The test only works for value_width < 64; when value_width is 64,
+	 * any 64-bit uval will surely fit.
*/
- if (unlikely(value_width < 64 && uval >= (1ull << value_width)))
- /* Cannot store "uval" inside "value_width" bits.
- * Truncating "uval" is most certainly not desirable,
- * so simply erroring out is appropriate.
- */
- return -ERANGE;
+ WARN(value_width < 64 && uval >= (1ull << value_width),
+ "Cannot store 0x%llx inside bits %zu-%zu - will truncate\n",
+ uval, startbit, endbit);
/* Iterate through an idealistic view of the pbuf as an u64 with
* no quirks, u8 by u8 (aligned at u8 boundaries), from high to low
* logical bit significance. "box" denotes the current logical u8.
*/
- plogical_first_u8 = startbit / BITS_PER_BYTE;
- plogical_last_u8 = endbit / BITS_PER_BYTE;
-
for (box = plogical_first_u8; box >= plogical_last_u8; box--) {
/* Bit indices into the currently accessed 8-bit box */
size_t box_start_bit, box_end_bit, box_addr;
@@ -163,15 +155,13 @@ int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen,
((u8 *)pbuf)[box_addr] &= ~box_mask;
((u8 *)pbuf)[box_addr] |= pval;
}
- return 0;
}
-EXPORT_SYMBOL(pack);
/**
- * unpack - Unpack u64 number from packed buffer.
+ * pack - Pack u64 number into bitfield of buffer.
*
* @pbuf: Pointer to a buffer holding the packed value.
- * @uval: Pointer to an u64 holding the unpacked value.
+ * @uval: CPU-readable unpacked value to pack.
* @startbit: The index (in logical notation, compensated for quirks) where
* the packed value starts within pbuf. Must be larger than, or
* equal to, endbit.
@@ -183,19 +173,12 @@ EXPORT_SYMBOL(pack);
* QUIRK_MSB_ON_THE_RIGHT.
*
* Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming
- * correct usage, return code may be discarded. The @uval will be
- * modified on success.
+ * correct usage, return code may be discarded. The @pbuf memory will
+ * be modified on success.
*/
-int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
- size_t pbuflen, u8 quirks)
+int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen,
+ u8 quirks)
{
- /* Logical byte indices corresponding to the
- * start and end of the field.
- */
- int plogical_first_u8, plogical_last_u8, box;
- /* width of the field to access in the pbuf */
- u64 value_width;
-
/* startbit is expected to be larger than endbit, and both are
* expected to be within the logically addressable range of the buffer.
*/
@@ -203,10 +186,25 @@ int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
/* Invalid function call */
return -EINVAL;
- value_width = startbit - endbit + 1;
- if (unlikely(value_width > 64))
+ if (unlikely(startbit - endbit >= 64))
return -ERANGE;
+ __pack(pbuf, uval, startbit, endbit, pbuflen, quirks);
+
+ return 0;
+}
+EXPORT_SYMBOL(pack);
+
+static void __unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
+ size_t pbuflen, u8 quirks)
+{
+ /* Logical byte indices corresponding to the
+ * start and end of the field.
+ */
+ int plogical_first_u8 = startbit / BITS_PER_BYTE;
+ int plogical_last_u8 = endbit / BITS_PER_BYTE;
+ int box;
+
/* Initialize parameter */
*uval = 0;
@@ -214,9 +212,6 @@ int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
* no quirks, u8 by u8 (aligned at u8 boundaries), from high to low
* logical bit significance. "box" denotes the current logical u8.
*/
- plogical_first_u8 = startbit / BITS_PER_BYTE;
- plogical_last_u8 = endbit / BITS_PER_BYTE;
-
for (box = plogical_first_u8; box >= plogical_last_u8; box--) {
/* Bit indices into the currently accessed 8-bit box */
size_t box_start_bit, box_end_bit, box_addr;
@@ -271,6 +266,46 @@ int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
*uval &= ~proj_mask;
*uval |= pval;
}
+}
+
+/**
+ * unpack - Unpack u64 number from packed buffer.
+ *
+ * @pbuf: Pointer to a buffer holding the packed value.
+ * @uval: Pointer to an u64 holding the unpacked value.
+ * @startbit: The index (in logical notation, compensated for quirks) where
+ * the packed value starts within pbuf. Must be larger than, or
+ * equal to, endbit.
+ * @endbit: The index (in logical notation, compensated for quirks) where
+ * the packed value ends within pbuf. Must be smaller than, or equal
+ * to, startbit.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
+ * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
+ * QUIRK_MSB_ON_THE_RIGHT.
+ *
+ * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming
+ * correct usage, return code may be discarded. The @uval will be
+ * modified on success.
+ */
+int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
+ size_t pbuflen, u8 quirks)
+{
+ /* width of the field to access in the pbuf */
+ u64 value_width;
+
+ /* startbit is expected to be larger than endbit, and both are
+ * expected to be within the logically addressable range of the buffer.
+ */
+ if (startbit < endbit || startbit >= BITS_PER_BYTE * pbuflen)
+ /* Invalid function call */
+ return -EINVAL;
+
+ value_width = startbit - endbit + 1;
+ if (value_width > 64)
+ return -ERANGE;
+
+ __unpack(pbuf, uval, startbit, endbit, pbuflen, quirks);
+
return 0;
}
EXPORT_SYMBOL(unpack);
@@ -314,4 +349,130 @@ int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
}
EXPORT_SYMBOL(packing);
+static u64 ustruct_field_to_u64(const void *ustruct, size_t field_offset,
+ size_t field_size)
+{
+ switch (field_size) {
+ case 1:
+ return *((u8 *)(ustruct + field_offset));
+ case 2:
+ return *((u16 *)(ustruct + field_offset));
+ case 4:
+ return *((u32 *)(ustruct + field_offset));
+ default:
+ return *((u64 *)(ustruct + field_offset));
+ }
+}
+
+static void u64_to_ustruct_field(void *ustruct, size_t field_offset,
+ size_t field_size, u64 uval)
+{
+ switch (field_size) {
+ case 1:
+ *((u8 *)(ustruct + field_offset)) = uval;
+ break;
+ case 2:
+ *((u16 *)(ustruct + field_offset)) = uval;
+ break;
+ case 4:
+ *((u32 *)(ustruct + field_offset)) = uval;
+ break;
+ default:
+ *((u64 *)(ustruct + field_offset)) = uval;
+ break;
+ }
+}
+
+/**
+ * pack_fields_u8 - Pack array of fields
+ *
+ * @pbuf: Pointer to a buffer holding the packed value.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
+ * @ustruct: Pointer to CPU-readable structure holding the unpacked value.
+ * It is expected (but not checked) that this has the same data type
+ * as all struct packed_field_u8 definitions.
+ * @fields: Array of packed_field_u8 field definitions. They must not overlap.
+ * @num_fields: Length of @fields array.
+ * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
+ * QUIRK_MSB_ON_THE_RIGHT.
+ *
+ * Use the pack_fields() macro instead of calling this directly.
+ */
+void pack_fields_u8(void *pbuf, size_t pbuflen, const void *ustruct,
+ const struct packed_field_u8 *fields, size_t num_fields,
+ u8 quirks)
+{
+ __pack_fields(pbuf, pbuflen, ustruct, fields, num_fields, quirks);
+}
+EXPORT_SYMBOL(pack_fields_u8);
+
+/**
+ * pack_fields_u16 - Pack array of fields
+ *
+ * @pbuf: Pointer to a buffer holding the packed value.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
+ * @ustruct: Pointer to CPU-readable structure holding the unpacked value.
+ * It is expected (but not checked) that this has the same data type
+ * as all struct packed_field_u16 definitions.
+ * @fields: Array of packed_field_u16 field definitions. They must not overlap.
+ * @num_fields: Length of @fields array.
+ * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
+ * QUIRK_MSB_ON_THE_RIGHT.
+ *
+ * Use the pack_fields() macro instead of calling this directly.
+ */
+void pack_fields_u16(void *pbuf, size_t pbuflen, const void *ustruct,
+ const struct packed_field_u16 *fields, size_t num_fields,
+ u8 quirks)
+{
+ __pack_fields(pbuf, pbuflen, ustruct, fields, num_fields, quirks);
+}
+EXPORT_SYMBOL(pack_fields_u16);
+
+/**
+ * unpack_fields_u8 - Unpack array of fields
+ *
+ * @pbuf: Pointer to a buffer holding the packed value.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
+ * @ustruct: Pointer to CPU-readable structure holding the unpacked value.
+ * It is expected (but not checked) that this has the same data type
+ * as all struct packed_field_u8 definitions.
+ * @fields: Array of packed_field_u8 field definitions. They must not overlap.
+ * @num_fields: Length of @fields array.
+ * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
+ * QUIRK_MSB_ON_THE_RIGHT.
+ *
+ * Use the unpack_fields() macro instead of calling this directly.
+ */
+void unpack_fields_u8(const void *pbuf, size_t pbuflen, void *ustruct,
+ const struct packed_field_u8 *fields, size_t num_fields,
+ u8 quirks)
+{
+ __unpack_fields(pbuf, pbuflen, ustruct, fields, num_fields, quirks);
+}
+EXPORT_SYMBOL(unpack_fields_u8);
+
+/**
+ * unpack_fields_u16 - Unpack array of fields
+ *
+ * @pbuf: Pointer to a buffer holding the packed value.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
+ * @ustruct: Pointer to CPU-readable structure holding the unpacked value.
+ * It is expected (but not checked) that this has the same data type
+ * as all struct packed_field_u16 definitions.
+ * @fields: Array of packed_field_u16 field definitions. They must not overlap.
+ * @num_fields: Length of @fields array.
+ * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
+ * QUIRK_MSB_ON_THE_RIGHT.
+ *
+ * Use the unpack_fields() macro instead of calling this directly.
+ */
+void unpack_fields_u16(const void *pbuf, size_t pbuflen, void *ustruct,
+ const struct packed_field_u16 *fields, size_t num_fields,
+ u8 quirks)
+{
+ __unpack_fields(pbuf, pbuflen, ustruct, fields, num_fields, quirks);
+}
+EXPORT_SYMBOL(unpack_fields_u16);
+
MODULE_DESCRIPTION("Generic bitfield packing and unpacking");
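
A minimal usage sketch of the scalar pack()/unpack() API documented above; the buffer size, bit positions and quirk choice are illustrative assumptions, and the pack_fields() form is exercised by the test file below:

static int example_pack_roundtrip(void)
{
	u8 buf[8] = {};
	u64 out, val = 0x1234;
	int err;

	/* Store val in bits 47..32 of the logical 64-bit view of buf. */
	err = pack(buf, val, 47, 32, sizeof(buf), QUIRK_LITTLE_ENDIAN);
	if (err)
		return err;

	err = unpack(buf, &out, 47, 32, sizeof(buf), QUIRK_LITTLE_ENDIAN);
	if (err)
		return err;

	return out == val ? 0 : -EINVAL;
}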
diff --git a/lib/packing_test.c b/lib/packing_test.c
index b38ea43c03fd..ce3b83d33b04 100644
--- a/lib/packing_test.c
+++ b/lib/packing_test.c
@@ -396,9 +396,70 @@ static void packing_test_unpack(struct kunit *test)
KUNIT_EXPECT_EQ(test, uval, params->uval);
}
+#define PACKED_BUF_SIZE 8
+
+typedef struct __packed { u8 buf[PACKED_BUF_SIZE]; } packed_buf_t;
+
+struct test_data {
+ u32 field3;
+ u16 field2;
+ u16 field4;
+ u16 field6;
+ u8 field1;
+ u8 field5;
+};
+
+static const struct packed_field_u8 test_fields[] = {
+ PACKED_FIELD(63, 61, struct test_data, field1),
+ PACKED_FIELD(60, 52, struct test_data, field2),
+ PACKED_FIELD(51, 28, struct test_data, field3),
+ PACKED_FIELD(27, 14, struct test_data, field4),
+ PACKED_FIELD(13, 9, struct test_data, field5),
+ PACKED_FIELD(8, 0, struct test_data, field6),
+};
+
+static void packing_test_pack_fields(struct kunit *test)
+{
+ const struct test_data data = {
+ .field1 = 0x2,
+ .field2 = 0x100,
+ .field3 = 0xF00050,
+ .field4 = 0x7D3,
+ .field5 = 0x9,
+ .field6 = 0x10B,
+ };
+ packed_buf_t expect = {
+ .buf = { 0x50, 0x0F, 0x00, 0x05, 0x01, 0xF4, 0xD3, 0x0B },
+ };
+ packed_buf_t buf = {};
+
+ pack_fields(&buf, sizeof(buf), &data, test_fields, 0);
+
+ KUNIT_EXPECT_MEMEQ(test, &expect, &buf, sizeof(buf));
+}
+
+static void packing_test_unpack_fields(struct kunit *test)
+{
+ const packed_buf_t buf = {
+ .buf = { 0x17, 0x28, 0x10, 0x19, 0x3D, 0xA9, 0x07, 0x9C },
+ };
+ struct test_data data = {};
+
+ unpack_fields(&buf, sizeof(buf), &data, test_fields, 0);
+
+ KUNIT_EXPECT_EQ(test, 0, data.field1);
+ KUNIT_EXPECT_EQ(test, 0x172, data.field2);
+ KUNIT_EXPECT_EQ(test, 0x810193, data.field3);
+ KUNIT_EXPECT_EQ(test, 0x36A4, data.field4);
+ KUNIT_EXPECT_EQ(test, 0x3, data.field5);
+ KUNIT_EXPECT_EQ(test, 0x19C, data.field6);
+}
+
static struct kunit_case packing_test_cases[] = {
KUNIT_CASE_PARAM(packing_test_pack, packing_gen_params),
KUNIT_CASE_PARAM(packing_test_unpack, packing_gen_params),
+ KUNIT_CASE(packing_test_pack_fields),
+ KUNIT_CASE(packing_test_unpack_fields),
{},
};
diff --git a/lib/parser.c b/lib/parser.c
index f4eafb9d74e6..73e8f8e5be73 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -275,8 +275,9 @@ EXPORT_SYMBOL(match_hex);
*
* Description: Parse the string @str to check if matches wildcard
* pattern @pattern. The pattern may contain two types of wildcards:
- * '*' - matches zero or more characters
- * '?' - matches one character
+ *
+ * * '*' - matches zero or more characters
+ * * '?' - matches one character
*
* Return: If the @str matches the @pattern, return true, else return false.
*/
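
Illustrative calls (a sketch, not from the patch) showing the documented semantics, where '*' matches any run of characters including none and '?' matches exactly one:

static bool example_wildcards(void)
{
	return match_wildcard("eth*", "eth0") &&		/* true */
	       !match_wildcard("eth?", "eth10") &&		/* false: '?' is exactly one char */
	       match_wildcard("*-debug", "foo-debug");	/* true */
}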
diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c
index 6e1581b9a616..6264e2013f25 100644
--- a/lib/pldmfw/pldmfw.c
+++ b/lib/pldmfw/pldmfw.c
@@ -481,9 +481,17 @@ static int pldm_parse_components(struct pldmfw_priv *data)
component->component_data = data->fw->data + offset;
component->component_size = size;
+ if (data->context->mode == PLDMFW_UPDATE_MODE_SINGLE_COMPONENT &&
+ data->context->component_identifier != component->identifier)
+ continue;
+
list_add_tail(&component->entry, &data->components);
}
+ if (data->context->mode == PLDMFW_UPDATE_MODE_SINGLE_COMPONENT &&
+ list_empty(&data->components))
+ return -ENOENT;
+
header_crc_ptr = data->fw->data + data->offset;
err = pldm_move_fw_offset(data, sizeof(data->header_crc));
diff --git a/lib/rcuref.c b/lib/rcuref.c
index 97f300eca927..5bd726b71e39 100644
--- a/lib/rcuref.c
+++ b/lib/rcuref.c
@@ -220,6 +220,7 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
/**
* rcuref_put_slowpath - Slowpath of __rcuref_put()
* @ref: Pointer to the reference count
+ * @cnt: The resulting value of the fastpath decrement
*
* Invoked when the reference count is outside of the valid zone.
*
@@ -233,10 +234,8 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
* with a concurrent get()/put() pair. Caller is not allowed to
* deconstruct the protected object.
*/
-bool rcuref_put_slowpath(rcuref_t *ref)
+bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt)
{
- unsigned int cnt = atomic_read(&ref->refcnt);
-
/* Did this drop the last reference? */
if (likely(cnt == RCUREF_NOREF)) {
/*
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6c902639728b..3e555d012ed6 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -584,10 +584,6 @@ static struct bucket_table *rhashtable_insert_one(
*/
rht_assign_locked(bkt, obj);
- atomic_inc(&ht->nelems);
- if (rht_grow_above_75(ht, tbl))
- schedule_work(&ht->run_work);
-
return NULL;
}
@@ -615,15 +611,23 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
data = ERR_PTR(-EAGAIN);
} else {
+ bool inserted;
+
flags = rht_lock(tbl, bkt);
data = rhashtable_lookup_one(ht, bkt, tbl,
hash, key, obj);
new_tbl = rhashtable_insert_one(ht, bkt, tbl,
hash, obj, data);
+ inserted = data && !new_tbl;
+ if (inserted)
+ atomic_inc(&ht->nelems);
if (PTR_ERR(new_tbl) != -EEXIST)
data = ERR_CAST(new_tbl);
rht_unlock(tbl, bkt, flags);
+
+ if (inserted && rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
}
} while (!IS_ERR_OR_NULL(new_tbl));
@@ -665,7 +669,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
* structure outside the hash table.
*
* This function may be called from any process context, including
- * non-preemptable context, but cannot be called from softirq or
+ * non-preemptible context, but cannot be called from softirq or
* hardirq context.
*
* You must call rhashtable_walk_exit after this function returns.
diff --git a/lib/sort.c b/lib/sort.c
index 048b7a6ef967..8e73dc55476b 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -200,6 +200,13 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
* copy (e.g. fix up pointers or auxiliary data), but the built-in swap
* avoids a slow retpoline and so is significantly faster.
*
+ * The comparison function must adhere to specific mathematical
+ * properties to ensure correct and stable sorting:
+ * - Antisymmetry: cmp_func(a, b) must return the opposite sign of
+ * cmp_func(b, a).
+ * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then
+ * cmp_func(a, c) <= 0.
+ *
* Sorting time is O(n log n) both on average and worst-case. While
* quicksort is slightly faster on average, it suffers from exploitable
* O(n*n) worst-case behavior and extra memory requirements that make
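
As with the list_sort() note earlier, a comparison callback illustrating the required properties; this is a sketch with an assumed element type, not part of the patch:

static int cmp_u32(const void *a, const void *b)
{
	u32 x = *(const u32 *)a;
	u32 y = *(const u32 *)b;

	/* Avoid "return x - y": the subtraction can wrap around. */
	return x < y ? -1 : x > y;
}

/* Usage: sort(array, nents, sizeof(u32), cmp_u32, NULL); */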
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 65a75d58ed9e..c83829ef557f 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -100,34 +100,6 @@ __check_eq_pbl(const char *srcfile, unsigned int line,
return true;
}
-static bool __init
-__check_eq_u32_array(const char *srcfile, unsigned int line,
- const u32 *exp_arr, unsigned int exp_len,
- const u32 *arr, unsigned int len) __used;
-static bool __init
-__check_eq_u32_array(const char *srcfile, unsigned int line,
- const u32 *exp_arr, unsigned int exp_len,
- const u32 *arr, unsigned int len)
-{
- if (exp_len != len) {
- pr_warn("[%s:%u] array length differ: expected %u, got %u\n",
- srcfile, line,
- exp_len, len);
- return false;
- }
-
- if (memcmp(exp_arr, arr, len*sizeof(*arr))) {
- pr_warn("[%s:%u] array contents differ\n", srcfile, line);
- print_hex_dump(KERN_WARNING, " exp: ", DUMP_PREFIX_OFFSET,
- 32, 4, exp_arr, exp_len*sizeof(*exp_arr), false);
- print_hex_dump(KERN_WARNING, " got: ", DUMP_PREFIX_OFFSET,
- 32, 4, arr, len*sizeof(*arr), false);
- return false;
- }
-
- return true;
-}
-
static bool __init __check_eq_clump8(const char *srcfile, unsigned int line,
const unsigned int offset,
const unsigned int size,
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 2eed1ad958e9..af0041df2b72 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -478,7 +478,7 @@ static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
* to overflow the field size of the native instruction, triggering
* a branch conversion mechanism in some JITs.
*/
-static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
+static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm, bool alu32)
{
struct bpf_insn *insns;
int len = S16_MAX + 5;
@@ -501,7 +501,7 @@ static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
};
int op = ops[(i >> 1) % ARRAY_SIZE(ops)];
- if (i & 1)
+ if ((i & 1) || alu32)
insns[i++] = BPF_ALU32_REG(op, R0, R1);
else
insns[i++] = BPF_ALU64_REG(op, R0, R1);
@@ -516,27 +516,47 @@ static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
}
/* Branch taken by runtime decision */
+static int bpf_fill_max_jmp_taken_32(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 1, true);
+}
+
static int bpf_fill_max_jmp_taken(struct bpf_test *self)
{
- return __bpf_fill_max_jmp(self, BPF_JEQ, 1);
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 1, false);
}
/* Branch not taken by runtime decision */
+static int bpf_fill_max_jmp_not_taken_32(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 0, true);
+}
+
static int bpf_fill_max_jmp_not_taken(struct bpf_test *self)
{
- return __bpf_fill_max_jmp(self, BPF_JEQ, 0);
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 0, false);
}
/* Branch always taken, known at JIT time */
+static int bpf_fill_max_jmp_always_taken_32(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JGE, 0, true);
+}
+
static int bpf_fill_max_jmp_always_taken(struct bpf_test *self)
{
- return __bpf_fill_max_jmp(self, BPF_JGE, 0);
+ return __bpf_fill_max_jmp(self, BPF_JGE, 0, false);
}
/* Branch never taken, known at JIT time */
+static int bpf_fill_max_jmp_never_taken_32(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JLT, 0, true);
+}
+
static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
{
- return __bpf_fill_max_jmp(self, BPF_JLT, 0);
+ return __bpf_fill_max_jmp(self, BPF_JLT, 0, false);
}
/* ALU result computation used in tests */
@@ -14234,6 +14254,38 @@ static struct bpf_test tests[] = {
},
/* Conditional branch conversions */
{
+ "Long conditional jump: taken at runtime (32 bits)",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_max_jmp_taken_32,
+ },
+ {
+ "Long conditional jump: not taken at runtime (32 bits)",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 2 } },
+ .fill_helper = bpf_fill_max_jmp_not_taken_32,
+ },
+ {
+ "Long conditional jump: always taken, known at JIT time (32 bits)",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_max_jmp_always_taken_32,
+ },
+ {
+ "Long conditional jump: never taken, known at JIT time (32 bits)",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 2 } },
+ .fill_helper = bpf_fill_max_jmp_never_taken_32,
+ },
+ {
"Long conditional jump: taken at runtime",
{ },
INTERNAL | FLAG_NO_DATA,
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 704cb1093ae8..13e2a10d7554 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -1563,6 +1563,30 @@ static noinline void __init check_root_expand(struct maple_tree *mt)
mas_unlock(&mas);
}
+static noinline void __init check_deficient_node(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 0);
+ int count;
+
+ mas_lock(&mas);
+ for (count = 0; count < 10; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+
+ for (count = 20; count < 39; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+
+ for (count = 10; count < 12; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+ mas_unlock(&mas);
+ mt_validate(mt);
+}
+
static noinline void __init check_gap_combining(struct maple_tree *mt)
{
struct maple_enode *mn1, *mn2;
@@ -3714,6 +3738,34 @@ static noinline void __init alloc_cyclic_testing(struct maple_tree *mt)
}
mtree_destroy(mt);
+
+ /*
+	 * An issue with the reverse search was discovered at
+	 * https://lore.kernel.org/all/20241216060600.287B4C4CED0@smtp.kernel.org/
+	 * Exhausting the allocation area and forcing the search to wrap requires
+	 * a mas_reset() in mas_alloc_cyclic().
+ */
+ next = 0;
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (int i = 0; i < 1023; i++) {
+ mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, i != location - 2);
+ MT_BUG_ON(mt, i != next - 3);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ }
+ mtree_erase(mt, 123);
+ MT_BUG_ON(mt, mtree_load(mt, 123) != NULL);
+ mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, 123 != location);
+ MT_BUG_ON(mt, 124 != next);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ mtree_erase(mt, 100);
+ mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, 100 != location);
+ MT_BUG_ON(mt, 101 != next);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ mtree_destroy(mt);
+
/* Overflow test */
next = ULONG_MAX - 1;
ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
@@ -3797,6 +3849,10 @@ static int __init maple_tree_seed(void)
#endif
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_deficient_node(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_store_null(&tree);
mtree_destroy(&tree);
diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c
index e6fbb798558b..a9c4a74d3898 100644
--- a/lib/test_min_heap.c
+++ b/lib/test_min_heap.c
@@ -32,7 +32,7 @@ static __init int pop_verify_heap(bool min_heap,
int last;
last = values[0];
- min_heap_pop(heap, funcs, NULL);
+ min_heap_pop_inline(heap, funcs, NULL);
while (heap->nr > 0) {
if (min_heap) {
if (last > values[0]) {
@@ -48,7 +48,7 @@ static __init int pop_verify_heap(bool min_heap,
}
}
last = values[0];
- min_heap_pop(heap, funcs, NULL);
+ min_heap_pop_inline(heap, funcs, NULL);
}
return err;
}
@@ -69,7 +69,7 @@ static __init int test_heapify_all(bool min_heap)
int i, err;
/* Test with known set of values. */
- min_heapify_all(&heap, &funcs, NULL);
+ min_heapify_all_inline(&heap, &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
@@ -78,7 +78,7 @@ static __init int test_heapify_all(bool min_heap)
for (i = 0; i < heap.nr; i++)
values[i] = get_random_u32();
- min_heapify_all(&heap, &funcs, NULL);
+ min_heapify_all_inline(&heap, &funcs, NULL);
err += pop_verify_heap(min_heap, &heap, &funcs);
return err;
@@ -102,14 +102,14 @@ static __init int test_heap_push(bool min_heap)
/* Test with known set of values copied from data. */
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_push(&heap, &data[i], &funcs, NULL);
+ min_heap_push_inline(&heap, &data[i], &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
/* Test with randomly generated values. */
while (heap.nr < heap.size) {
temp = get_random_u32();
- min_heap_push(&heap, &temp, &funcs, NULL);
+ min_heap_push_inline(&heap, &temp, &funcs, NULL);
}
err += pop_verify_heap(min_heap, &heap, &funcs);
@@ -135,22 +135,22 @@ static __init int test_heap_pop_push(bool min_heap)
/* Fill values with data to pop and replace. */
temp = min_heap ? 0x80000000 : 0x7FFFFFFF;
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_push(&heap, &temp, &funcs, NULL);
+ min_heap_push_inline(&heap, &temp, &funcs, NULL);
/* Test with known set of values copied from data. */
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_pop_push(&heap, &data[i], &funcs, NULL);
+ min_heap_pop_push_inline(&heap, &data[i], &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
heap.nr = 0;
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_push(&heap, &temp, &funcs, NULL);
+ min_heap_push_inline(&heap, &temp, &funcs, NULL);
/* Test with randomly generated values. */
for (i = 0; i < ARRAY_SIZE(data); i++) {
temp = get_random_u32();
- min_heap_pop_push(&heap, &temp, &funcs, NULL);
+ min_heap_pop_push_inline(&heap, &temp, &funcs, NULL);
}
err += pop_verify_heap(min_heap, &heap, &funcs);
@@ -163,7 +163,7 @@ static __init int test_heap_del(bool min_heap)
-3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
struct min_heap_test heap;
- min_heap_init(&heap, values, ARRAY_SIZE(values));
+ min_heap_init_inline(&heap, values, ARRAY_SIZE(values));
heap.nr = ARRAY_SIZE(values);
struct min_heap_callbacks funcs = {
.less = min_heap ? less_than : greater_than,
@@ -172,9 +172,9 @@ static __init int test_heap_del(bool min_heap)
int i, err;
/* Test with known set of values. */
- min_heapify_all(&heap, &funcs, NULL);
+ min_heapify_all_inline(&heap, &funcs, NULL);
for (i = 0; i < ARRAY_SIZE(values) / 2; i++)
- min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL);
+ min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
@@ -182,10 +182,10 @@ static __init int test_heap_del(bool min_heap)
heap.nr = ARRAY_SIZE(values);
for (i = 0; i < heap.nr; i++)
values[i] = get_random_u32();
- min_heapify_all(&heap, &funcs, NULL);
+ min_heapify_all_inline(&heap, &funcs, NULL);
for (i = 0; i < ARRAY_SIZE(values) / 2; i++)
- min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL);
+ min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL);
err += pop_verify_heap(min_heap, &heap, &funcs);
return err;
diff --git a/lib/test_objpool.c b/lib/test_objpool.c
index 5a3f6961a70f..8f688187fa87 100644
--- a/lib/test_objpool.c
+++ b/lib/test_objpool.c
@@ -190,8 +190,7 @@ static int ot_init_hrtimer(struct ot_item *item, unsigned long hrtimer)
return -ENOENT;
item->hrtcycle = ktime_set(0, hrtimer * 1000000UL);
- hrtimer_init(hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrt->function = ot_hrtimer_handler;
+ hrtimer_setup(hrt, ot_hrtimer_handler, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
return 0;
}
@@ -371,14 +370,10 @@ static int ot_start_sync(struct ot_test *test)
if (!cpu_online(cpu))
continue;
- work = kthread_create_on_node(ot_thread_worker, item,
- cpu_to_node(cpu), "ot_worker_%d", cpu);
- if (IS_ERR(work)) {
+ work = kthread_run_on_cpu(ot_thread_worker, item,
+ cpu, "ot_worker_%d");
+ if (IS_ERR(work))
pr_err("failed to create thread for cpu %d\n", cpu);
- } else {
- kthread_bind(work, cpu);
- wake_up_process(work);
- }
}
/* wait a while to make sure all threads waiting at start line */
@@ -562,14 +557,9 @@ static int ot_start_async(struct ot_test *test)
if (!cpu_online(cpu))
continue;
- work = kthread_create_on_node(ot_thread_worker, item,
- cpu_to_node(cpu), "ot_worker_%d", cpu);
- if (IS_ERR(work)) {
+ work = kthread_run_on_cpu(ot_thread_worker, item, cpu, "ot_worker_%d");
+ if (IS_ERR(work))
pr_err("failed to create thread for cpu %d\n", cpu);
- } else {
- kthread_bind(work, cpu);
- wake_up_process(work);
- }
}
/* wait a while to make sure all threads waiting at start line */
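The two hunks above collapse the create/bind/wake sequence into a single kthread_run_on_cpu() call. As a rough sketch of why the replacement is equivalent (an illustration of the assumed semantics, not the helper's actual implementation):

	/* Illustrative approximation of kthread_run_on_cpu(fn, data, cpu, namefmt). */
	static struct task_struct *run_on_cpu_sketch(int (*fn)(void *), void *data,
						     unsigned int cpu, const char *namefmt)
	{
		struct task_struct *t;

		/* Create on the CPU's memory node; the cpu number fills the "%d". */
		t = kthread_create_on_node(fn, data, cpu_to_node(cpu), namefmt, cpu);
		if (IS_ERR(t))
			return t;
		kthread_bind(t, cpu);		/* pin to the target CPU */
		wake_up_process(t);		/* start it immediately */
		return t;
	}

Note that the error path no longer needs an else branch: on failure the helper simply returns the ERR_PTR and there is nothing to unwind.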
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index b6696fa1d426..4249e0cc8aaf 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -71,7 +71,7 @@ static struct test_sysctl_data test_data = {
};
/* These are all under /proc/sys/debug/test_sysctl/ */
-static struct ctl_table test_table[] = {
+static const struct ctl_table test_table[] = {
{
.procname = "int_0001",
.data = &test_data.int_0001,
@@ -177,7 +177,7 @@ static int test_sysctl_setup_node_tests(void)
}
/* Used to test that unregister actually removes the directory */
-static struct ctl_table test_table_unregister[] = {
+static const struct ctl_table test_table_unregister[] = {
{
.procname = "unregister_error",
.data = &test_data.int_0001,
@@ -220,7 +220,7 @@ static int test_sysctl_run_register_mount_point(void)
return 0;
}
-static struct ctl_table test_table_empty[] = { };
+static const struct ctl_table test_table_empty[] = { };
static int test_sysctl_run_register_empty(void)
{
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 5d7b10e98610..8772e5edaa4f 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -15,7 +15,7 @@ static void test_ubsan_add_overflow(void)
{
volatile int val = INT_MAX;
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
val += 2;
}
@@ -24,7 +24,7 @@ static void test_ubsan_sub_overflow(void)
volatile int val = INT_MIN;
volatile int val2 = 2;
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
val -= val2;
}
@@ -32,7 +32,7 @@ static void test_ubsan_mul_overflow(void)
{
volatile int val = INT_MAX / 2;
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
val *= 3;
}
@@ -40,7 +40,7 @@ static void test_ubsan_negate_overflow(void)
{
volatile int val = INT_MIN;
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
val = -val;
}
@@ -53,6 +53,15 @@ static void test_ubsan_divrem_overflow(void)
val /= val2;
}
+static void test_ubsan_truncate_signed(void)
+{
+ volatile long val = LONG_MAX;
+ volatile int val2 = 0;
+
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
+ val2 = val;
+}
+
static void test_ubsan_shift_out_of_bounds(void)
{
volatile int neg = -1, wrap = 4;
@@ -127,6 +136,7 @@ static const test_ubsan_fp test_ubsan_array[] = {
test_ubsan_sub_overflow,
test_ubsan_mul_overflow,
test_ubsan_negate_overflow,
+ test_ubsan_truncate_signed,
test_ubsan_shift_out_of_bounds,
test_ubsan_out_of_bounds,
test_ubsan_load_invalid_value,
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 4ddf769861ff..f585949ff696 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -373,7 +373,7 @@ vm_map_ram_test(void)
if (!pages)
return -1;
- nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);
+ nr_allocated = alloc_pages_bulk(GFP_KERNEL, map_nr_pages, pages);
if (nr_allocated != map_nr_pages)
goto cleanup;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index d5c5cbba33ed..0e865bab4a10 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1418,7 +1418,7 @@ static noinline void check_pause(struct xarray *xa)
{
XA_STATE(xas, xa, 0);
void *entry;
- unsigned int order;
+ int order;
unsigned long index = 1;
unsigned int count = 0;
@@ -1448,6 +1448,42 @@ static noinline void check_pause(struct xarray *xa)
XA_BUG_ON(xa, count != order_limit);
xa_destroy(xa);
+
+ index = 0;
+ for (order = order_limit - 1; order >= 0; order--) {
+ XA_BUG_ON(xa, xa_store_order(xa, index, order,
+ xa_mk_index(index), GFP_KERNEL));
+ index += 1UL << order;
+ }
+
+ index = 0;
+ count = 0;
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(index));
+ index += 1UL << (order_limit - count - 1);
+ count++;
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ index = 0;
+ count = 0;
+ /* test unaligned index */
+ xas_set(&xas, 1 % (1UL << (order_limit - 1)));
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(index));
+ index += 1UL << (order_limit - count - 1);
+ count++;
+ xas_pause(&xas);
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ xa_destroy(xa);
+
}
static noinline void check_move_tiny(struct xarray *xa)
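The index arithmetic in the new check_pause() loops is easier to follow with a concrete value; order_limit is defined earlier in the file, so the value 5 below is an assumption used purely for illustration. An entry stored at order k occupies 2^k consecutive indices, so the descending-order stores tile the index space like this:

	/*
	 * Assuming order_limit == 5 for illustration:
	 *
	 *   order 4: indices  0..15  ->  xa_mk_index(0)
	 *   order 3: indices 16..23  ->  xa_mk_index(16)
	 *   order 2: indices 24..27  ->  xa_mk_index(24)
	 *   order 1: indices 28..29  ->  xa_mk_index(28)
	 *   order 0: index      30   ->  xa_mk_index(30)
	 *
	 * Iterating with xas_for_each() visits exactly one entry per order,
	 * which is why count must end up equal to order_limit and why the
	 * expected index advances by 1UL << (order_limit - count - 1) on each
	 * step, with or without the xas_pause() in the unaligned-index case.
	 */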
diff --git a/lib/tests/Makefile b/lib/tests/Makefile
index 8e4f42cb9c54..498915255860 100644
--- a/lib/tests/Makefile
+++ b/lib/tests/Makefile
@@ -1 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for tests of kernel library functions.
+
+# KUnit tests
+CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
+obj-$(CONFIG_BITS_TEST) += test_bits.o
+obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
+obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
+obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
+obj-$(CONFIG_CRC_KUNIT_TEST) += crc_kunit.o
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
+CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
+CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
+obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
+obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
+obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
+obj-$(CONFIG_TEST_IOV_ITER) += kunit_iov_iter.o
+obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
+obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
+obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
+obj-$(CONFIG_KFIFO_KUNIT_TEST) += kfifo_kunit.o
+obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
+obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
+obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
+CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
+obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
+obj-$(CONFIG_PRINTF_KUNIT_TEST) += printf_kunit.o
+obj-$(CONFIG_SCANF_KUNIT_TEST) += scanf_kunit.o
+obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
+obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
+obj-$(CONFIG_TEST_SORT) += test_sort.o
+CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
+obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
+obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
+obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
+obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
+obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
+
obj-$(CONFIG_TEST_RUNTIME_MODULE) += module/
diff --git a/lib/bitfield_kunit.c b/lib/tests/bitfield_kunit.c
index 5ccd86f61896..5ccd86f61896 100644
--- a/lib/bitfield_kunit.c
+++ b/lib/tests/bitfield_kunit.c
diff --git a/lib/checksum_kunit.c b/lib/tests/checksum_kunit.c
index be04aa42125c..be04aa42125c 100644
--- a/lib/checksum_kunit.c
+++ b/lib/tests/checksum_kunit.c
diff --git a/lib/cmdline_kunit.c b/lib/tests/cmdline_kunit.c
index c1602f797637..c1602f797637 100644
--- a/lib/cmdline_kunit.c
+++ b/lib/tests/cmdline_kunit.c
diff --git a/lib/cpumask_kunit.c b/lib/tests/cpumask_kunit.c
index 6b62a6bdd50e..6b62a6bdd50e 100644
--- a/lib/cpumask_kunit.c
+++ b/lib/tests/cpumask_kunit.c
diff --git a/lib/tests/crc_kunit.c b/lib/tests/crc_kunit.c
new file mode 100644
index 000000000000..585c48b65cef
--- /dev/null
+++ b/lib/tests/crc_kunit.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Unit tests and benchmarks for the CRC library functions
+ *
+ * Copyright 2024 Google LLC
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+#include <kunit/test.h>
+#include <linux/crc7.h>
+#include <linux/crc16.h>
+#include <linux/crc-t10dif.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/crc64.h>
+#include <linux/prandom.h>
+#include <linux/vmalloc.h>
+
+#define CRC_KUNIT_SEED 42
+#define CRC_KUNIT_MAX_LEN 16384
+#define CRC_KUNIT_NUM_TEST_ITERS 1000
+
+static struct rnd_state rng;
+static u8 *test_buffer;
+static size_t test_buflen;
+
+/**
+ * struct crc_variant - describes a CRC variant
+ * @bits: Number of bits in the CRC, 1 <= @bits <= 64.
+ * @le: true if it's a "little endian" CRC (reversed mapping between bits and
+ * polynomial coefficients in each byte), false if it's a "big endian" CRC
+ * (natural mapping between bits and polynomial coefficients in each byte)
+ * @poly: The generator polynomial with the highest-order term omitted.
+ * Bit-reversed if @le is true.
+ * @func: The function to compute a CRC. The type signature uses u64 so that it
+ * can fit any CRC up to CRC-64. The CRC is passed in, and is expected
+ * to be returned in, the least significant bits of the u64. The
+ * function is expected to *not* invert the CRC at the beginning and end.
+ * @combine_func: Optional function to combine two CRCs.
+ */
+struct crc_variant {
+ int bits;
+ bool le;
+ u64 poly;
+ u64 (*func)(u64 crc, const u8 *p, size_t len);
+ u64 (*combine_func)(u64 crc1, u64 crc2, size_t len2);
+};
+
+static u32 rand32(void)
+{
+ return prandom_u32_state(&rng);
+}
+
+static u64 rand64(void)
+{
+ u32 n = rand32();
+
+ return ((u64)n << 32) | rand32();
+}
+
+static u64 crc_mask(const struct crc_variant *v)
+{
+ return (u64)-1 >> (64 - v->bits);
+}
+
+/* Reference implementation of any CRC variant */
+static u64 crc_ref(const struct crc_variant *v,
+ u64 crc, const u8 *p, size_t len)
+{
+ size_t i, j;
+
+ for (i = 0; i < len; i++) {
+ for (j = 0; j < 8; j++) {
+ if (v->le) {
+ crc ^= (p[i] >> j) & 1;
+ crc = (crc >> 1) ^ ((crc & 1) ? v->poly : 0);
+ } else {
+ crc ^= (u64)((p[i] >> (7 - j)) & 1) <<
+ (v->bits - 1);
+ if (crc & (1ULL << (v->bits - 1)))
+ crc = ((crc << 1) ^ v->poly) &
+ crc_mask(v);
+ else
+ crc <<= 1;
+ }
+ }
+ }
+ return crc;
+}
+
+static int crc_suite_init(struct kunit_suite *suite)
+{
+ /*
+ * Allocate the test buffer using vmalloc() with a page-aligned length
+ * so that it is immediately followed by a guard page. This allows
+ * buffer overreads to be detected, even in assembly code.
+ */
+ test_buflen = round_up(CRC_KUNIT_MAX_LEN, PAGE_SIZE);
+ test_buffer = vmalloc(test_buflen);
+ if (!test_buffer)
+ return -ENOMEM;
+
+ prandom_seed_state(&rng, CRC_KUNIT_SEED);
+ prandom_bytes_state(&rng, test_buffer, test_buflen);
+ return 0;
+}
+
+static void crc_suite_exit(struct kunit_suite *suite)
+{
+ vfree(test_buffer);
+ test_buffer = NULL;
+}
+
+/* Generate a random initial CRC. */
+static u64 generate_random_initial_crc(const struct crc_variant *v)
+{
+ switch (rand32() % 4) {
+ case 0:
+ return 0;
+ case 1:
+ return crc_mask(v); /* All 1 bits */
+ default:
+ return rand64() & crc_mask(v);
+ }
+}
+
+/* Generate a random length, preferring small lengths. */
+static size_t generate_random_length(size_t max_length)
+{
+ size_t len;
+
+ switch (rand32() % 3) {
+ case 0:
+ len = rand32() % 128;
+ break;
+ case 1:
+ len = rand32() % 3072;
+ break;
+ default:
+ len = rand32();
+ break;
+ }
+ return len % (max_length + 1);
+}
+
+/* Test that v->func gives the same CRCs as a reference implementation. */
+static void crc_main_test(struct kunit *test, const struct crc_variant *v)
+{
+ size_t i;
+
+ for (i = 0; i < CRC_KUNIT_NUM_TEST_ITERS; i++) {
+ u64 init_crc, expected_crc, actual_crc;
+ size_t len, offset;
+ bool nosimd;
+
+ init_crc = generate_random_initial_crc(v);
+ len = generate_random_length(CRC_KUNIT_MAX_LEN);
+
+ /* Generate a random offset. */
+ if (rand32() % 2 == 0) {
+ /* Use a random alignment mod 64 */
+ offset = rand32() % 64;
+ offset = min(offset, CRC_KUNIT_MAX_LEN - len);
+ } else {
+ /* Go up to the guard page, to catch buffer overreads */
+ offset = test_buflen - len;
+ }
+
+ if (rand32() % 8 == 0)
+ /* Refresh the data occasionally. */
+ prandom_bytes_state(&rng, &test_buffer[offset], len);
+
+ nosimd = rand32() % 8 == 0;
+
+ /*
+ * Compute the CRC, and verify that it equals the CRC computed
+ * by a simple bit-at-a-time reference implementation.
+ */
+ expected_crc = crc_ref(v, init_crc, &test_buffer[offset], len);
+ if (nosimd)
+ local_irq_disable();
+ actual_crc = v->func(init_crc, &test_buffer[offset], len);
+ if (nosimd)
+ local_irq_enable();
+ KUNIT_EXPECT_EQ_MSG(test, expected_crc, actual_crc,
+ "Wrong result with len=%zu offset=%zu nosimd=%d",
+ len, offset, nosimd);
+ }
+}
+
+/* Test that CRC(concat(A, B)) == combine_CRCs(CRC(A), CRC(B), len(B)). */
+static void crc_combine_test(struct kunit *test, const struct crc_variant *v)
+{
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ u64 init_crc = generate_random_initial_crc(v);
+ size_t len1 = generate_random_length(CRC_KUNIT_MAX_LEN);
+ size_t len2 = generate_random_length(CRC_KUNIT_MAX_LEN - len1);
+ u64 crc1, crc2, expected_crc, actual_crc;
+
+ prandom_bytes_state(&rng, test_buffer, len1 + len2);
+ crc1 = v->func(init_crc, test_buffer, len1);
+ crc2 = v->func(0, &test_buffer[len1], len2);
+ expected_crc = v->func(init_crc, test_buffer, len1 + len2);
+ actual_crc = v->combine_func(crc1, crc2, len2);
+ KUNIT_EXPECT_EQ_MSG(test, expected_crc, actual_crc,
+ "CRC combination gave wrong result with len1=%zu len2=%zu\n",
+ len1, len2);
+ }
+}
+
+static void crc_test(struct kunit *test, const struct crc_variant *v)
+{
+ crc_main_test(test, v);
+ if (v->combine_func)
+ crc_combine_test(test, v);
+}
+
+static __always_inline void
+crc_benchmark(struct kunit *test,
+ u64 (*crc_func)(u64 crc, const u8 *p, size_t len))
+{
+ static const size_t lens_to_test[] = {
+ 1, 16, 64, 127, 128, 200, 256, 511, 512, 1024, 3173, 4096, 16384,
+ };
+ size_t len, i, j, num_iters;
+ /*
+ * The CRC value that this function computes in a series of calls to
+ * crc_func is never actually used, so use volatile to ensure that the
+ * computations are done as intended and don't all get optimized out.
+ */
+ volatile u64 crc = 0;
+ u64 t;
+
+ if (!IS_ENABLED(CONFIG_CRC_BENCHMARK))
+ kunit_skip(test, "not enabled");
+
+ /* warm-up */
+ for (i = 0; i < 10000000; i += CRC_KUNIT_MAX_LEN)
+ crc = crc_func(crc, test_buffer, CRC_KUNIT_MAX_LEN);
+
+ for (i = 0; i < ARRAY_SIZE(lens_to_test); i++) {
+ len = lens_to_test[i];
+ KUNIT_ASSERT_LE(test, len, CRC_KUNIT_MAX_LEN);
+ num_iters = 10000000 / (len + 128);
+ preempt_disable();
+ t = ktime_get_ns();
+ for (j = 0; j < num_iters; j++)
+ crc = crc_func(crc, test_buffer, len);
+ t = ktime_get_ns() - t;
+ preempt_enable();
+ kunit_info(test, "len=%zu: %llu MB/s\n",
+ len, div64_u64((u64)len * num_iters * 1000, t));
+ }
+}
+
+/* crc7_be */
+
+static u64 crc7_be_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ /*
+ * crc7_be() left-aligns the 7-bit CRC in a u8, whereas the test wants a
+ * right-aligned CRC (in a u64). Convert between the conventions.
+ */
+ return crc7_be(crc << 1, p, len) >> 1;
+}
+
+static const struct crc_variant crc_variant_crc7_be = {
+ .bits = 7,
+ .poly = 0x9,
+ .func = crc7_be_wrapper,
+};
+
+static void crc7_be_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc7_be);
+}
+
+static void crc7_be_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc7_be_wrapper);
+}
+
+/* crc16 */
+
+static u64 crc16_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc16(crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc16 = {
+ .bits = 16,
+ .le = true,
+ .poly = 0xa001,
+ .func = crc16_wrapper,
+};
+
+static void crc16_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc16);
+}
+
+static void crc16_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc16_wrapper);
+}
+
+/* crc_t10dif */
+
+static u64 crc_t10dif_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc_t10dif_update(crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc_t10dif = {
+ .bits = 16,
+ .le = false,
+ .poly = 0x8bb7,
+ .func = crc_t10dif_wrapper,
+};
+
+static void crc_t10dif_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc_t10dif);
+}
+
+static void crc_t10dif_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc_t10dif_wrapper);
+}
+
+/* crc32_le */
+
+static u64 crc32_le_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc32_le(crc, p, len);
+}
+
+static u64 crc32_le_combine_wrapper(u64 crc1, u64 crc2, size_t len2)
+{
+ return crc32_le_combine(crc1, crc2, len2);
+}
+
+static const struct crc_variant crc_variant_crc32_le = {
+ .bits = 32,
+ .le = true,
+ .poly = 0xedb88320,
+ .func = crc32_le_wrapper,
+ .combine_func = crc32_le_combine_wrapper,
+};
+
+static void crc32_le_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc32_le);
+}
+
+static void crc32_le_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc32_le_wrapper);
+}
+
+/* crc32_be */
+
+static u64 crc32_be_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc32_be(crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc32_be = {
+ .bits = 32,
+ .le = false,
+ .poly = 0x04c11db7,
+ .func = crc32_be_wrapper,
+};
+
+static void crc32_be_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc32_be);
+}
+
+static void crc32_be_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc32_be_wrapper);
+}
+
+/* crc32c */
+
+static u64 crc32c_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc32c(crc, p, len);
+}
+
+static u64 crc32c_combine_wrapper(u64 crc1, u64 crc2, size_t len2)
+{
+ return crc32c_combine(crc1, crc2, len2);
+}
+
+static const struct crc_variant crc_variant_crc32c = {
+ .bits = 32,
+ .le = true,
+ .poly = 0x82f63b78,
+ .func = crc32c_wrapper,
+ .combine_func = crc32c_combine_wrapper,
+};
+
+static void crc32c_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc32c);
+}
+
+static void crc32c_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc32c_wrapper);
+}
+
+/* crc64_be */
+
+static u64 crc64_be_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc64_be(crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc64_be = {
+ .bits = 64,
+ .le = false,
+ .poly = 0x42f0e1eba9ea3693,
+ .func = crc64_be_wrapper,
+};
+
+static void crc64_be_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc64_be);
+}
+
+static void crc64_be_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc64_be_wrapper);
+}
+
+/* crc64_nvme */
+
+static u64 crc64_nvme_wrapper(u64 crc, const u8 *p, size_t len)
+{
+	/* The inversions that crc64_nvme() performs have to be undone here. */
+ return ~crc64_nvme(~crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc64_nvme = {
+ .bits = 64,
+ .le = true,
+ .poly = 0x9a6c9329ac4bc9b5,
+ .func = crc64_nvme_wrapper,
+};
+
+static void crc64_nvme_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc64_nvme);
+}
+
+static void crc64_nvme_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc64_nvme_wrapper);
+}
+
+static struct kunit_case crc_test_cases[] = {
+ KUNIT_CASE(crc7_be_test),
+ KUNIT_CASE(crc7_be_benchmark),
+ KUNIT_CASE(crc16_test),
+ KUNIT_CASE(crc16_benchmark),
+ KUNIT_CASE(crc_t10dif_test),
+ KUNIT_CASE(crc_t10dif_benchmark),
+ KUNIT_CASE(crc32_le_test),
+ KUNIT_CASE(crc32_le_benchmark),
+ KUNIT_CASE(crc32_be_test),
+ KUNIT_CASE(crc32_be_benchmark),
+ KUNIT_CASE(crc32c_test),
+ KUNIT_CASE(crc32c_benchmark),
+ KUNIT_CASE(crc64_be_test),
+ KUNIT_CASE(crc64_be_benchmark),
+ KUNIT_CASE(crc64_nvme_test),
+ KUNIT_CASE(crc64_nvme_benchmark),
+ {},
+};
+
+static struct kunit_suite crc_test_suite = {
+ .name = "crc",
+ .test_cases = crc_test_cases,
+ .suite_init = crc_suite_init,
+ .suite_exit = crc_suite_exit,
+};
+kunit_test_suite(crc_test_suite);
+
+MODULE_DESCRIPTION("Unit tests and benchmarks for the CRC library functions");
+MODULE_LICENSE("GPL");
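One way to convince yourself that the crc_variant conventions above (reflected bit ordering for @le, bit-reversed @poly, no implicit inversion in @func) line up with the familiar CRC definitions is the standard CRC-32 check value: wrapping the same bit-at-a-time loop used by crc_ref() with the usual init/final inversion must turn the ASCII string "123456789" into 0xCBF43926. A standalone user-space sketch, independent of the patch:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Bit-at-a-time reflected CRC, mirroring the little-endian path of crc_ref(). */
	static uint32_t crc32_le_ref(uint32_t crc, const uint8_t *p, size_t len)
	{
		for (size_t i = 0; i < len; i++) {
			for (int j = 0; j < 8; j++) {
				crc ^= (p[i] >> j) & 1;
				crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
			}
		}
		return crc;
	}

	int main(void)
	{
		static const uint8_t msg[] = "123456789";

		/* Standard CRC-32 inverts the CRC before and after the raw loop. */
		assert((crc32_le_ref(0xffffffffu, msg, sizeof(msg) - 1) ^ 0xffffffffu)
		       == 0xcbf43926u);
		return 0;
	}

The kernel-side test never needs the inversion because crc_ref() and the wrappers agree on the raw, un-inverted convention.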
diff --git a/lib/fortify_kunit.c b/lib/tests/fortify_kunit.c
index ecb638d4cde1..29ffc62a71e3 100644
--- a/lib/fortify_kunit.c
+++ b/lib/tests/fortify_kunit.c
@@ -60,6 +60,7 @@ static int fortify_write_overflows;
static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
+static const char * const unchanging_12 = "this is 12!!";
static char array_unknown[] = "compiler thinks I might change";
void fortify_add_kunit_error(int write)
@@ -83,12 +84,28 @@ void fortify_add_kunit_error(int write)
static void fortify_test_known_sizes(struct kunit *test)
{
+ char stack[80] = "Test!";
+
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(stack)));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(stack), 5);
+
+ KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen("88888888")));
KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
+
+ KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen(array_of_10)));
KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
+
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(ptr_of_11)));
KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
+ KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen(unchanging_12)));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(unchanging_12), 12);
+
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(array_unknown)));
KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
+
/* Externally defined and dynamically sized string pointer: */
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(test->name)));
KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
}
@@ -394,8 +411,6 @@ struct fortify_padding {
char buf[32];
unsigned long bytes_after;
};
-/* Force compiler into not being able to resolve size at compile-time. */
-static volatile int unconst;
static void fortify_test_strlen(struct kunit *test)
{
@@ -520,57 +535,56 @@ static void fortify_test_strncpy(struct kunit *test)
{
struct fortify_padding pad = { };
char src[] = "Copy me fully into a small buffer and I will overflow!";
+ size_t sizeof_buf = sizeof(pad.buf);
+
+ OPTIMIZER_HIDE_VAR(sizeof_buf);
/* Destination is %NUL-filled to start with. */
KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 3], '\0');
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* Legitimate strncpy() 1 less than of max size. */
- KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
- sizeof(pad.buf) + unconst - 1)
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf - 1)
== pad.buf);
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
/* Only last byte should be %NUL */
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* Legitimate (though unterminated) max-size strncpy. */
- KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
- sizeof(pad.buf) + unconst)
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf)
== pad.buf);
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
/* No trailing %NUL -- thanks strncpy API. */
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* But we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* Now verify that FORTIFY is working... */
- KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
- sizeof(pad.buf) + unconst + 1)
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf + 1)
== pad.buf);
/* Should catch the overflow. */
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* And we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* And further... */
- KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
- sizeof(pad.buf) + unconst + 2)
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf + 2)
== pad.buf);
/* Should catch the overflow. */
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* And we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
@@ -579,55 +593,56 @@ static void fortify_test_strscpy(struct kunit *test)
{
struct fortify_padding pad = { };
char src[] = "Copy me fully into a small buffer and I will overflow!";
+ size_t sizeof_buf = sizeof(pad.buf);
+ size_t sizeof_src = sizeof(src);
+
+ OPTIMIZER_HIDE_VAR(sizeof_buf);
+ OPTIMIZER_HIDE_VAR(sizeof_src);
/* Destination is %NUL-filled to start with. */
KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 3], '\0');
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* Legitimate strscpy() 1 less than of max size. */
- KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
- sizeof(pad.buf) + unconst - 1),
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf - 1),
-E2BIG);
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
/* Keeping space for %NUL, last two bytes should be %NUL */
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* Legitimate max-size strscpy. */
- KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
- sizeof(pad.buf) + unconst),
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf),
-E2BIG);
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
/* A trailing %NUL will exist. */
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* Now verify that FORTIFY is working... */
- KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
- sizeof(pad.buf) + unconst + 1),
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf + 1),
-E2BIG);
/* Should catch the overflow. */
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* And we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* And much further... */
- KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
- sizeof(src) * 2 + unconst),
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_src * 2),
-E2BIG);
/* Should catch the overflow. */
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* And we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
@@ -767,7 +782,9 @@ static void fortify_test_strlcat(struct kunit *test)
struct fortify_padding pad = { };
char src[sizeof(pad.buf)] = { };
int i, partial;
- int len = sizeof(pad.buf) + unconst;
+ int len = sizeof(pad.buf);
+
+ OPTIMIZER_HIDE_VAR(len);
/* Fill 15 bytes with valid characters. */
partial = sizeof(src) / 2 - 1;
@@ -857,28 +874,32 @@ struct fortify_zero_sized {
#define __fortify_test(memfunc) \
static void fortify_test_##memfunc(struct kunit *test) \
{ \
- struct fortify_zero_sized zero = { }; \
+ struct fortify_zero_sized empty = { }; \
struct fortify_padding pad = { }; \
char srcA[sizeof(pad.buf) + 2]; \
char srcB[sizeof(pad.buf) + 2]; \
- size_t len = sizeof(pad.buf) + unconst; \
+ size_t len = sizeof(pad.buf); \
+ size_t zero = 0; \
+ \
+ OPTIMIZER_HIDE_VAR(len); \
+ OPTIMIZER_HIDE_VAR(zero); \
\
memset(srcA, 'A', sizeof(srcA)); \
KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \
memset(srcB, 'B', sizeof(srcB)); \
KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \
\
- memfunc(pad.buf, srcA, 0 + unconst); \
+ memfunc(pad.buf, srcA, zero); \
KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
- memfunc(pad.buf + 1, srcB, 1 + unconst); \
+ memfunc(pad.buf + 1, srcB, zero + 1); \
KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
- memfunc(pad.buf, srcA, 1 + unconst); \
+ memfunc(pad.buf, srcA, zero + 1); \
KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \
KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
@@ -904,10 +925,10 @@ static void fortify_test_##memfunc(struct kunit *test) \
/* Reset error counter. */ \
fortify_write_overflows = 0; \
/* Copy nothing into nothing: no errors. */ \
- memfunc(zero.buf, srcB, 0 + unconst); \
+ memfunc(empty.buf, srcB, zero); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
- memfunc(zero.buf, srcB, 1 + unconst); \
+ memfunc(empty.buf, srcB, zero + 1); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
}
@@ -919,7 +940,9 @@ static void fortify_test_memscan(struct kunit *test)
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + strlen("Where oh where is ");
char needle = 'm';
- size_t len = sizeof(haystack) + unconst;
+ size_t len = sizeof(haystack);
+
+ OPTIMIZER_HIDE_VAR(len);
KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
mem);
@@ -938,7 +961,9 @@ static void fortify_test_memchr(struct kunit *test)
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + strlen("Where oh where is ");
char needle = 'm';
- size_t len = sizeof(haystack) + unconst;
+ size_t len = sizeof(haystack);
+
+ OPTIMIZER_HIDE_VAR(len);
KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
mem);
@@ -957,7 +982,9 @@ static void fortify_test_memchr_inv(struct kunit *test)
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + 1;
char needle = 'W';
- size_t len = sizeof(haystack) + unconst;
+ size_t len = sizeof(haystack);
+
+ OPTIMIZER_HIDE_VAR(len);
/* Normal search is okay. */
KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
@@ -976,8 +1003,11 @@ static void fortify_test_memcmp(struct kunit *test)
{
char one[] = "My mind is going ...";
char two[] = "My mind is going ... I can feel it.";
- size_t one_len = sizeof(one) + unconst - 1;
- size_t two_len = sizeof(two) + unconst - 1;
+ size_t one_len = sizeof(one) - 1;
+ size_t two_len = sizeof(two) - 1;
+
+ OPTIMIZER_HIDE_VAR(one_len);
+ OPTIMIZER_HIDE_VAR(two_len);
/* We match the first string (ignoring the %NUL). */
KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
@@ -998,7 +1028,9 @@ static void fortify_test_kmemdup(struct kunit *test)
{
char src[] = "I got Doom running on it!";
char *copy;
- size_t len = sizeof(src) + unconst;
+ size_t len = sizeof(src);
+
+ OPTIMIZER_HIDE_VAR(len);
/* Copy is within bounds. */
copy = kmemdup(src, len, GFP_KERNEL);
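Throughout the conversion above, the per-function OPTIMIZER_HIDE_VAR() calls take over the job the removed global `unconst` used to do: they make a local copy of the size opaque to the optimizer so the fortified string/memory helpers keep their run-time bounds checks instead of folding them (or warning about them) at compile time. A minimal sketch of the pattern, assuming only that OPTIMIZER_HIDE_VAR() acts as an optimization barrier on the variable:

	static void fortify_pattern_sketch(void)
	{
		char buf[32];
		size_t len = sizeof(buf);	/* a compile-time constant here... */

		OPTIMIZER_HIDE_VAR(len);	/* ...but opaque from this point on */

		/*
		 * FORTIFY_SOURCE can no longer prove len <= sizeof(buf), so the
		 * comparison against __builtin_object_size(buf, ...) is emitted
		 * as a run-time check -- which is what these tests exercise.
		 */
		memset(buf, 0, len);
	}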
diff --git a/lib/hashtable_test.c b/lib/tests/hashtable_test.c
index 3521de6bad15..3521de6bad15 100644
--- a/lib/hashtable_test.c
+++ b/lib/tests/hashtable_test.c
diff --git a/lib/is_signed_type_kunit.c b/lib/tests/is_signed_type_kunit.c
index 88adbe813f3a..88adbe813f3a 100644
--- a/lib/is_signed_type_kunit.c
+++ b/lib/tests/is_signed_type_kunit.c
diff --git a/lib/tests/kfifo_kunit.c b/lib/tests/kfifo_kunit.c
new file mode 100644
index 000000000000..a85eedc3195a
--- /dev/null
+++ b/lib/tests/kfifo_kunit.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the generic kernel FIFO implementation.
+ *
+ * Copyright (C) 2024 Diego Vieira <diego.daniel.professional@gmail.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/kfifo.h>
+
+#define KFIFO_SIZE 32
+#define N_ELEMENTS 5
+
+static void kfifo_test_reset_should_clear_the_fifo(struct kunit *test)
+{
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ kfifo_put(&my_fifo, 1);
+ kfifo_put(&my_fifo, 2);
+ kfifo_put(&my_fifo, 3);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+
+ kfifo_reset(&my_fifo);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+ KUNIT_EXPECT_TRUE(test, kfifo_is_empty(&my_fifo));
+}
+
+static void kfifo_test_define_should_define_an_empty_fifo(struct kunit *test)
+{
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ KUNIT_EXPECT_TRUE(test, kfifo_initialized(&my_fifo));
+ KUNIT_EXPECT_TRUE(test, kfifo_is_empty(&my_fifo));
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+}
+
+static void kfifo_test_len_should_ret_n_of_stored_elements(struct kunit *test)
+{
+ u8 buffer1[N_ELEMENTS];
+
+ for (int i = 0; i < N_ELEMENTS; i++)
+ buffer1[i] = i + 1;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+
+ kfifo_in(&my_fifo, buffer1, N_ELEMENTS);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), N_ELEMENTS);
+
+ kfifo_in(&my_fifo, buffer1, N_ELEMENTS);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), N_ELEMENTS * 2);
+
+ kfifo_reset(&my_fifo);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+}
+
+static void kfifo_test_put_should_insert_and_get_should_pop(struct kunit *test)
+{
+ u8 out_data = 0;
+ int processed_elements;
+ u8 elements[] = { 3, 5, 11 };
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ // If the fifo is empty, get returns 0
+ processed_elements = kfifo_get(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 0);
+ KUNIT_EXPECT_EQ(test, out_data, 0);
+
+ for (int i = 0; i < 3; i++)
+ kfifo_put(&my_fifo, elements[i]);
+
+ for (int i = 0; i < 3; i++) {
+ processed_elements = kfifo_get(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, elements[i]);
+ }
+}
+
+static void kfifo_test_in_should_insert_multiple_elements(struct kunit *test)
+{
+ u8 in_buffer[] = { 11, 25, 65 };
+ u8 out_data;
+ int processed_elements;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ kfifo_in(&my_fifo, in_buffer, 3);
+
+ for (int i = 0; i < 3; i++) {
+ processed_elements = kfifo_get(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, in_buffer[i]);
+ }
+}
+
+static void kfifo_test_out_should_pop_multiple_elements(struct kunit *test)
+{
+ u8 in_buffer[] = { 11, 25, 65 };
+ u8 out_buffer[3];
+ int copied_elements;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ for (int i = 0; i < 3; i++)
+ kfifo_put(&my_fifo, in_buffer[i]);
+
+ copied_elements = kfifo_out(&my_fifo, out_buffer, 3);
+ KUNIT_EXPECT_EQ(test, copied_elements, 3);
+
+ for (int i = 0; i < 3; i++)
+ KUNIT_EXPECT_EQ(test, out_buffer[i], in_buffer[i]);
+ KUNIT_EXPECT_TRUE(test, kfifo_is_empty(&my_fifo));
+}
+
+static void kfifo_test_dec_init_should_define_an_empty_fifo(struct kunit *test)
+{
+ DECLARE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ INIT_KFIFO(my_fifo);
+
+	// my_fifo is a struct with an in-place buffer
+ KUNIT_EXPECT_FALSE(test, __is_kfifo_ptr(&my_fifo));
+
+ KUNIT_EXPECT_TRUE(test, kfifo_initialized(&my_fifo));
+}
+
+static void kfifo_test_define_should_equal_declare_init(struct kunit *test)
+{
+ // declare a variable my_fifo of type struct kfifo of u8
+ DECLARE_KFIFO(my_fifo1, u8, KFIFO_SIZE);
+ // initialize the my_fifo variable
+ INIT_KFIFO(my_fifo1);
+
+ // DEFINE_KFIFO declares the variable with the initial value
+ // essentially the same as calling DECLARE_KFIFO and INIT_KFIFO
+ DEFINE_KFIFO(my_fifo2, u8, KFIFO_SIZE);
+
+ // my_fifo1 and my_fifo2 have the same size
+ KUNIT_EXPECT_EQ(test, sizeof(my_fifo1), sizeof(my_fifo2));
+ KUNIT_EXPECT_EQ(test, kfifo_initialized(&my_fifo1),
+ kfifo_initialized(&my_fifo2));
+ KUNIT_EXPECT_EQ(test, kfifo_is_empty(&my_fifo1),
+ kfifo_is_empty(&my_fifo2));
+}
+
+static void kfifo_test_alloc_should_initialize_a_ptr_fifo(struct kunit *test)
+{
+ int ret;
+ DECLARE_KFIFO_PTR(my_fifo, u8);
+
+ INIT_KFIFO(my_fifo);
+
+	// kfifo_initialized returns false, signaling that the buffer pointer is NULL
+ KUNIT_EXPECT_FALSE(test, kfifo_initialized(&my_fifo));
+
+ // kfifo_alloc allocates the buffer
+ ret = kfifo_alloc(&my_fifo, KFIFO_SIZE, GFP_KERNEL);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "Memory allocation should succeed");
+ KUNIT_EXPECT_TRUE(test, kfifo_initialized(&my_fifo));
+
+ // kfifo_free frees the buffer
+ kfifo_free(&my_fifo);
+}
+
+static void kfifo_test_peek_should_not_remove_elements(struct kunit *test)
+{
+ u8 out_data;
+ int processed_elements;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ // If the fifo is empty, peek returns 0
+ processed_elements = kfifo_peek(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 0);
+
+ kfifo_put(&my_fifo, 3);
+ kfifo_put(&my_fifo, 5);
+ kfifo_put(&my_fifo, 11);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+
+ processed_elements = kfifo_peek(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, 3);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+
+	// Using peek doesn't remove the element,
+	// so the read element and the fifo length
+	// remain the same
+ processed_elements = kfifo_peek(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, 3);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+}
+
+static struct kunit_case kfifo_test_cases[] = {
+ KUNIT_CASE(kfifo_test_reset_should_clear_the_fifo),
+ KUNIT_CASE(kfifo_test_define_should_define_an_empty_fifo),
+ KUNIT_CASE(kfifo_test_len_should_ret_n_of_stored_elements),
+ KUNIT_CASE(kfifo_test_put_should_insert_and_get_should_pop),
+ KUNIT_CASE(kfifo_test_in_should_insert_multiple_elements),
+ KUNIT_CASE(kfifo_test_out_should_pop_multiple_elements),
+ KUNIT_CASE(kfifo_test_dec_init_should_define_an_empty_fifo),
+ KUNIT_CASE(kfifo_test_define_should_equal_declare_init),
+	KUNIT_CASE(kfifo_test_alloc_should_initialize_a_ptr_fifo),
+ KUNIT_CASE(kfifo_test_peek_should_not_remove_elements),
+ {},
+};
+
+static struct kunit_suite kfifo_test_module = {
+ .name = "kfifo",
+ .test_cases = kfifo_test_cases,
+};
+
+kunit_test_suites(&kfifo_test_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Diego Vieira <diego.daniel.professional@gmail.com>");
+MODULE_DESCRIPTION("KUnit test for the kernel FIFO");
diff --git a/lib/kunit_iov_iter.c b/lib/tests/kunit_iov_iter.c
index 10a560feb66e..48342736d016 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/tests/kunit_iov_iter.c
@@ -57,15 +57,12 @@ static void *__init iov_kunit_create_buffer(struct kunit *test,
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
*ppages = pages;
- got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
+ got = alloc_pages_bulk(GFP_KERNEL, npages, pages);
if (got != npages) {
release_pages(pages, got);
KUNIT_ASSERT_EQ(test, got, npages);
}
- for (int i = 0; i < npages; i++)
- pages[i]->index = i;
-
buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
diff --git a/lib/list-test.c b/lib/tests/list-test.c
index 9135cdc1bb39..9135cdc1bb39 100644
--- a/lib/list-test.c
+++ b/lib/tests/list-test.c
diff --git a/lib/memcpy_kunit.c b/lib/tests/memcpy_kunit.c
index d36933554e46..d36933554e46 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/tests/memcpy_kunit.c
diff --git a/lib/overflow_kunit.c b/lib/tests/overflow_kunit.c
index 5222c6393f11..894691b4411a 100644
--- a/lib/overflow_kunit.c
+++ b/lib/tests/overflow_kunit.c
@@ -1185,22 +1185,40 @@ struct bar {
static void DEFINE_FLEX_test(struct kunit *test)
{
- /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
- DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
-#ifdef CONFIG_CC_HAS_COUNTED_BY
- int expected_raw_size = sizeof(struct foo);
-#else
- int expected_raw_size = sizeof(struct foo) + 2 * sizeof(s16);
-#endif
- /* Without annotation, it will always be on-stack size. */
DEFINE_RAW_FLEX(struct bar, two, array, 2);
DEFINE_FLEX(struct foo, eight, array, counter, 8);
DEFINE_FLEX(struct foo, empty, array, counter, 0);
+ /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
+ DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
+ int array_size_override = 0;
- KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), expected_raw_size);
+ KUNIT_EXPECT_EQ(test, sizeof(*two), sizeof(struct bar));
KUNIT_EXPECT_EQ(test, __struct_size(two), sizeof(struct bar) + 2 * sizeof(s16));
- KUNIT_EXPECT_EQ(test, __struct_size(eight), 24);
+ KUNIT_EXPECT_EQ(test, __member_size(two), sizeof(struct bar) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(two->array), 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(two->array), 2 * sizeof(s16));
+
+ KUNIT_EXPECT_EQ(test, sizeof(*eight), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(eight), sizeof(struct foo) + 8 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(eight), sizeof(struct foo) + 8 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(eight->array), 8 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(eight->array), 8 * sizeof(s16));
+
+ KUNIT_EXPECT_EQ(test, sizeof(*empty), sizeof(struct foo));
KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __member_size(empty), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(empty->array), 0);
+ KUNIT_EXPECT_EQ(test, __member_size(empty->array), 0);
+
+ /* If __counted_by is not being used, array size will have the on-stack size. */
+ if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY))
+ array_size_override = 2 * sizeof(s16);
+
+ KUNIT_EXPECT_EQ(test, sizeof(*two_but_zero), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), sizeof(struct foo) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(two_but_zero), sizeof(struct foo) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero->array), array_size_override);
+ KUNIT_EXPECT_EQ(test, __member_size(two_but_zero->array), array_size_override);
}
static struct kunit_case overflow_test_cases[] = {
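The reshuffled expectations are easier to read next to the flexible-array structs they refer to. struct foo and struct bar are defined earlier in the file; the shapes below are an assumption for illustration only (a __counted_by() annotated trailing array for foo, an unannotated one for bar):

	/* Assumed shapes -- the real definitions live earlier in overflow_kunit.c. */
	struct foo {
		int counter;
		s16 array[] __counted_by(counter);
	};

	struct bar {
		int counter;
		s16 array[];
	};

	/*
	 * DEFINE_FLEX(struct foo, eight, array, counter, 8) builds an on-stack
	 * object with storage for 8 trailing elements and counter preset to 8,
	 * so __struct_size(eight) reports sizeof(struct foo) + 8 * sizeof(s16)
	 * and, with compiler __counted_by support, __member_size(eight->array)
	 * reports 8 * sizeof(s16).  DEFINE_RAW_FLEX() leaves counter at zero,
	 * which is why two_but_zero->array reports zero usable elements when
	 * CONFIG_CC_HAS_COUNTED_BY is set, and the raw on-stack size otherwise.
	 */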
diff --git a/lib/test_printf.c b/lib/tests/printf_kunit.c
index 59dbe4f9a4cb..2c9f6170bacd 100644
--- a/lib/test_printf.c
+++ b/lib/tests/printf_kunit.c
@@ -3,9 +3,7 @@
* Test cases for printf facility.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
+#include <kunit/test.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/printk.h>
@@ -25,8 +23,6 @@
#include <linux/property.h>
-#include "../tools/testing/selftests/kselftest_module.h"
-
#define BUF_SIZE 256
#define PAD_SIZE 16
#define FILL_CHAR '$'
@@ -37,14 +33,14 @@
block \
__diag_pop();
-KSTM_MODULE_GLOBALS();
+static unsigned int total_tests;
-static char *test_buffer __initdata;
-static char *alloced_buffer __initdata;
+static char *test_buffer;
+static char *alloced_buffer;
-static int __printf(4, 0) __init
-do_test(int bufsize, const char *expect, int elen,
- const char *fmt, va_list ap)
+static void __printf(7, 0)
+do_test(struct kunit *kunittest, const char *file, const int line, int bufsize, const char *expect,
+ int elen, const char *fmt, va_list ap)
{
va_list aq;
int ret, written;
@@ -57,62 +53,70 @@ do_test(int bufsize, const char *expect, int elen,
va_end(aq);
if (ret != elen) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) returned %d, expected %d\n",
- bufsize, fmt, ret, elen);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) returned %d, expected %d\n",
+ file, line, bufsize, fmt, ret, elen);
+ return;
}
if (memchr_inv(alloced_buffer, FILL_CHAR, PAD_SIZE)) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote before buffer\n", bufsize, fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote before buffer\n",
+ file, line, bufsize, fmt);
+ return;
}
if (!bufsize) {
if (memchr_inv(test_buffer, FILL_CHAR, BUF_SIZE + PAD_SIZE)) {
- pr_warn("vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n",
- fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n",
+ file, line, fmt);
}
- return 0;
+ return;
}
written = min(bufsize-1, elen);
if (test_buffer[written]) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) did not nul-terminate buffer\n",
- bufsize, fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) did not nul-terminate buffer\n",
+ file, line, bufsize, fmt);
+ return;
}
if (memchr_inv(test_buffer + written + 1, FILL_CHAR, bufsize - (written + 1))) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n",
- bufsize, fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n",
+ file, line, bufsize, fmt);
+ return;
}
if (memchr_inv(test_buffer + bufsize, FILL_CHAR, BUF_SIZE + PAD_SIZE - bufsize)) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n", bufsize, fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n",
+ file, line, bufsize, fmt);
+ return;
}
if (memcmp(test_buffer, expect, written)) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n",
- bufsize, fmt, test_buffer, written, expect);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n",
+ file, line, bufsize, fmt, test_buffer, written, expect);
+ return;
}
- return 0;
}
-static void __printf(3, 4) __init
-__test(const char *expect, int elen, const char *fmt, ...)
+static void __printf(6, 7)
+__test(struct kunit *kunittest, const char *file, const int line, const char *expect, int elen,
+ const char *fmt, ...)
{
va_list ap;
int rand;
char *p;
if (elen >= BUF_SIZE) {
- pr_err("error in test suite: expected output length %d too long. Format was '%s'.\n",
- elen, fmt);
- failed_tests++;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: error in test suite: expected length (%d) >= BUF_SIZE (%d). fmt=\"%s\"\n",
+ file, line, elen, BUF_SIZE, fmt);
return;
}
@@ -124,19 +128,19 @@ __test(const char *expect, int elen, const char *fmt, ...)
* enough and 0), and then we also test that kvasprintf would
* be able to print it as expected.
*/
- failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap);
+ do_test(kunittest, file, line, BUF_SIZE, expect, elen, fmt, ap);
rand = get_random_u32_inclusive(1, elen + 1);
/* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
- failed_tests += do_test(rand, expect, elen, fmt, ap);
- failed_tests += do_test(0, expect, elen, fmt, ap);
+ do_test(kunittest, file, line, rand, expect, elen, fmt, ap);
+ do_test(kunittest, file, line, 0, expect, elen, fmt, ap);
p = kvasprintf(GFP_KERNEL, fmt, ap);
if (p) {
total_tests++;
if (memcmp(p, expect, elen+1)) {
- pr_warn("kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n",
- fmt, p, expect);
- failed_tests++;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n",
+ file, line, fmt, p, expect);
}
kfree(p);
}
@@ -144,10 +148,10 @@ __test(const char *expect, int elen, const char *fmt, ...)
}
#define test(expect, fmt, ...) \
- __test(expect, strlen(expect), fmt, ##__VA_ARGS__)
+ __test(kunittest, __FILE__, __LINE__, expect, strlen(expect), fmt, ##__VA_ARGS__)
-static void __init
-test_basic(void)
+static void
+test_basic(struct kunit *kunittest)
{
/* Work around annoying "warning: zero-length gnu_printf format string". */
char nul = '\0';
@@ -155,11 +159,11 @@ test_basic(void)
test("", &nul);
test("100%", "100%%");
test("xxx%yyy", "xxx%cyyy", '%');
- __test("xxx\0yyy", 7, "xxx%cyyy", '\0');
+ __test(kunittest, __FILE__, __LINE__, "xxx\0yyy", 7, "xxx%cyyy", '\0');
}
-static void __init
-test_number(void)
+static void
+test_number(struct kunit *kunittest)
{
test("0x1234abcd ", "%#-12x", 0x1234abcd);
test(" 0x1234abcd", "%#12x", 0x1234abcd);
@@ -180,8 +184,8 @@ test_number(void)
test("00|0|0|0|0", "%.2d|%.1d|%.0d|%.*d|%1.0d", 0, 0, 0, 0, 0, 0);
}
-static void __init
-test_string(void)
+static void
+test_string(struct kunit *kunittest)
{
test("", "%s%.0s", "", "123");
test("ABCD|abc|123", "%s|%.3s|%.*s", "ABCD", "abcdef", 3, "123456");
@@ -218,29 +222,6 @@ test_string(void)
#define ZEROS "00000000" /* hex 32 zero bits */
#define ONES "ffffffff" /* hex 32 one bits */
-static int __init
-plain_format(void)
-{
- char buf[PLAIN_BUF_SIZE];
- int nchars;
-
- nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
-
- if (nchars != PTR_WIDTH)
- return -1;
-
- if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
- pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
- PTR_VAL_NO_CRNG);
- return 0;
- }
-
- if (strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
- return -1;
-
- return 0;
-}
-
#else
#define PTR_WIDTH 8
@@ -250,92 +231,47 @@ plain_format(void)
#define ZEROS ""
#define ONES ""
-static int __init
-plain_format(void)
-{
- /* Format is implicitly tested for 32 bit machines by plain_hash() */
- return 0;
-}
-
#endif /* BITS_PER_LONG == 64 */
-static int __init
-plain_hash_to_buffer(const void *p, char *buf, size_t len)
+static void
+plain_hash_to_buffer(struct kunit *kunittest, const void *p, char *buf, size_t len)
{
- int nchars;
-
- nchars = snprintf(buf, len, "%p", p);
-
- if (nchars != PTR_WIDTH)
- return -1;
+ KUNIT_ASSERT_EQ(kunittest, snprintf(buf, len, "%p", p), PTR_WIDTH);
if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
- pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
- PTR_VAL_NO_CRNG);
- return 0;
+ kunit_skip(kunittest,
+ "crng possibly not yet initialized. plain 'p' buffer contains \"%s\"\n",
+ PTR_VAL_NO_CRNG);
}
-
- return 0;
}
-static int __init
-plain_hash(void)
+static void
+hash_pointer(struct kunit *kunittest)
{
- char buf[PLAIN_BUF_SIZE];
- int ret;
-
- ret = plain_hash_to_buffer(PTR, buf, PLAIN_BUF_SIZE);
- if (ret)
- return ret;
-
- if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
- return -1;
-
- return 0;
-}
+ if (no_hash_pointers)
+ kunit_skip(kunittest, "hash pointers disabled");
-/*
- * We can't use test() to test %p because we don't know what output to expect
- * after an address is hashed.
- */
-static void __init
-plain(void)
-{
- int err;
+ char buf[PLAIN_BUF_SIZE];
- if (no_hash_pointers) {
- pr_warn("skipping plain 'p' tests");
- skipped_tests += 2;
- return;
- }
+ plain_hash_to_buffer(kunittest, PTR, buf, PLAIN_BUF_SIZE);
- err = plain_hash();
- if (err) {
- pr_warn("plain 'p' does not appear to be hashed\n");
- failed_tests++;
- return;
- }
+ /*
+ * The hash of %p is unpredictable, therefore test() cannot be used.
+ *
+ * Instead verify that the first 32 bits are zeros on a 64-bit system
+ * and that the non-hashed value is not printed.
+ */
- err = plain_format();
- if (err) {
- pr_warn("hashing plain 'p' has unexpected format\n");
- failed_tests++;
- }
+ KUNIT_EXPECT_MEMEQ(kunittest, buf, ZEROS, strlen(ZEROS));
+ KUNIT_EXPECT_MEMNEQ(kunittest, buf, PTR_STR, PTR_WIDTH);
}
-static void __init
-test_hashed(const char *fmt, const void *p)
+static void
+test_hashed(struct kunit *kunittest, const char *fmt, const void *p)
{
char buf[PLAIN_BUF_SIZE];
- int ret;
- /*
- * No need to increase failed test counter since this is assumed
- * to be called after plain().
- */
- ret = plain_hash_to_buffer(p, buf, PLAIN_BUF_SIZE);
- if (ret)
- return;
+ plain_hash_to_buffer(kunittest, p, buf, PLAIN_BUF_SIZE);
test(buf, fmt, p);
}
@@ -343,8 +279,8 @@ test_hashed(const char *fmt, const void *p)
/*
* NULL pointers aren't hashed.
*/
-static void __init
-null_pointer(void)
+static void
+null_pointer(struct kunit *kunittest)
{
test(ZEROS "00000000", "%p", NULL);
test(ZEROS "00000000", "%px", NULL);
@@ -354,8 +290,8 @@ null_pointer(void)
/*
* Error pointers aren't hashed.
*/
-static void __init
-error_pointer(void)
+static void
+error_pointer(struct kunit *kunittest)
{
test(ONES "fffffff5", "%p", ERR_PTR(-11));
test(ONES "fffffff5", "%px", ERR_PTR(-11));
@@ -364,27 +300,27 @@ error_pointer(void)
#define PTR_INVALID ((void *)0x000000ab)
-static void __init
-invalid_pointer(void)
+static void
+invalid_pointer(struct kunit *kunittest)
{
- test_hashed("%p", PTR_INVALID);
+ test_hashed(kunittest, "%p", PTR_INVALID);
test(ZEROS "000000ab", "%px", PTR_INVALID);
test("(efault)", "%pE", PTR_INVALID);
}
-static void __init
-symbol_ptr(void)
+static void
+symbol_ptr(struct kunit *kunittest)
{
}
-static void __init
-kernel_ptr(void)
+static void
+kernel_ptr(struct kunit *kunittest)
{
/* We can't test this without access to kptr_restrict. */
}
-static void __init
-struct_resource(void)
+static void
+struct_resource(struct kunit *kunittest)
{
struct resource test_resource = {
.start = 0xc0ffee00,
@@ -432,8 +368,8 @@ struct_resource(void)
"%pR", &test_resource);
}
-static void __init
-struct_range(void)
+static void
+struct_range(struct kunit *kunittest)
{
struct range test_range = DEFINE_RANGE(0xc0ffee00ba5eba11,
0xc0ffee00ba5eba11);
@@ -448,18 +384,18 @@ struct_range(void)
"%pra", &test_range);
}
-static void __init
-addr(void)
+static void
+addr(struct kunit *kunittest)
{
}
-static void __init
-escaped_str(void)
+static void
+escaped_str(struct kunit *kunittest)
{
}
-static void __init
-hex_string(void)
+static void
+hex_string(struct kunit *kunittest)
{
const char buf[3] = {0xc0, 0xff, 0xee};
@@ -469,8 +405,8 @@ hex_string(void)
"%*ph|%*phC|%*phD|%*phN", 3, buf, 3, buf, 3, buf, 3, buf);
}
-static void __init
-mac(void)
+static void
+mac(struct kunit *kunittest)
{
const u8 addr[6] = {0x2d, 0x48, 0xd6, 0xfc, 0x7a, 0x05};
@@ -481,8 +417,8 @@ mac(void)
test("057afcd6482d", "%pmR", addr);
}
-static void __init
-ip4(void)
+static void
+ip4(struct kunit *kunittest)
{
struct sockaddr_in sa;
@@ -496,20 +432,13 @@ ip4(void)
test("001.002.003.004:12345|1.2.3.4:12345", "%piSp|%pISp", &sa, &sa);
}
-static void __init
-ip6(void)
+static void
+ip6(struct kunit *kunittest)
{
}
-static void __init
-ip(void)
-{
- ip4();
- ip6();
-}
-
-static void __init
-uuid(void)
+static void
+uuid(struct kunit *kunittest)
{
const char uuid[16] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
@@ -520,7 +449,7 @@ uuid(void)
test("03020100-0504-0706-0809-0A0B0C0D0E0F", "%pUL", uuid);
}
-static struct dentry test_dentry[4] __initdata = {
+static struct dentry test_dentry[4] = {
{ .d_parent = &test_dentry[0],
.d_name = QSTR_INIT(test_dentry[0].d_iname, 3),
.d_iname = "foo" },
@@ -535,8 +464,8 @@ static struct dentry test_dentry[4] __initdata = {
.d_iname = "romeo" },
};
-static void __init
-dentry(void)
+static void
+dentry(struct kunit *kunittest)
{
test("foo", "%pd", &test_dentry[0]);
test("foo", "%pd2", &test_dentry[0]);
@@ -556,13 +485,13 @@ dentry(void)
test(" bravo/alfa| bravo/alfa", "%12pd2|%*pd2", &test_dentry[2], 12, &test_dentry[2]);
}
-static void __init
-struct_va_format(void)
+static void
+struct_va_format(struct kunit *kunittest)
{
}
-static void __init
-time_and_date(void)
+static void
+time_and_date(struct kunit *kunittest)
{
/* 1543210543 */
const struct rtc_time tm = {
@@ -595,13 +524,13 @@ time_and_date(void)
test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t);
}
-static void __init
-struct_clk(void)
+static void
+struct_clk(struct kunit *kunittest)
{
}
-static void __init
-large_bitmap(void)
+static void
+large_bitmap(struct kunit *kunittest)
{
const int nbits = 1 << 16;
unsigned long *bits = bitmap_zalloc(nbits, GFP_KERNEL);
@@ -614,8 +543,8 @@ large_bitmap(void)
bitmap_free(bits);
}
-static void __init
-bitmap(void)
+static void
+bitmap(struct kunit *kunittest)
{
DECLARE_BITMAP(bits, 20);
const int primes[] = {2,3,5,7,11,13,17,19};
@@ -634,11 +563,11 @@ bitmap(void)
test("fffff|fffff", "%20pb|%*pb", bits, 20, bits);
test("0-19|0-19", "%20pbl|%*pbl", bits, 20, bits);
- large_bitmap();
+ large_bitmap(kunittest);
}
-static void __init
-netdev_features(void)
+static void
+netdev_features(struct kunit *kunittest)
{
}
@@ -663,9 +592,9 @@ static const struct page_flags_test pft[] = {
"%#x", "kasantag"},
};
-static void __init
-page_flags_test(int section, int node, int zone, int last_cpupid,
- int kasan_tag, unsigned long flags, const char *name,
+static void
+page_flags_test(struct kunit *kunittest, int section, int node, int zone,
+ int last_cpupid, int kasan_tag, unsigned long flags, const char *name,
char *cmp_buf)
{
unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag};
@@ -701,26 +630,25 @@ page_flags_test(int section, int node, int zone, int last_cpupid,
test(cmp_buf, "%pGp", &flags);
}
-static void __init
-flags(void)
+static void
+flags(struct kunit *kunittest)
{
unsigned long flags;
char *cmp_buffer;
gfp_t gfp;
- cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
- if (!cmp_buffer)
- return;
+ cmp_buffer = kunit_kmalloc(kunittest, BUF_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(kunittest, cmp_buffer);
flags = 0;
- page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer);
+ page_flags_test(kunittest, 0, 0, 0, 0, 0, flags, "", cmp_buffer);
flags = 1UL << NR_PAGEFLAGS;
- page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer);
+ page_flags_test(kunittest, 0, 0, 0, 0, 0, flags, "", cmp_buffer);
flags |= 1UL << PG_uptodate | 1UL << PG_dirty | 1UL << PG_lru
| 1UL << PG_active | 1UL << PG_swapbacked;
- page_flags_test(1, 1, 1, 0x1fffff, 1, flags,
+ page_flags_test(kunittest, 1, 1, 1, 0x1fffff, 1, flags,
"uptodate|dirty|lru|active|swapbacked",
cmp_buffer);
@@ -745,11 +673,9 @@ flags(void)
(unsigned long) gfp);
gfp |= __GFP_HIGH;
test(cmp_buffer, "%pGg", &gfp);
-
- kfree(cmp_buffer);
}
-static void __init fwnode_pointer(void)
+static void fwnode_pointer(struct kunit *kunittest)
{
const struct software_node first = { .name = "first" };
const struct software_node second = { .name = "second", .parent = &first };
@@ -763,8 +689,7 @@ static void __init fwnode_pointer(void)
rval = software_node_register_node_group(group);
if (rval) {
- pr_warn("cannot register softnodes; rval %d\n", rval);
- return;
+ kunit_skip(kunittest, "cannot register softnodes; rval %d\n", rval);
}
test(full_name_second, "%pfw", software_node_fwnode(&second));
@@ -776,7 +701,7 @@ static void __init fwnode_pointer(void)
software_node_unregister_node_group(group);
}
-static void __init fourcc_pointer(void)
+static void fourcc_pointer(struct kunit *kunittest)
{
struct {
u32 code;
@@ -793,14 +718,14 @@ static void __init fourcc_pointer(void)
test(try[i].str, "%p4cc", &try[i].code);
}
-static void __init
-errptr(void)
+static void
+errptr(struct kunit *kunittest)
{
test("-1234", "%pe", ERR_PTR(-1234));
/* Check that %pe with a non-ERR_PTR gets treated as ordinary %p. */
BUILD_BUG_ON(IS_ERR(PTR));
- test_hashed("%pe", PTR);
+ test_hashed(kunittest, "%pe", PTR);
#ifdef CONFIG_SYMBOLIC_ERRNAME
test("(-ENOTSOCK)", "(%pe)", ERR_PTR(-ENOTSOCK));
@@ -813,51 +738,66 @@ errptr(void)
#endif
}
-static void __init
-test_pointer(void)
-{
- plain();
- null_pointer();
- error_pointer();
- invalid_pointer();
- symbol_ptr();
- kernel_ptr();
- struct_resource();
- struct_range();
- addr();
- escaped_str();
- hex_string();
- mac();
- ip();
- uuid();
- dentry();
- struct_va_format();
- time_and_date();
- struct_clk();
- bitmap();
- netdev_features();
- flags();
- errptr();
- fwnode_pointer();
- fourcc_pointer();
-}
-
-static void __init selftest(void)
+static int printf_suite_init(struct kunit_suite *suite)
{
+ total_tests = 0;
+
alloced_buffer = kmalloc(BUF_SIZE + 2*PAD_SIZE, GFP_KERNEL);
if (!alloced_buffer)
- return;
+ return -ENOMEM;
test_buffer = alloced_buffer + PAD_SIZE;
- test_basic();
- test_number();
- test_string();
- test_pointer();
+ return 0;
+}
+static void printf_suite_exit(struct kunit_suite *suite)
+{
kfree(alloced_buffer);
-}
-KSTM_MODULE_LOADERS(test_printf);
+ kunit_info(suite, "ran %u tests\n", total_tests);
+}
+
+static struct kunit_case printf_test_cases[] = {
+ KUNIT_CASE(test_basic),
+ KUNIT_CASE(test_number),
+ KUNIT_CASE(test_string),
+ KUNIT_CASE(hash_pointer),
+ KUNIT_CASE(null_pointer),
+ KUNIT_CASE(error_pointer),
+ KUNIT_CASE(invalid_pointer),
+ KUNIT_CASE(symbol_ptr),
+ KUNIT_CASE(kernel_ptr),
+ KUNIT_CASE(struct_resource),
+ KUNIT_CASE(struct_range),
+ KUNIT_CASE(addr),
+ KUNIT_CASE(escaped_str),
+ KUNIT_CASE(hex_string),
+ KUNIT_CASE(mac),
+ KUNIT_CASE(ip4),
+ KUNIT_CASE(ip6),
+ KUNIT_CASE(uuid),
+ KUNIT_CASE(dentry),
+ KUNIT_CASE(struct_va_format),
+ KUNIT_CASE(time_and_date),
+ KUNIT_CASE(struct_clk),
+ KUNIT_CASE(bitmap),
+ KUNIT_CASE(netdev_features),
+ KUNIT_CASE(flags),
+ KUNIT_CASE(errptr),
+ KUNIT_CASE(fwnode_pointer),
+ KUNIT_CASE(fourcc_pointer),
+ {}
+};
+
+static struct kunit_suite printf_test_suite = {
+ .name = "printf",
+ .suite_init = printf_suite_init,
+ .suite_exit = printf_suite_exit,
+ .test_cases = printf_test_cases,
+};
+
+kunit_test_suite(printf_test_suite);
+
MODULE_AUTHOR("Rasmus Villemoes <linux@rasmusvillemoes.dk>");
MODULE_DESCRIPTION("Test cases for printf facility");
MODULE_LICENSE("GPL");
diff --git a/lib/test_scanf.c b/lib/tests/scanf_kunit.c
index 44f8508c9d88..e9a36ed80575 100644
--- a/lib/test_scanf.c
+++ b/lib/tests/scanf_kunit.c
@@ -3,152 +3,138 @@
* Test cases for sscanf facility.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
+#include <kunit/test.h>
#include <linux/bitops.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
-#include <linux/printk.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include "../tools/testing/selftests/kselftest_module.h"
-
#define BUF_SIZE 1024
-KSTM_MODULE_GLOBALS();
-static char *test_buffer __initdata;
-static char *fmt_buffer __initdata;
-static struct rnd_state rnd_state __initdata;
+static char *test_buffer;
+static char *fmt_buffer;
+static struct rnd_state rnd_state;
-typedef int (*check_fn)(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap);
+typedef void (*check_fn)(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap);
-static void __scanf(4, 6) __init
-_test(check_fn fn, const void *check_data, const char *string, const char *fmt,
- int n_args, ...)
+static void __scanf(7, 9)
+_test(struct kunit *test, const char *file, const int line, check_fn fn, const void *check_data,
+ const char *string, const char *fmt, int n_args, ...)
{
va_list ap, ap_copy;
int ret;
- total_tests++;
-
va_start(ap, n_args);
va_copy(ap_copy, ap);
ret = vsscanf(string, fmt, ap_copy);
va_end(ap_copy);
if (ret != n_args) {
- pr_warn("vsscanf(\"%s\", \"%s\", ...) returned %d expected %d\n",
- string, fmt, ret, n_args);
- goto fail;
+ KUNIT_FAIL(test, "%s:%d: vsscanf(\"%s\", \"%s\", ...) returned %d expected %d",
+ file, line, string, fmt, ret, n_args);
+ } else {
+ (*fn)(test, file, line, check_data, string, fmt, n_args, ap);
}
- ret = (*fn)(check_data, string, fmt, n_args, ap);
- if (ret)
- goto fail;
-
- va_end(ap);
-
- return;
-
-fail:
- failed_tests++;
va_end(ap);
}
#define _check_numbers_template(arg_fmt, expect, str, fmt, n_args, ap) \
do { \
- pr_debug("\"%s\", \"%s\" ->\n", str, fmt); \
for (; n_args > 0; n_args--, expect++) { \
typeof(*expect) got = *va_arg(ap, typeof(expect)); \
- pr_debug("\t" arg_fmt "\n", got); \
if (got != *expect) { \
- pr_warn("vsscanf(\"%s\", \"%s\", ...) expected " arg_fmt " got " arg_fmt "\n", \
- str, fmt, *expect, got); \
- return 1; \
+ KUNIT_FAIL(test, \
+ "%s:%d: vsscanf(\"%s\", \"%s\", ...) expected " arg_fmt " got " arg_fmt, \
+ file, line, str, fmt, *expect, got); \
+ return; \
} \
} \
- return 0; \
} while (0)
-static int __init check_ull(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_ull(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const unsigned long long *pval = check_data;
_check_numbers_template("%llu", pval, string, fmt, n_args, ap);
}
-static int __init check_ll(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_ll(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const long long *pval = check_data;
_check_numbers_template("%lld", pval, string, fmt, n_args, ap);
}
-static int __init check_ulong(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_ulong(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
{
const unsigned long *pval = check_data;
_check_numbers_template("%lu", pval, string, fmt, n_args, ap);
}
-static int __init check_long(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_long(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const long *pval = check_data;
_check_numbers_template("%ld", pval, string, fmt, n_args, ap);
}
-static int __init check_uint(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_uint(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const unsigned int *pval = check_data;
_check_numbers_template("%u", pval, string, fmt, n_args, ap);
}
-static int __init check_int(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_int(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const int *pval = check_data;
_check_numbers_template("%d", pval, string, fmt, n_args, ap);
}
-static int __init check_ushort(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_ushort(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
{
const unsigned short *pval = check_data;
_check_numbers_template("%hu", pval, string, fmt, n_args, ap);
}
-static int __init check_short(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_short(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
{
const short *pval = check_data;
_check_numbers_template("%hd", pval, string, fmt, n_args, ap);
}
-static int __init check_uchar(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_uchar(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
{
const unsigned char *pval = check_data;
_check_numbers_template("%hhu", pval, string, fmt, n_args, ap);
}
-static int __init check_char(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_char(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const signed char *pval = check_data;
@@ -156,7 +142,7 @@ static int __init check_char(const void *check_data, const char *string,
}
/* Selection of interesting numbers to test, copied from test-kstrtox.c */
-static const unsigned long long numbers[] __initconst = {
+static const unsigned long long numbers[] = {
0x0ULL,
0x1ULL,
0x7fULL,
@@ -196,7 +182,7 @@ do { \
T result = ~expect_val; /* should be overwritten */ \
\
snprintf(test_buffer, BUF_SIZE, gen_fmt, expect_val); \
- _test(fn, &expect_val, test_buffer, "%" scan_fmt, 1, &result); \
+ _test(test, __FILE__, __LINE__, fn, &expect_val, test_buffer, "%" scan_fmt, 1, &result);\
} while (0)
#define simple_numbers_loop(T, gen_fmt, scan_fmt, fn) \
@@ -214,7 +200,7 @@ do { \
} \
} while (0)
-static void __init numbers_simple(void)
+static void numbers_simple(struct kunit *test)
{
simple_numbers_loop(unsigned long long, "%llu", "llu", check_ull);
simple_numbers_loop(long long, "%lld", "lld", check_ll);
@@ -267,14 +253,14 @@ static void __init numbers_simple(void)
* the raw prandom*() functions (Not mathematically rigorous!!).
* Variability of length and value is more important than perfect randomness.
*/
-static u32 __init next_test_random(u32 max_bits)
+static u32 next_test_random(u32 max_bits)
{
u32 n_bits = hweight32(prandom_u32_state(&rnd_state)) % (max_bits + 1);
return prandom_u32_state(&rnd_state) & GENMASK(n_bits, 0);
}
-static unsigned long long __init next_test_random_ull(void)
+static unsigned long long next_test_random_ull(void)
{
u32 rand1 = prandom_u32_state(&rnd_state);
u32 n_bits = (hweight32(rand1) * 3) % 64;
@@ -311,7 +297,7 @@ do { \
* updating buf_pos and returning the number of characters appended.
* On error buf_pos is not changed and return value is 0.
*/
-static int __init __printf(4, 5)
+static int __printf(4, 5)
append_fmt(char *buf, int *buf_pos, int buf_len, const char *val_fmt, ...)
{
va_list ap;
@@ -333,7 +319,7 @@ append_fmt(char *buf, int *buf_pos, int buf_len, const char *val_fmt, ...)
* Convenience function to append the field delimiter string
* to both the value string and format string buffers.
*/
-static void __init append_delim(char *str_buf, int *str_buf_pos, int str_buf_len,
+static void append_delim(char *str_buf, int *str_buf_pos, int str_buf_len,
char *fmt_buf, int *fmt_buf_pos, int fmt_buf_len,
const char *delim_str)
{
@@ -344,7 +330,7 @@ static void __init append_delim(char *str_buf, int *str_buf_pos, int str_buf_len
#define test_array_8(fn, check_data, string, fmt, arr) \
do { \
BUILD_BUG_ON(ARRAY_SIZE(arr) != 8); \
- _test(fn, check_data, string, fmt, 8, \
+ _test(test, __FILE__, __LINE__, fn, check_data, string, fmt, 8, \
&(arr)[0], &(arr)[1], &(arr)[2], &(arr)[3], \
&(arr)[4], &(arr)[5], &(arr)[6], &(arr)[7]); \
} while (0)
@@ -398,7 +384,7 @@ do { \
test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
} while (0)
-static void __init numbers_list_ll(const char *delim)
+static void numbers_list_ll(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned long long, "%llu", delim, "llu", check_ull);
numbers_list_8(long long, "%lld", delim, "lld", check_ll);
@@ -408,7 +394,7 @@ static void __init numbers_list_ll(const char *delim)
numbers_list_8(long long, "0x%llx", delim, "lli", check_ll);
}
-static void __init numbers_list_l(const char *delim)
+static void numbers_list_l(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned long, "%lu", delim, "lu", check_ulong);
numbers_list_8(long, "%ld", delim, "ld", check_long);
@@ -418,7 +404,7 @@ static void __init numbers_list_l(const char *delim)
numbers_list_8(long, "0x%lx", delim, "li", check_long);
}
-static void __init numbers_list_d(const char *delim)
+static void numbers_list_d(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned int, "%u", delim, "u", check_uint);
numbers_list_8(int, "%d", delim, "d", check_int);
@@ -428,7 +414,7 @@ static void __init numbers_list_d(const char *delim)
numbers_list_8(int, "0x%x", delim, "i", check_int);
}
-static void __init numbers_list_h(const char *delim)
+static void numbers_list_h(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned short, "%hu", delim, "hu", check_ushort);
numbers_list_8(short, "%hd", delim, "hd", check_short);
@@ -438,7 +424,7 @@ static void __init numbers_list_h(const char *delim)
numbers_list_8(short, "0x%hx", delim, "hi", check_short);
}
-static void __init numbers_list_hh(const char *delim)
+static void numbers_list_hh(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned char, "%hhu", delim, "hhu", check_uchar);
numbers_list_8(signed char, "%hhd", delim, "hhd", check_char);
@@ -448,16 +434,19 @@ static void __init numbers_list_hh(const char *delim)
numbers_list_8(signed char, "0x%hhx", delim, "hhi", check_char);
}
-static void __init numbers_list(const char *delim)
+static void numbers_list(struct kunit *test)
{
- numbers_list_ll(delim);
- numbers_list_l(delim);
- numbers_list_d(delim);
- numbers_list_h(delim);
- numbers_list_hh(delim);
+ const char * const *param = test->param_value;
+ const char *delim = *param;
+
+ numbers_list_ll(test, delim);
+ numbers_list_l(test, delim);
+ numbers_list_d(test, delim);
+ numbers_list_h(test, delim);
+ numbers_list_hh(test, delim);
}
-static void __init numbers_list_field_width_ll(const char *delim)
+static void numbers_list_field_width_ll(struct kunit *test, const char *delim)
{
numbers_list_fix_width(unsigned long long, "%llu", delim, 20, "llu", check_ull);
numbers_list_fix_width(long long, "%lld", delim, 20, "lld", check_ll);
@@ -467,7 +456,7 @@ static void __init numbers_list_field_width_ll(const char *delim)
numbers_list_fix_width(long long, "0x%llx", delim, 18, "lli", check_ll);
}
-static void __init numbers_list_field_width_l(const char *delim)
+static void numbers_list_field_width_l(struct kunit *test, const char *delim)
{
#if BITS_PER_LONG == 64
numbers_list_fix_width(unsigned long, "%lu", delim, 20, "lu", check_ulong);
@@ -486,7 +475,7 @@ static void __init numbers_list_field_width_l(const char *delim)
#endif
}
-static void __init numbers_list_field_width_d(const char *delim)
+static void numbers_list_field_width_d(struct kunit *test, const char *delim)
{
numbers_list_fix_width(unsigned int, "%u", delim, 10, "u", check_uint);
numbers_list_fix_width(int, "%d", delim, 11, "d", check_int);
@@ -496,7 +485,7 @@ static void __init numbers_list_field_width_d(const char *delim)
numbers_list_fix_width(int, "0x%x", delim, 10, "i", check_int);
}
-static void __init numbers_list_field_width_h(const char *delim)
+static void numbers_list_field_width_h(struct kunit *test, const char *delim)
{
numbers_list_fix_width(unsigned short, "%hu", delim, 5, "hu", check_ushort);
numbers_list_fix_width(short, "%hd", delim, 6, "hd", check_short);
@@ -506,7 +495,7 @@ static void __init numbers_list_field_width_h(const char *delim)
numbers_list_fix_width(short, "0x%hx", delim, 6, "hi", check_short);
}
-static void __init numbers_list_field_width_hh(const char *delim)
+static void numbers_list_field_width_hh(struct kunit *test, const char *delim)
{
numbers_list_fix_width(unsigned char, "%hhu", delim, 3, "hhu", check_uchar);
numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhd", check_char);
@@ -520,16 +509,19 @@ static void __init numbers_list_field_width_hh(const char *delim)
* List of numbers separated by delim. Each field width specifier is the
* maximum possible number of digits for the given type and base.
*/
-static void __init numbers_list_field_width_typemax(const char *delim)
+static void numbers_list_field_width_typemax(struct kunit *test)
{
- numbers_list_field_width_ll(delim);
- numbers_list_field_width_l(delim);
- numbers_list_field_width_d(delim);
- numbers_list_field_width_h(delim);
- numbers_list_field_width_hh(delim);
+ const char * const *param = test->param_value;
+ const char *delim = *param;
+
+ numbers_list_field_width_ll(test, delim);
+ numbers_list_field_width_l(test, delim);
+ numbers_list_field_width_d(test, delim);
+ numbers_list_field_width_h(test, delim);
+ numbers_list_field_width_hh(test, delim);
}
-static void __init numbers_list_field_width_val_ll(const char *delim)
+static void numbers_list_field_width_val_ll(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned long long, "%llu", delim, "llu", check_ull);
numbers_list_val_width(long long, "%lld", delim, "lld", check_ll);
@@ -539,7 +531,7 @@ static void __init numbers_list_field_width_val_ll(const char *delim)
numbers_list_val_width(long long, "0x%llx", delim, "lli", check_ll);
}
-static void __init numbers_list_field_width_val_l(const char *delim)
+static void numbers_list_field_width_val_l(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned long, "%lu", delim, "lu", check_ulong);
numbers_list_val_width(long, "%ld", delim, "ld", check_long);
@@ -549,7 +541,7 @@ static void __init numbers_list_field_width_val_l(const char *delim)
numbers_list_val_width(long, "0x%lx", delim, "li", check_long);
}
-static void __init numbers_list_field_width_val_d(const char *delim)
+static void numbers_list_field_width_val_d(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned int, "%u", delim, "u", check_uint);
numbers_list_val_width(int, "%d", delim, "d", check_int);
@@ -559,7 +551,7 @@ static void __init numbers_list_field_width_val_d(const char *delim)
numbers_list_val_width(int, "0x%x", delim, "i", check_int);
}
-static void __init numbers_list_field_width_val_h(const char *delim)
+static void numbers_list_field_width_val_h(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned short, "%hu", delim, "hu", check_ushort);
numbers_list_val_width(short, "%hd", delim, "hd", check_short);
@@ -569,7 +561,7 @@ static void __init numbers_list_field_width_val_h(const char *delim)
numbers_list_val_width(short, "0x%hx", delim, "hi", check_short);
}
-static void __init numbers_list_field_width_val_hh(const char *delim)
+static void numbers_list_field_width_val_hh(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned char, "%hhu", delim, "hhu", check_uchar);
numbers_list_val_width(signed char, "%hhd", delim, "hhd", check_char);
@@ -583,13 +575,16 @@ static void __init numbers_list_field_width_val_hh(const char *delim)
* List of numbers separated by delim. Each field width specifier is the
* exact length of the corresponding value digits in the string being scanned.
*/
-static void __init numbers_list_field_width_val_width(const char *delim)
+static void numbers_list_field_width_val_width(struct kunit *test)
{
- numbers_list_field_width_val_ll(delim);
- numbers_list_field_width_val_l(delim);
- numbers_list_field_width_val_d(delim);
- numbers_list_field_width_val_h(delim);
- numbers_list_field_width_val_hh(delim);
+ const char * const *param = test->param_value;
+ const char *delim = *param;
+
+ numbers_list_field_width_val_ll(test, delim);
+ numbers_list_field_width_val_l(test, delim);
+ numbers_list_field_width_val_d(test, delim);
+ numbers_list_field_width_val_h(test, delim);
+ numbers_list_field_width_val_hh(test, delim);
}
/*
@@ -598,9 +593,14 @@ static void __init numbers_list_field_width_val_width(const char *delim)
* of digits. For example the hex values c0,3,bf01,303 would have a
* string representation of "c03bf01303" and extracted with "%2x%1x%4x%3x".
*/
-static void __init numbers_slice(void)
+static void numbers_slice(struct kunit *test)
{
- numbers_list_field_width_val_width("");
+ const char *delim = "";
+
+ KUNIT_ASSERT_PTR_EQ(test, test->param_value, NULL);
+ test->param_value = &delim;
+
+ numbers_list_field_width_val_width(test);
}
#define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn) \
@@ -608,14 +608,14 @@ do { \
const T expect[2] = { expect0, expect1 }; \
T result[2] = { (T)~expect[0], (T)~expect[1] }; \
\
- _test(fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]); \
+ _test(test, __FILE__, __LINE__, fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]);\
} while (0)
/*
* Number prefix is >= field width.
* Expected behaviour is derived from testing userland sscanf.
*/
-static void __init numbers_prefix_overflow(void)
+static void numbers_prefix_overflow(struct kunit *test)
{
/*
* Negative decimal with a field of width 1, should quit scanning
@@ -684,25 +684,17 @@ do { \
T got; \
char *endp; \
int len; \
- bool fail = false; \
\
- total_tests++; \
len = snprintf(test_buffer, BUF_SIZE, gen_fmt, expect); \
got = (fn)(test_buffer, &endp, base); \
- pr_debug(#fn "(\"%s\", %d) -> " gen_fmt "\n", test_buffer, base, got); \
if (got != (expect)) { \
- fail = true; \
- pr_warn(#fn "(\"%s\", %d): got " gen_fmt " expected " gen_fmt "\n", \
- test_buffer, base, got, expect); \
+ KUNIT_FAIL(test, #fn "(\"%s\", %d): got " gen_fmt " expected " gen_fmt, \
+ test_buffer, base, got, expect); \
} else if (endp != test_buffer + len) { \
- fail = true; \
- pr_warn(#fn "(\"%s\", %d) startp=0x%px got endp=0x%px expected 0x%px\n", \
- test_buffer, base, test_buffer, \
- test_buffer + len, endp); \
+ KUNIT_FAIL(test, #fn "(\"%s\", %d) startp=0x%px got endp=0x%px expected 0x%px", \
+ test_buffer, base, test_buffer, \
+ test_buffer + len, endp); \
} \
- \
- if (fail) \
- failed_tests++; \
} while (0)
#define test_simple_strtoxx(T, fn, gen_fmt, base) \
@@ -718,7 +710,7 @@ do { \
} \
} while (0)
-static void __init test_simple_strtoull(void)
+static void test_simple_strtoull(struct kunit *test)
{
test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 10);
test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 0);
@@ -727,7 +719,7 @@ static void __init test_simple_strtoull(void)
test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 0);
}
-static void __init test_simple_strtoll(void)
+static void test_simple_strtoll(struct kunit *test)
{
test_simple_strtoxx(long long, simple_strtoll, "%lld", 10);
test_simple_strtoxx(long long, simple_strtoll, "%lld", 0);
@@ -736,7 +728,7 @@ static void __init test_simple_strtoll(void)
test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 0);
}
-static void __init test_simple_strtoul(void)
+static void test_simple_strtoul(struct kunit *test)
{
test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 10);
test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 0);
@@ -745,7 +737,7 @@ static void __init test_simple_strtoul(void)
test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 0);
}
-static void __init test_simple_strtol(void)
+static void test_simple_strtol(struct kunit *test)
{
test_simple_strtoxx(long, simple_strtol, "%ld", 10);
test_simple_strtoxx(long, simple_strtol, "%ld", 0);
@@ -755,60 +747,69 @@ static void __init test_simple_strtol(void)
}
/* Selection of common delimiters/separators between numbers in a string. */
-static const char * const number_delimiters[] __initconst = {
+static const char * const number_delimiters[] = {
" ", ":", ",", "-", "/",
};
-static void __init test_numbers(void)
+static void number_delimiter_param_desc(const char * const *param,
+ char *desc)
{
- int i;
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "\"%s\"", *param);
+}
- /* String containing only one number. */
- numbers_simple();
+KUNIT_ARRAY_PARAM(number_delimiters, number_delimiters, number_delimiter_param_desc);
+static struct kunit_case scanf_test_cases[] = {
+ KUNIT_CASE(numbers_simple),
/* String with multiple numbers separated by delimiter. */
- for (i = 0; i < ARRAY_SIZE(number_delimiters); i++) {
- numbers_list(number_delimiters[i]);
-
- /* Field width may be longer than actual field digits. */
- numbers_list_field_width_typemax(number_delimiters[i]);
-
- /* Each field width exactly length of actual field digits. */
- numbers_list_field_width_val_width(number_delimiters[i]);
- }
-
+ KUNIT_CASE_PARAM(numbers_list, number_delimiters_gen_params),
+ /* Field width may be longer than actual field digits. */
+ KUNIT_CASE_PARAM(numbers_list_field_width_typemax, number_delimiters_gen_params),
+ /* Each field width exactly length of actual field digits. */
+ KUNIT_CASE_PARAM(numbers_list_field_width_val_width, number_delimiters_gen_params),
/* Slice continuous sequence of digits using field widths. */
- numbers_slice();
-
- numbers_prefix_overflow();
-}
+ KUNIT_CASE(numbers_slice),
+ KUNIT_CASE(numbers_prefix_overflow),
+
+ KUNIT_CASE(test_simple_strtoull),
+ KUNIT_CASE(test_simple_strtoll),
+ KUNIT_CASE(test_simple_strtoul),
+ KUNIT_CASE(test_simple_strtol),
+ {}
+};
-static void __init selftest(void)
+static int scanf_suite_init(struct kunit_suite *suite)
{
test_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
if (!test_buffer)
- return;
+ return -ENOMEM;
fmt_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
if (!fmt_buffer) {
kfree(test_buffer);
- return;
+ return -ENOMEM;
}
prandom_seed_state(&rnd_state, 3141592653589793238ULL);
- test_numbers();
-
- test_simple_strtoull();
- test_simple_strtoll();
- test_simple_strtoul();
- test_simple_strtol();
+ return 0;
+}
+static void scanf_suite_exit(struct kunit_suite *suite)
+{
kfree(fmt_buffer);
kfree(test_buffer);
}
-KSTM_MODULE_LOADERS(test_scanf);
+static struct kunit_suite scanf_test_suite = {
+ .name = "scanf",
+ .suite_init = scanf_suite_init,
+ .suite_exit = scanf_suite_exit,
+ .test_cases = scanf_test_cases,
+};
+
+kunit_test_suite(scanf_test_suite);
+
MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
MODULE_DESCRIPTION("Test cases for sscanf facility");
MODULE_LICENSE("GPL v2");
diff --git a/lib/siphash_kunit.c b/lib/tests/siphash_kunit.c
index 26bd4e8dc03e..26bd4e8dc03e 100644
--- a/lib/siphash_kunit.c
+++ b/lib/tests/siphash_kunit.c
diff --git a/lib/slub_kunit.c b/lib/tests/slub_kunit.c
index f11691315c2f..d47c472b0520 100644
--- a/lib/slub_kunit.c
+++ b/lib/tests/slub_kunit.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
+#include <linux/delay.h>
#include "../mm/slab.h"
static struct kunit_resource resource;
@@ -181,6 +182,63 @@ static void test_kfree_rcu(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, slab_errors);
}
+struct cache_destroy_work {
+ struct work_struct work;
+ struct kmem_cache *s;
+};
+
+static void cache_destroy_workfn(struct work_struct *w)
+{
+ struct cache_destroy_work *cdw;
+
+ cdw = container_of(w, struct cache_destroy_work, work);
+ kmem_cache_destroy(cdw->s);
+}
+
+#define KMEM_CACHE_DESTROY_NR 10
+
+static void test_kfree_rcu_wq_destroy(struct kunit *test)
+{
+ struct test_kfree_rcu_struct *p;
+ struct cache_destroy_work cdw;
+ struct workqueue_struct *wq;
+ struct kmem_cache *s;
+ unsigned int delay;
+ int i;
+
+ if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
+ kunit_skip(test, "can't do kfree_rcu() when test is built-in");
+
+ INIT_WORK_ONSTACK(&cdw.work, cache_destroy_workfn);
+ wq = alloc_workqueue("test_kfree_rcu_destroy_wq",
+ WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+
+ if (!wq)
+ kunit_skip(test, "failed to alloc wq");
+
+ for (i = 0; i < KMEM_CACHE_DESTROY_NR; i++) {
+ s = test_kmem_cache_create("TestSlub_kfree_rcu_wq_destroy",
+ sizeof(struct test_kfree_rcu_struct),
+ SLAB_NO_MERGE);
+
+ if (!s)
+ kunit_skip(test, "failed to create cache");
+
+ delay = get_random_u8();
+ p = kmem_cache_alloc(s, GFP_KERNEL);
+ kfree_rcu(p, rcu);
+
+ cdw.s = s;
+
+ msleep(delay);
+ queue_work(wq, &cdw.work);
+ flush_work(&cdw.work);
+ }
+
+ destroy_workqueue(wq);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+}
+
static void test_leak_destroy(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
@@ -254,6 +312,7 @@ static struct kunit_case test_cases[] = {
KUNIT_CASE(test_clobber_redzone_free),
KUNIT_CASE(test_kmalloc_redzone_access),
KUNIT_CASE(test_kfree_rcu),
+ KUNIT_CASE(test_kfree_rcu_wq_destroy),
KUNIT_CASE(test_leak_destroy),
KUNIT_CASE(test_krealloc_redzone_zeroing),
{}
diff --git a/lib/stackinit_kunit.c b/lib/tests/stackinit_kunit.c
index c40818ec9c18..63aa78e6f5c1 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/tests/stackinit_kunit.c
@@ -47,10 +47,12 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define DO_NOTHING_TYPE_SCALAR(var_type) var_type
#define DO_NOTHING_TYPE_STRING(var_type) void
#define DO_NOTHING_TYPE_STRUCT(var_type) void
+#define DO_NOTHING_TYPE_UNION(var_type) void
#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr)
#define DO_NOTHING_RETURN_STRING(ptr) /**/
#define DO_NOTHING_RETURN_STRUCT(ptr) /**/
+#define DO_NOTHING_RETURN_UNION(ptr) /**/
#define DO_NOTHING_CALL_SCALAR(var, name) \
(var) = do_nothing_ ## name(&(var))
@@ -58,10 +60,13 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
do_nothing_ ## name(var)
#define DO_NOTHING_CALL_STRUCT(var, name) \
do_nothing_ ## name(&(var))
+#define DO_NOTHING_CALL_UNION(var, name) \
+ do_nothing_ ## name(&(var))
#define FETCH_ARG_SCALAR(var) &var
#define FETCH_ARG_STRING(var) var
#define FETCH_ARG_STRUCT(var) &var
+#define FETCH_ARG_UNION(var) &var
/*
* On m68k, if the leaf function test variable is longer than 8 bytes,
@@ -70,13 +75,16 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
*/
#ifdef CONFIG_M68K
#define FILL_SIZE_STRING 8
+#define FILL_SIZE_ARRAY 2
#else
#define FILL_SIZE_STRING 16
+#define FILL_SIZE_ARRAY 8
#endif
#define INIT_CLONE_SCALAR /**/
#define INIT_CLONE_STRING [FILL_SIZE_STRING]
#define INIT_CLONE_STRUCT /**/
+#define INIT_CLONE_UNION /**/
#define ZERO_CLONE_SCALAR(zero) memset(&(zero), 0x00, sizeof(zero))
#define ZERO_CLONE_STRING(zero) memset(&(zero), 0x00, sizeof(zero))
@@ -92,6 +100,7 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
zero.three = 0; \
zero.four = 0; \
} while (0)
+#define ZERO_CLONE_UNION(zero) ZERO_CLONE_STRUCT(zero)
#define INIT_SCALAR_none(var_type) /**/
#define INIT_SCALAR_zero(var_type) = 0
@@ -101,6 +110,7 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define INIT_STRUCT_none(var_type) /**/
#define INIT_STRUCT_zero(var_type) = { }
+#define INIT_STRUCT_old_zero(var_type) = { 0 }
#define __static_partial { .two = 0, }
@@ -146,6 +156,43 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define INIT_STRUCT_assigned_copy(var_type) \
; var = *(arg)
+/* Union initialization is the same as structs. */
+#define INIT_UNION_none(var_type) INIT_STRUCT_none(var_type)
+#define INIT_UNION_zero(var_type) INIT_STRUCT_zero(var_type)
+#define INIT_UNION_old_zero(var_type) INIT_STRUCT_old_zero(var_type)
+
+#define INIT_UNION_static_partial(var_type) \
+ INIT_STRUCT_static_partial(var_type)
+#define INIT_UNION_static_all(var_type) \
+ INIT_STRUCT_static_all(var_type)
+#define INIT_UNION_dynamic_partial(var_type) \
+ INIT_STRUCT_dynamic_partial(var_type)
+#define INIT_UNION_dynamic_all(var_type) \
+ INIT_STRUCT_dynamic_all(var_type)
+#define INIT_UNION_runtime_partial(var_type) \
+ INIT_STRUCT_runtime_partial(var_type)
+#define INIT_UNION_runtime_all(var_type) \
+ INIT_STRUCT_runtime_all(var_type)
+#define INIT_UNION_assigned_static_partial(var_type) \
+ INIT_STRUCT_assigned_static_partial(var_type)
+#define INIT_UNION_assigned_static_all(var_type) \
+ INIT_STRUCT_assigned_static_all(var_type)
+#define INIT_UNION_assigned_dynamic_partial(var_type) \
+ INIT_STRUCT_assigned_dynamic_partial(var_type)
+#define INIT_UNION_assigned_dynamic_all(var_type) \
+ INIT_STRUCT_assigned_dynamic_all(var_type)
+#define INIT_UNION_assigned_copy(var_type) \
+ INIT_STRUCT_assigned_copy(var_type)
+
+/*
+ * The "did we actually fill the stack?" check value needs
+ * to be neither 0 nor any of the "pattern" bytes. The
+ * pattern bytes are compiler, architecture, and type based,
+ * so we have to pick a value that never appears for those
+ * combinations. Use 0x99 which is not 0xFF, 0xFE, nor 0xAA.
+ */
+#define FILL_BYTE 0x99
+
/*
* @name: unique string name for the test
* @var_type: type to be tested for zeroing initialization
@@ -168,12 +215,12 @@ static noinline void test_ ## name (struct kunit *test) \
ZERO_CLONE_ ## which(zero); \
/* Clear entire check buffer for 0xFF overlap test. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
- /* Fill stack with 0xFF. */ \
+ /* Fill stack with FILL_BYTE. */ \
ignored = leaf_ ##name((unsigned long)&ignored, 1, \
FETCH_ARG_ ## which(zero)); \
- /* Verify all bytes overwritten with 0xFF. */ \
+ /* Verify all bytes overwritten with FILL_BYTE. */ \
for (sum = 0, i = 0; i < target_size; i++) \
- sum += (check_buf[i] != 0xFF); \
+ sum += (check_buf[i] != FILL_BYTE); \
/* Clear entire check buffer for later bit tests. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Extract stack-defined variable contents. */ \
@@ -184,7 +231,8 @@ static noinline void test_ ## name (struct kunit *test) \
* possible between the two leaf function calls. \
*/ \
KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
- "leaf fill was not 0xFF!?\n"); \
+ "leaf fill was not 0x%02X!?\n", \
+ FILL_BYTE); \
\
/* Validate that compiler lined up fill and target. */ \
KUNIT_ASSERT_TRUE_MSG(test, \
@@ -196,9 +244,9 @@ static noinline void test_ ## name (struct kunit *test) \
(int)((ssize_t)(uintptr_t)fill_start - \
(ssize_t)(uintptr_t)target_start)); \
\
- /* Look for any bytes still 0xFF in check region. */ \
+ /* Validate check region has no FILL_BYTE bytes. */ \
for (sum = 0, i = 0; i < target_size; i++) \
- sum += (check_buf[i] == 0xFF); \
+ sum += (check_buf[i] == FILL_BYTE); \
\
if (sum != 0 && xfail) \
kunit_skip(test, \
@@ -233,12 +281,12 @@ static noinline int leaf_ ## name(unsigned long sp, bool fill, \
* stack frame of SOME kind... \
*/ \
memset(buf, (char)(sp & 0xff), sizeof(buf)); \
- /* Fill variable with 0xFF. */ \
+ /* Fill variable with FILL_BYTE. */ \
if (fill) { \
fill_start = &var; \
fill_size = sizeof(var); \
memset(fill_start, \
- (char)((sp & 0xff) | forced_mask), \
+ FILL_BYTE & forced_mask, \
fill_size); \
} \
\
@@ -294,6 +342,33 @@ struct test_user {
unsigned long four;
};
+/* No padding: all members are the same size. */
+union test_same_sizes {
+ unsigned long one;
+ unsigned long two;
+ unsigned long three;
+ unsigned long four;
+};
+
+/* Mismatched sizes, with one and two being small */
+union test_small_start {
+ char one:1;
+ char two;
+ short three;
+ unsigned long four;
+ struct big_struct {
+ unsigned long array[FILL_SIZE_ARRAY];
+ } big;
+};
+
+/* Mismatched sizes, with three and four being small */
+union test_small_end {
+ short one;
+ unsigned long two;
+ char three:1;
+ char four;
+};
+
#define ALWAYS_PASS WANT_SUCCESS
#define ALWAYS_FAIL XFAIL
@@ -332,6 +407,11 @@ struct test_user {
struct test_ ## name, STRUCT, init, \
xfail)
+#define DEFINE_UNION_TEST(name, init, xfail) \
+ DEFINE_TEST(name ## _ ## init, \
+ union test_ ## name, STRUCT, init, \
+ xfail)
+
#define DEFINE_STRUCT_TESTS(init, xfail) \
DEFINE_STRUCT_TEST(small_hole, init, xfail); \
DEFINE_STRUCT_TEST(big_hole, init, xfail); \
@@ -343,9 +423,22 @@ struct test_user {
xfail); \
DEFINE_STRUCT_TESTS(base ## _ ## all, xfail)
+#define DEFINE_UNION_INITIALIZER_TESTS(base, xfail) \
+ DEFINE_UNION_TESTS(base ## _ ## partial, \
+ xfail); \
+ DEFINE_UNION_TESTS(base ## _ ## all, xfail)
+
+#define DEFINE_UNION_TESTS(init, xfail) \
+ DEFINE_UNION_TEST(same_sizes, init, xfail); \
+ DEFINE_UNION_TEST(small_start, init, xfail); \
+ DEFINE_UNION_TEST(small_end, init, xfail);
+
/* These should be fully initialized all the time! */
DEFINE_SCALAR_TESTS(zero, ALWAYS_PASS);
DEFINE_STRUCT_TESTS(zero, ALWAYS_PASS);
+DEFINE_STRUCT_TESTS(old_zero, ALWAYS_PASS);
+DEFINE_UNION_TESTS(zero, ALWAYS_PASS);
+DEFINE_UNION_TESTS(old_zero, ALWAYS_PASS);
/* Struct initializers: padding may be left uninitialized. */
DEFINE_STRUCT_INITIALIZER_TESTS(static, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(dynamic, STRONG_PASS);
@@ -353,6 +446,12 @@ DEFINE_STRUCT_INITIALIZER_TESTS(runtime, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
DEFINE_STRUCT_TESTS(assigned_copy, ALWAYS_FAIL);
+DEFINE_UNION_INITIALIZER_TESTS(static, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(dynamic, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(runtime, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
+DEFINE_UNION_TESTS(assigned_copy, ALWAYS_FAIL);
/* No initialization without compiler instrumentation. */
DEFINE_SCALAR_TESTS(none, STRONG_PASS);
DEFINE_STRUCT_TESTS(none, BYREF_PASS);
@@ -380,7 +479,7 @@ static int noinline __leaf_switch_none(int path, bool fill)
fill_start = &var;
fill_size = sizeof(var);
- memset(fill_start, forced_mask | 0x55, fill_size);
+ memset(fill_start, (forced_mask | 0x55) & FILL_BYTE, fill_size);
}
memcpy(check_buf, target_start, target_size);
break;
@@ -391,7 +490,7 @@ static int noinline __leaf_switch_none(int path, bool fill)
fill_start = &var;
fill_size = sizeof(var);
- memset(fill_start, forced_mask | 0xaa, fill_size);
+ memset(fill_start, (forced_mask | 0xaa) & FILL_BYTE, fill_size);
}
memcpy(check_buf, target_start, target_size);
break;
@@ -436,13 +535,23 @@ DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
KUNIT_CASE(test_trailing_hole_ ## init),\
KUNIT_CASE(test_packed_ ## init) \
+#define KUNIT_test_unions(init) \
+ KUNIT_CASE(test_same_sizes_ ## init), \
+ KUNIT_CASE(test_small_start_ ## init), \
+ KUNIT_CASE(test_small_end_ ## init) \
+
static struct kunit_case stackinit_test_cases[] = {
/* These are explicitly initialized and should always pass. */
KUNIT_test_scalars(zero),
KUNIT_test_structs(zero),
+ KUNIT_test_structs(old_zero),
+ KUNIT_test_unions(zero),
+ KUNIT_test_unions(old_zero),
/* Padding here appears to be accidentally always initialized? */
KUNIT_test_structs(dynamic_partial),
KUNIT_test_structs(assigned_dynamic_partial),
+ KUNIT_test_unions(dynamic_partial),
+ KUNIT_test_unions(assigned_dynamic_partial),
/* Padding initialization depends on compiler behaviors. */
KUNIT_test_structs(static_partial),
KUNIT_test_structs(static_all),
@@ -452,8 +561,17 @@ static struct kunit_case stackinit_test_cases[] = {
KUNIT_test_structs(assigned_static_partial),
KUNIT_test_structs(assigned_static_all),
KUNIT_test_structs(assigned_dynamic_all),
+ KUNIT_test_unions(static_partial),
+ KUNIT_test_unions(static_all),
+ KUNIT_test_unions(dynamic_all),
+ KUNIT_test_unions(runtime_partial),
+ KUNIT_test_unions(runtime_all),
+ KUNIT_test_unions(assigned_static_partial),
+ KUNIT_test_unions(assigned_static_all),
+ KUNIT_test_unions(assigned_dynamic_all),
/* Everything fails this since it effectively performs a memcpy(). */
KUNIT_test_structs(assigned_copy),
+ KUNIT_test_unions(assigned_copy),
/* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
KUNIT_test_scalars(none),
KUNIT_CASE(test_switch_1_none),
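
The new zero/old_zero union cases cover the two zeroing spellings used in the kernel. The C standard only guarantees that `= { 0 }` initializes the first named member of a union, so these tests check that the compiler in fact zeroes the whole object for both forms. A hedged illustration with a hypothetical union layout:

union example_small_first {
	char one;		/* 1 byte */
	unsigned long four;	/* full width of the union */
};

void example_zeroing(void)
{
	union example_small_first a = { };	/* empty initializer: whole object expected zeroed */
	union example_small_first b = { 0 };	/* old-style: only 'one' is named; the tests verify
						 * the compiler zeroes the remaining bytes as well */
	(void)a;
	(void)b;
}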
diff --git a/lib/string_helpers_kunit.c b/lib/tests/string_helpers_kunit.c
index c853046183d2..c853046183d2 100644
--- a/lib/string_helpers_kunit.c
+++ b/lib/tests/string_helpers_kunit.c
diff --git a/lib/string_kunit.c b/lib/tests/string_kunit.c
index c919e3293da6..0ed7448a26d3 100644
--- a/lib/string_kunit.c
+++ b/lib/tests/string_kunit.c
@@ -579,8 +579,8 @@ static void string_test_strtomem(struct kunit *test)
static void string_test_memtostr(struct kunit *test)
{
- char nonstring[7] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' };
- char nonstring_small[3] = { 'a', 'b', 'c' };
+ char nonstring[7] __nonstring = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' };
+ char nonstring_small[3] __nonstring = { 'a', 'b', 'c' };
char dest[sizeof(nonstring) + 1];
/* Copy in a non-NUL-terminated string into exactly right-sized dest. */
diff --git a/lib/test_bits.c b/lib/tests/test_bits.c
index c7b38d91e1f1..c7b38d91e1f1 100644
--- a/lib/test_bits.c
+++ b/lib/tests/test_bits.c
diff --git a/lib/test_fprobe.c b/lib/tests/test_fprobe.c
index 24de0e5ff859..cf92111b5c79 100644
--- a/lib/test_fprobe.c
+++ b/lib/tests/test_fprobe.c
@@ -17,10 +17,8 @@ static u32 rand1, entry_val, exit_val;
/* Use indirect calls to avoid inlining the target functions */
static u32 (*target)(u32 value);
static u32 (*target2)(u32 value);
-static u32 (*target_nest)(u32 value, u32 (*nest)(u32));
static unsigned long target_ip;
static unsigned long target2_ip;
-static unsigned long target_nest_ip;
static int entry_return_value;
static noinline u32 fprobe_selftest_target(u32 value)
@@ -33,14 +31,9 @@ static noinline u32 fprobe_selftest_target2(u32 value)
return (value / div_factor) + 1;
}
-static noinline u32 fprobe_selftest_nest_target(u32 value, u32 (*nest)(u32))
-{
- return nest(value + 2);
-}
-
static notrace int fp_entry_handler(struct fprobe *fp, unsigned long ip,
unsigned long ret_ip,
- struct pt_regs *regs, void *data)
+ struct ftrace_regs *fregs, void *data)
{
KUNIT_EXPECT_FALSE(current_test, preemptible());
/* This can be called on the fprobe_selftest_target and the fprobe_selftest_target2 */
@@ -59,9 +52,9 @@ static notrace int fp_entry_handler(struct fprobe *fp, unsigned long ip,
static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip,
unsigned long ret_ip,
- struct pt_regs *regs, void *data)
+ struct ftrace_regs *fregs, void *data)
{
- unsigned long ret = regs_return_value(regs);
+ unsigned long ret = ftrace_regs_get_return_value(fregs);
KUNIT_EXPECT_FALSE(current_test, preemptible());
if (ip != target_ip) {
@@ -79,22 +72,6 @@ static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip,
KUNIT_EXPECT_NULL(current_test, data);
}
-static notrace int nest_entry_handler(struct fprobe *fp, unsigned long ip,
- unsigned long ret_ip,
- struct pt_regs *regs, void *data)
-{
- KUNIT_EXPECT_FALSE(current_test, preemptible());
- return 0;
-}
-
-static notrace void nest_exit_handler(struct fprobe *fp, unsigned long ip,
- unsigned long ret_ip,
- struct pt_regs *regs, void *data)
-{
- KUNIT_EXPECT_FALSE(current_test, preemptible());
- KUNIT_EXPECT_EQ(current_test, ip, target_nest_ip);
-}
-
/* Test entry only (no rethook) */
static void test_fprobe_entry(struct kunit *test)
{
@@ -191,25 +168,6 @@ static void test_fprobe_data(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
}
-/* Test nr_maxactive */
-static void test_fprobe_nest(struct kunit *test)
-{
- static const char *syms[] = {"fprobe_selftest_target", "fprobe_selftest_nest_target"};
- struct fprobe fp = {
- .entry_handler = nest_entry_handler,
- .exit_handler = nest_exit_handler,
- .nr_maxactive = 1,
- };
-
- current_test = test;
- KUNIT_EXPECT_EQ(test, 0, register_fprobe_syms(&fp, syms, 2));
-
- target_nest(rand1, target);
- KUNIT_EXPECT_EQ(test, 1, fp.nmissed);
-
- KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
-}
-
static void test_fprobe_skip(struct kunit *test)
{
struct fprobe fp = {
@@ -247,10 +205,8 @@ static int fprobe_test_init(struct kunit *test)
rand1 = get_random_u32_above(div_factor);
target = fprobe_selftest_target;
target2 = fprobe_selftest_target2;
- target_nest = fprobe_selftest_nest_target;
target_ip = get_ftrace_location(target);
target2_ip = get_ftrace_location(target2);
- target_nest_ip = get_ftrace_location(target_nest);
return 0;
}
@@ -260,7 +216,6 @@ static struct kunit_case fprobe_testcases[] = {
KUNIT_CASE(test_fprobe),
KUNIT_CASE(test_fprobe_syms),
KUNIT_CASE(test_fprobe_data),
- KUNIT_CASE(test_fprobe_nest),
KUNIT_CASE(test_fprobe_skip),
{}
};
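
The fprobe handlers now take struct ftrace_regs instead of pt_regs and fetch the return value through the ftrace_regs accessor. A minimal hedged sketch of a probe using the new signatures; the example_* symbols are illustrative, not from this patch:

#include <linux/fprobe.h>
#include <linux/ftrace.h>

static int example_entry(struct fprobe *fp, unsigned long ip,
			 unsigned long ret_ip, struct ftrace_regs *fregs,
			 void *data)
{
	return 0;	/* returning 0 lets the exit handler run too */
}

static void example_exit(struct fprobe *fp, unsigned long ip,
			 unsigned long ret_ip, struct ftrace_regs *fregs,
			 void *data)
{
	unsigned long ret = ftrace_regs_get_return_value(fregs);

	(void)ret;
}

static struct fprobe example_fp = {
	.entry_handler = example_entry,
	.exit_handler  = example_exit,
};

/* register_fprobe_syms(&example_fp, syms, nr_syms) attaches it to named symbols. */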
diff --git a/lib/test_hash.c b/lib/tests/test_hash.c
index a7af39662a0a..a7af39662a0a 100644
--- a/lib/test_hash.c
+++ b/lib/tests/test_hash.c
diff --git a/lib/test_kprobes.c b/lib/tests/test_kprobes.c
index b7582010125c..b7582010125c 100644
--- a/lib/test_kprobes.c
+++ b/lib/tests/test_kprobes.c
diff --git a/lib/test_linear_ranges.c b/lib/tests/test_linear_ranges.c
index f482be00f1bc..f482be00f1bc 100644
--- a/lib/test_linear_ranges.c
+++ b/lib/tests/test_linear_ranges.c
diff --git a/lib/test_list_sort.c b/lib/tests/test_list_sort.c
index 30879abc8a42..30879abc8a42 100644
--- a/lib/test_list_sort.c
+++ b/lib/tests/test_list_sort.c
diff --git a/lib/test_sort.c b/lib/tests/test_sort.c
index cd4a338d1153..cd4a338d1153 100644
--- a/lib/test_sort.c
+++ b/lib/tests/test_sort.c
diff --git a/lib/usercopy_kunit.c b/lib/tests/usercopy_kunit.c
index 77fa00a13df7..77fa00a13df7 100644
--- a/lib/usercopy_kunit.c
+++ b/lib/tests/usercopy_kunit.c
diff --git a/lib/util_macros_kunit.c b/lib/tests/util_macros_kunit.c
index 94cc9f0de50a..94cc9f0de50a 100644
--- a/lib/util_macros_kunit.c
+++ b/lib/tests/util_macros_kunit.c
diff --git a/lib/ubsan.c b/lib/ubsan.c
index a1c983d148f1..cdc1d31c3821 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -44,7 +44,7 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
case ubsan_shift_out_of_bounds:
return "UBSAN: shift out of bounds";
#endif
-#if defined(CONFIG_UBSAN_DIV_ZERO) || defined(CONFIG_UBSAN_SIGNED_WRAP)
+#if defined(CONFIG_UBSAN_DIV_ZERO) || defined(CONFIG_UBSAN_INTEGER_WRAP)
/*
* SanitizerKind::IntegerDivideByZero and
* SanitizerKind::SignedIntegerOverflow emit
@@ -79,7 +79,7 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
case ubsan_type_mismatch:
return "UBSAN: type mismatch";
#endif
-#ifdef CONFIG_UBSAN_SIGNED_WRAP
+#ifdef CONFIG_UBSAN_INTEGER_WRAP
/*
* SanitizerKind::SignedIntegerOverflow emits
* SanitizerHandler::AddOverflow, SanitizerHandler::SubOverflow,
@@ -303,6 +303,30 @@ void __ubsan_handle_negate_overflow(void *_data, void *old_val)
}
EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
+void __ubsan_handle_implicit_conversion(void *_data, void *from_val, void *to_val)
+{
+ struct implicit_conversion_data *data = _data;
+ char from_val_str[VALUE_LENGTH];
+ char to_val_str[VALUE_LENGTH];
+
+ if (suppress_report(&data->location))
+ return;
+
+ val_to_string(from_val_str, sizeof(from_val_str), data->from_type, from_val);
+ val_to_string(to_val_str, sizeof(to_val_str), data->to_type, to_val);
+
+ ubsan_prologue(&data->location, "implicit-conversion");
+
+ pr_err("cannot represent %s value %s during %s %s, truncated to %s\n",
+ data->from_type->type_name,
+ from_val_str,
+ type_check_kinds[data->type_check_kind],
+ data->to_type->type_name,
+ to_val_str);
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_implicit_conversion);
void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
{
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 07e37d4429b4..b37e22374e77 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -62,6 +62,13 @@ struct overflow_data {
struct type_descriptor *type;
};
+struct implicit_conversion_data {
+ struct source_location location;
+ struct type_descriptor *from_type;
+ struct type_descriptor *to_type;
+ unsigned char type_check_kind;
+};
+
struct type_mismatch_data {
struct source_location location;
struct type_descriptor *type;
@@ -142,6 +149,7 @@ void ubsan_linkage __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs)
void ubsan_linkage __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
void ubsan_linkage __ubsan_handle_negate_overflow(void *_data, void *old_val);
void ubsan_linkage __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_implicit_conversion(void *_data, void *lhs, void *rhs);
void ubsan_linkage __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
void ubsan_linkage __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
void ubsan_linkage __ubsan_handle_out_of_bounds(void *_data, void *index);
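
For context, the new handler fires on builds instrumented with the implicit-conversion sanitizer when a narrowing assignment silently drops value bits. A hedged sketch of code that would produce such a report (illustrative only; exact config wiring depends on the UBSAN options selected):

/*
 * Hypothetical example: with the implicit-conversion checks enabled, the
 * assignment below calls __ubsan_handle_implicit_conversion() because
 * 0x1234 cannot be represented in an unsigned char (truncated to 0x34).
 */
void example_truncate(void)
{
	unsigned int wide = 0x1234;
	unsigned char narrow;

	narrow = wide;
	(void)narrow;
}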
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
index 82fe827af542..45df764b49ad 100644
--- a/lib/vdso/Kconfig
+++ b/lib/vdso/Kconfig
@@ -43,3 +43,8 @@ config VDSO_GETRANDOM
bool
help
Selected by architectures that support vDSO getrandom().
+
+config GENERIC_VDSO_DATA_STORE
+ bool
+ help
+ Selected by architectures that use the generic vDSO data store.
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
index cedbf15f8087..aedd40aaa950 100644
--- a/lib/vdso/Makefile
+++ b/lib/vdso/Makefile
@@ -1,18 +1,3 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
-GENERIC_VDSO_MK_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
-GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH))
-
-c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c)
-c-getrandom-$(CONFIG_VDSO_GETRANDOM) := $(addprefix $(GENERIC_VDSO_DIR), getrandom.c)
-
-# This cmd checks that the vdso library does not contain dynamic relocations.
-# It has to be called after the linking of the vdso library and requires it
-# as a parameter.
-#
-# As a workaround for some GNU ld ports which produce unneeded R_*_NONE
-# dynamic relocations, ignore R_*_NONE.
-quiet_cmd_vdso_check = VDSOCHK $@
- cmd_vdso_check = if $(READELF) -rW $@ | grep -v _NONE | grep -q " R_\w*_"; \
- then (echo >&2 "$@: dynamic relocations are not supported"; \
- rm -f $@; /bin/false); fi
+obj-$(CONFIG_GENERIC_VDSO_DATA_STORE) += datastore.o
diff --git a/lib/vdso/Makefile.include b/lib/vdso/Makefile.include
new file mode 100644
index 000000000000..cedbf15f8087
--- /dev/null
+++ b/lib/vdso/Makefile.include
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+GENERIC_VDSO_MK_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH))
+
+c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c)
+c-getrandom-$(CONFIG_VDSO_GETRANDOM) := $(addprefix $(GENERIC_VDSO_DIR), getrandom.c)
+
+# This cmd checks that the vdso library does not contain dynamic relocations.
+# It has to be called after the linking of the vdso library and requires it
+# as a parameter.
+#
+# As a workaround for some GNU ld ports which produce unneeded R_*_NONE
+# dynamic relocations, ignore R_*_NONE.
+quiet_cmd_vdso_check = VDSOCHK $@
+ cmd_vdso_check = if $(READELF) -rW $@ | grep -v _NONE | grep -q " R_\w*_"; \
+ then (echo >&2 "$@: dynamic relocations are not supported"; \
+ rm -f $@; /bin/false); fi
diff --git a/lib/vdso/datastore.c b/lib/vdso/datastore.c
new file mode 100644
index 000000000000..c715e217ec65
--- /dev/null
+++ b/lib/vdso/datastore.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/linkage.h>
+#include <linux/mmap_lock.h>
+#include <linux/mm.h>
+#include <linux/time_namespace.h>
+#include <linux/types.h>
+#include <linux/vdso_datastore.h>
+#include <vdso/datapage.h>
+
+/*
+ * The vDSO data page.
+ */
+#ifdef CONFIG_HAVE_GENERIC_VDSO
+static union {
+ struct vdso_time_data data;
+ u8 page[PAGE_SIZE];
+} vdso_time_data_store __page_aligned_data;
+struct vdso_time_data *vdso_k_time_data = &vdso_time_data_store.data;
+static_assert(sizeof(vdso_time_data_store) == PAGE_SIZE);
+#endif /* CONFIG_HAVE_GENERIC_VDSO */
+
+#ifdef CONFIG_VDSO_GETRANDOM
+static union {
+ struct vdso_rng_data data;
+ u8 page[PAGE_SIZE];
+} vdso_rng_data_store __page_aligned_data;
+struct vdso_rng_data *vdso_k_rng_data = &vdso_rng_data_store.data;
+static_assert(sizeof(vdso_rng_data_store) == PAGE_SIZE);
+#endif /* CONFIG_VDSO_GETRANDOM */
+
+#ifdef CONFIG_ARCH_HAS_VDSO_ARCH_DATA
+static union {
+ struct vdso_arch_data data;
+ u8 page[VDSO_ARCH_DATA_SIZE];
+} vdso_arch_data_store __page_aligned_data;
+struct vdso_arch_data *vdso_k_arch_data = &vdso_arch_data_store.data;
+#endif /* CONFIG_ARCH_HAS_VDSO_ARCH_DATA */
+
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *timens_page = find_timens_vvar_page(vma);
+ unsigned long addr, pfn;
+ vm_fault_t err;
+
+ switch (vmf->pgoff) {
+ case VDSO_TIME_PAGE_OFFSET:
+ if (!IS_ENABLED(CONFIG_HAVE_GENERIC_VDSO))
+ return VM_FAULT_SIGBUS;
+ pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
+ if (timens_page) {
+ /*
+ * Fault in VVAR page too, since it will be accessed
+ * to get clock data anyway.
+ */
+ addr = vmf->address + VDSO_TIMENS_PAGE_OFFSET * PAGE_SIZE;
+ err = vmf_insert_pfn(vma, addr, pfn);
+ if (unlikely(err & VM_FAULT_ERROR))
+ return err;
+ pfn = page_to_pfn(timens_page);
+ }
+ break;
+ case VDSO_TIMENS_PAGE_OFFSET:
+ /*
+ * If a task belongs to a time namespace then a namespace
+ * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
+ * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
+ * offset.
+ * See also the comment near timens_setup_vdso_data().
+ */
+ if (!IS_ENABLED(CONFIG_TIME_NS) || !timens_page)
+ return VM_FAULT_SIGBUS;
+ pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data));
+ break;
+ case VDSO_RNG_PAGE_OFFSET:
+ if (!IS_ENABLED(CONFIG_VDSO_GETRANDOM))
+ return VM_FAULT_SIGBUS;
+ pfn = __phys_to_pfn(__pa_symbol(vdso_k_rng_data));
+ break;
+ case VDSO_ARCH_PAGES_START ... VDSO_ARCH_PAGES_END:
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_VDSO_ARCH_DATA))
+ return VM_FAULT_SIGBUS;
+ pfn = __phys_to_pfn(__pa_symbol(vdso_k_arch_data)) +
+ vmf->pgoff - VDSO_ARCH_PAGES_START;
+ break;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
+const struct vm_special_mapping vdso_vvar_mapping = {
+ .name = "[vvar]",
+ .fault = vvar_fault,
+};
+
+struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned long addr)
+{
+ return _install_special_mapping(mm, addr, VDSO_NR_PAGES * PAGE_SIZE,
+ VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP | VM_PFNMAP,
+ &vdso_vvar_mapping);
+}
+
+#ifdef CONFIG_TIME_NS
+/*
+ * The vvar page layout depends on whether a task belongs to the root or
+ * non-root time namespace. Whenever a task changes its namespace, the VVAR
+ * page tables are cleared and then they will be re-faulted with a
+ * corresponding layout.
+ * See also the comment near timens_setup_vdso_clock_data() for details.
+ */
+int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
+{
+ struct mm_struct *mm = task->mm;
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
+
+ mmap_read_lock(mm);
+ for_each_vma(vmi, vma) {
+ if (vma_is_special_mapping(vma, &vdso_vvar_mapping))
+ zap_vma_pages(vma);
+ }
+ mmap_read_unlock(mm);
+
+ return 0;
+}
+#endif
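A hedged sketch of how an architecture might consume the new helper when mapping its vDSO; the function name is hypothetical and the surrounding setup (address selection, installing the vDSO text) is elided:

	#include <linux/err.h>
	#include <linux/mm.h>
	#include <linux/vdso_datastore.h>

	/*
	 * Map the shared vvar data pages at @addr; the vDSO text itself is
	 * installed separately, after these VDSO_NR_PAGES pages.
	 */
	static int map_vvar_pages(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma = vdso_install_vvar_mapping(mm, addr);

		return IS_ERR(vma) ? PTR_ERR(vma) : 0;
	}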
diff --git a/lib/vdso/getrandom.c b/lib/vdso/getrandom.c
index 938ca539aaa6..440f8a6203a6 100644
--- a/lib/vdso/getrandom.c
+++ b/lib/vdso/getrandom.c
@@ -12,6 +12,9 @@
#include <uapi/linux/mman.h>
#include <uapi/linux/random.h>
+/* Bring in default accessors */
+#include <vdso/vsyscall.h>
+
#undef PAGE_SIZE
#undef PAGE_MASK
#define PAGE_SIZE (1UL << CONFIG_PAGE_SHIFT)
@@ -152,7 +155,7 @@ retry_generation:
/*
* Prevent the syscall from being reordered wrt current_generation. Pairs with the
- * smp_store_release(&_vdso_rng_data.generation) in random.c.
+ * smp_store_release(&vdso_k_rng_data->generation) in random.c.
*/
smp_rmb();
@@ -256,5 +259,6 @@ fallback_syscall:
static __always_inline ssize_t
__cvdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len)
{
- return __cvdso_getrandom_data(__arch_get_vdso_rng_data(), buffer, len, flags, opaque_state, opaque_len);
+ return __cvdso_getrandom_data(__arch_get_vdso_u_rng_data(), buffer, len, flags,
+ opaque_state, opaque_len);
}
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index c01eaafd8041..93ef801a97ef 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -5,6 +5,9 @@
#include <vdso/datapage.h>
#include <vdso/helpers.h>
+/* Bring in default accessors */
+#include <vdso/vsyscall.h>
+
#ifndef vdso_calc_ns
#ifdef VDSO_DELTA_NOMASK
@@ -14,12 +17,12 @@
#endif
#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
-static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
+static __always_inline bool vdso_delta_ok(const struct vdso_clock *vc, u64 delta)
{
- return delta < vd->max_cycles;
+ return delta < vc->max_cycles;
}
#else
-static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
+static __always_inline bool vdso_delta_ok(const struct vdso_clock *vc, u64 delta)
{
return true;
}
@@ -36,14 +39,14 @@ static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
* Default implementation which works for all sane clocksources. That
* obviously excludes x86/TSC.
*/
-static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
+static __always_inline u64 vdso_calc_ns(const struct vdso_clock *vc, u64 cycles, u64 base)
{
- u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);
+ u64 delta = (cycles - vc->cycle_last) & VDSO_DELTA_MASK(vc);
- if (likely(vdso_delta_ok(vd, delta)))
- return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
+ if (likely(vdso_delta_ok(vc, delta)))
+ return vdso_shift_ns((delta * vc->mult) + base, vc->shift);
- return mul_u64_u32_add_u64_shr(delta, vd->mult, base, vd->shift);
+ return mul_u64_u32_add_u64_shr(delta, vc->mult, base, vc->shift);
}
#endif /* vdso_calc_ns */
@@ -55,9 +58,9 @@ static inline bool __arch_vdso_hres_capable(void)
#endif
#ifndef vdso_clocksource_ok
-static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+static inline bool vdso_clocksource_ok(const struct vdso_clock *vc)
{
- return vd->clock_mode != VDSO_CLOCKMODE_NONE;
+ return vc->clock_mode != VDSO_CLOCKMODE_NONE;
}
#endif
@@ -69,36 +72,45 @@ static inline bool vdso_cycles_ok(u64 cycles)
#endif
#ifdef CONFIG_TIME_NS
-static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+
+#ifdef CONFIG_GENERIC_VDSO_DATA_STORE
+static __always_inline
+const struct vdso_time_data *__arch_get_vdso_u_timens_data(const struct vdso_time_data *vd)
+{
+ return (void *)vd + PAGE_SIZE;
+}
+#endif /* CONFIG_GENERIC_VDSO_DATA_STORE */
+
+static __always_inline
+int do_hres_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
+ clockid_t clk, struct __kernel_timespec *ts)
{
- const struct timens_offset *offs = &vdns->offset[clk];
+ const struct vdso_time_data *vd = __arch_get_vdso_u_timens_data(vdns);
+ const struct timens_offset *offs = &vcns->offset[clk];
+ const struct vdso_clock *vc = vd->clock_data;
const struct vdso_timestamp *vdso_ts;
- const struct vdso_data *vd;
u64 cycles, ns;
u32 seq;
s64 sec;
- vd = vdns - (clk == CLOCK_MONOTONIC_RAW ? CS_RAW : CS_HRES_COARSE);
- vd = __arch_get_timens_vdso_data(vd);
if (clk != CLOCK_MONOTONIC_RAW)
- vd = &vd[CS_HRES_COARSE];
+ vc = &vc[CS_HRES_COARSE];
else
- vd = &vd[CS_RAW];
- vdso_ts = &vd->basetime[clk];
+ vc = &vc[CS_RAW];
+ vdso_ts = &vc->basetime[clk];
do {
- seq = vdso_read_begin(vd);
+ seq = vdso_read_begin(vc);
- if (unlikely(!vdso_clocksource_ok(vd)))
+ if (unlikely(!vdso_clocksource_ok(vc)))
return -1;
- cycles = __arch_get_hw_counter(vd->clock_mode, vd);
+ cycles = __arch_get_hw_counter(vc->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
- ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
+ ns = vdso_calc_ns(vc, cycles, vdso_ts->nsec);
sec = vdso_ts->sec;
- } while (unlikely(vdso_read_retry(vd, seq)));
+ } while (unlikely(vdso_read_retry(vc, seq)));
/* Add the namespace offset */
sec += offs->sec;
@@ -115,22 +127,24 @@ static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_
}
#else
static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
+const struct vdso_time_data *__arch_get_vdso_u_timens_data(const struct vdso_time_data *vd)
{
return NULL;
}
-static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline
+int do_hres_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
+ clockid_t clk, struct __kernel_timespec *ts)
{
return -EINVAL;
}
#endif
-static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline
+int do_hres(const struct vdso_time_data *vd, const struct vdso_clock *vc,
+ clockid_t clk, struct __kernel_timespec *ts)
{
- const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+ const struct vdso_timestamp *vdso_ts = &vc->basetime[clk];
u64 cycles, sec, ns;
u32 seq;
@@ -142,31 +156,31 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
/*
* Open coded function vdso_read_begin() to handle
* VDSO_CLOCKMODE_TIMENS. Time namespace enabled tasks have a
- * special VVAR page installed which has vd->seq set to 1 and
- * vd->clock_mode set to VDSO_CLOCKMODE_TIMENS. For non time
+ * special VVAR page installed which has vc->seq set to 1 and
+ * vc->clock_mode set to VDSO_CLOCKMODE_TIMENS. For non time
* namespace affected tasks this does not affect performance
- * because if vd->seq is odd, i.e. a concurrent update is in
- * progress the extra check for vd->clock_mode is just a few
- * extra instructions while spin waiting for vd->seq to become
+ * because if vc->seq is odd, i.e. a concurrent update is in
+ * progress the extra check for vc->clock_mode is just a few
+ * extra instructions while spin waiting for vc->seq to become
* even again.
*/
- while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
+ while (unlikely((seq = READ_ONCE(vc->seq)) & 1)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- return do_hres_timens(vd, clk, ts);
+ vc->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return do_hres_timens(vd, vc, clk, ts);
cpu_relax();
}
smp_rmb();
- if (unlikely(!vdso_clocksource_ok(vd)))
+ if (unlikely(!vdso_clocksource_ok(vc)))
return -1;
- cycles = __arch_get_hw_counter(vd->clock_mode, vd);
+ cycles = __arch_get_hw_counter(vc->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
- ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
+ ns = vdso_calc_ns(vc, cycles, vdso_ts->nsec);
sec = vdso_ts->sec;
- } while (unlikely(vdso_read_retry(vd, seq)));
+ } while (unlikely(vdso_read_retry(vc, seq)));
/*
* Do this outside the loop: a race inside the loop could result
@@ -179,21 +193,25 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
}
#ifdef CONFIG_TIME_NS
-static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline
+int do_coarse_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
+ clockid_t clk, struct __kernel_timespec *ts)
{
- const struct vdso_data *vd = __arch_get_timens_vdso_data(vdns);
- const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
- const struct timens_offset *offs = &vdns->offset[clk];
+ const struct vdso_time_data *vd = __arch_get_vdso_u_timens_data(vdns);
+ const struct timens_offset *offs = &vcns->offset[clk];
+ const struct vdso_clock *vc = vd->clock_data;
+ const struct vdso_timestamp *vdso_ts;
u64 nsec;
s64 sec;
s32 seq;
+ vdso_ts = &vc->basetime[clk];
+
do {
- seq = vdso_read_begin(vd);
+ seq = vdso_read_begin(vc);
sec = vdso_ts->sec;
nsec = vdso_ts->nsec;
- } while (unlikely(vdso_read_retry(vd, seq)));
+ } while (unlikely(vdso_read_retry(vc, seq)));
/* Add the namespace offset */
sec += offs->sec;
@@ -208,17 +226,19 @@ static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clocki
return 0;
}
#else
-static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline
+int do_coarse_timens(const struct vdso_time_data *vdns, const struct vdso_clock *vcns,
+ clockid_t clk, struct __kernel_timespec *ts)
{
return -1;
}
#endif
-static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
- struct __kernel_timespec *ts)
+static __always_inline
+int do_coarse(const struct vdso_time_data *vd, const struct vdso_clock *vc,
+ clockid_t clk, struct __kernel_timespec *ts)
{
- const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+ const struct vdso_timestamp *vdso_ts = &vc->basetime[clk];
u32 seq;
do {
@@ -226,25 +246,26 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
* Open coded function vdso_read_begin() to handle
* VDSO_CLOCK_TIMENS. See comment in do_hres().
*/
- while ((seq = READ_ONCE(vd->seq)) & 1) {
+ while ((seq = READ_ONCE(vc->seq)) & 1) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- return do_coarse_timens(vd, clk, ts);
+ vc->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return do_coarse_timens(vd, vc, clk, ts);
cpu_relax();
}
smp_rmb();
ts->tv_sec = vdso_ts->sec;
ts->tv_nsec = vdso_ts->nsec;
- } while (unlikely(vdso_read_retry(vd, seq)));
+ } while (unlikely(vdso_read_retry(vc, seq)));
return 0;
}
static __always_inline int
-__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
+__cvdso_clock_gettime_common(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *ts)
{
+ const struct vdso_clock *vc = vd->clock_data;
u32 msk;
/* Check for negative values or invalid clocks */
@@ -257,19 +278,19 @@ __cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
*/
msk = 1U << clock;
if (likely(msk & VDSO_HRES))
- vd = &vd[CS_HRES_COARSE];
+ vc = &vc[CS_HRES_COARSE];
else if (msk & VDSO_COARSE)
- return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
+ return do_coarse(vd, &vc[CS_HRES_COARSE], clock, ts);
else if (msk & VDSO_RAW)
- vd = &vd[CS_RAW];
+ vc = &vc[CS_RAW];
else
return -1;
- return do_hres(vd, clock, ts);
+ return do_hres(vd, vc, clock, ts);
}
static __maybe_unused int
-__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
+__cvdso_clock_gettime_data(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *ts)
{
int ret = __cvdso_clock_gettime_common(vd, clock, ts);
@@ -282,12 +303,12 @@ __cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
- return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
+ return __cvdso_clock_gettime_data(__arch_get_vdso_u_time_data(), clock, ts);
}
#ifdef BUILD_VDSO32
static __maybe_unused int
-__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
+__cvdso_clock_gettime32_data(const struct vdso_time_data *vd, clockid_t clock,
struct old_timespec32 *res)
{
struct __kernel_timespec ts;
@@ -308,19 +329,20 @@ __cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
- return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
+ return __cvdso_clock_gettime32_data(__arch_get_vdso_u_time_data(), clock, res);
}
#endif /* BUILD_VDSO32 */
static __maybe_unused int
-__cvdso_gettimeofday_data(const struct vdso_data *vd,
+__cvdso_gettimeofday_data(const struct vdso_time_data *vd,
struct __kernel_old_timeval *tv, struct timezone *tz)
{
+ const struct vdso_clock *vc = vd->clock_data;
if (likely(tv != NULL)) {
struct __kernel_timespec ts;
- if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
+ if (do_hres(vd, &vc[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
return gettimeofday_fallback(tv, tz);
tv->tv_sec = ts.tv_sec;
@@ -329,8 +351,8 @@ __cvdso_gettimeofday_data(const struct vdso_data *vd,
if (unlikely(tz != NULL)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- vd = __arch_get_timens_vdso_data(vd);
+ vc->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ vd = __arch_get_vdso_u_timens_data(vd);
tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
@@ -342,20 +364,23 @@ __cvdso_gettimeofday_data(const struct vdso_data *vd,
static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
- return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
+ return __cvdso_gettimeofday_data(__arch_get_vdso_u_time_data(), tv, tz);
}
#ifdef VDSO_HAS_TIME
static __maybe_unused __kernel_old_time_t
-__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
+__cvdso_time_data(const struct vdso_time_data *vd, __kernel_old_time_t *time)
{
+ const struct vdso_clock *vc = vd->clock_data;
__kernel_old_time_t t;
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- vd = __arch_get_timens_vdso_data(vd);
+ vc->clock_mode == VDSO_CLOCKMODE_TIMENS) {
+ vd = __arch_get_vdso_u_timens_data(vd);
+ vc = vd->clock_data;
+ }
- t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
+ t = READ_ONCE(vc[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
if (time)
*time = t;
@@ -365,15 +390,16 @@ __cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
- return __cvdso_time_data(__arch_get_vdso_data(), time);
+ return __cvdso_time_data(__arch_get_vdso_u_time_data(), time);
}
#endif /* VDSO_HAS_TIME */
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
-int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
+int __cvdso_clock_getres_common(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *res)
{
+ const struct vdso_clock *vc = vd->clock_data;
u32 msk;
u64 ns;
@@ -382,8 +408,8 @@ int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
return -1;
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
- vd = __arch_get_timens_vdso_data(vd);
+ vc->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ vd = __arch_get_vdso_u_timens_data(vd);
/*
* Convert the clockid to a bitmask and use it to check which
@@ -394,7 +420,7 @@ int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
/*
* Preserves the behaviour of posix_get_hrtimer_res().
*/
- ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+ ns = READ_ONCE(vd->hrtimer_res);
} else if (msk & VDSO_COARSE) {
/*
* Preserves the behaviour of posix_get_coarse_res().
@@ -412,7 +438,7 @@ int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
}
static __maybe_unused
-int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
+int __cvdso_clock_getres_data(const struct vdso_time_data *vd, clockid_t clock,
struct __kernel_timespec *res)
{
int ret = __cvdso_clock_getres_common(vd, clock, res);
@@ -425,12 +451,12 @@ int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
- return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
+ return __cvdso_clock_getres_data(__arch_get_vdso_u_time_data(), clock, res);
}
#ifdef BUILD_VDSO32
static __maybe_unused int
-__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
+__cvdso_clock_getres_time32_data(const struct vdso_time_data *vd, clockid_t clock,
struct old_timespec32 *res)
{
struct __kernel_timespec ts;
@@ -451,7 +477,7 @@ __cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
- return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
+ return __cvdso_clock_getres_time32_data(__arch_get_vdso_u_time_data(),
clock, res);
}
#endif /* BUILD_VDSO32 */
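Rough shape implied by the conversions above (illustrative only, not the authoritative <vdso/datapage.h> definition): the per-clocksource state moves into an embedded array, so the helpers now take the container and the selected clock separately:

	struct vdso_clock;	/* seq, cycle_last, mult/shift, basetime[], ... */

	struct vdso_time_data {
		struct vdso_clock	clock_data[CS_BASES];	/* CS_HRES_COARSE, CS_RAW */
		s32			tz_minuteswest;
		s32			tz_dsttime;
		u32			hrtimer_res;
		/* ... */
	};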
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 56fe96319292..734bd70c8b9b 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -114,6 +114,13 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
}
EXPORT_SYMBOL(simple_strtoul);
+unsigned long simple_strntoul(const char *cp, char **endp, unsigned int base,
+ size_t max_chars)
+{
+ return simple_strntoull(cp, endp, base, max_chars);
+}
+EXPORT_SYMBOL(simple_strntoul);
+
/**
* simple_strtol - convert a string to a signed long
* @cp: The start of the string
diff --git a/lib/win_minmax.c b/lib/win_minmax.c
index ec10506834b6..1682e614309c 100644
--- a/lib/win_minmax.c
+++ b/lib/win_minmax.c
@@ -97,3 +97,4 @@ u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas)
return minmax_subwin_update(m, win, &val);
}
+EXPORT_SYMBOL(minmax_running_min);
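A minimal usage sketch for the newly exported symbol, assuming a modular caller that tracks a windowed minimum; the window length and time units are illustrative:

	#include <linux/time64.h>
	#include <linux/win_minmax.h>

	static struct minmax rtt_min;

	static void record_rtt(u32 now_us, u32 rtt_us)
	{
		/* Track the minimum RTT seen over a sliding 10 s window. */
		minmax_running_min(&rtt_min, 10 * USEC_PER_SEC, now_us, rtt_us);
	}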
diff --git a/lib/xarray.c b/lib/xarray.c
index 32d4bac8c94c..116e9286c64e 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -125,19 +125,20 @@ static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
*/
static void xas_squash_marks(const struct xa_state *xas)
{
- unsigned int mark = 0;
+ xa_mark_t mark = 0;
unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
- if (!xas->xa_sibs)
- return;
+ for (;;) {
+ unsigned long *marks = node_marks(xas->xa_node, mark);
- do {
- unsigned long *marks = xas->xa_node->marks[mark];
- if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
- continue;
- __set_bit(xas->xa_offset, marks);
- bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
- } while (mark++ != (__force unsigned)XA_MARK_MAX);
+ if (find_next_bit(marks, limit, xas->xa_offset + 1) != limit) {
+ __set_bit(xas->xa_offset, marks);
+ bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
+ }
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
}
/* extracts the offset within this node from the index */
@@ -435,6 +436,11 @@ static unsigned long max_index(void *entry)
return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}
+static inline void *xa_zero_to_null(void *entry)
+{
+ return xa_is_zero(entry) ? NULL : entry;
+}
+
static void xas_shrink(struct xa_state *xas)
{
struct xarray *xa = xas->xa;
@@ -451,8 +457,8 @@ static void xas_shrink(struct xa_state *xas)
break;
if (!xa_is_node(entry) && node->shift)
break;
- if (xa_is_zero(entry) && xa_zero_busy(xa))
- entry = NULL;
+ if (xa_zero_busy(xa))
+ entry = xa_zero_to_null(entry);
xas->xa_node = XAS_BOUNDS;
RCU_INIT_POINTER(xa->xa_head, entry);
@@ -1022,7 +1028,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
unsigned int mask = xas->xa_sibs;
/* XXX: no support for splitting really large entries yet */
- if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
+ if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order))
goto nomem;
if (xas->xa_shift + XA_CHUNK_SHIFT > order)
return;
@@ -1147,6 +1153,7 @@ void xas_pause(struct xa_state *xas)
if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
break;
}
+ xas->xa_index &= ~0UL << node->shift;
xas->xa_index += (offset - xas->xa_offset) << node->shift;
if (xas->xa_index == 0)
xas->xa_node = XAS_BOUNDS;
@@ -1382,6 +1389,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
continue;
+ if (xa_is_sibling(entry))
+ continue;
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
@@ -1474,9 +1483,7 @@ void *xa_load(struct xarray *xa, unsigned long index)
rcu_read_lock();
do {
- entry = xas_load(&xas);
- if (xa_is_zero(entry))
- entry = NULL;
+ entry = xa_zero_to_null(xas_load(&xas));
} while (xas_retry(&xas, entry));
rcu_read_unlock();
@@ -1486,8 +1493,6 @@ EXPORT_SYMBOL(xa_load);
static void *xas_result(struct xa_state *xas, void *curr)
{
- if (xa_is_zero(curr))
- return NULL;
if (xas_error(xas))
curr = xas->xa_node;
return curr;
@@ -1508,7 +1513,7 @@ static void *xas_result(struct xa_state *xas, void *curr)
void *__xa_erase(struct xarray *xa, unsigned long index)
{
XA_STATE(xas, xa, index);
- return xas_result(&xas, xas_store(&xas, NULL));
+ return xas_result(&xas, xa_zero_to_null(xas_store(&xas, NULL)));
}
EXPORT_SYMBOL(__xa_erase);
@@ -1567,7 +1572,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
xas_clear_mark(&xas, XA_FREE_MARK);
} while (__xas_nomem(&xas, gfp));
- return xas_result(&xas, curr);
+ return xas_result(&xas, xa_zero_to_null(curr));
}
EXPORT_SYMBOL(__xa_store);
@@ -1600,6 +1605,9 @@ void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
}
EXPORT_SYMBOL(xa_store);
+static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp);
+
/**
* __xa_cmpxchg() - Store this entry in the XArray.
* @xa: XArray.
@@ -1619,6 +1627,13 @@ EXPORT_SYMBOL(xa_store);
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
+ return xa_zero_to_null(__xa_cmpxchg_raw(xa, index, old, entry, gfp));
+}
+EXPORT_SYMBOL(__xa_cmpxchg);
+
+static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
XA_STATE(xas, xa, index);
void *curr;
@@ -1636,7 +1651,6 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
return xas_result(&xas, curr);
}
-EXPORT_SYMBOL(__xa_cmpxchg);
/**
* __xa_insert() - Store this entry in the XArray if no entry is present.
@@ -1656,26 +1670,16 @@ EXPORT_SYMBOL(__xa_cmpxchg);
*/
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
- XA_STATE(xas, xa, index);
void *curr;
+ int errno;
- if (WARN_ON_ONCE(xa_is_advanced(entry)))
- return -EINVAL;
if (!entry)
entry = XA_ZERO_ENTRY;
-
- do {
- curr = xas_load(&xas);
- if (!curr) {
- xas_store(&xas, entry);
- if (xa_track_free(xa))
- xas_clear_mark(&xas, XA_FREE_MARK);
- } else {
- xas_set_err(&xas, -EBUSY);
- }
- } while (__xas_nomem(&xas, gfp));
-
- return xas_error(&xas);
+ curr = __xa_cmpxchg_raw(xa, index, NULL, entry, gfp);
+ errno = xa_err(curr);
+ if (errno)
+ return errno;
+ return (curr != NULL) ? -EBUSY : 0;
}
EXPORT_SYMBOL(__xa_insert);
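A small usage sketch of the semantics the rework above preserves: inserting at an occupied index still fails with -EBUSY, and reserved (zero) entries keep reading back as NULL. The index and payload are illustrative:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(demo_xa);

	static int demo_insert(void *payload)
	{
		int err;

		err = xa_insert(&demo_xa, 1, payload, GFP_KERNEL);
		if (err)
			return err;	/* -ENOMEM, or -EBUSY if index 1 was occupied */

		/* A second insert at the same index now reports -EBUSY. */
		err = xa_insert(&demo_xa, 1, payload, GFP_KERNEL);
		WARN_ON(err != -EBUSY);

		xa_erase(&demo_xa, 1);
		return 0;
	}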
diff --git a/lib/zstd/common/portability_macros.h b/lib/zstd/common/portability_macros.h
index 0e3b2c0a527d..0dde8bf56595 100644
--- a/lib/zstd/common/portability_macros.h
+++ b/lib/zstd/common/portability_macros.h
@@ -55,7 +55,7 @@
#ifndef DYNAMIC_BMI2
#if ((defined(__clang__) && __has_attribute(__target__)) \
|| (defined(__GNUC__) \
- && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
+ && (__GNUC__ >= 11))) \
&& (defined(__x86_64__) || defined(_M_X64)) \
&& !defined(__BMI2__)
# define DYNAMIC_BMI2 1