Diffstat (limited to 'lib/tests')
-rw-r--r--  lib/tests/Makefile                        48
-rw-r--r--  lib/tests/bitfield_kunit.c               155
-rw-r--r--  lib/tests/blackhole_dev_kunit.c           85
-rw-r--r--  lib/tests/checksum_kunit.c               640
-rw-r--r--  lib/tests/cmdline_kunit.c                157
-rw-r--r--  lib/tests/cpumask_kunit.c                156
-rw-r--r--  lib/tests/crc_kunit.c                    495
-rw-r--r--  lib/tests/fortify_kunit.c               1128
-rw-r--r--  lib/tests/hashtable_test.c               318
-rw-r--r--  lib/tests/is_signed_type_kunit.c          50
-rw-r--r--  lib/tests/kfifo_kunit.c                  224
-rw-r--r--  lib/tests/kunit_iov_iter.c              1033
-rw-r--r--  lib/tests/list-test.c                   1505
-rw-r--r--  lib/tests/longest_symbol_kunit.c          82
-rw-r--r--  lib/tests/memcpy_kunit.c                 514
-rwxr-xr-x  lib/tests/module/gen_test_kallsyms.sh      2
-rw-r--r--  lib/tests/overflow_kunit.c              1258
-rw-r--r--  lib/tests/printf_kunit.c                 803
-rw-r--r--  lib/tests/scanf_kunit.c                  815
-rw-r--r--  lib/tests/siphash_kunit.c                198
-rw-r--r--  lib/tests/slub_kunit.c                   328
-rw-r--r--  lib/tests/stackinit_kunit.c              594
-rw-r--r--  lib/tests/string_helpers_kunit.c         629
-rw-r--r--  lib/tests/string_kunit.c                 637
-rw-r--r--  lib/tests/test_bits.c                    110
-rw-r--r--  lib/tests/test_fprobe.c                  230
-rw-r--r--  lib/tests/test_hash.c                    239
-rw-r--r--  lib/tests/test_kprobes.c                 404
-rw-r--r--  lib/tests/test_linear_ranges.c           220
-rw-r--r--  lib/tests/test_list_sort.c               123
-rw-r--r--  lib/tests/test_sort.c                     61
-rw-r--r--  lib/tests/usercopy_kunit.c               335
-rw-r--r--  lib/tests/util_macros_kunit.c            240
33 files changed, 13815 insertions(+), 1 deletion(-)
diff --git a/lib/tests/Makefile b/lib/tests/Makefile
index 8e4f42cb9c54..5a4794c1826e 100644
--- a/lib/tests/Makefile
+++ b/lib/tests/Makefile
@@ -1 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for tests of kernel library functions.
+
+# KUnit tests
+CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
+obj-$(CONFIG_BITS_TEST) += test_bits.o
+obj-$(CONFIG_BLACKHOLE_DEV_KUNIT_TEST) += blackhole_dev_kunit.o
+obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
+obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
+obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
+obj-$(CONFIG_CRC_KUNIT_TEST) += crc_kunit.o
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
+CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
+CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
+obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
+obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
+obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
+obj-$(CONFIG_TEST_IOV_ITER) += kunit_iov_iter.o
+obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
+obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
+obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
+obj-$(CONFIG_KFIFO_KUNIT_TEST) += kfifo_kunit.o
+obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
+obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
+
+CFLAGS_longest_symbol_kunit.o += $(call cc-disable-warning, missing-prototypes)
+obj-$(CONFIG_LONGEST_SYM_KUNIT_TEST) += longest_symbol_kunit.o
+
+obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
+CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
+obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
+obj-$(CONFIG_PRINTF_KUNIT_TEST) += printf_kunit.o
+obj-$(CONFIG_SCANF_KUNIT_TEST) += scanf_kunit.o
+obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
+obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
+obj-$(CONFIG_TEST_SORT) += test_sort.o
+CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
+obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
+obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
+obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
+obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
+obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
+
obj-$(CONFIG_TEST_RUNTIME_MODULE) += module/
diff --git a/lib/tests/bitfield_kunit.c b/lib/tests/bitfield_kunit.c
new file mode 100644
index 000000000000..5ccd86f61896
--- /dev/null
+++ b/lib/tests/bitfield_kunit.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for bitfield helpers.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/bitfield.h>
+
+#define CHECK_ENC_GET_U(tp, v, field, res) do { \
+ { \
+ u##tp _res; \
+ \
+ _res = u##tp##_encode_bits(v, field); \
+ KUNIT_ASSERT_FALSE_MSG(context, _res != res, \
+ "u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n", \
+ (u64)_res); \
+ KUNIT_ASSERT_FALSE(context, \
+ u##tp##_get_bits(_res, field) != v); \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET_LE(tp, v, field, res) do { \
+ { \
+ __le##tp _res; \
+ \
+ _res = le##tp##_encode_bits(v, field); \
+ KUNIT_ASSERT_FALSE_MSG(context, \
+ _res != cpu_to_le##tp(res), \
+ "le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx",\
+ (u64)le##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ KUNIT_ASSERT_FALSE(context, \
+ le##tp##_get_bits(_res, field) != v);\
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET_BE(tp, v, field, res) do { \
+ { \
+ __be##tp _res; \
+ \
+ _res = be##tp##_encode_bits(v, field); \
+ KUNIT_ASSERT_FALSE_MSG(context, \
+ _res != cpu_to_be##tp(res), \
+ "be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx", \
+ (u64)be##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ KUNIT_ASSERT_FALSE(context, \
+ be##tp##_get_bits(_res, field) != v);\
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET(tp, v, field, res) do { \
+ CHECK_ENC_GET_U(tp, v, field, res); \
+ CHECK_ENC_GET_LE(tp, v, field, res); \
+ CHECK_ENC_GET_BE(tp, v, field, res); \
+ } while (0)
+
+static void __init test_bitfields_constants(struct kunit *context)
+{
+ /*
+ * NOTE
+ * This whole function compiles (or at least should, if everything
+ * is going according to plan) to nothing after optimisation.
+ */
+
+ CHECK_ENC_GET(16, 1, 0x000f, 0x0001);
+ CHECK_ENC_GET(16, 3, 0x00f0, 0x0030);
+ CHECK_ENC_GET(16, 5, 0x0f00, 0x0500);
+ CHECK_ENC_GET(16, 7, 0xf000, 0x7000);
+ CHECK_ENC_GET(16, 14, 0x000f, 0x000e);
+ CHECK_ENC_GET(16, 15, 0x00f0, 0x00f0);
+
+ CHECK_ENC_GET_U(8, 1, 0x0f, 0x01);
+ CHECK_ENC_GET_U(8, 3, 0xf0, 0x30);
+ CHECK_ENC_GET_U(8, 14, 0x0f, 0x0e);
+ CHECK_ENC_GET_U(8, 15, 0xf0, 0xf0);
+
+ CHECK_ENC_GET(32, 1, 0x00000f00, 0x00000100);
+ CHECK_ENC_GET(32, 3, 0x0000f000, 0x00003000);
+ CHECK_ENC_GET(32, 5, 0x000f0000, 0x00050000);
+ CHECK_ENC_GET(32, 7, 0x00f00000, 0x00700000);
+ CHECK_ENC_GET(32, 14, 0x0f000000, 0x0e000000);
+ CHECK_ENC_GET(32, 15, 0xf0000000, 0xf0000000);
+
+ CHECK_ENC_GET(64, 1, 0x00000f0000000000ull, 0x0000010000000000ull);
+ CHECK_ENC_GET(64, 3, 0x0000f00000000000ull, 0x0000300000000000ull);
+ CHECK_ENC_GET(64, 5, 0x000f000000000000ull, 0x0005000000000000ull);
+ CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull);
+ CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull);
+ CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull);
+}
+
+#define CHECK(tp, mask) do { \
+ u64 v; \
+ \
+ for (v = 0; v < 1 << hweight32(mask); v++) \
+ KUNIT_ASSERT_FALSE(context, \
+ tp##_encode_bits(v, mask) != v << __ffs64(mask));\
+ } while (0)
+
+static void __init test_bitfields_variables(struct kunit *context)
+{
+ CHECK(u8, 0x0f);
+ CHECK(u8, 0xf0);
+ CHECK(u8, 0x38);
+
+ CHECK(u16, 0x0038);
+ CHECK(u16, 0x0380);
+ CHECK(u16, 0x3800);
+ CHECK(u16, 0x8000);
+
+ CHECK(u32, 0x80000000);
+ CHECK(u32, 0x7f000000);
+ CHECK(u32, 0x07e00000);
+ CHECK(u32, 0x00018000);
+
+ CHECK(u64, 0x8000000000000000ull);
+ CHECK(u64, 0x7f00000000000000ull);
+ CHECK(u64, 0x0001800000000000ull);
+ CHECK(u64, 0x0000000080000000ull);
+ CHECK(u64, 0x000000007f000000ull);
+ CHECK(u64, 0x0000000018000000ull);
+ CHECK(u64, 0x0000001f8000000ull);
+}
+
+#ifdef TEST_BITFIELD_COMPILE
+static void __init test_bitfields_compile(struct kunit *context)
+{
+ /* these should fail compilation */
+ CHECK_ENC_GET(16, 16, 0x0f00, 0x1000);
+ u32_encode_bits(7, 0x06000000);
+
+ /* this should at least give a warning */
+ u16_encode_bits(0, 0x60000);
+}
+#endif
+
+static struct kunit_case __refdata bitfields_test_cases[] = {
+ KUNIT_CASE(test_bitfields_constants),
+ KUNIT_CASE(test_bitfields_variables),
+ {}
+};
+
+static struct kunit_suite bitfields_test_suite = {
+ .name = "bitfields",
+ .test_cases = bitfields_test_cases,
+};
+
+kunit_test_suites(&bitfields_test_suite);
+
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_DESCRIPTION("Test cases for bitfield helpers");
+MODULE_LICENSE("GPL");
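
A minimal model of the contract the variable-input cases assert, namely that
encoding places a value at the mask's lowest set bit. The example_* names are
hypothetical, not kernel API; the real <linux/bitfield.h> helpers additionally
perform compile-time mask and range checking (and the mask must be non-zero,
since __ffs() is undefined for 0):

	static inline u32 example_u32_encode_bits(u32 v, u32 mask)
	{
		/* shift the value up to the field's lowest set bit */
		return (v << __ffs(mask)) & mask;
	}

	static inline u32 example_u32_get_bits(u32 reg, u32 mask)
	{
		/* inverse: isolate the field, then shift it back down */
		return (reg & mask) >> __ffs(mask);
	}

For instance, example_u32_encode_bits(5, 0x000f0000) yields 0x00050000,
matching the CHECK_ENC_GET(32, 5, 0x000f0000, 0x00050000) case above.
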
diff --git a/lib/tests/blackhole_dev_kunit.c b/lib/tests/blackhole_dev_kunit.c
new file mode 100644
index 000000000000..06834ab35f43
--- /dev/null
+++ b/lib/tests/blackhole_dev_kunit.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This tests the blackhole_dev that is created during the
+ * net subsystem initialization. The test injects an skb into the
+ * stack with skb->dev set to the blackhole_dev and expects the
+ * kernel to behave in a sane manner (in other words, it must
+ * *not crash*)!
+ *
+ * Copyright (c) 2018, Mahesh Bandewar <maheshb@google.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+#include <linux/ipv6.h>
+
+#include <net/dst.h>
+
+#define SKB_SIZE 256
+#define HEAD_SIZE (14+40+8) /* Ether + IPv6 + UDP */
+#define TAIL_SIZE 32 /* random tail-room */
+
+#define UDP_PORT 1234
+
+static void test_blackholedev(struct kunit *test)
+{
+ struct ipv6hdr *ip6h;
+ struct sk_buff *skb;
+ struct udphdr *uh;
+ int data_len;
+
+ skb = alloc_skb(SKB_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, skb);
+
+ /* Reserve head-room for the headers */
+ skb_reserve(skb, HEAD_SIZE);
+
+ /* Add data to the skb */
+ data_len = SKB_SIZE - (HEAD_SIZE + TAIL_SIZE);
+ memset(__skb_put(skb, data_len), 0xf, data_len);
+
+ /* Add protocol data */
+ /* (Transport) UDP */
+ uh = (struct udphdr *)skb_push(skb, sizeof(struct udphdr));
+ skb_set_transport_header(skb, 0);
+ uh->source = uh->dest = htons(UDP_PORT);
+ uh->len = htons(data_len);
+ uh->check = 0;
+ /* (Network) IPv6 */
+ ip6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));
+ skb_set_network_header(skb, 0);
+ ip6h->hop_limit = 32;
+ ip6h->payload_len = htons(data_len + sizeof(struct udphdr));
+ ip6h->nexthdr = IPPROTO_UDP;
+ ip6h->saddr = in6addr_loopback;
+ ip6h->daddr = in6addr_loopback;
+ /* Ether */
+ skb_push(skb, sizeof(struct ethhdr));
+ skb_set_mac_header(skb, 0);
+
+ skb->protocol = htons(ETH_P_IPV6);
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = blackhole_netdev;
+
+ /* Now attempt to send the packet */
+ KUNIT_EXPECT_EQ(test, dev_queue_xmit(skb), NET_XMIT_SUCCESS);
+}
+
+static struct kunit_case blackholedev_cases[] = {
+ KUNIT_CASE(test_blackholedev),
+ {},
+};
+
+static struct kunit_suite blackholedev_suite = {
+ .name = "blackholedev",
+ .test_cases = blackholedev_cases,
+};
+
+kunit_test_suite(blackholedev_suite);
+
+MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
+MODULE_DESCRIPTION("module test of the blackhole_dev");
+MODULE_LICENSE("GPL");
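
The skb above is assembled inside-out: headroom is reserved first, the payload
is appended with __skb_put(), and each header is then prepended with
skb_push(). A worked example using this test's constants (a sketch of the
arithmetic only, not a general rule):

	/*
	 * SKB_SIZE  = 256
	 * HEAD_SIZE = 14 + 40 + 8 = 62      (Ethernet + IPv6 + UDP)
	 * data_len  = 256 - (62 + 32) = 162 (SKB_SIZE - HEAD_SIZE - TAIL_SIZE)
	 *
	 * The three pushes reclaim the reserved headroom exactly:
	 * 162 + 8 + 40 + 14 = 224 bytes of data, with TAIL_SIZE = 32
	 * bytes of tailroom left over.
	 */
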
diff --git a/lib/tests/checksum_kunit.c b/lib/tests/checksum_kunit.c
new file mode 100644
index 000000000000..be04aa42125c
--- /dev/null
+++ b/lib/tests/checksum_kunit.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for csum_partial, csum_fold, ip_fast_csum and csum_ipv6_magic
+ */
+
+#include <kunit/test.h>
+#include <asm/checksum.h>
+#include <net/ip6_checksum.h>
+
+#define MAX_LEN 512
+#define MAX_ALIGN 64
+#define TEST_BUFLEN (MAX_LEN + MAX_ALIGN)
+
+#define IPv4_MIN_WORDS 5
+#define IPv4_MAX_WORDS 15
+#define NUM_IPv6_TESTS 200
+#define NUM_IP_FAST_CSUM_TESTS 181
+
+/* Values for a little endian CPU. Byte swap each half on big endian CPU. */
+static const u32 random_init_sum = 0x2847aab;
+static const u8 random_buf[] = {
+ 0xac, 0xd7, 0x76, 0x69, 0x6e, 0xf2, 0x93, 0x2c, 0x1f, 0xe0, 0xde, 0x86,
+ 0x8f, 0x54, 0x33, 0x90, 0x95, 0xbf, 0xff, 0xb9, 0xea, 0x62, 0x6e, 0xb5,
+ 0xd3, 0x4f, 0xf5, 0x60, 0x50, 0x5c, 0xc7, 0xfa, 0x6d, 0x1a, 0xc7, 0xf0,
+ 0xd2, 0x2c, 0x12, 0x3d, 0x88, 0xe3, 0x14, 0x21, 0xb1, 0x5e, 0x45, 0x31,
+ 0xa2, 0x85, 0x36, 0x76, 0xba, 0xd8, 0xad, 0xbb, 0x9e, 0x49, 0x8f, 0xf7,
+ 0xce, 0xea, 0xef, 0xca, 0x2c, 0x29, 0xf7, 0x15, 0x5c, 0x1d, 0x4d, 0x09,
+ 0x1f, 0xe2, 0x14, 0x31, 0x8c, 0x07, 0x57, 0x23, 0x1f, 0x6f, 0x03, 0xe1,
+ 0x93, 0x19, 0x53, 0x03, 0x45, 0x49, 0x9a, 0x3b, 0x8e, 0x0c, 0x12, 0x5d,
+ 0x8a, 0xb8, 0x9b, 0x8c, 0x9a, 0x03, 0xe5, 0xa2, 0x43, 0xd2, 0x3b, 0x4e,
+ 0x7e, 0x30, 0x3c, 0x22, 0x2d, 0xc5, 0xfc, 0x9e, 0xdb, 0xc6, 0xf9, 0x69,
+ 0x12, 0x39, 0x1f, 0xa0, 0x11, 0x0c, 0x3f, 0xf5, 0x53, 0xc9, 0x30, 0xfb,
+ 0xb0, 0xdd, 0x21, 0x1d, 0x34, 0xe2, 0x65, 0x30, 0xf1, 0xe8, 0x1b, 0xe7,
+ 0x55, 0x0d, 0xeb, 0xbd, 0xcc, 0x9d, 0x24, 0xa4, 0xad, 0xa7, 0x93, 0x47,
+ 0x19, 0x2e, 0xc4, 0x5c, 0x3b, 0xc7, 0x6d, 0x95, 0x0c, 0x47, 0x60, 0xaf,
+ 0x5b, 0x47, 0xee, 0xdc, 0x31, 0x31, 0x14, 0x12, 0x7e, 0x9e, 0x45, 0xb1,
+ 0xc1, 0x69, 0x4b, 0x84, 0xfc, 0x88, 0xc1, 0x9e, 0x46, 0xb4, 0xc2, 0x25,
+ 0xc5, 0x6c, 0x4c, 0x22, 0x58, 0x5c, 0xbe, 0xff, 0xea, 0x88, 0x88, 0x7a,
+ 0xcb, 0x1c, 0x5d, 0x63, 0xa1, 0xf2, 0x33, 0x0c, 0xa2, 0x16, 0x0b, 0x6e,
+ 0x2b, 0x79, 0x58, 0xf7, 0xac, 0xd3, 0x6a, 0x3f, 0x81, 0x57, 0x48, 0x45,
+ 0xe3, 0x7c, 0xdc, 0xd6, 0x34, 0x7e, 0xe6, 0x73, 0xfa, 0xcb, 0x31, 0x18,
+ 0xa9, 0x0b, 0xee, 0x6b, 0x99, 0xb9, 0x2d, 0xde, 0x22, 0x0e, 0x71, 0x57,
+ 0x0e, 0x9b, 0x11, 0xd1, 0x15, 0x41, 0xd0, 0x6b, 0x50, 0x8a, 0x23, 0x64,
+ 0xe3, 0x9c, 0xb3, 0x55, 0x09, 0xe9, 0x32, 0x67, 0xf9, 0xe0, 0x73, 0xf1,
+ 0x60, 0x66, 0x0b, 0x88, 0x79, 0x8d, 0x4b, 0x52, 0x83, 0x20, 0x26, 0x78,
+ 0x49, 0x27, 0xe7, 0x3e, 0x29, 0xa8, 0x18, 0x82, 0x41, 0xdd, 0x1e, 0xcc,
+ 0x3b, 0xc4, 0x65, 0xd1, 0x21, 0x40, 0x72, 0xb2, 0x87, 0x5e, 0x16, 0x10,
+ 0x80, 0x3f, 0x4b, 0x58, 0x1c, 0xc2, 0x79, 0x20, 0xf0, 0xe0, 0x80, 0xd3,
+ 0x52, 0xa5, 0x19, 0x6e, 0x47, 0x90, 0x08, 0xf5, 0x50, 0xe2, 0xd6, 0xae,
+ 0xe9, 0x2e, 0xdc, 0xd5, 0xb4, 0x90, 0x1f, 0x79, 0x49, 0x82, 0x21, 0x84,
+ 0xa0, 0xb5, 0x2f, 0xff, 0x30, 0x71, 0xed, 0x80, 0x68, 0xb1, 0x6d, 0xef,
+ 0xf6, 0xcf, 0xb8, 0x41, 0x79, 0xf5, 0x01, 0xbc, 0x0c, 0x9b, 0x0e, 0x06,
+ 0xf3, 0xb0, 0xbb, 0x97, 0xb8, 0xb1, 0xfd, 0x51, 0x4e, 0xef, 0x0a, 0x3d,
+ 0x7a, 0x3d, 0xbd, 0x61, 0x00, 0xa2, 0xb3, 0xf0, 0x1d, 0x77, 0x7b, 0x6c,
+ 0x01, 0x61, 0xa5, 0xa3, 0xdb, 0xd5, 0xd5, 0xf4, 0xb5, 0x28, 0x9f, 0x0a,
+ 0xa3, 0x82, 0x5f, 0x4b, 0x40, 0x0f, 0x05, 0x0e, 0x78, 0xed, 0xbf, 0x17,
+ 0xf6, 0x5a, 0x8a, 0x7d, 0xf9, 0x45, 0xc1, 0xd7, 0x1b, 0x9d, 0x6c, 0x07,
+ 0x88, 0xf3, 0xbc, 0xf1, 0xea, 0x28, 0x1f, 0xb8, 0x7a, 0x60, 0x3c, 0xce,
+ 0x3e, 0x50, 0xb2, 0x0b, 0xcf, 0xe5, 0x08, 0x1f, 0x48, 0x04, 0xf9, 0x35,
+ 0x29, 0x15, 0xbe, 0x82, 0x96, 0xc2, 0x55, 0x04, 0x6c, 0x19, 0x45, 0x29,
+ 0x0b, 0xb6, 0x49, 0x12, 0xfb, 0x8d, 0x1b, 0x75, 0x8b, 0xd9, 0x6a, 0x5c,
+ 0xbe, 0x46, 0x2b, 0x41, 0xfe, 0x21, 0xad, 0x1f, 0x75, 0xe7, 0x90, 0x3d,
+ 0xe1, 0xdf, 0x4b, 0xe1, 0x81, 0xe2, 0x17, 0x02, 0x7b, 0x58, 0x8b, 0x92,
+ 0x1a, 0xac, 0x46, 0xdd, 0x2e, 0xce, 0x40, 0x09
+};
+
+/* Values for a little endian CPU. Byte swap on big endian CPU. */
+static const u16 expected_results[] = {
+ 0x82d0, 0x8224, 0xab23, 0xaaad, 0x41ad, 0x413f, 0x4f3e, 0x4eab, 0x22ab,
+ 0x228c, 0x428b, 0x41ad, 0xbbac, 0xbb1d, 0x671d, 0x66ea, 0xd6e9, 0xd654,
+ 0x1754, 0x1655, 0x5d54, 0x5c6a, 0xfa69, 0xf9fb, 0x44fb, 0x4428, 0xf527,
+ 0xf432, 0x9432, 0x93e2, 0x37e2, 0x371b, 0x3d1a, 0x3cad, 0x22ad, 0x21e6,
+ 0x31e5, 0x3113, 0x0513, 0x0501, 0xc800, 0xc778, 0xe477, 0xe463, 0xc363,
+ 0xc2b2, 0x64b2, 0x646d, 0x336d, 0x32cb, 0xadca, 0xad94, 0x3794, 0x36da,
+ 0x5ed9, 0x5e2c, 0xa32b, 0xa28d, 0x598d, 0x58fe, 0x61fd, 0x612f, 0x772e,
+ 0x763f, 0xac3e, 0xac12, 0x8312, 0x821b, 0x6d1b, 0x6cbf, 0x4fbf, 0x4f72,
+ 0x4672, 0x4653, 0x6452, 0x643e, 0x333e, 0x32b2, 0x2bb2, 0x2b5b, 0x085b,
+ 0x083c, 0x993b, 0x9938, 0xb837, 0xb7a4, 0x9ea4, 0x9e51, 0x9b51, 0x9b0c,
+ 0x520c, 0x5172, 0x1672, 0x15e4, 0x09e4, 0x09d2, 0xacd1, 0xac47, 0xf446,
+ 0xf3ab, 0x67ab, 0x6711, 0x6411, 0x632c, 0xc12b, 0xc0e8, 0xeee7, 0xeeac,
+ 0xa0ac, 0xa02e, 0x702e, 0x6ff2, 0x4df2, 0x4dc5, 0x88c4, 0x87c8, 0xe9c7,
+ 0xe8ec, 0x22ec, 0x21f3, 0xb8f2, 0xb8e0, 0x7fe0, 0x7fc1, 0xdfc0, 0xdfaf,
+ 0xd3af, 0xd370, 0xde6f, 0xde1c, 0x151c, 0x14ec, 0x19eb, 0x193b, 0x3c3a,
+ 0x3c19, 0x1f19, 0x1ee5, 0x3ce4, 0x3c7f, 0x0c7f, 0x0b8e, 0x238d, 0x2372,
+ 0x3c71, 0x3c1c, 0x2f1c, 0x2e31, 0x7130, 0x7064, 0xd363, 0xd33f, 0x2f3f,
+ 0x2e92, 0x8791, 0x86fe, 0x3ffe, 0x3fe5, 0x11e5, 0x1121, 0xb520, 0xb4e5,
+ 0xede4, 0xed77, 0x5877, 0x586b, 0x116b, 0x110b, 0x620a, 0x61af, 0x1aaf,
+ 0x19c1, 0x3dc0, 0x3d8f, 0x0c8f, 0x0c7b, 0xfa7a, 0xf9fc, 0x5bfc, 0x5bb7,
+ 0xaab6, 0xa9f5, 0x40f5, 0x40aa, 0xbca9, 0xbbad, 0x33ad, 0x32ec, 0x94eb,
+ 0x94a5, 0xe0a4, 0xdfe2, 0xbae2, 0xba1d, 0x4e1d, 0x4dd1, 0x2bd1, 0x2b79,
+ 0xcf78, 0xceba, 0xcfb9, 0xcecf, 0x46cf, 0x4647, 0xcc46, 0xcb7b, 0xaf7b,
+ 0xaf1e, 0x4c1e, 0x4b7d, 0x597c, 0x5949, 0x4d49, 0x4ca7, 0x36a7, 0x369c,
+ 0xc89b, 0xc870, 0x4f70, 0x4f18, 0x5817, 0x576b, 0x846a, 0x8400, 0x4500,
+ 0x447f, 0xed7e, 0xed36, 0xa836, 0xa753, 0x2b53, 0x2a77, 0x5476, 0x5442,
+ 0xd641, 0xd55b, 0x625b, 0x6161, 0x9660, 0x962f, 0x7e2f, 0x7d86, 0x7286,
+ 0x7198, 0x0698, 0x05ff, 0x4cfe, 0x4cd1, 0x6ed0, 0x6eae, 0x60ae, 0x603d,
+ 0x093d, 0x092f, 0x6e2e, 0x6e1d, 0x9d1c, 0x9d07, 0x5c07, 0x5b37, 0xf036,
+ 0xefe6, 0x65e6, 0x65c3, 0x01c3, 0x00e0, 0x64df, 0x642c, 0x0f2c, 0x0f23,
+ 0x2622, 0x25f0, 0xbeef, 0xbdf6, 0xddf5, 0xdd82, 0xec81, 0xec21, 0x8621,
+ 0x8616, 0xfe15, 0xfd9c, 0x709c, 0x7051, 0x1e51, 0x1dce, 0xfdcd, 0xfda7,
+ 0x85a7, 0x855e, 0x5e5e, 0x5d77, 0x1f77, 0x1f4e, 0x774d, 0x7735, 0xf534,
+ 0xf4f3, 0x17f3, 0x17d5, 0x4bd4, 0x4b99, 0x8798, 0x8733, 0xb632, 0xb611,
+ 0x7611, 0x759f, 0xc39e, 0xc317, 0x6517, 0x6501, 0x5501, 0x5481, 0x1581,
+ 0x1536, 0xbd35, 0xbd19, 0xfb18, 0xfa9f, 0xda9f, 0xd9af, 0xf9ae, 0xf92e,
+ 0x262e, 0x25dc, 0x80db, 0x80c2, 0x12c2, 0x127b, 0x827a, 0x8272, 0x8d71,
+ 0x8d21, 0xab20, 0xaa4a, 0xfc49, 0xfb60, 0xcd60, 0xcc84, 0xf783, 0xf6cf,
+ 0x66cf, 0x66b0, 0xedaf, 0xed66, 0x6b66, 0x6b45, 0xe744, 0xe6a4, 0x31a4,
+ 0x3175, 0x3274, 0x3244, 0xc143, 0xc056, 0x4056, 0x3fee, 0x8eed, 0x8e80,
+ 0x9f7f, 0x9e89, 0xcf88, 0xced0, 0x8dd0, 0x8d57, 0x9856, 0x9855, 0xdc54,
+ 0xdc48, 0x4148, 0x413a, 0x3b3a, 0x3a47, 0x8a46, 0x898b, 0xf28a, 0xf1d2,
+ 0x40d2, 0x3fd5, 0xeed4, 0xee86, 0xff85, 0xff7b, 0xc27b, 0xc201, 0x8501,
+ 0x8444, 0x2344, 0x2344, 0x8143, 0x8090, 0x908f, 0x9072, 0x1972, 0x18f7,
+ 0xacf6, 0xacf5, 0x4bf5, 0x4b50, 0xa84f, 0xa774, 0xd273, 0xd19e, 0xdd9d,
+ 0xdce8, 0xb4e8, 0xb449, 0xaa49, 0xa9a6, 0x27a6, 0x2747, 0xdc46, 0xdc06,
+ 0xcd06, 0xcd01, 0xbf01, 0xbe89, 0xd188, 0xd0c9, 0xb9c9, 0xb8d3, 0x5ed3,
+ 0x5e49, 0xe148, 0xe04f, 0x9b4f, 0x9a8e, 0xc38d, 0xc372, 0x2672, 0x2606,
+ 0x1f06, 0x1e7e, 0x2b7d, 0x2ac1, 0x39c0, 0x38d6, 0x10d6, 0x10b7, 0x58b6,
+ 0x583c, 0xf83b, 0xf7ff, 0x29ff, 0x29c1, 0xd9c0, 0xd90e, 0xce0e, 0xcd3f,
+ 0xe83e, 0xe836, 0xc936, 0xc8ee, 0xc4ee, 0xc3f5, 0x8ef5, 0x8ecc, 0x79cc,
+ 0x790e, 0xf70d, 0xf677, 0x3477, 0x3422, 0x3022, 0x2fb6, 0x16b6, 0x1671,
+ 0xed70, 0xed65, 0x3765, 0x371c, 0x251c, 0x2421, 0x9720, 0x9705, 0x2205,
+ 0x217a, 0x4879, 0x480f, 0xec0e, 0xeb50, 0xa550, 0xa525, 0x6425, 0x6327,
+ 0x4227, 0x417a, 0x227a, 0x2205, 0x3b04, 0x3a74, 0xfd73, 0xfc92, 0x1d92,
+ 0x1d47, 0x3c46, 0x3bc5, 0x59c4, 0x59ad, 0x57ad, 0x5732, 0xff31, 0xfea6,
+ 0x6ca6, 0x6c8c, 0xc08b, 0xc045, 0xe344, 0xe316, 0x1516, 0x14d6,
+};
+
+/* Values for a little endian CPU. Byte swap each half on big endian CPU. */
+static const u32 init_sums_no_overflow[] = {
+ 0xffffffff, 0xfffffffb, 0xfffffbfb, 0xfffffbf7, 0xfffff7f7, 0xfffff7f3,
+ 0xfffff3f3, 0xfffff3ef, 0xffffefef, 0xffffefeb, 0xffffebeb, 0xffffebe7,
+ 0xffffe7e7, 0xffffe7e3, 0xffffe3e3, 0xffffe3df, 0xffffdfdf, 0xffffdfdb,
+ 0xffffdbdb, 0xffffdbd7, 0xffffd7d7, 0xffffd7d3, 0xffffd3d3, 0xffffd3cf,
+ 0xffffcfcf, 0xffffcfcb, 0xffffcbcb, 0xffffcbc7, 0xffffc7c7, 0xffffc7c3,
+ 0xffffc3c3, 0xffffc3bf, 0xffffbfbf, 0xffffbfbb, 0xffffbbbb, 0xffffbbb7,
+ 0xffffb7b7, 0xffffb7b3, 0xffffb3b3, 0xffffb3af, 0xffffafaf, 0xffffafab,
+ 0xffffabab, 0xffffaba7, 0xffffa7a7, 0xffffa7a3, 0xffffa3a3, 0xffffa39f,
+ 0xffff9f9f, 0xffff9f9b, 0xffff9b9b, 0xffff9b97, 0xffff9797, 0xffff9793,
+ 0xffff9393, 0xffff938f, 0xffff8f8f, 0xffff8f8b, 0xffff8b8b, 0xffff8b87,
+ 0xffff8787, 0xffff8783, 0xffff8383, 0xffff837f, 0xffff7f7f, 0xffff7f7b,
+ 0xffff7b7b, 0xffff7b77, 0xffff7777, 0xffff7773, 0xffff7373, 0xffff736f,
+ 0xffff6f6f, 0xffff6f6b, 0xffff6b6b, 0xffff6b67, 0xffff6767, 0xffff6763,
+ 0xffff6363, 0xffff635f, 0xffff5f5f, 0xffff5f5b, 0xffff5b5b, 0xffff5b57,
+ 0xffff5757, 0xffff5753, 0xffff5353, 0xffff534f, 0xffff4f4f, 0xffff4f4b,
+ 0xffff4b4b, 0xffff4b47, 0xffff4747, 0xffff4743, 0xffff4343, 0xffff433f,
+ 0xffff3f3f, 0xffff3f3b, 0xffff3b3b, 0xffff3b37, 0xffff3737, 0xffff3733,
+ 0xffff3333, 0xffff332f, 0xffff2f2f, 0xffff2f2b, 0xffff2b2b, 0xffff2b27,
+ 0xffff2727, 0xffff2723, 0xffff2323, 0xffff231f, 0xffff1f1f, 0xffff1f1b,
+ 0xffff1b1b, 0xffff1b17, 0xffff1717, 0xffff1713, 0xffff1313, 0xffff130f,
+ 0xffff0f0f, 0xffff0f0b, 0xffff0b0b, 0xffff0b07, 0xffff0707, 0xffff0703,
+ 0xffff0303, 0xffff02ff, 0xfffffefe, 0xfffffefa, 0xfffffafa, 0xfffffaf6,
+ 0xfffff6f6, 0xfffff6f2, 0xfffff2f2, 0xfffff2ee, 0xffffeeee, 0xffffeeea,
+ 0xffffeaea, 0xffffeae6, 0xffffe6e6, 0xffffe6e2, 0xffffe2e2, 0xffffe2de,
+ 0xffffdede, 0xffffdeda, 0xffffdada, 0xffffdad6, 0xffffd6d6, 0xffffd6d2,
+ 0xffffd2d2, 0xffffd2ce, 0xffffcece, 0xffffceca, 0xffffcaca, 0xffffcac6,
+ 0xffffc6c6, 0xffffc6c2, 0xffffc2c2, 0xffffc2be, 0xffffbebe, 0xffffbeba,
+ 0xffffbaba, 0xffffbab6, 0xffffb6b6, 0xffffb6b2, 0xffffb2b2, 0xffffb2ae,
+ 0xffffaeae, 0xffffaeaa, 0xffffaaaa, 0xffffaaa6, 0xffffa6a6, 0xffffa6a2,
+ 0xffffa2a2, 0xffffa29e, 0xffff9e9e, 0xffff9e9a, 0xffff9a9a, 0xffff9a96,
+ 0xffff9696, 0xffff9692, 0xffff9292, 0xffff928e, 0xffff8e8e, 0xffff8e8a,
+ 0xffff8a8a, 0xffff8a86, 0xffff8686, 0xffff8682, 0xffff8282, 0xffff827e,
+ 0xffff7e7e, 0xffff7e7a, 0xffff7a7a, 0xffff7a76, 0xffff7676, 0xffff7672,
+ 0xffff7272, 0xffff726e, 0xffff6e6e, 0xffff6e6a, 0xffff6a6a, 0xffff6a66,
+ 0xffff6666, 0xffff6662, 0xffff6262, 0xffff625e, 0xffff5e5e, 0xffff5e5a,
+ 0xffff5a5a, 0xffff5a56, 0xffff5656, 0xffff5652, 0xffff5252, 0xffff524e,
+ 0xffff4e4e, 0xffff4e4a, 0xffff4a4a, 0xffff4a46, 0xffff4646, 0xffff4642,
+ 0xffff4242, 0xffff423e, 0xffff3e3e, 0xffff3e3a, 0xffff3a3a, 0xffff3a36,
+ 0xffff3636, 0xffff3632, 0xffff3232, 0xffff322e, 0xffff2e2e, 0xffff2e2a,
+ 0xffff2a2a, 0xffff2a26, 0xffff2626, 0xffff2622, 0xffff2222, 0xffff221e,
+ 0xffff1e1e, 0xffff1e1a, 0xffff1a1a, 0xffff1a16, 0xffff1616, 0xffff1612,
+ 0xffff1212, 0xffff120e, 0xffff0e0e, 0xffff0e0a, 0xffff0a0a, 0xffff0a06,
+ 0xffff0606, 0xffff0602, 0xffff0202, 0xffff01fe, 0xfffffdfd, 0xfffffdf9,
+ 0xfffff9f9, 0xfffff9f5, 0xfffff5f5, 0xfffff5f1, 0xfffff1f1, 0xfffff1ed,
+ 0xffffeded, 0xffffede9, 0xffffe9e9, 0xffffe9e5, 0xffffe5e5, 0xffffe5e1,
+ 0xffffe1e1, 0xffffe1dd, 0xffffdddd, 0xffffddd9, 0xffffd9d9, 0xffffd9d5,
+ 0xffffd5d5, 0xffffd5d1, 0xffffd1d1, 0xffffd1cd, 0xffffcdcd, 0xffffcdc9,
+ 0xffffc9c9, 0xffffc9c5, 0xffffc5c5, 0xffffc5c1, 0xffffc1c1, 0xffffc1bd,
+ 0xffffbdbd, 0xffffbdb9, 0xffffb9b9, 0xffffb9b5, 0xffffb5b5, 0xffffb5b1,
+ 0xffffb1b1, 0xffffb1ad, 0xffffadad, 0xffffada9, 0xffffa9a9, 0xffffa9a5,
+ 0xffffa5a5, 0xffffa5a1, 0xffffa1a1, 0xffffa19d, 0xffff9d9d, 0xffff9d99,
+ 0xffff9999, 0xffff9995, 0xffff9595, 0xffff9591, 0xffff9191, 0xffff918d,
+ 0xffff8d8d, 0xffff8d89, 0xffff8989, 0xffff8985, 0xffff8585, 0xffff8581,
+ 0xffff8181, 0xffff817d, 0xffff7d7d, 0xffff7d79, 0xffff7979, 0xffff7975,
+ 0xffff7575, 0xffff7571, 0xffff7171, 0xffff716d, 0xffff6d6d, 0xffff6d69,
+ 0xffff6969, 0xffff6965, 0xffff6565, 0xffff6561, 0xffff6161, 0xffff615d,
+ 0xffff5d5d, 0xffff5d59, 0xffff5959, 0xffff5955, 0xffff5555, 0xffff5551,
+ 0xffff5151, 0xffff514d, 0xffff4d4d, 0xffff4d49, 0xffff4949, 0xffff4945,
+ 0xffff4545, 0xffff4541, 0xffff4141, 0xffff413d, 0xffff3d3d, 0xffff3d39,
+ 0xffff3939, 0xffff3935, 0xffff3535, 0xffff3531, 0xffff3131, 0xffff312d,
+ 0xffff2d2d, 0xffff2d29, 0xffff2929, 0xffff2925, 0xffff2525, 0xffff2521,
+ 0xffff2121, 0xffff211d, 0xffff1d1d, 0xffff1d19, 0xffff1919, 0xffff1915,
+ 0xffff1515, 0xffff1511, 0xffff1111, 0xffff110d, 0xffff0d0d, 0xffff0d09,
+ 0xffff0909, 0xffff0905, 0xffff0505, 0xffff0501, 0xffff0101, 0xffff00fd,
+ 0xfffffcfc, 0xfffffcf8, 0xfffff8f8, 0xfffff8f4, 0xfffff4f4, 0xfffff4f0,
+ 0xfffff0f0, 0xfffff0ec, 0xffffecec, 0xffffece8, 0xffffe8e8, 0xffffe8e4,
+ 0xffffe4e4, 0xffffe4e0, 0xffffe0e0, 0xffffe0dc, 0xffffdcdc, 0xffffdcd8,
+ 0xffffd8d8, 0xffffd8d4, 0xffffd4d4, 0xffffd4d0, 0xffffd0d0, 0xffffd0cc,
+ 0xffffcccc, 0xffffccc8, 0xffffc8c8, 0xffffc8c4, 0xffffc4c4, 0xffffc4c0,
+ 0xffffc0c0, 0xffffc0bc, 0xffffbcbc, 0xffffbcb8, 0xffffb8b8, 0xffffb8b4,
+ 0xffffb4b4, 0xffffb4b0, 0xffffb0b0, 0xffffb0ac, 0xffffacac, 0xffffaca8,
+ 0xffffa8a8, 0xffffa8a4, 0xffffa4a4, 0xffffa4a0, 0xffffa0a0, 0xffffa09c,
+ 0xffff9c9c, 0xffff9c98, 0xffff9898, 0xffff9894, 0xffff9494, 0xffff9490,
+ 0xffff9090, 0xffff908c, 0xffff8c8c, 0xffff8c88, 0xffff8888, 0xffff8884,
+ 0xffff8484, 0xffff8480, 0xffff8080, 0xffff807c, 0xffff7c7c, 0xffff7c78,
+ 0xffff7878, 0xffff7874, 0xffff7474, 0xffff7470, 0xffff7070, 0xffff706c,
+ 0xffff6c6c, 0xffff6c68, 0xffff6868, 0xffff6864, 0xffff6464, 0xffff6460,
+ 0xffff6060, 0xffff605c, 0xffff5c5c, 0xffff5c58, 0xffff5858, 0xffff5854,
+ 0xffff5454, 0xffff5450, 0xffff5050, 0xffff504c, 0xffff4c4c, 0xffff4c48,
+ 0xffff4848, 0xffff4844, 0xffff4444, 0xffff4440, 0xffff4040, 0xffff403c,
+ 0xffff3c3c, 0xffff3c38, 0xffff3838, 0xffff3834, 0xffff3434, 0xffff3430,
+ 0xffff3030, 0xffff302c, 0xffff2c2c, 0xffff2c28, 0xffff2828, 0xffff2824,
+ 0xffff2424, 0xffff2420, 0xffff2020, 0xffff201c, 0xffff1c1c, 0xffff1c18,
+ 0xffff1818, 0xffff1814, 0xffff1414, 0xffff1410, 0xffff1010, 0xffff100c,
+ 0xffff0c0c, 0xffff0c08, 0xffff0808, 0xffff0804, 0xffff0404, 0xffff0400,
+ 0xffff0000, 0xfffffffb,
+};
+
+static const u16 expected_csum_ipv6_magic[] = {
+ 0x18d4, 0x3085, 0x2e4b, 0xd9f4, 0xbdc8, 0x78f, 0x1034, 0x8422, 0x6fc0,
+ 0xd2f6, 0xbeb5, 0x9d3, 0x7e2a, 0x312e, 0x778e, 0xc1bb, 0x7cf2, 0x9d1e,
+ 0xca21, 0xf3ff, 0x7569, 0xb02e, 0xca86, 0x7e76, 0x4539, 0x45e3, 0xf28d,
+ 0xdf81, 0x8fd5, 0x3b5d, 0x8324, 0xf471, 0x83be, 0x1daf, 0x8c46, 0xe682,
+ 0xd1fb, 0x6b2e, 0xe687, 0x2a33, 0x4833, 0x2d67, 0x660f, 0x2e79, 0xd65e,
+ 0x6b62, 0x6672, 0x5dbd, 0x8680, 0xbaa5, 0x2229, 0x2125, 0x2d01, 0x1cc0,
+ 0x6d36, 0x33c0, 0xee36, 0xd832, 0x9820, 0x8a31, 0x53c5, 0x2e2, 0xdb0e,
+ 0x49ed, 0x17a7, 0x77a0, 0xd72e, 0x3d72, 0x7dc8, 0x5b17, 0xf55d, 0xa4d9,
+ 0x1446, 0x5d56, 0x6b2e, 0x69a5, 0xadb6, 0xff2a, 0x92e, 0xe044, 0x3402,
+ 0xbb60, 0xec7f, 0xe7e6, 0x1986, 0x32f4, 0x8f8, 0x5e00, 0x47c6, 0x3059,
+ 0x3969, 0xe957, 0x4388, 0x2854, 0x3334, 0xea71, 0xa6de, 0x33f9, 0x83fc,
+ 0x37b4, 0x5531, 0x3404, 0x1010, 0xed30, 0x610a, 0xc95, 0x9aed, 0x6ff,
+ 0x5136, 0x2741, 0x660e, 0x8b80, 0xf71, 0xa263, 0x88af, 0x7a73, 0x3c37,
+ 0x1908, 0x6db5, 0x2e92, 0x1cd2, 0x70c8, 0xee16, 0xe80, 0xcd55, 0x6e6,
+ 0x6434, 0x127, 0x655d, 0x2ea0, 0xb4f4, 0xdc20, 0x5671, 0xe462, 0xe52b,
+ 0xdb44, 0x3589, 0xc48f, 0xe60b, 0xd2d2, 0x66ad, 0x498, 0x436, 0xb917,
+ 0xf0ca, 0x1a6e, 0x1cb7, 0xbf61, 0x2870, 0xc7e8, 0x5b30, 0xe4a5, 0x168,
+ 0xadfc, 0xd035, 0xe690, 0xe283, 0xfb27, 0xe4ad, 0xb1a5, 0xf2d5, 0xc4b6,
+ 0x8a30, 0xd7d5, 0x7df9, 0x91d5, 0x63ed, 0x2d21, 0x312b, 0xab19, 0xa632,
+ 0x8d2e, 0xef06, 0x57b9, 0xc373, 0xbd1f, 0xa41f, 0x8444, 0x9975, 0x90cb,
+ 0xc49c, 0xe965, 0x4eff, 0x5a, 0xef6d, 0xe81a, 0xe260, 0x853a, 0xff7a,
+ 0x99aa, 0xb06b, 0xee19, 0xcc2c, 0xf34c, 0x7c49, 0xdac3, 0xa71e, 0xc988,
+ 0x3845, 0x1014
+};
+
+static const u16 expected_fast_csum[] = {
+ 0xda83, 0x45da, 0x4f46, 0x4e4f, 0x34e, 0xe902, 0xa5e9, 0x87a5, 0x7187,
+ 0x5671, 0xf556, 0x6df5, 0x816d, 0x8f81, 0xbb8f, 0xfbba, 0x5afb, 0xbe5a,
+ 0xedbe, 0xabee, 0x6aac, 0xe6b, 0xea0d, 0x67ea, 0x7e68, 0x8a7e, 0x6f8a,
+ 0x3a70, 0x9f3a, 0xe89e, 0x75e8, 0x7976, 0xfa79, 0x2cfa, 0x3c2c, 0x463c,
+ 0x7146, 0x7a71, 0x547a, 0xfd53, 0x99fc, 0xb699, 0x92b6, 0xdb91, 0xe8da,
+ 0x5fe9, 0x1e60, 0xae1d, 0x39ae, 0xf439, 0xa1f4, 0xdda1, 0xede, 0x790f,
+ 0x579, 0x1206, 0x9012, 0x2490, 0xd224, 0x5cd2, 0xa65d, 0xca7, 0x220d,
+ 0xf922, 0xbf9, 0x920b, 0x1b92, 0x361c, 0x2e36, 0x4d2e, 0x24d, 0x2,
+ 0xcfff, 0x90cf, 0xa591, 0x93a5, 0x7993, 0x9579, 0xc894, 0x50c8, 0x5f50,
+ 0xd55e, 0xcad5, 0xf3c9, 0x8f4, 0x4409, 0x5043, 0x5b50, 0x55b, 0x2205,
+ 0x1e22, 0x801e, 0x3780, 0xe137, 0x7ee0, 0xf67d, 0x3cf6, 0xa53c, 0x2ea5,
+ 0x472e, 0x5147, 0xcf51, 0x1bcf, 0x951c, 0x1e95, 0xc71e, 0xe4c7, 0xc3e4,
+ 0x3dc3, 0xee3d, 0xa4ed, 0xf9a4, 0xcbf8, 0x75cb, 0xb375, 0x50b4, 0x3551,
+ 0xf835, 0x19f8, 0x8c1a, 0x538c, 0xad52, 0xa3ac, 0xb0a3, 0x5cb0, 0x6c5c,
+ 0x5b6c, 0xc05a, 0x92c0, 0x4792, 0xbe47, 0x53be, 0x1554, 0x5715, 0x4b57,
+ 0xe54a, 0x20e5, 0x21, 0xd500, 0xa1d4, 0xa8a1, 0x57a9, 0xca57, 0x5ca,
+ 0x1c06, 0x4f1c, 0xe24e, 0xd9e2, 0xf0d9, 0x4af1, 0x474b, 0x8146, 0xe81,
+ 0xfd0e, 0x84fd, 0x7c85, 0xba7c, 0x17ba, 0x4a17, 0x964a, 0xf595, 0xff5,
+ 0x5310, 0x3253, 0x6432, 0x4263, 0x2242, 0xe121, 0x32e1, 0xf632, 0xc5f5,
+ 0x21c6, 0x7d22, 0x8e7c, 0x418e, 0x5641, 0x3156, 0x7c31, 0x737c, 0x373,
+ 0x2503, 0xc22a, 0x3c2, 0x4a04, 0x8549, 0x5285, 0xa352, 0xe8a3, 0x6fe8,
+ 0x1a6f, 0x211a, 0xe021, 0x38e0, 0x7638, 0xf575, 0x9df5, 0x169e, 0xf116,
+ 0x23f1, 0xcd23, 0xece, 0x660f, 0x4866, 0x6a48, 0x716a, 0xee71, 0xa2ee,
+ 0xb8a2, 0x61b9, 0xa361, 0xf7a2, 0x26f7, 0x1127, 0x6611, 0xe065, 0x36e0,
+ 0x1837, 0x3018, 0x1c30, 0x721b, 0x3e71, 0xe43d, 0x99e4, 0x9e9a, 0xb79d,
+ 0xa9b7, 0xcaa, 0xeb0c, 0x4eb, 0x1305, 0x8813, 0xb687, 0xa9b6, 0xfba9,
+ 0xd7fb, 0xccd8, 0x2ecd, 0x652f, 0xae65, 0x3fae, 0x3a40, 0x563a, 0x7556,
+ 0x2776, 0x1228, 0xef12, 0xf9ee, 0xcef9, 0x56cf, 0xa956, 0x24a9, 0xba24,
+ 0x5fba, 0x665f, 0xf465, 0x8ff4, 0x6d8f, 0x346d, 0x5f34, 0x385f, 0xd137,
+ 0xb8d0, 0xacb8, 0x55ac, 0x7455, 0xe874, 0x89e8, 0xd189, 0xa0d1, 0xb2a0,
+ 0xb8b2, 0x36b8, 0x5636, 0xd355, 0x8d3, 0x1908, 0x2118, 0xc21, 0x990c,
+ 0x8b99, 0x158c, 0x7815, 0x9e78, 0x6f9e, 0x4470, 0x1d44, 0x341d, 0x2634,
+ 0x3f26, 0x793e, 0xc79, 0xcc0b, 0x26cc, 0xd126, 0x1fd1, 0xb41f, 0xb6b4,
+ 0x22b7, 0xa122, 0xa1, 0x7f01, 0x837e, 0x3b83, 0xaf3b, 0x6fae, 0x916f,
+ 0xb490, 0xffb3, 0xceff, 0x50cf, 0x7550, 0x7275, 0x1272, 0x2613, 0xaa26,
+ 0xd5aa, 0x7d5, 0x9607, 0x96, 0xb100, 0xf8b0, 0x4bf8, 0xdd4c, 0xeddd,
+ 0x98ed, 0x2599, 0x9325, 0xeb92, 0x8feb, 0xcc8f, 0x2acd, 0x392b, 0x3b39,
+ 0xcb3b, 0x6acb, 0xd46a, 0xb8d4, 0x6ab8, 0x106a, 0x2f10, 0x892f, 0x789,
+ 0xc806, 0x45c8, 0x7445, 0x3c74, 0x3a3c, 0xcf39, 0xd7ce, 0x58d8, 0x6e58,
+ 0x336e, 0x1034, 0xee10, 0xe9ed, 0xc2e9, 0x3fc2, 0xd53e, 0xd2d4, 0xead2,
+ 0x8fea, 0x2190, 0x1162, 0xbe11, 0x8cbe, 0x6d8c, 0xfb6c, 0x6dfb, 0xd36e,
+ 0x3ad3, 0xf3a, 0x870e, 0xc287, 0x53c3, 0xc54, 0x5b0c, 0x7d5a, 0x797d,
+ 0xec79, 0x5dec, 0x4d5e, 0x184e, 0xd618, 0x60d6, 0xb360, 0x98b3, 0xf298,
+ 0xb1f2, 0x69b1, 0xf969, 0xef9, 0xab0e, 0x21ab, 0xe321, 0x24e3, 0x8224,
+ 0x5481, 0x5954, 0x7a59, 0xff7a, 0x7dff, 0x1a7d, 0xa51a, 0x46a5, 0x6b47,
+ 0xe6b, 0x830e, 0xa083, 0xff9f, 0xd0ff, 0xffd0, 0xe6ff, 0x7de7, 0xc67d,
+ 0xd0c6, 0x61d1, 0x3a62, 0xc3b, 0x150c, 0x1715, 0x4517, 0x5345, 0x3954,
+ 0xdd39, 0xdadd, 0x32db, 0x6a33, 0xd169, 0x86d1, 0xb687, 0x3fb6, 0x883f,
+ 0xa487, 0x39a4, 0x2139, 0xbe20, 0xffbe, 0xedfe, 0x8ded, 0x368e, 0xc335,
+ 0x51c3, 0x9851, 0xf297, 0xd6f2, 0xb9d6, 0x95ba, 0x2096, 0xea1f, 0x76e9,
+ 0x4e76, 0xe04d, 0xd0df, 0x80d0, 0xa280, 0xfca2, 0x75fc, 0xef75, 0x32ef,
+ 0x6833, 0xdf68, 0xc4df, 0x76c4, 0xb77, 0xb10a, 0xbfb1, 0x58bf, 0x5258,
+ 0x4d52, 0x6c4d, 0x7e6c, 0xb67e, 0xccb5, 0x8ccc, 0xbe8c, 0xc8bd, 0x9ac8,
+ 0xa99b, 0x52a9, 0x2f53, 0xc30, 0x3e0c, 0xb83d, 0x83b7, 0x5383, 0x7e53,
+ 0x4f7e, 0xe24e, 0xb3e1, 0x8db3, 0x618e, 0xc861, 0xfcc8, 0x34fc, 0x9b35,
+ 0xaa9b, 0xb1aa, 0x5eb1, 0x395e, 0x8639, 0xd486, 0x8bd4, 0x558b, 0x2156,
+ 0xf721, 0x4ef6, 0x14f, 0x7301, 0xdd72, 0x49de, 0x894a, 0x9889, 0x8898,
+ 0x7788, 0x7b77, 0x637b, 0xb963, 0xabb9, 0x7cab, 0xc87b, 0x21c8, 0xcb21,
+ 0xdfca, 0xbfdf, 0xf2bf, 0x6af2, 0x626b, 0xb261, 0x3cb2, 0xc63c, 0xc9c6,
+ 0xc9c9, 0xb4c9, 0xf9b4, 0x91f9, 0x4091, 0x3a40, 0xcc39, 0xd1cb, 0x7ed1,
+ 0x537f, 0x6753, 0xa167, 0xba49, 0x88ba, 0x7789, 0x3877, 0xf037, 0xd3ef,
+ 0xb5d4, 0x55b6, 0xa555, 0xeca4, 0xa1ec, 0xb6a2, 0x7b7, 0x9507, 0xfd94,
+ 0x82fd, 0x5c83, 0x765c, 0x9676, 0x3f97, 0xda3f, 0x6fda, 0x646f, 0x3064,
+ 0x5e30, 0x655e, 0x6465, 0xcb64, 0xcdca, 0x4ccd, 0x3f4c, 0x243f, 0x6f24,
+ 0x656f, 0x6065, 0x3560, 0x3b36, 0xac3b, 0x4aac, 0x714a, 0x7e71, 0xda7e,
+ 0x7fda, 0xda7f, 0x6fda, 0xff6f, 0xc6ff, 0xedc6, 0xd4ed, 0x70d5, 0xeb70,
+ 0xa3eb, 0x80a3, 0xca80, 0x3fcb, 0x2540, 0xf825, 0x7ef8, 0xf87e, 0x73f8,
+ 0xb474, 0xb4b4, 0x92b5, 0x9293, 0x93, 0x3500, 0x7134, 0x9071, 0xfa8f,
+ 0x51fa, 0x1452, 0xba13, 0x7ab9, 0x957a, 0x8a95, 0x6e8a, 0x6d6e, 0x7c6d,
+ 0x447c, 0x9744, 0x4597, 0x8945, 0xef88, 0x8fee, 0x3190, 0x4831, 0x8447,
+ 0xa183, 0x1da1, 0xd41d, 0x2dd4, 0x4f2e, 0xc94e, 0xcbc9, 0xc9cb, 0x9ec9,
+ 0x319e, 0xd531, 0x20d5, 0x4021, 0xb23f, 0x29b2, 0xd828, 0xecd8, 0x5ded,
+ 0xfc5d, 0x4dfc, 0xd24d, 0x6bd2, 0x5f6b, 0xb35e, 0x7fb3, 0xee7e, 0x56ee,
+ 0xa657, 0x68a6, 0x8768, 0x7787, 0xb077, 0x4cb1, 0x764c, 0xb175, 0x7b1,
+ 0x3d07, 0x603d, 0x3560, 0x3e35, 0xb03d, 0xd6b0, 0xc8d6, 0xd8c8, 0x8bd8,
+ 0x3e8c, 0x303f, 0xd530, 0xf1d4, 0x42f1, 0xca42, 0xddca, 0x41dd, 0x3141,
+ 0x132, 0xe901, 0x8e9, 0xbe09, 0xe0bd, 0x2ce0, 0x862d, 0x3986, 0x9139,
+ 0x6d91, 0x6a6d, 0x8d6a, 0x1b8d, 0xac1b, 0xedab, 0x54ed, 0xc054, 0xcebf,
+ 0xc1ce, 0x5c2, 0x3805, 0x6038, 0x5960, 0xd359, 0xdd3, 0xbe0d, 0xafbd,
+ 0x6daf, 0x206d, 0x2c20, 0x862c, 0x8e86, 0xec8d, 0xa2ec, 0xa3a2, 0x51a3,
+ 0x8051, 0xfd7f, 0x91fd, 0xa292, 0xaf14, 0xeeae, 0x59ef, 0x535a, 0x8653,
+ 0x3986, 0x9539, 0xb895, 0xa0b8, 0x26a0, 0x2227, 0xc022, 0x77c0, 0xad77,
+ 0x46ad, 0xaa46, 0x60aa, 0x8560, 0x4785, 0xd747, 0x45d7, 0x2346, 0x5f23,
+ 0x25f, 0x1d02, 0x71d, 0x8206, 0xc82, 0x180c, 0x3018, 0x4b30, 0x4b,
+ 0x3001, 0x1230, 0x2d12, 0x8c2d, 0x148d, 0x4015, 0x5f3f, 0x3d5f, 0x6b3d,
+ 0x396b, 0x473a, 0xf746, 0x44f7, 0x8945, 0x3489, 0xcb34, 0x84ca, 0xd984,
+ 0xf0d9, 0xbcf0, 0x63bd, 0x3264, 0xf332, 0x45f3, 0x7346, 0x5673, 0xb056,
+ 0xd3b0, 0x4ad4, 0x184b, 0x7d18, 0x6c7d, 0xbb6c, 0xfeba, 0xe0fe, 0x10e1,
+ 0x5410, 0x2954, 0x9f28, 0x3a9f, 0x5a3a, 0xdb59, 0xbdc, 0xb40b, 0x1ab4,
+ 0x131b, 0x5d12, 0x6d5c, 0xe16c, 0xb0e0, 0x89b0, 0xba88, 0xbb, 0x3c01,
+ 0xe13b, 0x6fe1, 0x446f, 0xa344, 0x81a3, 0xfe81, 0xc7fd, 0x38c8, 0xb38,
+ 0x1a0b, 0x6d19, 0xf36c, 0x47f3, 0x6d48, 0xb76d, 0xd3b7, 0xd8d2, 0x52d9,
+ 0x4b53, 0xa54a, 0x34a5, 0xc534, 0x9bc4, 0xed9b, 0xbeed, 0x3ebe, 0x233e,
+ 0x9f22, 0x4a9f, 0x774b, 0x4577, 0xa545, 0x64a5, 0xb65, 0x870b, 0x487,
+ 0x9204, 0x5f91, 0xd55f, 0x35d5, 0x1a35, 0x71a, 0x7a07, 0x4e7a, 0xfc4e,
+ 0x1efc, 0x481f, 0x7448, 0xde74, 0xa7dd, 0x1ea7, 0xaa1e, 0xcfaa, 0xfbcf,
+ 0xedfb, 0x6eee, 0x386f, 0x4538, 0x6e45, 0xd96d, 0x11d9, 0x7912, 0x4b79,
+ 0x494b, 0x6049, 0xac5f, 0x65ac, 0x1366, 0x5913, 0xe458, 0x7ae4, 0x387a,
+ 0x3c38, 0xb03c, 0x76b0, 0x9376, 0xe193, 0x42e1, 0x7742, 0x6476, 0x3564,
+ 0x3c35, 0x6a3c, 0xcc69, 0x94cc, 0x5d95, 0xe5e, 0xee0d, 0x4ced, 0xce4c,
+ 0x52ce, 0xaa52, 0xdaaa, 0xe4da, 0x1de5, 0x4530, 0x5445, 0x3954, 0xb639,
+ 0x81b6, 0x7381, 0x1574, 0xc215, 0x10c2, 0x3f10, 0x6b3f, 0xe76b, 0x7be7,
+ 0xbc7b, 0xf7bb, 0x41f7, 0xcc41, 0x38cc, 0x4239, 0xa942, 0x4a9, 0xc504,
+ 0x7cc4, 0x437c, 0x6743, 0xea67, 0x8dea, 0xe88d, 0xd8e8, 0xdcd8, 0x17dd,
+ 0x5718, 0x958, 0xa609, 0x41a5, 0x5842, 0x159, 0x9f01, 0x269f, 0x5a26,
+ 0x405a, 0xc340, 0xb4c3, 0xd4b4, 0xf4d3, 0xf1f4, 0x39f2, 0xe439, 0x67e4,
+ 0x4168, 0xa441, 0xdda3, 0xdedd, 0x9df, 0xab0a, 0xa5ab, 0x9a6, 0xba09,
+ 0x9ab9, 0xad9a, 0x5ae, 0xe205, 0xece2, 0xecec, 0x14ed, 0xd614, 0x6bd5,
+ 0x916c, 0x3391, 0x6f33, 0x206f, 0x8020, 0x780, 0x7207, 0x2472, 0x8a23,
+ 0xb689, 0x3ab6, 0xf739, 0x97f6, 0xb097, 0xa4b0, 0xe6a4, 0x88e6, 0x2789,
+ 0xb28, 0x350b, 0x1f35, 0x431e, 0x1043, 0xc30f, 0x79c3, 0x379, 0x5703,
+ 0x3256, 0x4732, 0x7247, 0x9d72, 0x489d, 0xd348, 0xa4d3, 0x7ca4, 0xbf7b,
+ 0x45c0, 0x7b45, 0x337b, 0x4034, 0x843f, 0xd083, 0x35d0, 0x6335, 0x4d63,
+ 0xe14c, 0xcce0, 0xfecc, 0x35ff, 0x5636, 0xf856, 0xeef8, 0x2def, 0xfc2d,
+ 0x4fc, 0x6e04, 0xb66d, 0x78b6, 0xbb78, 0x3dbb, 0x9a3d, 0x839a, 0x9283,
+ 0x593, 0xd504, 0x23d5, 0x5424, 0xd054, 0x61d0, 0xdb61, 0x17db, 0x1f18,
+ 0x381f, 0x9e37, 0x679e, 0x1d68, 0x381d, 0x8038, 0x917f, 0x491, 0xbb04,
+ 0x23bb, 0x4124, 0xd41, 0xa30c, 0x8ba3, 0x8b8b, 0xc68b, 0xd2c6, 0xebd2,
+ 0x93eb, 0xbd93, 0x99bd, 0x1a99, 0xea19, 0x58ea, 0xcf58, 0x73cf, 0x1073,
+ 0x9e10, 0x139e, 0xea13, 0xcde9, 0x3ecd, 0x883f, 0xf89, 0x180f, 0x2a18,
+ 0x212a, 0xce20, 0x73ce, 0xf373, 0x60f3, 0xad60, 0x4093, 0x8e40, 0xb98e,
+ 0xbfb9, 0xf1bf, 0x8bf1, 0x5e8c, 0xe95e, 0x14e9, 0x4e14, 0x1c4e, 0x7f1c,
+ 0xe77e, 0x6fe7, 0xf26f, 0x13f2, 0x8b13, 0xda8a, 0x5fda, 0xea5f, 0x4eea,
+ 0xa84f, 0x88a8, 0x1f88, 0x2820, 0x9728, 0x5a97, 0x3f5b, 0xb23f, 0x70b2,
+ 0x2c70, 0x232d, 0xf623, 0x4f6, 0x905, 0x7509, 0xd675, 0x28d7, 0x9428,
+ 0x3794, 0xf036, 0x2bf0, 0xba2c, 0xedb9, 0xd7ed, 0x59d8, 0xed59, 0x4ed,
+ 0xe304, 0x18e3, 0x5c19, 0x3d5c, 0x753d, 0x6d75, 0x956d, 0x7f95, 0xc47f,
+ 0x83c4, 0xa84, 0x2e0a, 0x5f2e, 0xb95f, 0x77b9, 0x6d78, 0xf46d, 0x1bf4,
+ 0xed1b, 0xd6ed, 0xe0d6, 0x5e1, 0x3905, 0x5638, 0xa355, 0x99a2, 0xbe99,
+ 0xb4bd, 0x85b4, 0x2e86, 0x542e, 0x6654, 0xd765, 0x73d7, 0x3a74, 0x383a,
+ 0x2638, 0x7826, 0x7677, 0x9a76, 0x7e99, 0x2e7e, 0xea2d, 0xa6ea, 0x8a7,
+ 0x109, 0x3300, 0xad32, 0x5fad, 0x465f, 0x2f46, 0xc62f, 0xd4c5, 0xad5,
+ 0xcb0a, 0x4cb, 0xb004, 0x7baf, 0xe47b, 0x92e4, 0x8e92, 0x638e, 0x1763,
+ 0xc17, 0xf20b, 0x1ff2, 0x8920, 0x5889, 0xcb58, 0xf8cb, 0xcaf8, 0x84cb,
+ 0x9f84, 0x8a9f, 0x918a, 0x4991, 0x8249, 0xff81, 0x46ff, 0x5046, 0x5f50,
+ 0x725f, 0xf772, 0x8ef7, 0xe08f, 0xc1e0, 0x1fc2, 0x9e1f, 0x8b9d, 0x108b,
+ 0x411, 0x2b04, 0xb02a, 0x1fb0, 0x1020, 0x7a0f, 0x587a, 0x8958, 0xb188,
+ 0xb1b1, 0x49b2, 0xb949, 0x7ab9, 0x917a, 0xfc91, 0xe6fc, 0x47e7, 0xbc47,
+ 0x8fbb, 0xea8e, 0x34ea, 0x2635, 0x1726, 0x9616, 0xc196, 0xa6c1, 0xf3a6,
+ 0x11f3, 0x4811, 0x3e48, 0xeb3e, 0xf7ea, 0x1bf8, 0xdb1c, 0x8adb, 0xe18a,
+ 0x42e1, 0x9d42, 0x5d9c, 0x6e5d, 0x286e, 0x4928, 0x9a49, 0xb09c, 0xa6b0,
+ 0x2a7, 0xe702, 0xf5e6, 0x9af5, 0xf9b, 0x810f, 0x8080, 0x180, 0x1702,
+ 0x5117, 0xa650, 0x11a6, 0x1011, 0x550f, 0xd554, 0xbdd5, 0x6bbe, 0xc66b,
+ 0xfc7, 0x5510, 0x5555, 0x7655, 0x177, 0x2b02, 0x6f2a, 0xb70, 0x9f0b,
+ 0xcf9e, 0xf3cf, 0x3ff4, 0xcb40, 0x8ecb, 0x768e, 0x5277, 0x8652, 0x9186,
+ 0x9991, 0x5099, 0xd350, 0x93d3, 0x6d94, 0xe6d, 0x530e, 0x3153, 0xa531,
+ 0x64a5, 0x7964, 0x7c79, 0x467c, 0x1746, 0x3017, 0x3730, 0x538, 0x5,
+ 0x1e00, 0x5b1e, 0x955a, 0xae95, 0x3eaf, 0xff3e, 0xf8ff, 0xb2f9, 0xa1b3,
+ 0xb2a1, 0x5b2, 0xad05, 0x7cac, 0x2d7c, 0xd32c, 0x80d2, 0x7280, 0x8d72,
+ 0x1b8e, 0x831b, 0xac82, 0xfdac, 0xa7fd, 0x15a8, 0xd614, 0xe0d5, 0x7be0,
+ 0xb37b, 0x61b3, 0x9661, 0x9d95, 0xc79d, 0x83c7, 0xd883, 0xead7, 0xceb,
+ 0xf60c, 0xa9f5, 0x19a9, 0xa019, 0x8f9f, 0xd48f, 0x3ad5, 0x853a, 0x985,
+ 0x5309, 0x6f52, 0x1370, 0x6e13, 0xa96d, 0x98a9, 0x5198, 0x9f51, 0xb69f,
+ 0xa1b6, 0x2ea1, 0x672e, 0x2067, 0x6520, 0xaf65, 0x6eaf, 0x7e6f, 0xee7e,
+ 0x17ef, 0xa917, 0xcea8, 0x9ace, 0xff99, 0x5dff, 0xdf5d, 0x38df, 0xa39,
+ 0x1c0b, 0xe01b, 0x46e0, 0xcb46, 0x90cb, 0xba90, 0x4bb, 0x9104, 0x9d90,
+ 0xc89c, 0xf6c8, 0x6cf6, 0x886c, 0x1789, 0xbd17, 0x70bc, 0x7e71, 0x17e,
+ 0x1f01, 0xa01f, 0xbaa0, 0x14bb, 0xfc14, 0x7afb, 0xa07a, 0x3da0, 0xbf3d,
+ 0x48bf, 0x8c48, 0x968b, 0x9d96, 0xfd9d, 0x96fd, 0x9796, 0x6b97, 0xd16b,
+ 0xf4d1, 0x3bf4, 0x253c, 0x9125, 0x6691, 0xc166, 0x34c1, 0x5735, 0x1a57,
+ 0xdc19, 0x77db, 0x8577, 0x4a85, 0x824a, 0x9182, 0x7f91, 0xfd7f, 0xb4c3,
+ 0xb5b4, 0xb3b5, 0x7eb3, 0x617e, 0x4e61, 0xa4f, 0x530a, 0x3f52, 0xa33e,
+ 0x34a3, 0x9234, 0xf091, 0xf4f0, 0x1bf5, 0x311b, 0x9631, 0x6a96, 0x386b,
+ 0x1d39, 0xe91d, 0xe8e9, 0x69e8, 0x426a, 0xee42, 0x89ee, 0x368a, 0x2837,
+ 0x7428, 0x5974, 0x6159, 0x1d62, 0x7b1d, 0xf77a, 0x7bf7, 0x6b7c, 0x696c,
+ 0xf969, 0x4cf9, 0x714c, 0x4e71, 0x6b4e, 0x256c, 0x6e25, 0xe96d, 0x94e9,
+ 0x8f94, 0x3e8f, 0x343e, 0x4634, 0xb646, 0x97b5, 0x8997, 0xe8a, 0x900e,
+ 0x8090, 0xfd80, 0xa0fd, 0x16a1, 0xf416, 0xebf4, 0x95ec, 0x1196, 0x8911,
+ 0x3d89, 0xda3c, 0x9fd9, 0xd79f, 0x4bd7, 0x214c, 0x3021, 0x4f30, 0x994e,
+ 0x5c99, 0x6f5d, 0x326f, 0xab31, 0x6aab, 0xe969, 0x90e9, 0x1190, 0xff10,
+ 0xa2fe, 0xe0a2, 0x66e1, 0x4067, 0x9e3f, 0x2d9e, 0x712d, 0x8170, 0xd180,
+ 0xffd1, 0x25ff, 0x3826, 0x2538, 0x5f24, 0xc45e, 0x1cc4, 0xdf1c, 0x93df,
+ 0xc793, 0x80c7, 0x2380, 0xd223, 0x7ed2, 0xfc7e, 0x22fd, 0x7422, 0x1474,
+ 0xb714, 0x7db6, 0x857d, 0xa85, 0xa60a, 0x88a6, 0x4289, 0x7842, 0xc278,
+ 0xf7c2, 0xcdf7, 0x84cd, 0xae84, 0x8cae, 0xb98c, 0x1aba, 0x4d1a, 0x884c,
+ 0x4688, 0xcc46, 0xd8cb, 0x2bd9, 0xbe2b, 0xa2be, 0x72a2, 0xf772, 0xd2f6,
+ 0x75d2, 0xc075, 0xa3c0, 0x63a3, 0xae63, 0x8fae, 0x2a90, 0x5f2a, 0xef5f,
+ 0x5cef, 0xa05c, 0x89a0, 0x5e89, 0x6b5e, 0x736b, 0x773, 0x9d07, 0xe99c,
+ 0x27ea, 0x2028, 0xc20, 0x980b, 0x4797, 0x2848, 0x9828, 0xc197, 0x48c2,
+ 0x2449, 0x7024, 0x570, 0x3e05, 0xd3e, 0xf60c, 0xbbf5, 0x69bb, 0x3f6a,
+ 0x740, 0xf006, 0xe0ef, 0xbbe0, 0xadbb, 0x56ad, 0xcf56, 0xbfce, 0xa9bf,
+ 0x205b, 0x6920, 0xae69, 0x50ae, 0x2050, 0xf01f, 0x27f0, 0x9427, 0x8993,
+ 0x8689, 0x4087, 0x6e40, 0xb16e, 0xa1b1, 0xe8a1, 0x87e8, 0x6f88, 0xfe6f,
+ 0x4cfe, 0xe94d, 0xd5e9, 0x47d6, 0x3148, 0x5f31, 0xc35f, 0x13c4, 0xa413,
+ 0x5a5, 0x2405, 0xc223, 0x66c2, 0x3667, 0x5e37, 0x5f5e, 0x2f5f, 0x8c2f,
+ 0xe48c, 0xd0e4, 0x4d1, 0xd104, 0xe4d0, 0xcee4, 0xfcf, 0x480f, 0xa447,
+ 0x5ea4, 0xff5e, 0xbefe, 0x8dbe, 0x1d8e, 0x411d, 0x1841, 0x6918, 0x5469,
+ 0x1155, 0xc611, 0xaac6, 0x37ab, 0x2f37, 0xca2e, 0x87ca, 0xbd87, 0xabbd,
+ 0xb3ab, 0xcb4, 0xce0c, 0xfccd, 0xa5fd, 0x72a5, 0xf072, 0x83f0, 0xfe83,
+ 0x97fd, 0xc997, 0xb0c9, 0xadb0, 0xe6ac, 0x88e6, 0x1088, 0xbe10, 0x16be,
+ 0xa916, 0xa3a8, 0x46a3, 0x5447, 0xe953, 0x84e8, 0x2085, 0xa11f, 0xfa1,
+ 0xdd0f, 0xbedc, 0x5abe, 0x805a, 0xc97f, 0x6dc9, 0x826d, 0x4a82, 0x934a,
+ 0x5293, 0xd852, 0xd3d8, 0xadd3, 0xf4ad, 0xf3f4, 0xfcf3, 0xfefc, 0xcafe,
+ 0xb7ca, 0x3cb8, 0xa13c, 0x18a1, 0x1418, 0xea13, 0x91ea, 0xf891, 0x53f8,
+ 0xa254, 0xe9a2, 0x87ea, 0x4188, 0x1c41, 0xdc1b, 0xf5db, 0xcaf5, 0x45ca,
+ 0x6d45, 0x396d, 0xde39, 0x90dd, 0x1e91, 0x1e, 0x7b00, 0x6a7b, 0xa46a,
+ 0xc9a3, 0x9bc9, 0x389b, 0x1139, 0x5211, 0x1f52, 0xeb1f, 0xabeb, 0x48ab,
+ 0x9348, 0xb392, 0x17b3, 0x1618, 0x5b16, 0x175b, 0xdc17, 0xdedb, 0x1cdf,
+ 0xeb1c, 0xd1ea, 0x4ad2, 0xd4b, 0xc20c, 0x24c2, 0x7b25, 0x137b, 0x8b13,
+ 0x618b, 0xa061, 0xff9f, 0xfffe, 0x72ff, 0xf572, 0xe2f5, 0xcfe2, 0xd2cf,
+ 0x75d3, 0x6a76, 0xc469, 0x1ec4, 0xfc1d, 0x59fb, 0x455a, 0x7a45, 0xa479,
+ 0xb7a4
+};
+
+static u8 tmp_buf[TEST_BUFLEN];
+
+#define full_csum(buff, len, sum) csum_fold(csum_partial(buff, len, sum))
+
+#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, (__force u64)lhs, (__force u64)rhs)
+
+static __sum16 to_sum16(u16 x)
+{
+ return (__force __sum16)le16_to_cpu((__force __le16)x);
+}
+
+/* This function swaps the bytes inside each half of a __wsum */
+static __wsum to_wsum(u32 x)
+{
+ u16 hi = le16_to_cpu((__force __le16)(x >> 16));
+ u16 lo = le16_to_cpu((__force __le16)x);
+
+ return (__force __wsum)((hi << 16) | lo);
+}
+
+static void assert_setup_correct(struct kunit *test)
+{
+ CHECK_EQ(ARRAY_SIZE(random_buf), MAX_LEN);
+ CHECK_EQ(ARRAY_SIZE(expected_results), MAX_LEN);
+ CHECK_EQ(ARRAY_SIZE(init_sums_no_overflow), MAX_LEN);
+}
+
+/*
+ * Test with randomized input (predetermined random data with known results).
+ */
+static void test_csum_fixed_random_inputs(struct kunit *test)
+{
+ int len, align;
+ __wsum sum;
+ __sum16 result, expec;
+
+ assert_setup_correct(test);
+ for (align = 0; align < TEST_BUFLEN; ++align) {
+ memcpy(&tmp_buf[align], random_buf,
+ min(MAX_LEN, TEST_BUFLEN - align));
+ for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN;
+ ++len) {
+ /*
+ * Test the precomputed random input.
+ */
+ sum = to_wsum(random_init_sum);
+ result = full_csum(&tmp_buf[align], len, sum);
+ expec = to_sum16(expected_results[len]);
+ CHECK_EQ(result, expec);
+ }
+ }
+}
+
+/*
+ * All-ones input test. If any carry operations are missing, this fails.
+ */
+static void test_csum_all_carry_inputs(struct kunit *test)
+{
+ int len, align;
+ __wsum sum;
+ __sum16 result, expec;
+
+ assert_setup_correct(test);
+ memset(tmp_buf, 0xff, TEST_BUFLEN);
+ for (align = 0; align < TEST_BUFLEN; ++align) {
+ for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN;
+ ++len) {
+ /*
+ * All carries from input and initial sum.
+ */
+ sum = to_wsum(0xffffffff);
+ result = full_csum(&tmp_buf[align], len, sum);
+ expec = to_sum16((len & 1) ? 0xff00 : 0);
+ CHECK_EQ(result, expec);
+
+ /*
+ * All carries from input.
+ */
+ sum = 0;
+ result = full_csum(&tmp_buf[align], len, sum);
+ if (len & 1)
+ expec = to_sum16(0xff00);
+ else if (len)
+ expec = 0;
+ else
+ expec = to_sum16(0xffff);
+ CHECK_EQ(result, expec);
+ }
+ }
+}
+
+/*
+ * Test with input that alone doesn't cause any carries. By selecting the
+ * maximum initial sum, this allows us to test that there are no carries
+ * where there shouldn't be.
+ */
+static void test_csum_no_carry_inputs(struct kunit *test)
+{
+ int len, align;
+ __wsum sum;
+ __sum16 result, expec;
+
+ assert_setup_correct(test);
+ memset(tmp_buf, 0x4, TEST_BUFLEN);
+ for (align = 0; align < TEST_BUFLEN; ++align) {
+ for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN;
+ ++len) {
+ /*
+ * Expect no carries.
+ */
+ sum = to_wsum(init_sums_no_overflow[len]);
+ result = full_csum(&tmp_buf[align], len, sum);
+ expec = 0;
+ CHECK_EQ(result, expec);
+
+ /*
+ * Expect one carry.
+ */
+ sum = to_wsum(init_sums_no_overflow[len] + 1);
+ result = full_csum(&tmp_buf[align], len, sum);
+ expec = to_sum16(len ? 0xfffe : 0xffff);
+ CHECK_EQ(result, expec);
+ }
+ }
+}
+
+static void test_ip_fast_csum(struct kunit *test)
+{
+ __sum16 csum_result;
+ u16 expected;
+
+ for (int len = IPv4_MIN_WORDS; len < IPv4_MAX_WORDS; len++) {
+ for (int index = 0; index < NUM_IP_FAST_CSUM_TESTS; index++) {
+ csum_result = ip_fast_csum(random_buf + index, len);
+ expected =
+ expected_fast_csum[(len - IPv4_MIN_WORDS) *
+ NUM_IP_FAST_CSUM_TESTS +
+ index];
+ CHECK_EQ(to_sum16(expected), csum_result);
+ }
+ }
+}
+
+static void test_csum_ipv6_magic(struct kunit *test)
+{
+ const struct in6_addr *saddr;
+ const struct in6_addr *daddr;
+ unsigned int len;
+ unsigned char proto;
+ __wsum csum;
+
+ if (!IS_ENABLED(CONFIG_NET))
+ return;
+
+ const int daddr_offset = sizeof(struct in6_addr);
+ const int len_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr);
+ const int proto_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) +
+ sizeof(int);
+ const int csum_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) +
+ sizeof(int) + sizeof(char);
+
+ for (int i = 0; i < NUM_IPv6_TESTS; i++) {
+ saddr = (const struct in6_addr *)(random_buf + i);
+ daddr = (const struct in6_addr *)(random_buf + i +
+ daddr_offset);
+ len = le32_to_cpu(*(__le32 *)(random_buf + i + len_offset));
+ proto = *(random_buf + i + proto_offset);
+ csum = *(__wsum *)(random_buf + i + csum_offset);
+ CHECK_EQ(to_sum16(expected_csum_ipv6_magic[i]),
+ csum_ipv6_magic(saddr, daddr, len, proto, csum));
+ }
+}
+
+static struct kunit_case __refdata checksum_test_cases[] = {
+ KUNIT_CASE(test_csum_fixed_random_inputs),
+ KUNIT_CASE(test_csum_all_carry_inputs),
+ KUNIT_CASE(test_csum_no_carry_inputs),
+ KUNIT_CASE(test_ip_fast_csum),
+ KUNIT_CASE(test_csum_ipv6_magic),
+ {}
+};
+
+static struct kunit_suite checksum_test_suite = {
+ .name = "checksum",
+ .test_cases = checksum_test_cases,
+};
+
+kunit_test_suites(&checksum_test_suite);
+
+MODULE_AUTHOR("Noah Goldstein <goldstein.w.n@gmail.com>");
+MODULE_DESCRIPTION("Test cases for csum_* APIs");
+MODULE_LICENSE("GPL");
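
The carry-oriented cases above rely on the end-around-carry property of the
Internet checksum. A minimal sketch of the fold step (illustrative only, not
the kernel's optimized csum_partial()/csum_fold(); the example_ name is
hypothetical):

	static u16 example_fold(u32 sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half in */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry */
		return (u16)~sum;			/* complement, as csum_fold() does */
	}

This is also why the all-ones expectations hold: 0xffff is the
ones'-complement "negative zero", so adding any number of 0xffff words leaves
the folded sum unchanged.
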
diff --git a/lib/tests/cmdline_kunit.c b/lib/tests/cmdline_kunit.c
new file mode 100644
index 000000000000..c1602f797637
--- /dev/null
+++ b/lib/tests/cmdline_kunit.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for API provided by cmdline.c
+ */
+
+#include <kunit/test.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/string.h>
+
+static const char *cmdline_test_strings[] = {
+ "\"\"", "" , "=" , "\"-", "," , "-," , ",-" , "-" ,
+ "+," , "--", ",,", "''" , "\"\",", "\",\"", "-\"\"", "\"",
+};
+
+static const int cmdline_test_values[] = {
+ 1, 1, 1, 1, 2, 3, 2, 3,
+ 1, 3, 2, 1, 1, 1, 3, 1,
+};
+
+static_assert(ARRAY_SIZE(cmdline_test_strings) == ARRAY_SIZE(cmdline_test_values));
+
+static const char *cmdline_test_range_strings[] = {
+ "-7" , "--7" , "-1-2" , "7--9",
+ "7-" , "-7--9", "7-9," , "9-7" ,
+ "5-a", "a-5" , "5-8" , ",8-5",
+ "+,1", "-,4" , "-3,0-1,6", "4,-" ,
+ " +2", " -9" , "0-1,-3,6", "- 9" ,
+};
+
+static const int cmdline_test_range_values[][16] = {
+ { 1, -7, }, { 0, -0, }, { 4, -1, 0, +1, 2, }, { 0, 7, },
+ { 0, +7, }, { 0, -7, }, { 3, +7, 8, +9, 0, }, { 0, 9, },
+ { 0, +5, }, { 0, -0, }, { 4, +5, 6, +7, 8, }, { 0, 0, },
+ { 0, +0, }, { 0, -0, }, { 4, -3, 0, +1, 6, }, { 1, 4, },
+ { 0, +0, }, { 0, -0, }, { 4, +0, 1, -3, 6, }, { 0, 0, },
+};
+
+static_assert(ARRAY_SIZE(cmdline_test_range_strings) == ARRAY_SIZE(cmdline_test_range_values));
+
+static void cmdline_do_one_test(struct kunit *test, const char *in, int rc, int offset)
+{
+ const char *fmt = "Pattern: %s";
+ const char *out = in;
+ int dummy;
+ int ret;
+
+ ret = get_option((char **)&out, &dummy);
+
+ KUNIT_EXPECT_EQ_MSG(test, ret, rc, fmt, in);
+ KUNIT_EXPECT_PTR_EQ_MSG(test, out, in + offset, fmt, in);
+}
+
+static void cmdline_test_noint(struct kunit *test)
+{
+ unsigned int i = 0;
+
+ do {
+ const char *str = cmdline_test_strings[i];
+ int rc = 0;
+ int offset;
+
+		/* Only a leading '-' (first character) will advance the pointer */
+ offset = !!(*str == '-');
+ cmdline_do_one_test(test, str, rc, offset);
+ } while (++i < ARRAY_SIZE(cmdline_test_strings));
+}
+
+static void cmdline_test_lead_int(struct kunit *test)
+{
+ unsigned int i = 0;
+ char in[32];
+
+ do {
+ const char *str = cmdline_test_strings[i];
+ int rc = cmdline_test_values[i];
+ int offset;
+
+ sprintf(in, "%u%s", get_random_u8(), str);
+		/* Only the first '-' right after the number will advance the pointer */
+ offset = strlen(in) - strlen(str) + !!(rc == 2);
+ cmdline_do_one_test(test, in, rc, offset);
+ } while (++i < ARRAY_SIZE(cmdline_test_strings));
+}
+
+static void cmdline_test_tail_int(struct kunit *test)
+{
+ unsigned int i = 0;
+ char in[32];
+
+ do {
+ const char *str = cmdline_test_strings[i];
+		/* When "" or "-", the result will be a valid integer */
+ int rc = strcmp(str, "") ? (strcmp(str, "-") ? 0 : 1) : 1;
+ int offset;
+
+ sprintf(in, "%s%u", str, get_random_u8());
+ /*
+		 * Only a leading '-' that is not followed by an integer
+		 * will advance the pointer.
+ */
+ offset = rc ? strlen(in) : !!(*str == '-');
+ cmdline_do_one_test(test, in, rc, offset);
+ } while (++i < ARRAY_SIZE(cmdline_test_strings));
+}
+
+static void cmdline_do_one_range_test(struct kunit *test, const char *in,
+ unsigned int n, const int *e)
+{
+ unsigned int i;
+ int r[16];
+ int *p;
+
+ memset(r, 0, sizeof(r));
+ get_options(in, ARRAY_SIZE(r), r);
+ KUNIT_EXPECT_EQ_MSG(test, r[0], e[0], "in test %u (parsed) expected %d numbers, got %d",
+ n, e[0], r[0]);
+ for (i = 1; i < ARRAY_SIZE(r); i++)
+ KUNIT_EXPECT_EQ_MSG(test, r[i], e[i], "in test %u at %u", n, i);
+
+ memset(r, 0, sizeof(r));
+ get_options(in, 0, r);
+ KUNIT_EXPECT_EQ_MSG(test, r[0], e[0], "in test %u (validated) expected %d numbers, got %d",
+ n, e[0], r[0]);
+
+ p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
+	KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %td out of bounds", n, p - r);
+}
+
+static void cmdline_test_range(struct kunit *test)
+{
+ unsigned int i = 0;
+
+ do {
+ const char *str = cmdline_test_range_strings[i];
+ const int *e = cmdline_test_range_values[i];
+
+ cmdline_do_one_range_test(test, str, i, e);
+ } while (++i < ARRAY_SIZE(cmdline_test_range_strings));
+}
+
+static struct kunit_case cmdline_test_cases[] = {
+ KUNIT_CASE(cmdline_test_noint),
+ KUNIT_CASE(cmdline_test_lead_int),
+ KUNIT_CASE(cmdline_test_tail_int),
+ KUNIT_CASE(cmdline_test_range),
+ {}
+};
+
+static struct kunit_suite cmdline_test_suite = {
+ .name = "cmdline",
+ .test_cases = cmdline_test_cases,
+};
+kunit_test_suite(cmdline_test_suite);
+
+MODULE_DESCRIPTION("Test cases for API provided by cmdline.c");
+MODULE_LICENSE("GPL");
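
A hypothetical call site for the parsers exercised above, assuming the usual
<linux/kernel.h> and <linux/errno.h> includes. The conventions the tests rely
on: ints[0] receives the number of integers parsed, and the returned pointer
is advanced past the consumed input:

	static int example_parse_list(const char *arg)
	{
		int ints[8];	/* ints[0] = count of values parsed into ints[1..] */
		char *rest;

		rest = get_options(arg, ARRAY_SIZE(ints), ints);
		if (*rest)
			pr_warn("unparsed tail: \"%s\"\n", rest);
		return ints[0] ? ints[1] : -EINVAL;	/* first value, if any */
	}
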
diff --git a/lib/tests/cpumask_kunit.c b/lib/tests/cpumask_kunit.c
new file mode 100644
index 000000000000..6b62a6bdd50e
--- /dev/null
+++ b/lib/tests/cpumask_kunit.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for cpumask.
+ *
+ * Author: Sander Vanheule <sander@svanheule.net>
+ */
+
+#include <kunit/test.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+
+#define MASK_MSG(m) \
+ "%s contains %sCPUs %*pbl", #m, (cpumask_weight(m) ? "" : "no "), \
+ nr_cpumask_bits, cpumask_bits(m)
+
+#define EXPECT_FOR_EACH_CPU_EQ(test, mask) \
+ do { \
+ const cpumask_t *m = (mask); \
+ int mask_weight = cpumask_weight(m); \
+ int cpu, iter = 0; \
+ for_each_cpu(cpu, m) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
+ } while (0)
+
+#define EXPECT_FOR_EACH_CPU_OP_EQ(test, op, mask1, mask2) \
+ do { \
+ const cpumask_t *m1 = (mask1); \
+ const cpumask_t *m2 = (mask2); \
+ int weight; \
+ int cpu, iter = 0; \
+ cpumask_##op(&mask_tmp, m1, m2); \
+ weight = cpumask_weight(&mask_tmp); \
+ for_each_cpu_##op(cpu, mask1, mask2) \
+ iter++; \
+ KUNIT_EXPECT_EQ((test), weight, iter); \
+ } while (0)
+
+#define EXPECT_FOR_EACH_CPU_WRAP_EQ(test, mask) \
+ do { \
+ const cpumask_t *m = (mask); \
+ int mask_weight = cpumask_weight(m); \
+ int cpu, iter = 0; \
+ for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
+ } while (0)
+
+#define EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, name) \
+ do { \
+ int mask_weight = num_##name##_cpus(); \
+ int cpu, iter = 0; \
+ for_each_##name##_cpu(cpu) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(cpu_##name##_mask)); \
+ } while (0)
+
+static cpumask_t mask_empty;
+static cpumask_t mask_all;
+static cpumask_t mask_tmp;
+
+static void test_cpumask_weight(struct kunit *test)
+{
+ KUNIT_EXPECT_TRUE_MSG(test, cpumask_empty(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_TRUE_MSG(test, cpumask_full(&mask_all), MASK_MSG(&mask_all));
+
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_weight(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+ KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(&mask_all), MASK_MSG(&mask_all));
+}
+
+static void test_cpumask_first(struct kunit *test)
+{
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first(cpu_possible_mask), MASK_MSG(cpu_possible_mask));
+
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first_zero(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+}
+
+static void test_cpumask_last(struct kunit *test)
+{
+ KUNIT_EXPECT_LE_MSG(test, nr_cpumask_bits, cpumask_last(&mask_empty),
+ MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids - 1, cpumask_last(cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+}
+
+static void test_cpumask_next(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next_zero(-1, &mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next(-1, &mask_empty),
+ MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next(-1, cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+}
+
+static void test_cpumask_iterators(struct kunit *test)
+{
+ EXPECT_FOR_EACH_CPU_EQ(test, &mask_empty);
+ EXPECT_FOR_EACH_CPU_WRAP_EQ(test, &mask_empty);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, and, &mask_empty, &mask_empty);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, &mask_empty);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, &mask_empty, &mask_empty);
+
+ EXPECT_FOR_EACH_CPU_EQ(test, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_WRAP_EQ(test, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, cpu_possible_mask, &mask_empty);
+}
+
+static void test_cpumask_iterators_builtin(struct kunit *test)
+{
+ EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, possible);
+
+ /* Ensure the dynamic masks are stable while running the tests */
+ cpu_hotplug_disable();
+
+ EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, online);
+ EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, present);
+
+ cpu_hotplug_enable();
+}
+
+static int test_cpumask_init(struct kunit *test)
+{
+ cpumask_clear(&mask_empty);
+ cpumask_setall(&mask_all);
+
+ return 0;
+}
+
+static struct kunit_case test_cpumask_cases[] = {
+ KUNIT_CASE(test_cpumask_weight),
+ KUNIT_CASE(test_cpumask_first),
+ KUNIT_CASE(test_cpumask_last),
+ KUNIT_CASE(test_cpumask_next),
+ KUNIT_CASE(test_cpumask_iterators),
+ KUNIT_CASE(test_cpumask_iterators_builtin),
+ {}
+};
+
+static struct kunit_suite test_cpumask_suite = {
+ .name = "cpumask",
+ .init = test_cpumask_init,
+ .test_cases = test_cpumask_cases,
+};
+kunit_test_suite(test_cpumask_suite);
+
+MODULE_DESCRIPTION("KUnit tests for cpumask");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/crc_kunit.c b/lib/tests/crc_kunit.c
new file mode 100644
index 000000000000..585c48b65cef
--- /dev/null
+++ b/lib/tests/crc_kunit.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Unit tests and benchmarks for the CRC library functions
+ *
+ * Copyright 2024 Google LLC
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+#include <kunit/test.h>
+#include <linux/crc7.h>
+#include <linux/crc16.h>
+#include <linux/crc-t10dif.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/crc64.h>
+#include <linux/prandom.h>
+#include <linux/vmalloc.h>
+
+#define CRC_KUNIT_SEED 42
+#define CRC_KUNIT_MAX_LEN 16384
+#define CRC_KUNIT_NUM_TEST_ITERS 1000
+
+static struct rnd_state rng;
+static u8 *test_buffer;
+static size_t test_buflen;
+
+/**
+ * struct crc_variant - describes a CRC variant
+ * @bits: Number of bits in the CRC, 1 <= @bits <= 64.
+ * @le: true if it's a "little endian" CRC (reversed mapping between bits and
+ * polynomial coefficients in each byte), false if it's a "big endian" CRC
+ * (natural mapping between bits and polynomial coefficients in each byte)
+ * @poly: The generator polynomial with the highest-order term omitted.
+ * Bit-reversed if @le is true.
+ * @func: The function to compute a CRC. The type signature uses u64 so that it
+ * can fit any CRC up to CRC-64. The CRC is passed in, and is expected
+ * to be returned in, the least significant bits of the u64. The
+ * function is expected to *not* invert the CRC at the beginning and end.
+ * @combine_func: Optional function to combine two CRCs.
+ */
+struct crc_variant {
+ int bits;
+ bool le;
+ u64 poly;
+ u64 (*func)(u64 crc, const u8 *p, size_t len);
+ u64 (*combine_func)(u64 crc1, u64 crc2, size_t len2);
+};
+
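+/*
+ * Illustrative only -- this variant is hypothetical and is not
+ * registered below.  A big endian CRC-8 with generator polynomial 0x07
+ * would be described as:
+ *
+ *	static const struct crc_variant crc_variant_crc8_example = {
+ *		.bits = 8,
+ *		.le   = false,
+ *		.poly = 0x07,
+ *		.func = crc8_example_wrapper,
+ *	};
+ *
+ * where crc8_example_wrapper() (also hypothetical) adapts a library
+ * call to the u64-based signature documented above.
+ */
+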
+static u32 rand32(void)
+{
+ return prandom_u32_state(&rng);
+}
+
+static u64 rand64(void)
+{
+ u32 n = rand32();
+
+ return ((u64)n << 32) | rand32();
+}
+
+static u64 crc_mask(const struct crc_variant *v)
+{
+ return (u64)-1 >> (64 - v->bits);
+}
+
+/* Reference implementation of any CRC variant */
+static u64 crc_ref(const struct crc_variant *v,
+ u64 crc, const u8 *p, size_t len)
+{
+ size_t i, j;
+
+ for (i = 0; i < len; i++) {
+ for (j = 0; j < 8; j++) {
+ if (v->le) {
+ crc ^= (p[i] >> j) & 1;
+ crc = (crc >> 1) ^ ((crc & 1) ? v->poly : 0);
+ } else {
+ crc ^= (u64)((p[i] >> (7 - j)) & 1) <<
+ (v->bits - 1);
+ if (crc & (1ULL << (v->bits - 1)))
+ crc = ((crc << 1) ^ v->poly) &
+ crc_mask(v);
+ else
+ crc <<= 1;
+ }
+ }
+ }
+ return crc;
+}
+
+static int crc_suite_init(struct kunit_suite *suite)
+{
+ /*
+ * Allocate the test buffer using vmalloc() with a page-aligned length
+ * so that it is immediately followed by a guard page. This allows
+ * buffer overreads to be detected, even in assembly code.
+ */
+ test_buflen = round_up(CRC_KUNIT_MAX_LEN, PAGE_SIZE);
+ test_buffer = vmalloc(test_buflen);
+ if (!test_buffer)
+ return -ENOMEM;
+
+ prandom_seed_state(&rng, CRC_KUNIT_SEED);
+ prandom_bytes_state(&rng, test_buffer, test_buflen);
+ return 0;
+}
+
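+/*
+ * crc_main_test() below exploits this layout: when it places a
+ * length-len read at offset test_buflen - len, the read ends flush
+ * against the guard page, so even a single-byte overread faults.
+ */
+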
+static void crc_suite_exit(struct kunit_suite *suite)
+{
+ vfree(test_buffer);
+ test_buffer = NULL;
+}
+
+/* Generate a random initial CRC. */
+static u64 generate_random_initial_crc(const struct crc_variant *v)
+{
+ switch (rand32() % 4) {
+ case 0:
+ return 0;
+ case 1:
+ return crc_mask(v); /* All 1 bits */
+ default:
+ return rand64() & crc_mask(v);
+ }
+}
+
+/* Generate a random length, preferring small lengths. */
+static size_t generate_random_length(size_t max_length)
+{
+ size_t len;
+
+ switch (rand32() % 3) {
+ case 0:
+ len = rand32() % 128;
+ break;
+ case 1:
+ len = rand32() % 3072;
+ break;
+ default:
+ len = rand32();
+ break;
+ }
+ return len % (max_length + 1);
+}
+
+/* Test that v->func gives the same CRCs as a reference implementation. */
+static void crc_main_test(struct kunit *test, const struct crc_variant *v)
+{
+ size_t i;
+
+ for (i = 0; i < CRC_KUNIT_NUM_TEST_ITERS; i++) {
+ u64 init_crc, expected_crc, actual_crc;
+ size_t len, offset;
+ bool nosimd;
+
+ init_crc = generate_random_initial_crc(v);
+ len = generate_random_length(CRC_KUNIT_MAX_LEN);
+
+ /* Generate a random offset. */
+ if (rand32() % 2 == 0) {
+ /* Use a random alignment mod 64 */
+ offset = rand32() % 64;
+ offset = min(offset, CRC_KUNIT_MAX_LEN - len);
+ } else {
+ /* Go up to the guard page, to catch buffer overreads */
+ offset = test_buflen - len;
+ }
+
+ if (rand32() % 8 == 0)
+ /* Refresh the data occasionally. */
+ prandom_bytes_state(&rng, &test_buffer[offset], len);
+
+ nosimd = rand32() % 8 == 0;
+
+ /*
+ * Compute the CRC, and verify that it equals the CRC computed
+ * by a simple bit-at-a-time reference implementation.
+ */
+ expected_crc = crc_ref(v, init_crc, &test_buffer[offset], len);
+ if (nosimd)
+ local_irq_disable();
+ actual_crc = v->func(init_crc, &test_buffer[offset], len);
+ if (nosimd)
+ local_irq_enable();
+ KUNIT_EXPECT_EQ_MSG(test, expected_crc, actual_crc,
+ "Wrong result with len=%zu offset=%zu nosimd=%d",
+ len, offset, nosimd);
+ }
+}
+
+/* Test that CRC(concat(A, B)) == combine_CRCs(CRC(A), CRC(B), len(B)). */
+static void crc_combine_test(struct kunit *test, const struct crc_variant *v)
+{
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ u64 init_crc = generate_random_initial_crc(v);
+ size_t len1 = generate_random_length(CRC_KUNIT_MAX_LEN);
+ size_t len2 = generate_random_length(CRC_KUNIT_MAX_LEN - len1);
+ u64 crc1, crc2, expected_crc, actual_crc;
+
+ prandom_bytes_state(&rng, test_buffer, len1 + len2);
+ crc1 = v->func(init_crc, test_buffer, len1);
+ crc2 = v->func(0, &test_buffer[len1], len2);
+ expected_crc = v->func(init_crc, test_buffer, len1 + len2);
+ actual_crc = v->combine_func(crc1, crc2, len2);
+ KUNIT_EXPECT_EQ_MSG(test, expected_crc, actual_crc,
+ "CRC combination gave wrong result with len1=%zu len2=%zu\n",
+ len1, len2);
+ }
+}
+
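+/*
+ * A sketch of why combine_func matters in practice (buf, half, and len
+ * are assumed here): two halves of a buffer can be checksummed
+ * independently, e.g. on different CPUs, and merged afterwards:
+ *
+ *	u32 lo    = crc32_le(0, buf, half);
+ *	u32 hi    = crc32_le(0, buf + half, len - half);
+ *	u32 whole = crc32_le_combine(lo, hi, len - half);
+ *
+ * crc_combine_test() verifies exactly this identity for random splits.
+ */
+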
+static void crc_test(struct kunit *test, const struct crc_variant *v)
+{
+ crc_main_test(test, v);
+ if (v->combine_func)
+ crc_combine_test(test, v);
+}
+
+static __always_inline void
+crc_benchmark(struct kunit *test,
+ u64 (*crc_func)(u64 crc, const u8 *p, size_t len))
+{
+ static const size_t lens_to_test[] = {
+ 1, 16, 64, 127, 128, 200, 256, 511, 512, 1024, 3173, 4096, 16384,
+ };
+ size_t len, i, j, num_iters;
+ /*
+ * The CRC value that this function computes in a series of calls to
+ * crc_func is never actually used, so use volatile to ensure that the
+ * computations are done as intended and don't all get optimized out.
+ */
+ volatile u64 crc = 0;
+ u64 t;
+
+ if (!IS_ENABLED(CONFIG_CRC_BENCHMARK))
+ kunit_skip(test, "not enabled");
+
+ /* warm-up */
+ for (i = 0; i < 10000000; i += CRC_KUNIT_MAX_LEN)
+ crc = crc_func(crc, test_buffer, CRC_KUNIT_MAX_LEN);
+
+ for (i = 0; i < ARRAY_SIZE(lens_to_test); i++) {
+ len = lens_to_test[i];
+ KUNIT_ASSERT_LE(test, len, CRC_KUNIT_MAX_LEN);
+ num_iters = 10000000 / (len + 128);
+ preempt_disable();
+ t = ktime_get_ns();
+ for (j = 0; j < num_iters; j++)
+ crc = crc_func(crc, test_buffer, len);
+ t = ktime_get_ns() - t;
+ preempt_enable();
+ kunit_info(test, "len=%zu: %llu MB/s\n",
+ len, div64_u64((u64)len * num_iters * 1000, t));
+ }
+}
+
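+/*
+ * The *_benchmark cases below are skipped unless CONFIG_CRC_BENCHMARK=y.
+ * One plausible invocation (kunit.py flags assumed, adjust as needed):
+ *
+ *	./tools/testing/kunit/kunit.py run \
+ *		--kconfig_add CONFIG_CRC_KUNIT_TEST=y \
+ *		--kconfig_add CONFIG_CRC_BENCHMARK=y \
+ *		crc
+ */
+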
+/* crc7_be */
+
+static u64 crc7_be_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ /*
+ * crc7_be() left-aligns the 7-bit CRC in a u8, whereas the test wants a
+ * right-aligned CRC (in a u64). Convert between the conventions.
+ */
+ return crc7_be(crc << 1, p, len) >> 1;
+}
+
+static const struct crc_variant crc_variant_crc7_be = {
+ .bits = 7,
+ .poly = 0x9,
+ .func = crc7_be_wrapper,
+};
+
+static void crc7_be_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc7_be);
+}
+
+static void crc7_be_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc7_be_wrapper);
+}
+
+/* crc16 */
+
+static u64 crc16_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc16(crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc16 = {
+ .bits = 16,
+ .le = true,
+ .poly = 0xa001,
+ .func = crc16_wrapper,
+};
+
+static void crc16_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc16);
+}
+
+static void crc16_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc16_wrapper);
+}
+
+/* crc_t10dif */
+
+static u64 crc_t10dif_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc_t10dif_update(crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc_t10dif = {
+ .bits = 16,
+ .le = false,
+ .poly = 0x8bb7,
+ .func = crc_t10dif_wrapper,
+};
+
+static void crc_t10dif_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc_t10dif);
+}
+
+static void crc_t10dif_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc_t10dif_wrapper);
+}
+
+/* crc32_le */
+
+static u64 crc32_le_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc32_le(crc, p, len);
+}
+
+static u64 crc32_le_combine_wrapper(u64 crc1, u64 crc2, size_t len2)
+{
+ return crc32_le_combine(crc1, crc2, len2);
+}
+
+static const struct crc_variant crc_variant_crc32_le = {
+ .bits = 32,
+ .le = true,
+ .poly = 0xedb88320,
+ .func = crc32_le_wrapper,
+ .combine_func = crc32_le_combine_wrapper,
+};
+
+static void crc32_le_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc32_le);
+}
+
+static void crc32_le_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc32_le_wrapper);
+}
+
+/* crc32_be */
+
+static u64 crc32_be_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc32_be(crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc32_be = {
+ .bits = 32,
+ .le = false,
+ .poly = 0x04c11db7,
+ .func = crc32_be_wrapper,
+};
+
+static void crc32_be_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc32_be);
+}
+
+static void crc32_be_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc32_be_wrapper);
+}
+
+/* crc32c */
+
+static u64 crc32c_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc32c(crc, p, len);
+}
+
+static u64 crc32c_combine_wrapper(u64 crc1, u64 crc2, size_t len2)
+{
+ return crc32c_combine(crc1, crc2, len2);
+}
+
+static const struct crc_variant crc_variant_crc32c = {
+ .bits = 32,
+ .le = true,
+ .poly = 0x82f63b78,
+ .func = crc32c_wrapper,
+ .combine_func = crc32c_combine_wrapper,
+};
+
+static void crc32c_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc32c);
+}
+
+static void crc32c_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc32c_wrapper);
+}
+
+/* crc64_be */
+
+static u64 crc64_be_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ return crc64_be(crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc64_be = {
+ .bits = 64,
+ .le = false,
+ .poly = 0x42f0e1eba9ea3693,
+ .func = crc64_be_wrapper,
+};
+
+static void crc64_be_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc64_be);
+}
+
+static void crc64_be_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc64_be_wrapper);
+}
+
+/* crc64_nvme */
+
+static u64 crc64_nvme_wrapper(u64 crc, const u8 *p, size_t len)
+{
+ /* The inversions that crc64_nvme() does have to be undone here. */
+ return ~crc64_nvme(~crc, p, len);
+}
+
+static const struct crc_variant crc_variant_crc64_nvme = {
+ .bits = 64,
+ .le = true,
+ .poly = 0x9a6c9329ac4bc9b5,
+ .func = crc64_nvme_wrapper,
+};
+
+static void crc64_nvme_test(struct kunit *test)
+{
+ crc_test(test, &crc_variant_crc64_nvme);
+}
+
+static void crc64_nvme_benchmark(struct kunit *test)
+{
+ crc_benchmark(test, crc64_nvme_wrapper);
+}
+
+static struct kunit_case crc_test_cases[] = {
+ KUNIT_CASE(crc7_be_test),
+ KUNIT_CASE(crc7_be_benchmark),
+ KUNIT_CASE(crc16_test),
+ KUNIT_CASE(crc16_benchmark),
+ KUNIT_CASE(crc_t10dif_test),
+ KUNIT_CASE(crc_t10dif_benchmark),
+ KUNIT_CASE(crc32_le_test),
+ KUNIT_CASE(crc32_le_benchmark),
+ KUNIT_CASE(crc32_be_test),
+ KUNIT_CASE(crc32_be_benchmark),
+ KUNIT_CASE(crc32c_test),
+ KUNIT_CASE(crc32c_benchmark),
+ KUNIT_CASE(crc64_be_test),
+ KUNIT_CASE(crc64_be_benchmark),
+ KUNIT_CASE(crc64_nvme_test),
+ KUNIT_CASE(crc64_nvme_benchmark),
+ {},
+};
+
+static struct kunit_suite crc_test_suite = {
+ .name = "crc",
+ .test_cases = crc_test_cases,
+ .suite_init = crc_suite_init,
+ .suite_exit = crc_suite_exit,
+};
+kunit_test_suite(crc_test_suite);
+
+MODULE_DESCRIPTION("Unit tests and benchmarks for the CRC library functions");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/fortify_kunit.c b/lib/tests/fortify_kunit.c
new file mode 100644
index 000000000000..29ffc62a71e3
--- /dev/null
+++ b/lib/tests/fortify_kunit.c
@@ -0,0 +1,1128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy()
+ * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
+ *
+ * For corner cases with UBSAN, try testing with:
+ *
+ * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
+ * --kconfig_add CONFIG_FORTIFY_SOURCE=y \
+ * --kconfig_add CONFIG_UBSAN=y \
+ * --kconfig_add CONFIG_UBSAN_TRAP=y \
+ * --kconfig_add CONFIG_UBSAN_BOUNDS=y \
+ * --kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
+ * --make_options LLVM=1 fortify
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/* We don't need to fill dmesg with the fortify WARNs during testing. */
+#ifdef DEBUG
+# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
+# define FORTIFY_WARN_KUNIT(x...) WARN_ONCE(x)
+#else
+# define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
+# define FORTIFY_WARN_KUNIT(x...) do { } while (0)
+#endif
+
+/* Redefine fortify_panic() to track failures. */
+void fortify_add_kunit_error(int write);
+#define fortify_panic(func, write, avail, size, retfail) do { \
+ FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size); \
+ fortify_add_kunit_error(write); \
+ return (retfail); \
+} while (0)
+
+/* Redefine fortify_warn_once() to track memcpy() failures. */
+#define fortify_warn_once(chk_func, x...) do { \
+ bool __result = chk_func; \
+ FORTIFY_WARN_KUNIT(__result, x); \
+ if (__result) \
+ fortify_add_kunit_error(1); \
+} while (0)
+
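+/*
+ * Ordering matters here: both overrides above must be defined before
+ * <linux/string.h> is included below, so that the fortified string
+ * helpers it instantiates report into the KUnit counters instead of
+ * warning or panicking.
+ */
+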
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Handle being built without CONFIG_FORTIFY_SOURCE */
+#ifndef __compiletime_strlen
+# define __compiletime_strlen __builtin_strlen
+#endif
+
+static struct kunit_resource read_resource;
+static struct kunit_resource write_resource;
+static int fortify_read_overflows;
+static int fortify_write_overflows;
+
+static const char array_of_10[] = "this is 10";
+static const char *ptr_of_11 = "this is 11!";
+static const char * const unchanging_12 = "this is 12!!";
+static char array_unknown[] = "compiler thinks I might change";
+
+void fortify_add_kunit_error(int write)
+{
+ struct kunit_resource *resource;
+ struct kunit *current_test;
+
+ current_test = kunit_get_current_test();
+ if (!current_test)
+ return;
+
+ resource = kunit_find_named_resource(current_test,
+ write ? "fortify_write_overflows"
+ : "fortify_read_overflows");
+ if (!resource)
+ return;
+
+ (*(int *)resource->data)++;
+ kunit_put_resource(resource);
+}
+
+static void fortify_test_known_sizes(struct kunit *test)
+{
+ char stack[80] = "Test!";
+
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(stack)));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(stack), 5);
+
+ KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen("88888888")));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
+
+ KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen(array_of_10)));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
+
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(ptr_of_11)));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
+
+ KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen(unchanging_12)));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(unchanging_12), 12);
+
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(array_unknown)));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
+
+ /* Externally defined and dynamically sized string pointer: */
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(test->name)));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
+}
+
+/* This is volatile so the optimizer can't perform DCE below. */
+static volatile int pick;
+
+/* Not inline to keep optimizer from figuring out which string we want. */
+static noinline size_t want_minus_one(int pick)
+{
+ const char *str;
+
+ switch (pick) {
+ case 1:
+ str = "4444";
+ break;
+ case 2:
+ str = "333";
+ break;
+ default:
+ str = "1";
+ break;
+ }
+ return __compiletime_strlen(str);
+}
+
+static void fortify_test_control_flow_split(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
+}
+
+#define KUNIT_EXPECT_BOS(test, p, expected, name) \
+ KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1), \
+ expected, \
+ "__alloc_size() not working with __bos on " name "\n")
+
+#if !__has_builtin(__builtin_dynamic_object_size)
+#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
+ /* Silence "unused variable 'expected'" warning. */ \
+ KUNIT_EXPECT_EQ(test, expected, expected)
+#else
+#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
+ KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1), \
+ expected, \
+ "__alloc_size() not working with __bdos on " name "\n")
+#endif
+
+/* If the expected size is a constant value, __bos can see it. */
+#define check_const(_expected, alloc, free) do { \
+ size_t expected = (_expected); \
+ void *p = alloc; \
+ KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
+ KUNIT_EXPECT_BOS(test, p, expected, #alloc); \
+ KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
+ free; \
+} while (0)
+
+/* If the expected size is NOT a constant value, __bos CANNOT see it. */
+#define check_dynamic(_expected, alloc, free) do { \
+ size_t expected = (_expected); \
+ void *p = alloc; \
+ KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
+ KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc); \
+ KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
+ free; \
+} while (0)
+
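+/*
+ * In both checkers the free expression is evaluated with the
+ * checker-local 'p' in scope, so callers are expected to write e.g.
+ * check_const(16, kmalloc(16, gfp), kfree(p)).
+ */
+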
+/* Assortment of constant-value kinda-edge cases. */
+#define CONST_TEST_BODY(TEST_alloc) do { \
+ /* Special-case vmalloc()-family to skip 0-sized allocs. */ \
+ if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0) \
+ TEST_alloc(check_const, 0, 0); \
+ TEST_alloc(check_const, 1, 1); \
+ TEST_alloc(check_const, 128, 128); \
+ TEST_alloc(check_const, 1023, 1023); \
+ TEST_alloc(check_const, 1025, 1025); \
+ TEST_alloc(check_const, 4096, 4096); \
+ TEST_alloc(check_const, 4097, 4097); \
+} while (0)
+
+static volatile size_t zero_size;
+static volatile size_t unknown_size = 50;
+
+#if !__has_builtin(__builtin_dynamic_object_size)
+#define DYNAMIC_TEST_BODY(TEST_alloc) \
+ kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
+#else
+#define DYNAMIC_TEST_BODY(TEST_alloc) do { \
+ size_t size = unknown_size; \
+ \
+ /* \
+ * Expected size is "size" in each test, before it is then \
+ * internally incremented in each test. Requires we disable \
+ * -Wunsequenced. \
+ */ \
+ TEST_alloc(check_dynamic, size, size++); \
+ /* Make sure incrementing actually happened. */ \
+ KUNIT_EXPECT_NE(test, size, unknown_size); \
+} while (0)
+#endif
+
+#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \
+static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \
+{ \
+ CONST_TEST_BODY(TEST_##allocator); \
+} \
+static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
+{ \
+ DYNAMIC_TEST_BODY(TEST_##allocator); \
+}
+
+#define TEST_kmalloc(checker, expected_size, alloc_size) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ void *orig; \
+ size_t len; \
+ \
+ checker(expected_size, kmalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kzalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kzalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kcalloc(1, alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, kcalloc(alloc_size, 1, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, \
+ kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kmalloc_array(1, alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, kmalloc_array(alloc_size, 1, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc(orig, (alloc_size) * 2, gfp), \
+ kfree(p)); \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc_array(orig, 1, (alloc_size) * 2, gfp), \
+ kfree(p)); \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc_array(orig, (alloc_size) * 2, 1, gfp), \
+ kfree(p)); \
+ \
+ len = 11; \
+ /* Using memdup() with fixed size, so force unknown length. */ \
+ if (!__builtin_constant_p(expected_size)) \
+ len += zero_size; \
+ checker(len, kmemdup("hello there", len, gfp), kfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)
+
+/* Sizes are in pages, not bytes. */
+#define TEST_vmalloc(checker, expected_pages, alloc_pages) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ checker((expected_pages) * PAGE_SIZE, \
+ vmalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ vzalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ __vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
+
+/* Sizes are in pages (and open-coded for side-effects), not bytes. */
+#define TEST_kvmalloc(checker, expected_pages, alloc_pages) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ size_t prev_size; \
+ void *orig; \
+ \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
+ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
+ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
+ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
+ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
+ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
+ kvfree(p)); \
+ \
+ prev_size = (expected_pages) * PAGE_SIZE; \
+ orig = kvmalloc(prev_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker(((expected_pages) * PAGE_SIZE) * 2, \
+ kvrealloc(orig, ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
+ kvfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
+
+#define TEST_devm_kmalloc(checker, expected_size, alloc_size) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ const char dev_name[] = "fortify-test"; \
+ struct device *dev; \
+ void *orig; \
+ size_t len; \
+ \
+ /* Create dummy device for devm_kmalloc()-family tests. */ \
+ dev = kunit_device_register(test, dev_name); \
+ KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \
+ "Cannot register test device\n"); \
+ \
+ checker(expected_size, devm_kmalloc(dev, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, devm_kzalloc(dev, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kmalloc_array(dev, 1, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kmalloc_array(dev, alloc_size, 1, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kcalloc(dev, 1, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kcalloc(dev, alloc_size, 1, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ orig = devm_kmalloc(dev, alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ devm_krealloc(dev, orig, (alloc_size) * 2, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ len = 4; \
+ /* Using memdup() with fixed size, so force unknown length. */ \
+ if (!__builtin_constant_p(expected_size)) \
+ len += zero_size; \
+ checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ kunit_device_unregister(test, dev); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
+
+static const char * const test_strs[] = {
+ "",
+ "Hello there",
+ "A longer string, just for variety",
+};
+
+#define TEST_realloc(checker) do { \
+ gfp_t gfp = GFP_KERNEL; \
+ size_t len; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \
+ len = strlen(test_strs[i]); \
+ KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \
+ checker(len, kmemdup_array(test_strs[i], 1, len, gfp), \
+ kfree(p)); \
+ checker(len, kmemdup(test_strs[i], len, gfp), \
+ kfree(p)); \
+ } \
+} while (0)
+static void fortify_test_realloc_size(struct kunit *test)
+{
+ TEST_realloc(check_dynamic);
+}
+
+/*
+ * We can't have an array at the end of a structure or else
+ * builds without -fstrict-flex-arrays=3 will report them as
+ * being an unknown length. Additionally, add bytes before
+ * and after the string to catch over/underflows if tests
+ * fail.
+ */
+struct fortify_padding {
+ unsigned long bytes_before;
+ char buf[32];
+ unsigned long bytes_after;
+};
+
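+/*
+ * Layout sketch: the two unsigned longs act as canaries on either side
+ * of buf[], so a copy that escapes the buffer shows up as a corrupted
+ * sentinel rather than silent stack damage:
+ *
+ *	| bytes_before |   buf[32]   | bytes_after |
+ *	  ^ underflow canary           ^ overflow canary
+ */
+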
+static void fortify_test_strlen(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ int i, end = sizeof(pad.buf) - 1;
+
+ /* Fill 31 bytes with valid characters. */
+ for (i = 0; i < sizeof(pad.buf) - 1; i++)
+ pad.buf[i] = i + '0';
+ /* Trailing bytes are still %NUL. */
+ KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* String is terminated, so strlen() is valid. */
+ KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+
+ /* Make string unterminated, and recount. */
+ pad.buf[end] = 'A';
+ end = sizeof(pad.buf);
+ KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+}
+
+static void fortify_test_strnlen(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ int i, end = sizeof(pad.buf) - 1;
+
+ /* Fill 31 bytes with valid characters. */
+ for (i = 0; i < sizeof(pad.buf) - 1; i++)
+ pad.buf[i] = i + '0';
+ /* Trailing bytes are still %NUL. */
+ KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* String is terminated, so strnlen() is valid. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* A truncated strnlen() will be safe, too. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2),
+ sizeof(pad.buf) / 2);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+
+ /* Make string unterminated, and recount. */
+ pad.buf[end] = 'A';
+ end = sizeof(pad.buf);
+	/* Reading beyond with strnlen() will fail. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+
+ /* Early-truncated is safe still, though. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+
+ end = sizeof(pad.buf) / 2;
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void fortify_test_strcpy(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf) + 1] = { };
+ int i;
+
+ /* Fill 31 bytes with valid characters. */
+ for (i = 0; i < sizeof(src) - 2; i++)
+ src[i] = i + '0';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+	/* Legitimate strcpy() 1 less than max size. */
+ KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Only last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ src[sizeof(src) - 2] = 'A';
+ /* But now we trip the overflow checking. */
+ KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Trailing %NUL -- thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ src[sizeof(src) - 1] = 'A';
+ /* And for sure now, two bytes past. */
+ KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
+ == pad.buf);
+ /*
+ * Which trips both the strlen() on the unterminated src,
+ * and the resulting copy attempt.
+ */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Trailing %NUL -- thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void fortify_test_strncpy(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[] = "Copy me fully into a small buffer and I will overflow!";
+ size_t sizeof_buf = sizeof(pad.buf);
+
+ OPTIMIZER_HIDE_VAR(sizeof_buf);
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+	/* Legitimate strncpy() 1 less than max size. */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf - 1)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Only last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
+
+ /* Legitimate (though unterminated) max-size strncpy. */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+	/* No trailing %NUL -- thanks to the strncpy() API. */
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
+ /* But we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Now verify that FORTIFY is working... */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf + 1)
+ == pad.buf);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And further... */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf + 2)
+ == pad.buf);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void fortify_test_strscpy(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[] = "Copy me fully into a small buffer and I will overflow!";
+ size_t sizeof_buf = sizeof(pad.buf);
+ size_t sizeof_src = sizeof(src);
+
+ OPTIMIZER_HIDE_VAR(sizeof_buf);
+ OPTIMIZER_HIDE_VAR(sizeof_src);
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+	/* Legitimate strscpy() 1 less than max size. */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf - 1),
+ -E2BIG);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Keeping space for %NUL, last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
+
+ /* Legitimate max-size strscpy. */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf),
+ -E2BIG);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* A trailing %NUL will exist. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
+
+ /* Now verify that FORTIFY is working... */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf + 1),
+ -E2BIG);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And much further... */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_src * 2),
+ -E2BIG);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void fortify_test_strcat(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf) / 2] = { };
+ char one[] = "A";
+ char two[] = "BC";
+ int i;
+
+ /* Fill 15 bytes with valid characters. */
+ for (i = 0; i < sizeof(src) - 1; i++)
+ src[i] = i + 'A';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strcat() using less than half max size. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Legitimate strcat() now 2 bytes shy of end. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Add one more character to the end. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* And this one char will overflow. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And adding two will overflow more. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void fortify_test_strncat(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf)] = { };
+ int i, partial;
+
+	/* Fill 15 bytes with valid characters. */
+ partial = sizeof(src) / 2 - 1;
+ for (i = 0; i < partial; i++)
+ src[i] = i + 'A';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strncat() using less than half max size. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Legitimate strncat() now 2 bytes shy of end. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Add one more character to the end. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* And this one char will overflow. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And adding two will overflow more. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Force an unterminated destination, and overflow. */
+ pad.buf[sizeof(pad.buf) - 1] = 'A';
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
+	/* This will have tripped both strlen() and strncat(). */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* But we should not go beyond the end. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void fortify_test_strlcat(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf)] = { };
+ int i, partial;
+ int len = sizeof(pad.buf);
+
+ OPTIMIZER_HIDE_VAR(len);
+
+ /* Fill 15 bytes with valid characters. */
+ partial = sizeof(src) / 2 - 1;
+ for (i = 0; i < partial; i++)
+ src[i] = i + 'A';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strlcat() using less than half max size. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Legitimate strlcat() now 2 bytes shy of end. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Add one more character to the end. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* And this one char will overflow. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And adding two will overflow more. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Force an unterminated destination, and overflow. */
+ pad.buf[sizeof(pad.buf) - 1] = 'A';
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2);
+ /* This will have tripped both strlen() and strlcat(). */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* But we should not go beyond the end. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Force an unterminated source, and overflow. */
+ memset(src, 'B', sizeof(src));
+ pad.buf[sizeof(pad.buf) - 1] = '\0';
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 3), len - 1 + sizeof(src));
+ /* This will have tripped both strlen() and strlcat(). */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ /* But we should not go beyond the end. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+/* Check for 0-sized arrays... */
+struct fortify_zero_sized {
+ unsigned long bytes_before;
+ char buf[0];
+ unsigned long bytes_after;
+};
+
+#define __fortify_test(memfunc) \
+static void fortify_test_##memfunc(struct kunit *test) \
+{ \
+ struct fortify_zero_sized empty = { }; \
+ struct fortify_padding pad = { }; \
+ char srcA[sizeof(pad.buf) + 2]; \
+ char srcB[sizeof(pad.buf) + 2]; \
+ size_t len = sizeof(pad.buf); \
+ size_t zero = 0; \
+ \
+ OPTIMIZER_HIDE_VAR(len); \
+ OPTIMIZER_HIDE_VAR(zero); \
+ \
+ memset(srcA, 'A', sizeof(srcA)); \
+ KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \
+ memset(srcB, 'B', sizeof(srcB)); \
+ KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \
+ \
+ memfunc(pad.buf, srcA, zero); \
+ KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf + 1, srcB, zero + 1); \
+ KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, zero + 1); \
+ KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, len - 1); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, len); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, len + 1); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
+ memfunc(pad.buf + 1, srcB, len); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); \
+ \
+ /* Reset error counter. */ \
+ fortify_write_overflows = 0; \
+ /* Copy nothing into nothing: no errors. */ \
+ memfunc(empty.buf, srcB, zero); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(empty.buf, srcB, zero + 1); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
+}
+__fortify_test(memcpy)
+__fortify_test(memmove)
+
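+/*
+ * The two instantiations above expand to fortify_test_memcpy() and
+ * fortify_test_memmove(); both are registered in fortify_test_cases[]
+ * at the bottom of this file.
+ */
+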
+static void fortify_test_memscan(struct kunit *test)
+{
+ char haystack[] = "Where oh where is my memory range?";
+ char *mem = haystack + strlen("Where oh where is ");
+ char needle = 'm';
+ size_t len = sizeof(haystack);
+
+ OPTIMIZER_HIDE_VAR(len);
+
+ KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
+ mem);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Catch too-large range. */
+ KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void fortify_test_memchr(struct kunit *test)
+{
+ char haystack[] = "Where oh where is my memory range?";
+ char *mem = haystack + strlen("Where oh where is ");
+ char needle = 'm';
+ size_t len = sizeof(haystack);
+
+ OPTIMIZER_HIDE_VAR(len);
+
+ KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
+ mem);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Catch too-large range. */
+ KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void fortify_test_memchr_inv(struct kunit *test)
+{
+ char haystack[] = "Where oh where is my memory range?";
+ char *mem = haystack + 1;
+ char needle = 'W';
+ size_t len = sizeof(haystack);
+
+ OPTIMIZER_HIDE_VAR(len);
+
+ /* Normal search is okay. */
+ KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
+ mem);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Catch too-large range. */
+ KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void fortify_test_memcmp(struct kunit *test)
+{
+ char one[] = "My mind is going ...";
+ char two[] = "My mind is going ... I can feel it.";
+ size_t one_len = sizeof(one) - 1;
+ size_t two_len = sizeof(two) - 1;
+
+ OPTIMIZER_HIDE_VAR(one_len);
+ OPTIMIZER_HIDE_VAR(two_len);
+
+ /* We match the first string (ignoring the %NUL). */
+ KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Still in bounds, but no longer matching. */
+ KUNIT_ASSERT_LT(test, memcmp(one, two, one_len + 1), 0);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+
+ /* Catch too-large ranges. */
+ KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+
+ KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void fortify_test_kmemdup(struct kunit *test)
+{
+ char src[] = "I got Doom running on it!";
+ char *copy;
+ size_t len = sizeof(src);
+
+ OPTIMIZER_HIDE_VAR(len);
+
+ /* Copy is within bounds. */
+ copy = kmemdup(src, len, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ kfree(copy);
+
+ /* Without %NUL. */
+ copy = kmemdup(src, len - 1, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ kfree(copy);
+
+ /* Tiny bounds. */
+ copy = kmemdup(src, 1, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ kfree(copy);
+
+ /* Out of bounds by 1 byte. */
+ copy = kmemdup(src, len + 1, GFP_KERNEL);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ kfree(copy);
+
+ /* Way out of bounds. */
+ copy = kmemdup(src, len * 2, GFP_KERNEL);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+ kfree(copy);
+
+ /* Starting offset causing out of bounds. */
+ copy = kmemdup(src + 1, len, GFP_KERNEL);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
+ kfree(copy);
+}
+
+static int fortify_test_init(struct kunit *test)
+{
+ if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
+ kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");
+
+ fortify_read_overflows = 0;
+ kunit_add_named_resource(test, NULL, NULL, &read_resource,
+ "fortify_read_overflows",
+ &fortify_read_overflows);
+ fortify_write_overflows = 0;
+ kunit_add_named_resource(test, NULL, NULL, &write_resource,
+ "fortify_write_overflows",
+ &fortify_write_overflows);
+ return 0;
+}
+
+static struct kunit_case fortify_test_cases[] = {
+ KUNIT_CASE(fortify_test_known_sizes),
+ KUNIT_CASE(fortify_test_control_flow_split),
+ KUNIT_CASE(fortify_test_alloc_size_kmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic),
+ KUNIT_CASE(fortify_test_alloc_size_vmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic),
+ KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic),
+ KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic),
+ KUNIT_CASE(fortify_test_realloc_size),
+ KUNIT_CASE(fortify_test_strlen),
+ KUNIT_CASE(fortify_test_strnlen),
+ KUNIT_CASE(fortify_test_strcpy),
+ KUNIT_CASE(fortify_test_strncpy),
+ KUNIT_CASE(fortify_test_strscpy),
+ KUNIT_CASE(fortify_test_strcat),
+ KUNIT_CASE(fortify_test_strncat),
+ KUNIT_CASE(fortify_test_strlcat),
+ /* skip memset: performs bounds checking on whole structs */
+ KUNIT_CASE(fortify_test_memcpy),
+ KUNIT_CASE(fortify_test_memmove),
+ KUNIT_CASE(fortify_test_memscan),
+ KUNIT_CASE(fortify_test_memchr),
+ KUNIT_CASE(fortify_test_memchr_inv),
+ KUNIT_CASE(fortify_test_memcmp),
+ KUNIT_CASE(fortify_test_kmemdup),
+ {}
+};
+
+static struct kunit_suite fortify_test_suite = {
+ .name = "fortify",
+ .init = fortify_test_init,
+ .test_cases = fortify_test_cases,
+};
+
+kunit_test_suite(fortify_test_suite);
+
+MODULE_DESCRIPTION("Runtime test cases for CONFIG_FORTIFY_SOURCE");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/hashtable_test.c b/lib/tests/hashtable_test.c
new file mode 100644
index 000000000000..3521de6bad15
--- /dev/null
+++ b/lib/tests/hashtable_test.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the Kernel Hashtable structures.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/hashtable.h>
+
+struct hashtable_test_entry {
+ int key;
+ int data;
+ struct hlist_node node;
+ int visited;
+};
+
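+/*
+ * The kernel hashtable is intrusive: each entry embeds its own
+ * hlist_node, hash_add() links that node into a bucket, and the
+ * iteration macros recover the entry via container_of().  The visited
+ * counter lets the tests assert exactly-once iteration.
+ */
+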
+static void hashtable_test_hash_init(struct kunit *test)
+{
+	/* Test the different ways of initializing a hashtable. */
+ DEFINE_HASHTABLE(hash1, 2);
+ DECLARE_HASHTABLE(hash2, 3);
+
+	/* DEFINE_HASHTABLE declares and initializes the table in one
+	 * step; when using DECLARE_HASHTABLE, hash_init() must be
+	 * called to initialize the hashtable.
+	 */
+ hash_init(hash2);
+
+ KUNIT_EXPECT_TRUE(test, hash_empty(hash1));
+ KUNIT_EXPECT_TRUE(test, hash_empty(hash2));
+}
+
+static void hashtable_test_hash_empty(struct kunit *test)
+{
+ struct hashtable_test_entry a;
+ DEFINE_HASHTABLE(hash, 1);
+
+ KUNIT_EXPECT_TRUE(test, hash_empty(hash));
+
+ a.key = 1;
+ a.data = 13;
+ hash_add(hash, &a.node, a.key);
+
+ /* Hashtable should no longer be empty. */
+ KUNIT_EXPECT_FALSE(test, hash_empty(hash));
+}
+
+static void hashtable_test_hash_hashed(struct kunit *test)
+{
+ struct hashtable_test_entry a, b;
+ DEFINE_HASHTABLE(hash, 4);
+
+ a.key = 1;
+ a.data = 13;
+ hash_add(hash, &a.node, a.key);
+ b.key = 1;
+ b.data = 2;
+ hash_add(hash, &b.node, b.key);
+
+ KUNIT_EXPECT_TRUE(test, hash_hashed(&a.node));
+ KUNIT_EXPECT_TRUE(test, hash_hashed(&b.node));
+}
+
+static void hashtable_test_hash_add(struct kunit *test)
+{
+ struct hashtable_test_entry a, b, *x;
+ int bkt;
+ DEFINE_HASHTABLE(hash, 3);
+
+ a.key = 1;
+ a.data = 13;
+ a.visited = 0;
+ hash_add(hash, &a.node, a.key);
+ b.key = 2;
+ b.data = 10;
+ b.visited = 0;
+ hash_add(hash, &b.node, b.key);
+
+ hash_for_each(hash, bkt, x, node) {
+ x->visited++;
+ if (x->key == a.key)
+ KUNIT_EXPECT_EQ(test, x->data, 13);
+ else if (x->key == b.key)
+ KUNIT_EXPECT_EQ(test, x->data, 10);
+ else
+ KUNIT_FAIL(test, "Unexpected key in hashtable.");
+ }
+
+ /* Both entries should have been visited exactly once. */
+ KUNIT_EXPECT_EQ(test, a.visited, 1);
+ KUNIT_EXPECT_EQ(test, b.visited, 1);
+}
+
+static void hashtable_test_hash_del(struct kunit *test)
+{
+ struct hashtable_test_entry a, b, *x;
+ DEFINE_HASHTABLE(hash, 6);
+
+ a.key = 1;
+ a.data = 13;
+ hash_add(hash, &a.node, a.key);
+ b.key = 2;
+ b.data = 10;
+ b.visited = 0;
+ hash_add(hash, &b.node, b.key);
+
+ hash_del(&b.node);
+ hash_for_each_possible(hash, x, node, b.key) {
+ x->visited++;
+ KUNIT_EXPECT_NE(test, x->key, b.key);
+ }
+
+ /* The deleted entry should not have been visited. */
+ KUNIT_EXPECT_EQ(test, b.visited, 0);
+
+ hash_del(&a.node);
+
+ /* The hashtable should be empty. */
+ KUNIT_EXPECT_TRUE(test, hash_empty(hash));
+}
+
+static void hashtable_test_hash_for_each(struct kunit *test)
+{
+ struct hashtable_test_entry entries[3];
+ struct hashtable_test_entry *x;
+ int bkt, i, j, count;
+ DEFINE_HASHTABLE(hash, 3);
+
+ /* Add three entries to the hashtable. */
+ for (i = 0; i < 3; i++) {
+ entries[i].key = i;
+ entries[i].data = i + 10;
+ entries[i].visited = 0;
+ hash_add(hash, &entries[i].node, entries[i].key);
+ }
+
+ count = 0;
+ hash_for_each(hash, bkt, x, node) {
+ x->visited += 1;
+ KUNIT_ASSERT_GE_MSG(test, x->key, 0, "Unexpected key in hashtable.");
+ KUNIT_ASSERT_LT_MSG(test, x->key, 3, "Unexpected key in hashtable.");
+ count++;
+ }
+
+ /* Should have visited each entry exactly once. */
+ KUNIT_EXPECT_EQ(test, count, 3);
+ for (j = 0; j < 3; j++)
+ KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+}
+
+static void hashtable_test_hash_for_each_safe(struct kunit *test)
+{
+ struct hashtable_test_entry entries[3];
+ struct hashtable_test_entry *x;
+ struct hlist_node *tmp;
+ int bkt, i, j, count;
+ DEFINE_HASHTABLE(hash, 3);
+
+ /* Add three entries to the hashtable. */
+ for (i = 0; i < 3; i++) {
+ entries[i].key = i;
+ entries[i].data = i + 10;
+ entries[i].visited = 0;
+ hash_add(hash, &entries[i].node, entries[i].key);
+ }
+
+ count = 0;
+ hash_for_each_safe(hash, bkt, tmp, x, node) {
+ x->visited += 1;
+ KUNIT_ASSERT_GE_MSG(test, x->key, 0, "Unexpected key in hashtable.");
+ KUNIT_ASSERT_LT_MSG(test, x->key, 3, "Unexpected key in hashtable.");
+ count++;
+
+ /* Delete entry during loop. */
+ hash_del(&x->node);
+ }
+
+ /* Should have visited each entry exactly once. */
+ KUNIT_EXPECT_EQ(test, count, 3);
+ for (j = 0; j < 3; j++)
+ KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+}
+
+static void hashtable_test_hash_for_each_possible(struct kunit *test)
+{
+ struct hashtable_test_entry entries[4];
+ struct hashtable_test_entry *x, *y;
+ int buckets[2];
+ int bkt, i, j, count;
+ DEFINE_HASHTABLE(hash, 5);
+
+ /* Add three entries with key = 0 to the hashtable. */
+ for (i = 0; i < 3; i++) {
+ entries[i].key = 0;
+ entries[i].data = i;
+ entries[i].visited = 0;
+ hash_add(hash, &entries[i].node, entries[i].key);
+ }
+
+ /* Add an entry with key = 1. */
+ entries[3].key = 1;
+ entries[3].data = 3;
+ entries[3].visited = 0;
+ hash_add(hash, &entries[3].node, entries[3].key);
+
+ count = 0;
+ hash_for_each_possible(hash, x, node, 0) {
+ x->visited += 1;
+ KUNIT_ASSERT_GE_MSG(test, x->data, 0, "Unexpected data in hashtable.");
+ KUNIT_ASSERT_LT_MSG(test, x->data, 4, "Unexpected data in hashtable.");
+ count++;
+ }
+
+ /* Should have visited each entry with key = 0 exactly once. */
+ for (j = 0; j < 3; j++)
+ KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+
+ /* Save the buckets for the different keys. */
+ hash_for_each(hash, bkt, y, node) {
+ KUNIT_ASSERT_GE_MSG(test, y->key, 0, "Unexpected key in hashtable.");
+ KUNIT_ASSERT_LE_MSG(test, y->key, 1, "Unexpected key in hashtable.");
+ buckets[y->key] = bkt;
+ }
+
+	/* If the entry with key = 1 landed in the same bucket as the
+	 * entries with key = 0, check that it was visited. Otherwise
+	 * verify that only three entries were visited.
+	 */
+ if (buckets[0] == buckets[1]) {
+ KUNIT_EXPECT_EQ(test, count, 4);
+ KUNIT_EXPECT_EQ(test, entries[3].visited, 1);
+ } else {
+ KUNIT_EXPECT_EQ(test, count, 3);
+ KUNIT_EXPECT_EQ(test, entries[3].visited, 0);
+ }
+}
+
+static void hashtable_test_hash_for_each_possible_safe(struct kunit *test)
+{
+ struct hashtable_test_entry entries[4];
+ struct hashtable_test_entry *x, *y;
+ struct hlist_node *tmp;
+ int buckets[2];
+ int bkt, i, j, count;
+ DEFINE_HASHTABLE(hash, 5);
+
+ /* Add three entries with key = 0 to the hashtable. */
+ for (i = 0; i < 3; i++) {
+ entries[i].key = 0;
+ entries[i].data = i;
+ entries[i].visited = 0;
+ hash_add(hash, &entries[i].node, entries[i].key);
+ }
+
+ /* Add an entry with key = 1. */
+ entries[3].key = 1;
+ entries[3].data = 3;
+ entries[3].visited = 0;
+ hash_add(hash, &entries[3].node, entries[3].key);
+
+ count = 0;
+ hash_for_each_possible_safe(hash, x, tmp, node, 0) {
+ x->visited += 1;
+ KUNIT_ASSERT_GE_MSG(test, x->data, 0, "Unexpected data in hashtable.");
+ KUNIT_ASSERT_LT_MSG(test, x->data, 4, "Unexpected data in hashtable.");
+ count++;
+
+ /* Delete entry during loop. */
+ hash_del(&x->node);
+ }
+
+ /* Should have visited each entry with key = 0 exactly once. */
+ for (j = 0; j < 3; j++)
+ KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+
+ /* Save the buckets for the different keys. */
+ hash_for_each(hash, bkt, y, node) {
+ KUNIT_ASSERT_GE_MSG(test, y->key, 0, "Unexpected key in hashtable.");
+ KUNIT_ASSERT_LE_MSG(test, y->key, 1, "Unexpected key in hashtable.");
+ buckets[y->key] = bkt;
+ }
+
+	/* If the entry with key = 1 landed in the same bucket as the
+	 * entries with key = 0, check that it was visited. Otherwise
+	 * verify that only three entries were visited.
+	 */
+ if (buckets[0] == buckets[1]) {
+ KUNIT_EXPECT_EQ(test, count, 4);
+ KUNIT_EXPECT_EQ(test, entries[3].visited, 1);
+ } else {
+ KUNIT_EXPECT_EQ(test, count, 3);
+ KUNIT_EXPECT_EQ(test, entries[3].visited, 0);
+ }
+}
+
+static struct kunit_case hashtable_test_cases[] = {
+ KUNIT_CASE(hashtable_test_hash_init),
+ KUNIT_CASE(hashtable_test_hash_empty),
+ KUNIT_CASE(hashtable_test_hash_hashed),
+ KUNIT_CASE(hashtable_test_hash_add),
+ KUNIT_CASE(hashtable_test_hash_del),
+ KUNIT_CASE(hashtable_test_hash_for_each),
+ KUNIT_CASE(hashtable_test_hash_for_each_safe),
+ KUNIT_CASE(hashtable_test_hash_for_each_possible),
+ KUNIT_CASE(hashtable_test_hash_for_each_possible_safe),
+ {},
+};
+
+static struct kunit_suite hashtable_test_module = {
+ .name = "hashtable",
+ .test_cases = hashtable_test_cases,
+};
+
+kunit_test_suites(&hashtable_test_module);
+
+MODULE_DESCRIPTION("KUnit test for the Kernel Hashtable structures");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/is_signed_type_kunit.c b/lib/tests/is_signed_type_kunit.c
new file mode 100644
index 000000000000..88adbe813f3a
--- /dev/null
+++ b/lib/tests/is_signed_type_kunit.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * ./tools/testing/kunit/kunit.py run is_signed_type [--raw_output]
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/compiler.h>
+
+enum unsigned_enum {
+ constant_a = 3,
+};
+
+enum signed_enum {
+ constant_b = -1,
+ constant_c = 2,
+};
+
+static void is_signed_type_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, is_signed_type(bool), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(signed char), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned char), false);
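+	/* Plain char is unsigned here: the kernel builds with -funsigned-char. */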
+ KUNIT_EXPECT_EQ(test, is_signed_type(char), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(int), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned int), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(long), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(long long), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long long), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(enum unsigned_enum), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(enum signed_enum), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(void *), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(const char *), false);
+}
+
+static struct kunit_case is_signed_type_test_cases[] = {
+ KUNIT_CASE(is_signed_type_test),
+ {}
+};
+
+static struct kunit_suite is_signed_type_test_suite = {
+ .name = "is_signed_type",
+ .test_cases = is_signed_type_test_cases,
+};
+
+kunit_test_suite(is_signed_type_test_suite);
+
+MODULE_DESCRIPTION("is_signed_type() KUnit test suite");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/tests/kfifo_kunit.c b/lib/tests/kfifo_kunit.c
new file mode 100644
index 000000000000..a85eedc3195a
--- /dev/null
+++ b/lib/tests/kfifo_kunit.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the generic kernel FIFO implementation.
+ *
+ * Copyright (C) 2024 Diego Vieira <diego.daniel.professional@gmail.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/kfifo.h>
+
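+/* kfifo sizes must be a power of two; 32 satisfies that constraint. */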
+#define KFIFO_SIZE 32
+#define N_ELEMENTS 5
+
+static void kfifo_test_reset_should_clear_the_fifo(struct kunit *test)
+{
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ kfifo_put(&my_fifo, 1);
+ kfifo_put(&my_fifo, 2);
+ kfifo_put(&my_fifo, 3);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+
+ kfifo_reset(&my_fifo);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+ KUNIT_EXPECT_TRUE(test, kfifo_is_empty(&my_fifo));
+}
+
+static void kfifo_test_define_should_define_an_empty_fifo(struct kunit *test)
+{
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ KUNIT_EXPECT_TRUE(test, kfifo_initialized(&my_fifo));
+ KUNIT_EXPECT_TRUE(test, kfifo_is_empty(&my_fifo));
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+}
+
+static void kfifo_test_len_should_ret_n_of_stored_elements(struct kunit *test)
+{
+ u8 buffer1[N_ELEMENTS];
+
+ for (int i = 0; i < N_ELEMENTS; i++)
+ buffer1[i] = i + 1;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+
+ kfifo_in(&my_fifo, buffer1, N_ELEMENTS);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), N_ELEMENTS);
+
+ kfifo_in(&my_fifo, buffer1, N_ELEMENTS);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), N_ELEMENTS * 2);
+
+ kfifo_reset(&my_fifo);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+}
+
+static void kfifo_test_put_should_insert_and_get_should_pop(struct kunit *test)
+{
+ u8 out_data = 0;
+ int processed_elements;
+ u8 elements[] = { 3, 5, 11 };
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ // If the fifo is empty, get returns 0
+ processed_elements = kfifo_get(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 0);
+ KUNIT_EXPECT_EQ(test, out_data, 0);
+
+ for (int i = 0; i < 3; i++)
+ kfifo_put(&my_fifo, elements[i]);
+
+ for (int i = 0; i < 3; i++) {
+ processed_elements = kfifo_get(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, elements[i]);
+ }
+}
+
+static void kfifo_test_in_should_insert_multiple_elements(struct kunit *test)
+{
+ u8 in_buffer[] = { 11, 25, 65 };
+ u8 out_data;
+ int processed_elements;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ kfifo_in(&my_fifo, in_buffer, 3);
+
+ for (int i = 0; i < 3; i++) {
+ processed_elements = kfifo_get(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, in_buffer[i]);
+ }
+}
+
+static void kfifo_test_out_should_pop_multiple_elements(struct kunit *test)
+{
+ u8 in_buffer[] = { 11, 25, 65 };
+ u8 out_buffer[3];
+ int copied_elements;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ for (int i = 0; i < 3; i++)
+ kfifo_put(&my_fifo, in_buffer[i]);
+
+ copied_elements = kfifo_out(&my_fifo, out_buffer, 3);
+ KUNIT_EXPECT_EQ(test, copied_elements, 3);
+
+ for (int i = 0; i < 3; i++)
+ KUNIT_EXPECT_EQ(test, out_buffer[i], in_buffer[i]);
+ KUNIT_EXPECT_TRUE(test, kfifo_is_empty(&my_fifo));
+}
+
+static void kfifo_test_dec_init_should_define_an_empty_fifo(struct kunit *test)
+{
+ DECLARE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ INIT_KFIFO(my_fifo);
+
+	// my_fifo is a struct with an in-place buffer
+ KUNIT_EXPECT_FALSE(test, __is_kfifo_ptr(&my_fifo));
+
+ KUNIT_EXPECT_TRUE(test, kfifo_initialized(&my_fifo));
+}
+
+static void kfifo_test_define_should_equal_declare_init(struct kunit *test)
+{
+	// declare a fifo named my_fifo1 holding u8 elements
+ DECLARE_KFIFO(my_fifo1, u8, KFIFO_SIZE);
+	// initialize the my_fifo1 variable
+ INIT_KFIFO(my_fifo1);
+
+	// DEFINE_KFIFO declares and initializes the fifo in one step,
+	// equivalent to DECLARE_KFIFO followed by INIT_KFIFO
+ DEFINE_KFIFO(my_fifo2, u8, KFIFO_SIZE);
+
+ // my_fifo1 and my_fifo2 have the same size
+ KUNIT_EXPECT_EQ(test, sizeof(my_fifo1), sizeof(my_fifo2));
+ KUNIT_EXPECT_EQ(test, kfifo_initialized(&my_fifo1),
+ kfifo_initialized(&my_fifo2));
+ KUNIT_EXPECT_EQ(test, kfifo_is_empty(&my_fifo1),
+ kfifo_is_empty(&my_fifo2));
+}
+
+static void kfifo_test_alloc_should_initialize_a_ptr_fifo(struct kunit *test)
+{
+ int ret;
+ DECLARE_KFIFO_PTR(my_fifo, u8);
+
+ INIT_KFIFO(my_fifo);
+
+	// kfifo_initialized returns false, signaling that the buffer pointer is NULL
+ KUNIT_EXPECT_FALSE(test, kfifo_initialized(&my_fifo));
+
+ // kfifo_alloc allocates the buffer
+ ret = kfifo_alloc(&my_fifo, KFIFO_SIZE, GFP_KERNEL);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "Memory allocation should succeed");
+ KUNIT_EXPECT_TRUE(test, kfifo_initialized(&my_fifo));
+
+ // kfifo_free frees the buffer
+ kfifo_free(&my_fifo);
+}
+
+static void kfifo_test_peek_should_not_remove_elements(struct kunit *test)
+{
+ u8 out_data;
+ int processed_elements;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ // If the fifo is empty, peek returns 0
+ processed_elements = kfifo_peek(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 0);
+
+ kfifo_put(&my_fifo, 3);
+ kfifo_put(&my_fifo, 5);
+ kfifo_put(&my_fifo, 11);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+
+ processed_elements = kfifo_peek(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, 3);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+
+	// Peeking doesn't remove the element, so the value read and
+	// the fifo length remain the same
+ processed_elements = kfifo_peek(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, 3);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+}
+
+static struct kunit_case kfifo_test_cases[] = {
+ KUNIT_CASE(kfifo_test_reset_should_clear_the_fifo),
+ KUNIT_CASE(kfifo_test_define_should_define_an_empty_fifo),
+ KUNIT_CASE(kfifo_test_len_should_ret_n_of_stored_elements),
+ KUNIT_CASE(kfifo_test_put_should_insert_and_get_should_pop),
+ KUNIT_CASE(kfifo_test_in_should_insert_multiple_elements),
+ KUNIT_CASE(kfifo_test_out_should_pop_multiple_elements),
+ KUNIT_CASE(kfifo_test_dec_init_should_define_an_empty_fifo),
+ KUNIT_CASE(kfifo_test_define_should_equal_declare_init),
+	KUNIT_CASE(kfifo_test_alloc_should_initialize_a_ptr_fifo),
+ KUNIT_CASE(kfifo_test_peek_should_not_remove_elements),
+ {},
+};
+
+static struct kunit_suite kfifo_test_module = {
+ .name = "kfifo",
+ .test_cases = kfifo_test_cases,
+};
+
+kunit_test_suites(&kfifo_test_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Diego Vieira <diego.daniel.professional@gmail.com>");
+MODULE_DESCRIPTION("KUnit test for the kernel FIFO");
diff --git a/lib/tests/kunit_iov_iter.c b/lib/tests/kunit_iov_iter.c
new file mode 100644
index 000000000000..48342736d016
--- /dev/null
+++ b/lib/tests/kunit_iov_iter.c
@@ -0,0 +1,1033 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* I/O iterator tests. This can only test kernel-backed iterator types.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/uio.h>
+#include <linux/bvec.h>
+#include <linux/folio_queue.h>
+#include <kunit/test.h>
+
+MODULE_DESCRIPTION("iov_iter testing");
+MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
+MODULE_LICENSE("GPL");
+
+struct kvec_test_range {
+ int from, to;
+};
+
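+/*
+ * Sparse test ranges within a 1MiB buffer; they include zero-length
+ * spans and ranges that cross page boundaries. The list is terminated
+ * by from == -1.
+ */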
+static const struct kvec_test_range kvec_test_ranges[] = {
+ { 0x00002, 0x00002 },
+ { 0x00027, 0x03000 },
+ { 0x05193, 0x18794 },
+ { 0x20000, 0x20000 },
+ { 0x20000, 0x24000 },
+ { 0x24000, 0x27001 },
+ { 0x29000, 0xffffb },
+ { 0xffffd, 0xffffe },
+ { -1 }
+};
+
+static inline u8 pattern(unsigned long x)
+{
+ return x & 0xff;
+}
+
+static void iov_kunit_unmap(void *data)
+{
+ vunmap(data);
+}
+
+static void *__init iov_kunit_create_buffer(struct kunit *test,
+ struct page ***ppages,
+ size_t npages)
+{
+ struct page **pages;
+ unsigned long got;
+ void *buffer;
+
+ pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
+ *ppages = pages;
+
+ got = alloc_pages_bulk(GFP_KERNEL, npages, pages);
+ if (got != npages) {
+ release_pages(pages, got);
+ KUNIT_ASSERT_EQ(test, got, npages);
+ }
+
+ buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
+
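+	/* VM_MAP_PUT_PAGES handed the pages array over to the mapping;
+	 * register a cleanup action so the mapping is torn down when the
+	 * test finishes.
+	 */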
+ kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
+ return buffer;
+}
+
+static void __init iov_kunit_load_kvec(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct kvec *kvec, unsigned int kvmax,
+ void *buffer, size_t bufsize,
+ const struct kvec_test_range *pr)
+{
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < kvmax; i++, pr++) {
+ if (pr->from < 0)
+ break;
+ KUNIT_ASSERT_GE(test, pr->to, pr->from);
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+ kvec[i].iov_base = buffer + pr->from;
+ kvec[i].iov_len = pr->to - pr->from;
+ size += pr->to - pr->from;
+ }
+ KUNIT_ASSERT_LE(test, size, bufsize);
+
+ iov_iter_kvec(iter, dir, kvec, i, size);
+}
+
+/*
+ * Test copying to an ITER_KVEC-type iterator.
+ */
+static void __init iov_kunit_copy_to_kvec(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **spages, **bpages;
+ struct kvec kvec[8];
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
+ buffer, bufsize, kvec_test_ranges);
+ size = iter.count;
+
+ copied = copy_to_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the scratch buffer. */
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++)
+ for (i = pr->from; i < pr->to; i++)
+ scratch[i] = pattern(patt++);
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+ KUNIT_SUCCEED(test);
+}
+
+/*
+ * Test copying from an ITER_KVEC-type iterator.
+ */
+static void __init iov_kunit_copy_from_kvec(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **spages, **bpages;
+ struct kvec kvec[8];
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
+ buffer, bufsize, kvec_test_ranges);
+ size = min(iter.count, bufsize);
+
+ copied = copy_from_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED(test);
+}
+
+struct bvec_test_range {
+ int page, from, to;
+};
+
+static const struct bvec_test_range bvec_test_ranges[] = {
+ { 0, 0x0002, 0x0002 },
+ { 1, 0x0027, 0x0893 },
+ { 2, 0x0193, 0x0794 },
+ { 3, 0x0000, 0x1000 },
+ { 4, 0x0000, 0x1000 },
+ { 5, 0x0000, 0x1000 },
+ { 6, 0x0000, 0x0ffb },
+ { 6, 0x0ffd, 0x0ffe },
+ { -1, -1, -1 }
+};
+
+static void __init iov_kunit_load_bvec(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct bio_vec *bvec, unsigned int bvmax,
+ struct page **pages, size_t npages,
+ size_t bufsize,
+ const struct bvec_test_range *pr)
+{
+ struct page *can_merge = NULL, *page;
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < bvmax; i++, pr++) {
+ if (pr->from < 0)
+ break;
+ KUNIT_ASSERT_LT(test, pr->page, npages);
+ KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
+ KUNIT_ASSERT_GE(test, pr->from, 0);
+ KUNIT_ASSERT_GE(test, pr->to, pr->from);
+ KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);
+
+ page = pages[pr->page];
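+		/* A non-empty range starting at offset 0 on the page that
+		 * immediately follows the previous range is merged into the
+		 * previous bvec.
+		 */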
+ if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
+ i--;
+ bvec[i].bv_len += pr->to;
+ } else {
+ bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
+ }
+
+ size += pr->to - pr->from;
+ if ((pr->to & ~PAGE_MASK) == 0)
+ can_merge = page + pr->to / PAGE_SIZE;
+ else
+ can_merge = NULL;
+ }
+
+ iov_iter_bvec(iter, dir, bvec, i, size);
+}
+
+/*
+ * Test copying to an ITER_BVEC-type iterator.
+ */
+static void __init iov_kunit_copy_to_bvec(struct kunit *test)
+{
+ const struct bvec_test_range *pr;
+ struct iov_iter iter;
+ struct bio_vec bvec[8];
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, b, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
+ bpages, npages, bufsize, bvec_test_ranges);
+ size = iter.count;
+
+ copied = copy_to_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the scratch buffer. */
+ b = 0;
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
+ u8 *p = scratch + pr->page * PAGE_SIZE;
+
+ for (i = pr->from; i < pr->to; i++)
+ p[i] = pattern(patt++);
+ }
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+ KUNIT_SUCCEED(test);
+}
+
+/*
+ * Test copying from an ITER_BVEC-type iterator.
+ */
+static void __init iov_kunit_copy_from_bvec(struct kunit *test)
+{
+ const struct bvec_test_range *pr;
+ struct iov_iter iter;
+ struct bio_vec bvec[8];
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
+ bpages, npages, bufsize, bvec_test_ranges);
+ size = iter.count;
+
+ copied = copy_from_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
+ size_t patt = pr->page * PAGE_SIZE;
+
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(patt + j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED(test);
+}
+
+static void iov_kunit_destroy_folioq(void *data)
+{
+ struct folio_queue *folioq, *next;
+
+ for (folioq = data; folioq; folioq = next) {
+ next = folioq->next;
+ for (int i = 0; i < folioq_nr_slots(folioq); i++)
+ if (folioq_folio(folioq, i))
+ folio_put(folioq_folio(folioq, i));
+ kfree(folioq);
+ }
+}
+
+static void __init iov_kunit_load_folioq(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct folio_queue *folioq,
+ struct page **pages, size_t npages)
+{
+ struct folio_queue *p = folioq;
+ size_t size = 0;
+ int i;
+
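+	/* Append one page per slot, chaining on a new segment when full. */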
+ for (i = 0; i < npages; i++) {
+ if (folioq_full(p)) {
+ p->next = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p->next);
+ folioq_init(p->next, 0);
+ p->next->prev = p;
+ p = p->next;
+ }
+ folioq_append(p, page_folio(pages[i]));
+ size += PAGE_SIZE;
+ }
+ iov_iter_folio_queue(iter, dir, folioq, 0, 0, size);
+}
+
+static struct folio_queue *iov_kunit_create_folioq(struct kunit *test)
+{
+ struct folio_queue *folioq;
+
+ folioq = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, folioq);
+ kunit_add_action_or_reset(test, iov_kunit_destroy_folioq, folioq);
+ folioq_init(folioq, 0);
+ return folioq;
+}
+
+/*
+ * Test copying to an ITER_FOLIOQ-type iterator.
+ */
+static void __init iov_kunit_copy_to_folioq(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct folio_queue *folioq;
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ folioq = iov_kunit_create_folioq(test);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
+
+ i = 0;
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ size = pr->to - pr->from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
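+		/* Re-prime the iterator over [0, to) and advance it to 'from'
+		 * so the copy lands at the right offset in the queue.
+		 */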
+ iov_iter_folio_queue(&iter, READ, folioq, 0, 0, pr->to);
+ iov_iter_advance(&iter, pr->from);
+ copied = copy_to_iter(scratch + i, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
+ i += size;
+ if (test->status == KUNIT_FAILURE)
+ goto stop;
+ }
+
+ /* Build the expected image in the scratch buffer. */
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++)
+ for (i = pr->from; i < pr->to; i++)
+ scratch[i] = pattern(patt++);
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+stop:
+ KUNIT_SUCCEED(test);
+}
+
+/*
+ * Test copying from an ITER_FOLIOQ-type iterator.
+ */
+static void __init iov_kunit_copy_from_folioq(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct folio_queue *folioq;
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ folioq = iov_kunit_create_folioq(test);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
+
+ i = 0;
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ size = pr->to - pr->from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
+ iov_iter_advance(&iter, pr->from);
+ copied = copy_from_iter(scratch + i, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
+ i += size;
+ }
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED(test);
+}
+
+static void iov_kunit_destroy_xarray(void *data)
+{
+ struct xarray *xarray = data;
+
+ xa_destroy(xarray);
+ kfree(xarray);
+}
+
+static void __init iov_kunit_load_xarray(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct xarray *xarray,
+ struct page **pages, size_t npages)
+{
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < npages; i++) {
+ void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);
+
+ KUNIT_ASSERT_FALSE(test, xa_is_err(x));
+ size += PAGE_SIZE;
+ }
+ iov_iter_xarray(iter, dir, xarray, 0, size);
+}
+
+static struct xarray *iov_kunit_create_xarray(struct kunit *test)
+{
+ struct xarray *xarray;
+
+	xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
+	/* Check the allocation before xa_init() touches it. */
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
+	xa_init(xarray);
+ kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
+ return xarray;
+}
+
+/*
+ * Test copying to an ITER_XARRAY-type iterator.
+ */
+static void __init iov_kunit_copy_to_xarray(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct xarray *xarray;
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ xarray = iov_kunit_create_xarray(test);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
+
+ i = 0;
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ size = pr->to - pr->from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_xarray(&iter, READ, xarray, pr->from, size);
+ copied = copy_to_iter(scratch + i, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
+ i += size;
+ }
+
+ /* Build the expected image in the scratch buffer. */
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++)
+ for (i = pr->from; i < pr->to; i++)
+ scratch[i] = pattern(patt++);
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+ KUNIT_SUCCEED(test);
+}
+
+/*
+ * Test copying from an ITER_XARRAY-type iterator.
+ */
+static void __init iov_kunit_copy_from_xarray(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct xarray *xarray;
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ xarray = iov_kunit_create_xarray(test);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
+
+ i = 0;
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ size = pr->to - pr->from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
+ copied = copy_from_iter(scratch + i, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
+ i += size;
+ }
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED(test);
+}
+
+/*
+ * Test the extraction of ITER_KVEC-type iterators.
+ */
+static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ struct kvec kvec[8];
+ u8 *buffer;
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+
+ iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
+ buffer, bufsize, kvec_test_ranges);
+ size = iter.count;
+
+ pr = kvec_test_ranges;
+ from = pr->from;
+ do {
+ size_t offset0 = LONG_MAX;
+
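+		/* Poison the page list so stale entries are easy to spot. */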
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ size -= len;
+
+ if (len == 0)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ while (from == pr->to) {
+ pr++;
+ from = pr->from;
+ if (from < 0)
+ goto stop;
+ }
+ ix = from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ break;
+ } while (iov_iter_count(&iter) > 0);
+
+stop:
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_SUCCEED(test);
+}
+
+/*
+ * Test the extraction of ITER_BVEC-type iterators.
+ */
+static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
+{
+ const struct bvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ struct bio_vec bvec[8];
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ iov_kunit_create_buffer(test, &bpages, npages);
+ iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
+ bpages, npages, bufsize, bvec_test_ranges);
+ size = iter.count;
+
+ pr = bvec_test_ranges;
+ from = pr->from;
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ size -= len;
+
+ if (len == 0)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ while (from == pr->to) {
+ pr++;
+ from = pr->from;
+ if (from < 0)
+ goto stop;
+ }
+ ix = pr->page + from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ break;
+ } while (iov_iter_count(&iter) > 0);
+
+stop:
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_SUCCEED(test);
+}
+
+/*
+ * Test the extraction of ITER_FOLIOQ-type iterators.
+ */
+static void __init iov_kunit_extract_pages_folioq(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct folio_queue *folioq;
+ struct iov_iter iter;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ folioq = iov_kunit_create_folioq(test);
+
+ iov_kunit_create_buffer(test, &bpages, npages);
+ iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
+
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ from = pr->from;
+ size = pr->to - from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
+ iov_iter_advance(&iter, from);
+
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ if (len == 0)
+ break;
+ size -= len;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ ix = from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ goto stop;
+ } while (iov_iter_count(&iter) > 0);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ }
+
+stop:
+ KUNIT_SUCCEED(test);
+}
+
+/*
+ * Test the extraction of ITER_XARRAY-type iterators.
+ */
+static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct xarray *xarray;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ xarray = iov_kunit_create_xarray(test);
+
+ iov_kunit_create_buffer(test, &bpages, npages);
+ iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
+
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ from = pr->from;
+ size = pr->to - from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_xarray(&iter, WRITE, xarray, from, size);
+
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ if (len == 0)
+ break;
+ size -= len;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ ix = from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ goto stop;
+ } while (iov_iter_count(&iter) > 0);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
+ }
+
+stop:
+ KUNIT_SUCCEED(test);
+}
+
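+/* __refdata: the case array deliberately references __init functions. */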
+static struct kunit_case __refdata iov_kunit_cases[] = {
+ KUNIT_CASE(iov_kunit_copy_to_kvec),
+ KUNIT_CASE(iov_kunit_copy_from_kvec),
+ KUNIT_CASE(iov_kunit_copy_to_bvec),
+ KUNIT_CASE(iov_kunit_copy_from_bvec),
+ KUNIT_CASE(iov_kunit_copy_to_folioq),
+ KUNIT_CASE(iov_kunit_copy_from_folioq),
+ KUNIT_CASE(iov_kunit_copy_to_xarray),
+ KUNIT_CASE(iov_kunit_copy_from_xarray),
+ KUNIT_CASE(iov_kunit_extract_pages_kvec),
+ KUNIT_CASE(iov_kunit_extract_pages_bvec),
+ KUNIT_CASE(iov_kunit_extract_pages_folioq),
+ KUNIT_CASE(iov_kunit_extract_pages_xarray),
+ {}
+};
+
+static struct kunit_suite iov_kunit_suite = {
+ .name = "iov_iter",
+ .test_cases = iov_kunit_cases,
+};
+
+kunit_test_suites(&iov_kunit_suite);
diff --git a/lib/tests/list-test.c b/lib/tests/list-test.c
new file mode 100644
index 000000000000..9135cdc1bb39
--- /dev/null
+++ b/lib/tests/list-test.c
@@ -0,0 +1,1505 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the Kernel Linked-list structures.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/list.h>
+#include <linux/klist.h>
+
+struct list_test_struct {
+ int data;
+ struct list_head list;
+};
+
+static void list_test_list_init(struct kunit *test)
+{
+ /* Test the different ways of initialising a list. */
+ struct list_head list1 = LIST_HEAD_INIT(list1);
+ struct list_head list2;
+ LIST_HEAD(list3);
+ struct list_head *list4;
+ struct list_head *list5;
+
+ INIT_LIST_HEAD(&list2);
+
+ list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
+ INIT_LIST_HEAD(list4);
+
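+	/* Fill list5 with garbage first to check that INIT_LIST_HEAD
+	 * fully reinitialises previously used memory.
+	 */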
+ list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
+ memset(list5, 0xFF, sizeof(*list5));
+ INIT_LIST_HEAD(list5);
+
+ /* list_empty_careful() checks both next and prev. */
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list1));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list3));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(list4));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(list5));
+
+ kfree(list4);
+ kfree(list5);
+}
+
+static void list_test_list_add(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add(&a, &list);
+ list_add(&b, &list);
+
+ /* should be [list] -> b -> a */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &a);
+}
+
+static void list_test_list_add_tail(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* should be [list] -> a -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &a);
+ KUNIT_EXPECT_PTR_EQ(test, a.prev, &list);
+ KUNIT_EXPECT_PTR_EQ(test, a.next, &b);
+}
+
+static void list_test_list_del(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_del(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+}
+
+static void list_test_list_replace(struct kunit *test)
+{
+ struct list_head a_old, a_new, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a_old, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a_old -> b */
+ list_replace(&a_old, &a_new);
+
+ /* now: [list] -> a_new -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.prev, &list);
+}
+
+static void list_test_list_replace_init(struct kunit *test)
+{
+ struct list_head a_old, a_new, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a_old, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a_old -> b */
+ list_replace_init(&a_old, &a_new);
+
+ /* now: [list] -> a_new -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.prev, &list);
+
+ /* check a_old is empty (initialized) */
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&a_old));
+}
+
+static void list_test_list_swap(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_swap(&a, &b);
+
+ /* after: [list] -> b -> a */
+ KUNIT_EXPECT_PTR_EQ(test, &b, list.next);
+ KUNIT_EXPECT_PTR_EQ(test, &a, list.prev);
+
+ KUNIT_EXPECT_PTR_EQ(test, &a, b.next);
+ KUNIT_EXPECT_PTR_EQ(test, &list, b.prev);
+
+ KUNIT_EXPECT_PTR_EQ(test, &list, a.next);
+ KUNIT_EXPECT_PTR_EQ(test, &b, a.prev);
+}
+
+static void list_test_list_del_init(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_del_init(&a);
+ /* after: [list] -> b, a initialised */
+
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&a));
+}
+
+static void list_test_list_del_init_careful(struct kunit *test)
+{
+ /* NOTE: This test only checks the behaviour of this function in
+ * isolation. It does not verify memory model guarantees.
+ */
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_del_init_careful(&a);
+ /* after: [list] -> b, a initialised */
+
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&a));
+}
+
+static void list_test_list_move(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+ list_add_tail(&b, &list2);
+
+ /* before: [list1] -> a, [list2] -> b */
+ list_move(&a, &list2);
+ /* after: [list1] empty, [list2] -> a -> b */
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&list1));
+
+ KUNIT_EXPECT_PTR_EQ(test, &a, list2.next);
+ KUNIT_EXPECT_PTR_EQ(test, &b, a.next);
+}
+
+static void list_test_list_move_tail(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+ list_add_tail(&b, &list2);
+
+ /* before: [list1] -> a, [list2] -> b */
+ list_move_tail(&a, &list2);
+ /* after: [list1] empty, [list2] -> b -> a */
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&list1));
+
+ KUNIT_EXPECT_PTR_EQ(test, &b, list2.next);
+ KUNIT_EXPECT_PTR_EQ(test, &a, b.next);
+}
+
+static void list_test_list_bulk_move_tail(struct kunit *test)
+{
+ struct list_head a, b, c, d, x, y;
+ struct list_head *list1_values[] = { &x, &b, &c, &y };
+ struct list_head *list2_values[] = { &a, &d };
+ struct list_head *ptr;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&x, &list1);
+ list_add_tail(&y, &list1);
+
+ list_add_tail(&a, &list2);
+ list_add_tail(&b, &list2);
+ list_add_tail(&c, &list2);
+ list_add_tail(&d, &list2);
+
+ /* before: [list1] -> x -> y, [list2] -> a -> b -> c -> d */
+ list_bulk_move_tail(&y, &b, &c);
+ /* after: [list1] -> x -> b -> c -> y, [list2] -> a -> d */
+
+ list_for_each(ptr, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, ptr, list1_values[i]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 4);
+ i = 0;
+ list_for_each(ptr, &list2) {
+ KUNIT_EXPECT_PTR_EQ(test, ptr, list2_values[i]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 2);
+}
+
+static void list_test_list_is_head(struct kunit *test)
+{
+ struct list_head a, b, c;
+
+ /* Two lists: [a] -> b, [c] */
+ INIT_LIST_HEAD(&a);
+ INIT_LIST_HEAD(&c);
+ list_add_tail(&b, &a);
+
+ KUNIT_EXPECT_TRUE_MSG(test, list_is_head(&a, &a),
+ "Head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test, list_is_head(&a, &b),
+ "Non-head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test, list_is_head(&a, &c),
+ "Head element of different list");
+}
+
+static void list_test_list_is_first(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ KUNIT_EXPECT_TRUE(test, list_is_first(&a, &list));
+ KUNIT_EXPECT_FALSE(test, list_is_first(&b, &list));
+}
+
+static void list_test_list_is_last(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ KUNIT_EXPECT_FALSE(test, list_is_last(&a, &list));
+ KUNIT_EXPECT_TRUE(test, list_is_last(&b, &list));
+}
+
+static void list_test_list_empty(struct kunit *test)
+{
+ struct list_head a;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, list_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, list_empty(&list2));
+}
+
+static void list_test_list_empty_careful(struct kunit *test)
+{
+ /* This test doesn't check correctness under concurrent access */
+ struct list_head a;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, list_empty_careful(&list1));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+}
+
+static void list_test_list_rotate_left(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_rotate_left(&list);
+ /* after: [list] -> b -> a */
+
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &a);
+}
+
+static void list_test_list_rotate_to_front(struct kunit *test)
+{
+ struct list_head a, b, c, d;
+ struct list_head *list_values[] = { &c, &d, &a, &b };
+ struct list_head *ptr;
+ LIST_HEAD(list);
+ int i = 0;
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+ list_add_tail(&c, &list);
+ list_add_tail(&d, &list);
+
+ /* before: [list] -> a -> b -> c -> d */
+ list_rotate_to_front(&c, &list);
+ /* after: [list] -> c -> d -> a -> b */
+
+ list_for_each(ptr, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, ptr, list_values[i]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 4);
+}
+
+static void list_test_list_is_singular(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ /* [list] empty */
+ KUNIT_EXPECT_FALSE(test, list_is_singular(&list));
+
+ list_add_tail(&a, &list);
+
+ /* [list] -> a */
+ KUNIT_EXPECT_TRUE(test, list_is_singular(&list));
+
+ list_add_tail(&b, &list);
+
+ /* [list] -> a -> b */
+ KUNIT_EXPECT_FALSE(test, list_is_singular(&list));
+}
+
+static void list_test_list_cut_position(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list1);
+
+ /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
+ list_cut_position(&list2, &list1, &entries[1]);
+ /* after: [list2] -> entries[0] -> entries[1], [list1] -> entries[2] */
+
+ list_for_each(cur, &list2) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 2);
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+}
+
+static void list_test_list_cut_before(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list1);
+
+ /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
+ list_cut_before(&list2, &list1, &entries[1]);
+ /* after: [list2] -> entries[0], [list1] -> entries[1] -> entries[2] */
+
+ list_for_each(cur, &list2) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 1);
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+}
+
+static void list_test_list_splice(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice(&list2, &entries[1]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] uninit */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void list_test_list_splice_tail(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice_tail(&list2, &entries[4]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] uninit */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void list_test_list_splice_init(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice_init(&list2, &entries[1]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] empty */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+}
+
+static void list_test_list_splice_tail_init(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice_tail_init(&list2, &entries[4]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] empty */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+}
+
+static void list_test_list_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct, list_entry(&(test_struct.list),
+ struct list_test_struct, list));
+}
+
+static void list_test_list_entry_is_head(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2, test_struct3;
+
+ INIT_LIST_HEAD(&test_struct1.list);
+ INIT_LIST_HEAD(&test_struct3.list);
+
+ list_add_tail(&test_struct2.list, &test_struct1.list);
+
+ KUNIT_EXPECT_TRUE_MSG(test,
+ list_entry_is_head((&test_struct1), &test_struct1.list, list),
+ "Head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test,
+ list_entry_is_head((&test_struct2), &test_struct1.list, list),
+ "Non-head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test,
+ list_entry_is_head((&test_struct3), &test_struct1.list, list),
+ "Head element of different list");
+}
+
+static void list_test_list_first_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct1, list_first_entry(&list,
+ struct list_test_struct, list));
+}
+
+static void list_test_list_last_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct2, list_last_entry(&list,
+ struct list_test_struct, list));
+}
+
+static void list_test_list_first_entry_or_null(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ KUNIT_EXPECT_FALSE(test, list_first_entry_or_null(&list,
+ struct list_test_struct, list));
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct1,
+ list_first_entry_or_null(&list,
+ struct list_test_struct, list));
+}
+
+static void list_test_list_next_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct2, list_next_entry(&test_struct1,
+ list));
+}
+
+static void list_test_list_prev_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct1, list_prev_entry(&test_struct2,
+ list));
+}
+
+static void list_test_list_for_each(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+}
+
+static void list_test_list_for_each_prev(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list);
+ int i = 2;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each_prev(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void list_test_list_for_each_safe(struct kunit *test)
+{
+ struct list_head entries[3], *cur, *n;
+ LIST_HEAD(list);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ list_del(&entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+ KUNIT_EXPECT_TRUE(test, list_empty(&list));
+}
+
+static void list_test_list_for_each_prev_safe(struct kunit *test)
+{
+ struct list_head entries[3], *cur, *n;
+ LIST_HEAD(list);
+ int i = 2;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each_prev_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ list_del(&entries[i]);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+ KUNIT_EXPECT_TRUE(test, list_empty(&list));
+}
+
+static void list_test_list_for_each_entry(struct kunit *test)
+{
+ struct list_test_struct entries[5], *cur;
+ LIST_HEAD(list);
+ int i = 0;
+
+ for (i = 0; i < 5; ++i) {
+ entries[i].data = i;
+ list_add_tail(&entries[i].list, &list);
+ }
+
+ i = 0;
+
+ list_for_each_entry(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void list_test_list_for_each_entry_reverse(struct kunit *test)
+{
+ struct list_test_struct entries[5], *cur;
+ LIST_HEAD(list);
+ int i = 0;
+
+ for (i = 0; i < 5; ++i) {
+ entries[i].data = i;
+ list_add_tail(&entries[i].list, &list);
+ }
+
+ i = 4;
+
+ list_for_each_entry_reverse(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static struct kunit_case list_test_cases[] = {
+ KUNIT_CASE(list_test_list_init),
+ KUNIT_CASE(list_test_list_add),
+ KUNIT_CASE(list_test_list_add_tail),
+ KUNIT_CASE(list_test_list_del),
+ KUNIT_CASE(list_test_list_replace),
+ KUNIT_CASE(list_test_list_replace_init),
+ KUNIT_CASE(list_test_list_swap),
+ KUNIT_CASE(list_test_list_del_init),
+ KUNIT_CASE(list_test_list_del_init_careful),
+ KUNIT_CASE(list_test_list_move),
+ KUNIT_CASE(list_test_list_move_tail),
+ KUNIT_CASE(list_test_list_bulk_move_tail),
+ KUNIT_CASE(list_test_list_is_head),
+ KUNIT_CASE(list_test_list_is_first),
+ KUNIT_CASE(list_test_list_is_last),
+ KUNIT_CASE(list_test_list_empty),
+ KUNIT_CASE(list_test_list_empty_careful),
+ KUNIT_CASE(list_test_list_rotate_left),
+ KUNIT_CASE(list_test_list_rotate_to_front),
+ KUNIT_CASE(list_test_list_is_singular),
+ KUNIT_CASE(list_test_list_cut_position),
+ KUNIT_CASE(list_test_list_cut_before),
+ KUNIT_CASE(list_test_list_splice),
+ KUNIT_CASE(list_test_list_splice_tail),
+ KUNIT_CASE(list_test_list_splice_init),
+ KUNIT_CASE(list_test_list_splice_tail_init),
+ KUNIT_CASE(list_test_list_entry),
+ KUNIT_CASE(list_test_list_entry_is_head),
+ KUNIT_CASE(list_test_list_first_entry),
+ KUNIT_CASE(list_test_list_last_entry),
+ KUNIT_CASE(list_test_list_first_entry_or_null),
+ KUNIT_CASE(list_test_list_next_entry),
+ KUNIT_CASE(list_test_list_prev_entry),
+ KUNIT_CASE(list_test_list_for_each),
+ KUNIT_CASE(list_test_list_for_each_prev),
+ KUNIT_CASE(list_test_list_for_each_safe),
+ KUNIT_CASE(list_test_list_for_each_prev_safe),
+ KUNIT_CASE(list_test_list_for_each_entry),
+ KUNIT_CASE(list_test_list_for_each_entry_reverse),
+ {},
+};
+
+static struct kunit_suite list_test_module = {
+ .name = "list-kunit-test",
+ .test_cases = list_test_cases,
+};
+
+struct hlist_test_struct {
+ int data;
+ struct hlist_node list;
+};
+
+static void hlist_test_init(struct kunit *test)
+{
+ /* Test the different ways of initialising a list. */
+ struct hlist_head list1 = HLIST_HEAD_INIT;
+ struct hlist_head list2;
+ HLIST_HEAD(list3);
+ struct hlist_head *list4;
+ struct hlist_head *list5;
+
+ INIT_HLIST_HEAD(&list2);
+
+ list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
+ INIT_HLIST_HEAD(list4);
+
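+	/* INIT_HLIST_HEAD() must also cope with memory that starts out nonzero. */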
+ list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
+ memset(list5, 0xFF, sizeof(*list5));
+ INIT_HLIST_HEAD(list5);
+
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list2));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list3));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(list4));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(list5));
+
+ kfree(list4);
+ kfree(list5);
+}
+
+static void hlist_test_unhashed(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+
+ /* is unhashed by default */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a));
+
+ hlist_add_head(&a, &list);
+
+ /* is hashed once added to list */
+ KUNIT_EXPECT_FALSE(test, hlist_unhashed(&a));
+
+ hlist_del_init(&a);
+
+ /* is again unhashed after del_init */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a));
+}
+
+/* Doesn't test concurrency guarantees */
+static void hlist_test_unhashed_lockless(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+
+ /* is unhashed by default */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a));
+
+ hlist_add_head(&a, &list);
+
+ /* is hashed once added to list */
+ KUNIT_EXPECT_FALSE(test, hlist_unhashed_lockless(&a));
+
+ hlist_del_init(&a);
+
+ /* is again unhashed after del_init */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a));
+}
+
+static void hlist_test_del(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_behind(&b, &a);
+
+ /* before: [list] -> a -> b */
+ hlist_del(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first);
+}
+
+static void hlist_test_del_init(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_behind(&b, &a);
+
+ /* before: [list] -> a -> b */
+ hlist_del_init(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first);
+
+ /* a is now initialised */
+ KUNIT_EXPECT_PTR_EQ(test, a.next, NULL);
+ KUNIT_EXPECT_PTR_EQ(test, a.pprev, NULL);
+}
+
+/* Tests all three hlist_add_* functions */
+static void hlist_test_add(struct kunit *test)
+{
+ struct hlist_node a, b, c, d;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_head(&b, &list);
+ hlist_add_before(&c, &a);
+ hlist_add_behind(&d, &a);
+
+ /* should be [list] -> b -> c -> a -> d */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+
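+	/*
+	 * Each node's pprev holds the address of the previous node's ->next
+	 * (or of list.first for the first node).
+	 */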
+ KUNIT_EXPECT_PTR_EQ(test, c.pprev, &(b.next));
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &c);
+
+ KUNIT_EXPECT_PTR_EQ(test, a.pprev, &(c.next));
+ KUNIT_EXPECT_PTR_EQ(test, c.next, &a);
+
+ KUNIT_EXPECT_PTR_EQ(test, d.pprev, &(a.next));
+ KUNIT_EXPECT_PTR_EQ(test, a.next, &d);
+}
+
+/* Tests both hlist_fake() and hlist_add_fake() */
+static void hlist_test_fake(struct kunit *test)
+{
+ struct hlist_node a;
+
+ INIT_HLIST_NODE(&a);
+
+ /* not fake after init */
+ KUNIT_EXPECT_FALSE(test, hlist_fake(&a));
+
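+	/* hlist_add_fake() points a.pprev at a's own ->next, so it looks hashed. */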
+ hlist_add_fake(&a);
+
+ /* is now fake */
+ KUNIT_EXPECT_TRUE(test, hlist_fake(&a));
+}
+
+static void hlist_test_is_singular_node(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list));
+
+ hlist_add_head(&a, &list);
+ KUNIT_EXPECT_TRUE(test, hlist_is_singular_node(&a, &list));
+
+ hlist_add_head(&b, &list);
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list));
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&b, &list));
+}
+
+static void hlist_test_empty(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ /* list starts off empty */
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+
+ hlist_add_head(&a, &list);
+
+ /* list is no longer empty */
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list));
+}
+
+static void hlist_test_move_list(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list1);
+ HLIST_HEAD(list2);
+
+ hlist_add_head(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list2));
+ hlist_move_list(&list1, &list2);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list2));
+}
+
+static void hlist_test_entry(struct kunit *test)
+{
+ struct hlist_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct,
+ hlist_entry(&(test_struct.list),
+ struct hlist_test_struct, list));
+}
+
+static void hlist_test_entry_safe(struct kunit *test)
+{
+ struct hlist_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct,
+ hlist_entry_safe(&(test_struct.list),
+ struct hlist_test_struct, list));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL,
+ hlist_entry_safe((struct hlist_node *)NULL,
+ struct hlist_test_struct, list));
+}
+
+static void hlist_test_for_each(struct kunit *test)
+{
+ struct hlist_node entries[3], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ hlist_add_head(&entries[0], &list);
+ hlist_add_behind(&entries[1], &entries[0]);
+ hlist_add_behind(&entries[2], &entries[1]);
+
+ hlist_for_each(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+}
+
+static void hlist_test_for_each_safe(struct kunit *test)
+{
+ struct hlist_node entries[3], *cur, *n;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ hlist_add_head(&entries[0], &list);
+ hlist_add_behind(&entries[1], &entries[0]);
+ hlist_add_behind(&entries[2], &entries[1]);
+
+ hlist_for_each_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ hlist_del(&entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+}
+
+static void hlist_test_for_each_entry(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ hlist_for_each_entry(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void hlist_test_for_each_entry_continue(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ /* We skip the first (zero-th) entry. */
+ i = 1;
+
+ cur = &entries[0];
+ hlist_for_each_entry_continue(cur, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ /* Stamp over the entry. */
+ cur->data = 42;
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ /* The first entry was not visited. */
+ KUNIT_EXPECT_EQ(test, entries[0].data, 0);
+	/* The second (and presumably the others) were. */
+ KUNIT_EXPECT_EQ(test, entries[1].data, 42);
+}
+
+static void hlist_test_for_each_entry_from(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ cur = &entries[0];
+ hlist_for_each_entry_from(cur, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ /* Stamp over the entry. */
+ cur->data = 42;
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ /* The first entry was visited. */
+ KUNIT_EXPECT_EQ(test, entries[0].data, 42);
+}
+
+static void hlist_test_for_each_entry_safe(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ struct hlist_node *tmp_node;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ hlist_for_each_entry_safe(cur, tmp_node, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ hlist_del(&cur->list);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+}
+
+static struct kunit_case hlist_test_cases[] = {
+ KUNIT_CASE(hlist_test_init),
+ KUNIT_CASE(hlist_test_unhashed),
+ KUNIT_CASE(hlist_test_unhashed_lockless),
+ KUNIT_CASE(hlist_test_del),
+ KUNIT_CASE(hlist_test_del_init),
+ KUNIT_CASE(hlist_test_add),
+ KUNIT_CASE(hlist_test_fake),
+ KUNIT_CASE(hlist_test_is_singular_node),
+ KUNIT_CASE(hlist_test_empty),
+ KUNIT_CASE(hlist_test_move_list),
+ KUNIT_CASE(hlist_test_entry),
+ KUNIT_CASE(hlist_test_entry_safe),
+ KUNIT_CASE(hlist_test_for_each),
+ KUNIT_CASE(hlist_test_for_each_safe),
+ KUNIT_CASE(hlist_test_for_each_entry),
+ KUNIT_CASE(hlist_test_for_each_entry_continue),
+ KUNIT_CASE(hlist_test_for_each_entry_from),
+ KUNIT_CASE(hlist_test_for_each_entry_safe),
+ {},
+};
+
+static struct kunit_suite hlist_test_module = {
+ .name = "hlist",
+ .test_cases = hlist_test_cases,
+};
+
+static int node_count;
+static struct klist_node *last_node;
+
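+/*
+ * get()/put() callbacks handed to klist_init(): they record how many
+ * nodes are currently referenced and which node was touched last.
+ */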
+static void check_node(struct klist_node *node_ptr)
+{
+ node_count++;
+ last_node = node_ptr;
+}
+
+static void check_delete_node(struct klist_node *node_ptr)
+{
+ node_count--;
+ last_node = node_ptr;
+}
+
+static void klist_test_add_tail(struct kunit *test)
+{
+ struct klist_node a, b;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_tail(&a, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 1);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &a);
+
+ klist_add_tail(&b, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 2);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &b);
+
+ /* should be [list] -> a -> b */
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+}
+
+static void klist_test_add_head(struct kunit *test)
+{
+ struct klist_node a, b;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_head(&a, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 1);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &a);
+
+ klist_add_head(&b, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 2);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &b);
+
+ /* should be [list] -> b -> a */
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+}
+
+static void klist_test_add_behind(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_head(&a, &mylist);
+ klist_add_head(&b, &mylist);
+
+ klist_add_behind(&c, &a);
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ klist_add_behind(&d, &b);
+ KUNIT_EXPECT_EQ(test, node_count, 4);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
+
+ klist_iter_init(&mylist, &i);
+
+	/* should be [list] -> b -> d -> a -> c */
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+}
+
+static void klist_test_add_before(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_head(&a, &mylist);
+ klist_add_head(&b, &mylist);
+ klist_add_before(&c, &a);
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ klist_add_before(&d, &b);
+ KUNIT_EXPECT_EQ(test, node_count, 4);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
+
+ klist_iter_init(&mylist, &i);
+
+	/* should be [list] -> d -> b -> c -> a */
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+}
+
+/*
+ * Verify that klist_del() delays the deletion of a node until there
+ * are no other references to it.
+ */
+static void klist_test_del_refcount_greater_than_zero(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, &check_delete_node);
+
+	/* Add nodes a, b, c, d to the list */
+ klist_add_tail(&a, &mylist);
+ klist_add_tail(&b, &mylist);
+ klist_add_tail(&c, &mylist);
+ klist_add_tail(&d, &mylist);
+
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+	/* Advance the iterator to point to node c */
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
+
+	/* Try to delete node c while there is a reference to it */
+ klist_del(&c);
+
+	/*
+	 * Verify that node c is still attached to the list even after being
+	 * deleted: the iterator still holds a reference to it, so its
+	 * reference count has not dropped to zero.
+	 */
+ KUNIT_EXPECT_TRUE(test, klist_node_attached(&c));
+
+	/* Check that node c has not been removed yet */
+ KUNIT_EXPECT_EQ(test, node_count, 4);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
+
+ klist_iter_exit(&i);
+
+ /*
+ * Since the iterator is no longer pointing to node c, node c is removed
+ * from the list.
+ */
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+}
+
+/*
+ * Verify that klist_del() deletes a node immediately when there are no
+ * other references to it.
+ */
+static void klist_test_del_refcount_zero(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, &check_delete_node);
+
+	/* Add nodes a, b, c, d to the list */
+ klist_add_tail(&a, &mylist);
+ klist_add_tail(&b, &mylist);
+ klist_add_tail(&c, &mylist);
+ klist_add_tail(&d, &mylist);
+	/* Delete node c */
+ klist_del(&c);
+
+	/* Check that node c is deleted from the list */
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+	/* Should be [list] -> a -> b -> d */
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+}
+
+static void klist_test_remove(struct kunit *test)
+{
+ /* This test doesn't check correctness under concurrent access */
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, &check_delete_node);
+
+	/* Add nodes a, b, c, d to the list */
+ klist_add_tail(&a, &mylist);
+ klist_add_tail(&b, &mylist);
+ klist_add_tail(&c, &mylist);
+ klist_add_tail(&d, &mylist);
+	/* Remove node c; unlike klist_del(), klist_remove() waits for the reference to drop */
+ klist_remove(&c);
+
+	/* Check the nodes in the list */
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+	/* should be [list] -> a -> b -> d */
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+}
+
+static void klist_test_node_attached(struct kunit *test)
+{
+ struct klist_node a = {};
+ struct klist mylist;
+
+ klist_init(&mylist, NULL, NULL);
+
+ KUNIT_EXPECT_FALSE(test, klist_node_attached(&a));
+ klist_add_head(&a, &mylist);
+ KUNIT_EXPECT_TRUE(test, klist_node_attached(&a));
+ klist_del(&a);
+ KUNIT_EXPECT_FALSE(test, klist_node_attached(&a));
+}
+
+static struct kunit_case klist_test_cases[] = {
+ KUNIT_CASE(klist_test_add_tail),
+ KUNIT_CASE(klist_test_add_head),
+ KUNIT_CASE(klist_test_add_behind),
+ KUNIT_CASE(klist_test_add_before),
+ KUNIT_CASE(klist_test_del_refcount_greater_than_zero),
+ KUNIT_CASE(klist_test_del_refcount_zero),
+ KUNIT_CASE(klist_test_remove),
+ KUNIT_CASE(klist_test_node_attached),
+ {},
+};
+
+static struct kunit_suite klist_test_module = {
+ .name = "klist",
+ .test_cases = klist_test_cases,
+};
+
+kunit_test_suites(&list_test_module, &hlist_test_module, &klist_test_module);
+
+MODULE_DESCRIPTION("KUnit test for the Kernel Linked-list structures");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/tests/longest_symbol_kunit.c b/lib/tests/longest_symbol_kunit.c
new file mode 100644
index 000000000000..e3c28ff1807f
--- /dev/null
+++ b/lib/tests/longest_symbol_kunit.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test the longest symbol length. Execute with:
+ * ./tools/testing/kunit/kunit.py run longest-symbol
+ * --arch=x86_64 --kconfig_add CONFIG_KPROBES=y --kconfig_add CONFIG_MODULES=y
+ * --kconfig_add CONFIG_RETPOLINE=n --kconfig_add CONFIG_CFI_CLANG=n
+ * --kconfig_add CONFIG_MITIGATION_RETPOLINE=n
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/stringify.h>
+#include <linux/kprobes.h>
+#include <linux/kallsyms.h>
+
+#define DI(name) s##name##name
+#define DDI(name) DI(n##name##name)
+#define DDDI(name) DDI(n##name##name)
+#define DDDDI(name) DDDI(n##name##name)
+#define DDDDDI(name) DDDDI(n##name##name)
+
+/*
+ * Generate a symbol whose name is 511 characters long: each macro level
+ * pastes its argument twice, roughly doubling the length, so five levels
+ * starting from a 15-character seed yield 2^5 * 16 - 1 = 511 characters.
+ */
+#define LONGEST_SYM_NAME DDDDDI(g1h2i3j4k5l6m7n)
+
+#define RETURN_LONGEST_SYM 0xAAAAA
+
+noinline int LONGEST_SYM_NAME(void);
+noinline int LONGEST_SYM_NAME(void)
+{
+ return RETURN_LONGEST_SYM;
+}
+
+_Static_assert(sizeof(__stringify(LONGEST_SYM_NAME)) == KSYM_NAME_LEN,
+"Incorrect symbol length found. Expected KSYM_NAME_LEN: "
+__stringify(KSYM_NAME_LEN) ", but found: "
+__stringify(sizeof(LONGEST_SYM_NAME)));
+
+static void test_longest_symbol(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, RETURN_LONGEST_SYM, LONGEST_SYM_NAME());
+}
+
+static void test_longest_symbol_kallsyms(struct kunit *test)
+{
+ unsigned long (*kallsyms_lookup_name)(const char *name);
+ static int (*longest_sym)(void);
+
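+	/*
+	 * kallsyms_lookup_name() is not exported, so find its address by
+	 * registering (and immediately unregistering) a kprobe on it.
+	 */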
+ struct kprobe kp = {
+ .symbol_name = "kallsyms_lookup_name",
+ };
+
+ if (register_kprobe(&kp) < 0) {
+		pr_info("%s: kprobe not registered\n", __func__);
+ KUNIT_FAIL(test, "test_longest_symbol kallsyms: kprobe not registered\n");
+ return;
+ }
+
+ kunit_warn(test, "test_longest_symbol kallsyms: kprobe registered\n");
+ kallsyms_lookup_name = (unsigned long (*)(const char *name))kp.addr;
+ unregister_kprobe(&kp);
+
+ longest_sym =
+ (void *) kallsyms_lookup_name(__stringify(LONGEST_SYM_NAME));
+ KUNIT_EXPECT_EQ(test, RETURN_LONGEST_SYM, longest_sym());
+}
+
+static struct kunit_case longest_symbol_test_cases[] = {
+ KUNIT_CASE(test_longest_symbol),
+ KUNIT_CASE(test_longest_symbol_kallsyms),
+ {}
+};
+
+static struct kunit_suite longest_symbol_test_suite = {
+ .name = "longest-symbol",
+ .test_cases = longest_symbol_test_cases,
+};
+kunit_test_suite(longest_symbol_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Test the longest symbol length");
+MODULE_AUTHOR("Sergio González Collado");
diff --git a/lib/tests/memcpy_kunit.c b/lib/tests/memcpy_kunit.c
new file mode 100644
index 000000000000..d36933554e46
--- /dev/null
+++ b/lib/tests/memcpy_kunit.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for memcpy(), memmove(), and memset().
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
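+/*
+ * 32 bytes viewable either as a raw byte array or as named members, so
+ * tests can target a member while the checks compare byte-by-byte.
+ */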
+struct some_bytes {
+ union {
+ u8 data[32];
+ struct {
+ u32 one;
+ u16 two;
+ u8 three;
+ /* 1 byte hole */
+ u32 four[4];
+ };
+ };
+};
+
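+/* Assert that every byte of @instance equals @v. */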
+#define check(instance, v) do { \
+ BUILD_BUG_ON(sizeof(instance.data) != 32); \
+ for (size_t i = 0; i < sizeof(instance.data); i++) { \
+ KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \
+ "line %d: '%s' not initialized to 0x%02x @ %zu (saw 0x%02x)\n", \
+ __LINE__, #instance, v, i, instance.data[i]); \
+ } \
+} while (0)
+
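+/* Assert that @one and @two match byte-for-byte, then log the pass. */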
+#define compare(name, one, two) do { \
+ BUILD_BUG_ON(sizeof(one) != sizeof(two)); \
+ for (size_t i = 0; i < sizeof(one); i++) { \
+ KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \
+ "line %d: %s.data[%zu] (0x%02x) != %s.data[%zu] (0x%02x)\n", \
+ __LINE__, #one, i, one.data[i], #two, i, two.data[i]); \
+ } \
+ kunit_info(test, "ok: " TEST_OP "() " name "\n"); \
+} while (0)
+
+static void memcpy_test(struct kunit *test)
+{
+#define TEST_OP "memcpy"
+ struct some_bytes control = {
+ .data = { 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ },
+ };
+ struct some_bytes zero = { };
+ struct some_bytes middle = {
+ .data = { 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ },
+ };
+ struct some_bytes three = {
+ .data = { 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x00, 0x00, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ },
+ };
+ struct some_bytes dest = { };
+ int count;
+ u8 *ptr;
+
+ /* Verify static initializers. */
+ check(control, 0x20);
+ check(zero, 0);
+ compare("static initializers", dest, zero);
+
+ /* Verify assignment. */
+ dest = control;
+ compare("direct assignment", dest, control);
+
+ /* Verify complete overwrite. */
+ memcpy(dest.data, zero.data, sizeof(dest.data));
+ compare("complete overwrite", dest, zero);
+
+ /* Verify middle overwrite. */
+ dest = control;
+ memcpy(dest.data + 12, zero.data, 7);
+ compare("middle overwrite", dest, middle);
+
+ /* Verify argument side-effects aren't repeated. */
+ dest = control;
+ ptr = dest.data;
+ count = 1;
+ memcpy(ptr++, zero.data, count++);
+ ptr += 8;
+ memcpy(ptr++, zero.data, count++);
+ compare("argument side-effects", dest, three);
+#undef TEST_OP
+}
+
+static unsigned char larger_array[2048];
+
+static void memmove_test(struct kunit *test)
+{
+#define TEST_OP "memmove"
+ struct some_bytes control = {
+ .data = { 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes zero = { };
+ struct some_bytes middle = {
+ .data = { 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes five = {
+ .data = { 0x00, 0x00, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x00, 0x00, 0x00, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes overlap = {
+ .data = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes overlap_expected = {
+ .data = { 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x04, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes dest = { };
+ int count;
+ u8 *ptr;
+
+ /* Verify static initializers. */
+ check(control, 0x99);
+ check(zero, 0);
+ compare("static initializers", zero, dest);
+
+ /* Verify assignment. */
+ dest = control;
+ compare("direct assignment", dest, control);
+
+ /* Verify complete overwrite. */
+ memmove(dest.data, zero.data, sizeof(dest.data));
+ compare("complete overwrite", dest, zero);
+
+ /* Verify middle overwrite. */
+ dest = control;
+ memmove(dest.data + 12, zero.data, 7);
+ compare("middle overwrite", dest, middle);
+
+ /* Verify argument side-effects aren't repeated. */
+ dest = control;
+ ptr = dest.data;
+ count = 2;
+ memmove(ptr++, zero.data, count++);
+ ptr += 9;
+ memmove(ptr++, zero.data, count++);
+ compare("argument side-effects", dest, five);
+
+ /* Verify overlapping overwrite is correct. */
+ ptr = &overlap.data[2];
+ memmove(ptr, overlap.data, 5);
+ compare("overlapping write", overlap, overlap_expected);
+
+ /* Verify larger overlapping moves. */
+ larger_array[256] = 0xAAu;
+ /*
+ * Test a backwards overlapping memmove first. 256 and 1024 are
+ * important for i386 to use rep movsl.
+ */
+ memmove(larger_array, larger_array + 256, 1024);
+ KUNIT_ASSERT_EQ(test, larger_array[0], 0xAAu);
+ KUNIT_ASSERT_EQ(test, larger_array[256], 0x00);
+ KUNIT_ASSERT_NULL(test,
+ memchr(larger_array + 1, 0xaa, ARRAY_SIZE(larger_array) - 1));
+ /* Test a forwards overlapping memmove. */
+ larger_array[0] = 0xBBu;
+ memmove(larger_array + 256, larger_array, 1024);
+ KUNIT_ASSERT_EQ(test, larger_array[0], 0xBBu);
+ KUNIT_ASSERT_EQ(test, larger_array[256], 0xBBu);
+ KUNIT_ASSERT_NULL(test, memchr(larger_array + 1, 0xBBu, 256 - 1));
+ KUNIT_ASSERT_NULL(test,
+ memchr(larger_array + 257, 0xBBu, ARRAY_SIZE(larger_array) - 257));
+#undef TEST_OP
+}
+
+static void memset_test(struct kunit *test)
+{
+#define TEST_OP "memset"
+ struct some_bytes control = {
+ .data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ },
+ };
+ struct some_bytes complete = {
+ .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+ };
+ struct some_bytes middle = {
+ .data = { 0x30, 0x30, 0x30, 0x30, 0x31, 0x31, 0x31, 0x31,
+ 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
+ 0x31, 0x31, 0x31, 0x31, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ },
+ };
+ struct some_bytes three = {
+ .data = { 0x60, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x61, 0x61, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ },
+ };
+ struct some_bytes after = {
+ .data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x72,
+ 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
+ 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
+ 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
+ },
+ };
+ struct some_bytes startat = {
+ .data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
+ 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
+ 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
+ },
+ };
+ struct some_bytes dest = { };
+ int count, value;
+ u8 *ptr;
+
+ /* Verify static initializers. */
+ check(control, 0x30);
+ check(dest, 0);
+
+ /* Verify assignment. */
+ dest = control;
+ compare("direct assignment", dest, control);
+
+ /* Verify complete overwrite. */
+ memset(dest.data, 0xff, sizeof(dest.data));
+ compare("complete overwrite", dest, complete);
+
+ /* Verify middle overwrite. */
+ dest = control;
+ memset(dest.data + 4, 0x31, 16);
+ compare("middle overwrite", dest, middle);
+
+ /* Verify argument side-effects aren't repeated. */
+ dest = control;
+ ptr = dest.data;
+ value = 0x60;
+ count = 1;
+ memset(ptr++, value++, count++);
+ ptr += 8;
+ memset(ptr++, value++, count++);
+ compare("argument side-effects", dest, three);
+
+	/* Verify memset_after(): fill every byte after member 'three'. */
+ dest = control;
+ memset_after(&dest, 0x72, three);
+ compare("memset_after()", dest, after);
+
+	/* Verify memset_startat(): fill from member 'four' to the end. */
+ dest = control;
+ memset_startat(&dest, 0x79, four);
+ compare("memset_startat()", dest, startat);
+#undef TEST_OP
+}
+
+static u8 large_src[1024];
+static u8 large_dst[2048];
+static const u8 large_zero[2048];
+
+static void set_random_nonzero(struct kunit *test, u8 *byte)
+{
+ int failed_rng = 0;
+
+ while (*byte == 0) {
+ get_random_bytes(byte, 1);
+ KUNIT_ASSERT_LT_MSG(test, failed_rng++, 100,
+ "Is the RNG broken?");
+ }
+}
+
+static void init_large(struct kunit *test)
+{
+ /* Get many bit patterns. */
+ get_random_bytes(large_src, ARRAY_SIZE(large_src));
+
+ /* Make sure we have non-zero edges. */
+ set_random_nonzero(test, &large_src[0]);
+ set_random_nonzero(test, &large_src[ARRAY_SIZE(large_src) - 1]);
+
+ /* Explicitly zero the entire destination. */
+ memset(large_dst, 0, ARRAY_SIZE(large_dst));
+}
+
+/*
+ * Instead of an indirect function call for "copy" or a giant macro,
+ * use a bool to pick memcpy or memmove.
+ */
+static void copy_large_test(struct kunit *test, bool use_memmove)
+{
+ init_large(test);
+
+ /* Copy a growing number of non-overlapping bytes ... */
+ for (int bytes = 1; bytes <= ARRAY_SIZE(large_src); bytes++) {
+ /* Over a shifting destination window ... */
+ for (int offset = 0; offset < ARRAY_SIZE(large_src); offset++) {
+ int right_zero_pos = offset + bytes;
+ int right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ /* Copy! */
+ if (use_memmove)
+ memmove(large_dst + offset, large_src, bytes);
+ else
+ memcpy(large_dst + offset, large_src, bytes);
+
+ /* Did we touch anything before the copy area? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(large_dst, large_zero, offset), 0,
+ "with size %d at offset %d", bytes, offset);
+ /* Did we touch anything after the copy area? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0,
+ "with size %d at offset %d", bytes, offset);
+
+ /* Are we byte-for-byte exact across the copy? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(large_dst + offset, large_src, bytes), 0,
+ "with size %d at offset %d", bytes, offset);
+
+ /* Zero out what we copied for the next cycle. */
+ memset(large_dst + offset, 0, bytes);
+ }
+ /* Avoid stall warnings if this loop gets slow. */
+ cond_resched();
+ }
+}
+
+static void memcpy_large_test(struct kunit *test)
+{
+ copy_large_test(test, false);
+}
+
+static void memmove_large_test(struct kunit *test)
+{
+ copy_large_test(test, true);
+}
+
+/*
+ * On the assumption that boundary conditions are going to be the most
+ * sensitive, instead of taking a full step (inc) each iteration,
+ * take single index steps for at least the first "inc"-many indexes
+ * from the "start" and at least the last "inc"-many indexes before
+ * the "end". When in the middle, take full "inc"-wide steps. For
+ * example, calling next_step(idx, 1, 15, 3) with idx starting at 0
+ * would see the following pattern: 1 2 3 4 7 10 11 12 13 14 15.
+ */
+static int next_step(int idx, int start, int end, int inc)
+{
+ start += inc;
+ end -= inc;
+
+ if (idx < start || idx + inc > end)
+ inc = 1;
+ return idx + inc;
+}
+
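+/*
+ * Perform one overlapping memmove() within large_dst: place a copy of
+ * the source at s_off, move it to d_off, then verify the destination,
+ * the untouched remainder of the source, and the zero margins on both
+ * sides.
+ */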
+static void inner_loop(struct kunit *test, int bytes, int d_off, int s_off)
+{
+ int left_zero_pos, left_zero_size;
+ int right_zero_pos, right_zero_size;
+ int src_pos, src_orig_pos, src_size;
+ int pos;
+
+ /* Place the source in the destination buffer. */
+ memcpy(&large_dst[s_off], large_src, bytes);
+
+ /* Copy to destination offset. */
+ memmove(&large_dst[d_off], &large_dst[s_off], bytes);
+
+ /* Make sure destination entirely matches. */
+ KUNIT_ASSERT_EQ_MSG(test, memcmp(&large_dst[d_off], large_src, bytes), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Calculate the expected zero spans. */
+ if (s_off < d_off) {
+ left_zero_pos = 0;
+ left_zero_size = s_off;
+
+ right_zero_pos = d_off + bytes;
+ right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ src_pos = s_off;
+ src_orig_pos = 0;
+ src_size = d_off - s_off;
+ } else {
+ left_zero_pos = 0;
+ left_zero_size = d_off;
+
+ right_zero_pos = s_off + bytes;
+ right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ src_pos = d_off + bytes;
+ src_orig_pos = src_pos - s_off;
+ src_size = right_zero_pos - src_pos;
+ }
+
+	/* Check non-overlapping source is unchanged. */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[src_pos], &large_src[src_orig_pos], src_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Check leading buffer contents are zero. */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[left_zero_pos], large_zero, left_zero_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+ /* Check trailing buffer contents are zero. */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+	/* Zero out everything not already zeroed. */
+ pos = left_zero_pos + left_zero_size;
+ memset(&large_dst[pos], 0, right_zero_pos - pos);
+}
+
+static void memmove_overlap_test(struct kunit *test)
+{
+ /*
+ * Running all possible offset and overlap combinations takes a
+ * very long time. Instead, only check up to 128 bytes offset
+ * into the destination buffer (which should result in crossing
+	 * cachelines), stepping by 1 near the range boundaries and by 7 in
+	 * the middle to skip some redundancy.
+ */
+	static const int offset_max = 128;	/* less than ARRAY_SIZE(large_src) */
+ static const int bytes_step = 7;
+ static const int window_step = 7;
+
+ static const int bytes_start = 1;
+ static const int bytes_end = ARRAY_SIZE(large_src) + 1;
+
+ init_large(test);
+
+ /* Copy a growing number of overlapping bytes ... */
+ for (int bytes = bytes_start; bytes < bytes_end;
+ bytes = next_step(bytes, bytes_start, bytes_end, bytes_step)) {
+
+ /* Over a shifting destination window ... */
+ for (int d_off = 0; d_off < offset_max; d_off++) {
+ int s_start = max(d_off - bytes, 0);
+ int s_end = min_t(int, d_off + bytes, ARRAY_SIZE(large_src));
+
+ /* Over a shifting source window ... */
+ for (int s_off = s_start; s_off < s_end;
+ s_off = next_step(s_off, s_start, s_end, window_step))
+ inner_loop(test, bytes, d_off, s_off);
+
+ /* Avoid stall warnings. */
+ cond_resched();
+ }
+ }
+}
+
+static struct kunit_case memcpy_test_cases[] = {
+ KUNIT_CASE(memset_test),
+ KUNIT_CASE(memcpy_test),
+ KUNIT_CASE_SLOW(memcpy_large_test),
+ KUNIT_CASE_SLOW(memmove_test),
+ KUNIT_CASE_SLOW(memmove_large_test),
+ KUNIT_CASE_SLOW(memmove_overlap_test),
+ {}
+};
+
+static struct kunit_suite memcpy_test_suite = {
+ .name = "memcpy",
+ .test_cases = memcpy_test_cases,
+};
+
+kunit_test_suite(memcpy_test_suite);
+
+MODULE_DESCRIPTION("test cases for memcpy(), memmove(), and memset()");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/module/gen_test_kallsyms.sh b/lib/tests/module/gen_test_kallsyms.sh
index 561dcac0f359..31fe4ed63de8 100755
--- a/lib/tests/module/gen_test_kallsyms.sh
+++ b/lib/tests/module/gen_test_kallsyms.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
TARGET=$(basename $1)
DIR=lib/tests/module
diff --git a/lib/tests/overflow_kunit.c b/lib/tests/overflow_kunit.c
new file mode 100644
index 000000000000..894691b4411a
--- /dev/null
+++ b/lib/tests/overflow_kunit.c
@@ -0,0 +1,1258 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Test cases for arithmetic overflow checks. See:
+ * "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst
+ * ./tools/testing/kunit/kunit.py run overflow [--raw_output]
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#define SKIP(cond, reason) do { \
+ if (cond) { \
+ kunit_skip(test, reason); \
+ return; \
+ } \
+} while (0)
+
+/*
+ * Clang 11 and earlier generate unwanted libcalls for signed output
+ * on unsigned input.
+ */
+#if defined(CONFIG_CC_IS_CLANG) && __clang_major__ <= 11
+# define SKIP_SIGN_MISMATCH(t) SKIP(t, "Clang 11 unwanted libcalls")
+#else
+# define SKIP_SIGN_MISMATCH(t) do { } while (0)
+#endif
+
+/*
+ * Clang 13 and earlier generate unwanted libcalls for 64-bit tests on
+ * 32-bit hosts.
+ */
+#if defined(CONFIG_CC_IS_CLANG) && __clang_major__ <= 13 && \
+ BITS_PER_LONG != 64
+# define SKIP_64_ON_32(t) SKIP(t, "Clang 13 unwanted libcalls")
+#else
+# define SKIP_64_ON_32(t) do { } while (0)
+#endif
+
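+/*
+ * Each test entry holds the operands (a, b), the expected wrapped
+ * results (sum, diff, prod), and whether each operation should report
+ * overflow (s_of, d_of, p_of).
+ */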
+#define DEFINE_TEST_ARRAY_TYPED(t1, t2, t) \
+ static const struct test_ ## t1 ## _ ## t2 ## __ ## t { \
+ t1 a; \
+ t2 b; \
+ t sum, diff, prod; \
+ bool s_of, d_of, p_of; \
+ } t1 ## _ ## t2 ## __ ## t ## _tests[]
+
+#define DEFINE_TEST_ARRAY(t) DEFINE_TEST_ARRAY_TYPED(t, t, t)
+
+DEFINE_TEST_ARRAY(u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U8_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U8_MAX, U8_MAX, 1, 0, false, true, false},
+ {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false},
+ {1, U8_MAX, 0, 2, U8_MAX, true, true, false},
+ {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false},
+ {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true},
+
+ {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true},
+ {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true},
+
+ {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false},
+ {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true},
+ {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false},
+ {1U << 7, 1U << 7, 0, 0, 0, true, false, true},
+
+ {48, 32, 80, 16, 0, false, false, true},
+ {128, 128, 0, 0, 0, true, false, true},
+ {123, 234, 101, 145, 110, true, true, true},
+};
+DEFINE_TEST_ARRAY(u16) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U16_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U16_MAX, U16_MAX, 1, 0, false, true, false},
+ {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false},
+ {1, U16_MAX, 0, 2, U16_MAX, true, true, false},
+ {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false},
+ {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true},
+
+ {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true},
+ {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true},
+
+ {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false},
+ {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true},
+ {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false},
+ {1U << 15, 1U << 15, 0, 0, 0, true, false, true},
+
+ {123, 234, 357, 65425, 28782, false, true, false},
+ {1234, 2345, 3579, 64425, 10146, false, true, true},
+};
+DEFINE_TEST_ARRAY(u32) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U32_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U32_MAX, U32_MAX, 1, 0, false, true, false},
+ {U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false},
+ {1, U32_MAX, 0, 2, U32_MAX, true, true, false},
+ {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false},
+ {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true},
+
+ {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true},
+ {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true},
+
+ {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false},
+ {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true},
+ {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false},
+ {1U << 31, 1U << 31, 0, 0, 0, true, false, true},
+
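+	/* Negative constants wrap to large unsigned values. */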
+ {-2U, 1U, -1U, -3U, -2U, false, false, false},
+ {-4U, 5U, 1U, -9U, -20U, true, false, true},
+};
+
+DEFINE_TEST_ARRAY(u64) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U64_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U64_MAX, U64_MAX, 1, 0, false, true, false},
+ {U64_MAX, 0, U64_MAX, U64_MAX, 0, false, false, false},
+ {1, U64_MAX, 0, 2, U64_MAX, true, true, false},
+ {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false},
+ {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true},
+
+ {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true},
+ {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true},
+
+ {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false},
+ {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true},
+ {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false},
+ {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true},
+ {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */,
+ 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL,
+ false, true, false},
+ {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true},
+};
+
+DEFINE_TEST_ARRAY(s8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false},
+ {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false},
+ {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false},
+ {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false},
+
+ {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true},
+ {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true},
+ {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false},
+ {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false},
+ {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false},
+ {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false},
+
+ {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false},
+ {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false},
+ {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false},
+ {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false},
+
+ {S8_MIN, S8_MIN, 0, 0, 0, true, false, true},
+ {S8_MAX, S8_MAX, -2, 0, 1, true, false, true},
+
+ {-4, -32, -36, 28, -128, false, false, true},
+ {-4, 32, 28, -36, -128, false, false, false},
+};
+
+DEFINE_TEST_ARRAY(s16) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false},
+ {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false},
+ {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false},
+ {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false},
+
+ {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true},
+ {S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true},
+ {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false},
+ {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false},
+ {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false},
+ {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false},
+
+ {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false},
+ {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false},
+ {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false},
+ {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false},
+
+ {S16_MIN, S16_MIN, 0, 0, 0, true, false, true},
+ {S16_MAX, S16_MAX, -2, 0, 1, true, false, true},
+};
+DEFINE_TEST_ARRAY(s32) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false},
+ {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false},
+ {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false},
+ {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false},
+
+ {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true},
+ {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true},
+ {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false},
+ {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false},
+ {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false},
+ {-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false},
+
+ {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false},
+ {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false},
+ {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false},
+ {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false},
+
+ {S32_MIN, S32_MIN, 0, 0, 0, true, false, true},
+ {S32_MAX, S32_MAX, -2, 0, 1, true, false, true},
+};
+
+DEFINE_TEST_ARRAY(s64) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false},
+ {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false},
+ {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false},
+ {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false},
+
+ {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true},
+ {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true},
+ {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false},
+ {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false},
+ {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false},
+ {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false},
+
+ {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false},
+ {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false},
+ {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false},
+ {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false},
+
+ {S64_MIN, S64_MIN, 0, 0, 0, true, false, true},
+ {S64_MAX, S64_MAX, -2, 0, 1, true, false, true},
+
+ {-1, -1, -2, 0, 1, false, false, false},
+ {-1, -128, -129, 127, 128, false, false, false},
+ {-128, -1, -129, -127, 128, false, false, false},
+ {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false},
+};
+
+#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
+ int _a_orig = a, _a_bump = a + 1; \
+ int _b_orig = b, _b_bump = b + 1; \
+ bool _of; \
+ t _r; \
+ \
+ _of = check_ ## op ## _overflow(a, b, &_r); \
+ KUNIT_EXPECT_EQ_MSG(test, _of, of, \
+ "expected check "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
+ a, b, of ? "" : " not", #t); \
+ KUNIT_EXPECT_EQ_MSG(test, _r, r, \
+ "expected check "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
+ a, b, r, _r, #t); \
+ /* Check for internal macro side-effects. */ \
+ _of = check_ ## op ## _overflow(_a_orig++, _b_orig++, &_r); \
+ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, \
+ "Unexpected check " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \
+ "Unexpected check " #op " macro side-effect!\n"); \
+ \
+ _r = wrapping_ ## op(t, a, b); \
+ KUNIT_EXPECT_TRUE_MSG(test, _r == r, \
+ "expected wrap "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
+ a, b, r, _r, #t); \
+ /* Check for internal macro side-effects. */ \
+ _a_orig = a; \
+ _b_orig = b; \
+ _r = wrapping_ ## op(t, _a_orig++, _b_orig++); \
+ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, \
+ "Unexpected wrap " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \
+ "Unexpected wrap " #op " macro side-effect!\n"); \
+} while (0)
+
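+/*
+ * Helpers for spotting double evaluation of macro arguments: get_index()
+ * bumps global_counter on every call, so evaluating the argument twice
+ * shows up as global_counter != 1.
+ */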
+static int global_counter;
+static void bump_counter(void)
+{
+ global_counter++;
+}
+
+static int get_index(void)
+{
+ volatile int index = 0;
+ bump_counter();
+ return index;
+}
+
+#define check_self_op(fmt, op, sym, a, b) do { \
+ typeof(a + 0) _a = a; \
+ typeof(b + 0) _b = b; \
+ typeof(a + 0) _a_sym = a; \
+ typeof(a + 0) _a_orig[1] = { a }; \
+ typeof(b + 0) _b_orig = b; \
+ typeof(b + 0) _b_bump = b + 1; \
+ typeof(a + 0) _r; \
+ \
+ _a_sym sym _b; \
+ _r = wrapping_ ## op(_a, _b); \
+ KUNIT_EXPECT_TRUE_MSG(test, _r == _a_sym, \
+ "expected "fmt" "#op" "fmt" == "fmt", got "fmt"\n", \
+ a, b, _a_sym, _r); \
+ KUNIT_EXPECT_TRUE_MSG(test, _a == _a_sym, \
+ "expected "fmt" "#op" "fmt" == "fmt", got "fmt"\n", \
+ a, b, _a_sym, _a); \
+ /* Check for internal macro side-effects. */ \
+ global_counter = 0; \
+ wrapping_ ## op(_a_orig[get_index()], _b_orig++); \
+ KUNIT_EXPECT_EQ_MSG(test, global_counter, 1, \
+ "Unexpected wrapping_" #op " macro side-effect on arg1!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \
+ "Unexpected wrapping_" #op " macro side-effect on arg2!\n"); \
+} while (0)
+
+#define DEFINE_TEST_FUNC_TYPED(n, t, fmt) \
+static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \
+{ \
+ /* check_{add,sub,mul}_overflow() and wrapping_{add,sub,mul} */ \
+ check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
+ check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
+ check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
+ check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
+ check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
+ /* wrapping_assign_{add,sub}() */ \
+ check_self_op(fmt, assign_add, +=, p->a, p->b); \
+ check_self_op(fmt, assign_add, +=, p->b, p->a); \
+ check_self_op(fmt, assign_sub, -=, p->a, p->b); \
+} \
+ \
+static void n ## _overflow_test(struct kunit *test) { \
+ unsigned i; \
+ \
+ SKIP_64_ON_32(__same_type(t, u64)); \
+ SKIP_64_ON_32(__same_type(t, s64)); \
+ SKIP_SIGN_MISMATCH(__same_type(n ## _tests[0].a, u32) && \
+ __same_type(n ## _tests[0].b, u32) && \
+ __same_type(n ## _tests[0].sum, int)); \
+ \
+ for (i = 0; i < ARRAY_SIZE(n ## _tests); ++i) \
+ do_test_ ## n(test, &n ## _tests[i]); \
+ kunit_info(test, "%zu %s arithmetic tests finished\n", \
+ ARRAY_SIZE(n ## _tests), #n); \
+}
+
+#define DEFINE_TEST_FUNC(t, fmt) \
+ DEFINE_TEST_FUNC_TYPED(t ## _ ## t ## __ ## t, t, fmt)
+
+DEFINE_TEST_FUNC(u8, "%d");
+DEFINE_TEST_FUNC(s8, "%d");
+DEFINE_TEST_FUNC(u16, "%d");
+DEFINE_TEST_FUNC(s16, "%d");
+DEFINE_TEST_FUNC(u32, "%u");
+DEFINE_TEST_FUNC(s32, "%d");
+DEFINE_TEST_FUNC(u64, "%llu");
+DEFINE_TEST_FUNC(s64, "%lld");
+
+DEFINE_TEST_ARRAY_TYPED(u32, u32, u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U8_MAX, 2, 1, U8_MAX - 2, U8_MAX - 1, true, false, true},
+ {U8_MAX + 1, 0, 0, 0, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(u32_u32__u8, u8, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(u32, u32, int) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U32_MAX, 0, -1, -1, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(u32_u32__int, int, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(u8, u8, int) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U8_MAX, U8_MAX, 2 * U8_MAX, 0, U8_MAX * U8_MAX, false, false, false},
+ {1, 2, 3, -1, 2, false, false, false},
+};
+DEFINE_TEST_FUNC_TYPED(u8_u8__int, int, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(int, int, u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 2, 3, U8_MAX, 2, false, true, false},
+ {-1, 0, U8_MAX, U8_MAX, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(int_int__u8, u8, "%d");
+
+/* Args are: value, shift, type, expected result, overflow expected */
+#define TEST_ONE_SHIFT(a, s, t, expect, of) do { \
+ typeof(a) __a = (a); \
+ typeof(s) __s = (s); \
+ t __e = (expect); \
+ t __d; \
+ bool __of = check_shl_overflow(__a, __s, &__d); \
+ if (__of != of) { \
+ KUNIT_EXPECT_EQ_MSG(test, __of, of, \
+ "expected (%s)(%s << %s) to%s overflow\n", \
+ #t, #a, #s, of ? "" : " not"); \
+ } else if (!__of && __d != __e) { \
+ KUNIT_EXPECT_EQ_MSG(test, __d, __e, \
+ "expected (%s)(%s << %s) == %s\n", \
+ #t, #a, #s, #expect); \
+ if ((t)-1 < 0) \
+ kunit_info(test, "got %lld\n", (s64)__d); \
+ else \
+ kunit_info(test, "got %llu\n", (u64)__d); \
+ } \
+ count++; \
+} while (0)
+
+static void shift_sane_test(struct kunit *test)
+{
+ int count = 0;
+
+ /* Sane shifts. */
+ TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
+ TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false);
+ TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false);
+ TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false);
+ TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false);
+ TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false);
+ TEST_ONE_SHIFT(1, 0, int, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 16, int, 1 << 16, false);
+ TEST_ONE_SHIFT(1, 30, int, 1 << 30, false);
+ TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false);
+ TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false);
+ TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false);
+ TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false);
+ TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false);
+ TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false);
+ TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false);
+ TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false);
+ TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false);
+ TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false);
+ TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false);
+ TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false);
+ TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false);
+ TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64, 0xFFFFFFFFULL << 32, false);
+
+ /* Sane shift: start and end with 0, without a too-wide shift. */
+ TEST_ONE_SHIFT(0, 7, u8, 0, false);
+ TEST_ONE_SHIFT(0, 15, u16, 0, false);
+ TEST_ONE_SHIFT(0, 31, unsigned int, 0, false);
+ TEST_ONE_SHIFT(0, 31, u32, 0, false);
+ TEST_ONE_SHIFT(0, 63, u64, 0, false);
+
+ /* Sane shift: start and end with 0, without reaching signed bit. */
+ TEST_ONE_SHIFT(0, 6, s8, 0, false);
+ TEST_ONE_SHIFT(0, 14, s16, 0, false);
+ TEST_ONE_SHIFT(0, 30, int, 0, false);
+ TEST_ONE_SHIFT(0, 30, s32, 0, false);
+ TEST_ONE_SHIFT(0, 62, s64, 0, false);
+
+ kunit_info(test, "%d sane shift tests finished\n", count);
+}
+
+static void shift_overflow_test(struct kunit *test)
+{
+ int count = 0;
+
+ /* Overflow: shifted the bit off the end. */
+ TEST_ONE_SHIFT(1, 8, u8, 0, true);
+ TEST_ONE_SHIFT(1, 16, u16, 0, true);
+ TEST_ONE_SHIFT(1, 32, unsigned int, 0, true);
+ TEST_ONE_SHIFT(1, 32, u32, 0, true);
+ TEST_ONE_SHIFT(1, 64, u64, 0, true);
+
+ /* Overflow: shifted into the signed bit. */
+ TEST_ONE_SHIFT(1, 7, s8, 0, true);
+ TEST_ONE_SHIFT(1, 15, s16, 0, true);
+ TEST_ONE_SHIFT(1, 31, int, 0, true);
+ TEST_ONE_SHIFT(1, 31, s32, 0, true);
+ TEST_ONE_SHIFT(1, 63, s64, 0, true);
+
+ /* Overflow: high bit falls off unsigned types. */
+ /* 10010110 */
+ TEST_ONE_SHIFT(150, 1, u8, 0, true);
+ /* 1000100010010110 */
+ TEST_ONE_SHIFT(34966, 1, u16, 0, true);
+ /* 10000100000010001000100010010110 */
+ TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true);
+ TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true);
+ /* 1000001000010000010000000100000010000100000010001000100010010110 */
+ TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true);
+
+ /* Overflow: bit shifted into signed bit on signed types. */
+ /* 01001011 */
+ TEST_ONE_SHIFT(75, 1, s8, 0, true);
+ /* 0100010001001011 */
+ TEST_ONE_SHIFT(17483, 1, s16, 0, true);
+ /* 01000010000001000100010001001011 */
+ TEST_ONE_SHIFT(1107575883, 1, s32, 0, true);
+ TEST_ONE_SHIFT(1107575883, 1, int, 0, true);
+ /* 0100000100001000001000000010000001000010000001000100010001001011 */
+ TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true);
+
+ /* Overflow: bit shifted past signed bit on signed types. */
+ /* 01001011 */
+ TEST_ONE_SHIFT(75, 2, s8, 0, true);
+ /* 0100010001001011 */
+ TEST_ONE_SHIFT(17483, 2, s16, 0, true);
+ /* 01000010000001000100010001001011 */
+ TEST_ONE_SHIFT(1107575883, 2, s32, 0, true);
+ TEST_ONE_SHIFT(1107575883, 2, int, 0, true);
+ /* 0100000100001000001000000010000001000010000001000100010001001011 */
+ TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
+
+ kunit_info(test, "%d overflow shift tests finished\n", count);
+}
+
+static void shift_truncate_test(struct kunit *test)
+{
+ int count = 0;
+
+ /* Overflow: values larger than destination type. */
+ TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
+ TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
+ TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true);
+ TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true);
+ TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true);
+ TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true);
+ TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true);
+ TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
+ TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
+
+ /* Overflow: shifted at or beyond entire type's bit width. */
+ TEST_ONE_SHIFT(0, 8, u8, 0, true);
+ TEST_ONE_SHIFT(0, 9, u8, 0, true);
+ TEST_ONE_SHIFT(0, 8, s8, 0, true);
+ TEST_ONE_SHIFT(0, 9, s8, 0, true);
+ TEST_ONE_SHIFT(0, 16, u16, 0, true);
+ TEST_ONE_SHIFT(0, 17, u16, 0, true);
+ TEST_ONE_SHIFT(0, 16, s16, 0, true);
+ TEST_ONE_SHIFT(0, 17, s16, 0, true);
+ TEST_ONE_SHIFT(0, 32, u32, 0, true);
+ TEST_ONE_SHIFT(0, 33, u32, 0, true);
+ TEST_ONE_SHIFT(0, 32, int, 0, true);
+ TEST_ONE_SHIFT(0, 33, int, 0, true);
+ TEST_ONE_SHIFT(0, 32, s32, 0, true);
+ TEST_ONE_SHIFT(0, 33, s32, 0, true);
+ TEST_ONE_SHIFT(0, 64, u64, 0, true);
+ TEST_ONE_SHIFT(0, 65, u64, 0, true);
+ TEST_ONE_SHIFT(0, 64, s64, 0, true);
+ TEST_ONE_SHIFT(0, 65, s64, 0, true);
+
+ kunit_info(test, "%d truncate shift tests finished\n", count);
+}
+
+static void shift_nonsense_test(struct kunit *test)
+{
+ int count = 0;
+
+ /* Nonsense: negative initial value. */
+ TEST_ONE_SHIFT(-1, 0, s8, 0, true);
+ TEST_ONE_SHIFT(-1, 0, u8, 0, true);
+ TEST_ONE_SHIFT(-5, 0, s16, 0, true);
+ TEST_ONE_SHIFT(-5, 0, u16, 0, true);
+ TEST_ONE_SHIFT(-10, 0, int, 0, true);
+ TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true);
+ TEST_ONE_SHIFT(-100, 0, s32, 0, true);
+ TEST_ONE_SHIFT(-100, 0, u32, 0, true);
+ TEST_ONE_SHIFT(-10000, 0, s64, 0, true);
+ TEST_ONE_SHIFT(-10000, 0, u64, 0, true);
+
+ /* Nonsense: negative shift values. */
+ TEST_ONE_SHIFT(0, -5, s8, 0, true);
+ TEST_ONE_SHIFT(0, -5, u8, 0, true);
+ TEST_ONE_SHIFT(0, -10, s16, 0, true);
+ TEST_ONE_SHIFT(0, -10, u16, 0, true);
+ TEST_ONE_SHIFT(0, -15, int, 0, true);
+ TEST_ONE_SHIFT(0, -15, unsigned int, 0, true);
+ TEST_ONE_SHIFT(0, -20, s32, 0, true);
+ TEST_ONE_SHIFT(0, -20, u32, 0, true);
+ TEST_ONE_SHIFT(0, -30, s64, 0, true);
+ TEST_ONE_SHIFT(0, -30, u64, 0, true);
+
+ /*
+ * Corner case: for unsigned types, we fail when we've shifted
+ * through the entire width of bits. For signed types, we might
+ * want to match this behavior, but that would mean noticing if
+ * we shift through all but the signed bit, and this is not
+ * currently detected (but we'll notice an overflow into the
+ * signed bit). So, for now, we will test this condition but
+ * mark it as not expected to overflow.
+ */
+ TEST_ONE_SHIFT(0, 7, s8, 0, false);
+ TEST_ONE_SHIFT(0, 15, s16, 0, false);
+ TEST_ONE_SHIFT(0, 31, int, 0, false);
+ TEST_ONE_SHIFT(0, 31, s32, 0, false);
+ TEST_ONE_SHIFT(0, 63, s64, 0, false);
+
+ kunit_info(test, "%d nonsense shift tests finished\n", count);
+}
+#undef TEST_ONE_SHIFT
+
+/*
+ * Deal with the various forms of allocator arguments. See comments above
+ * the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
+ */
+#define alloc_GFP (GFP_KERNEL | __GFP_NOWARN)
+#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP)
+#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE)
+#define alloc000(alloc, arg, sz) alloc(sz)
+#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE)
+#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP)
+#define free0(free, arg, ptr) free(ptr)
+#define free1(free, arg, ptr) free(arg, ptr)
+
+/* Wrap around to 16K */
+#define TEST_SIZE (5 * 4096)
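+/*
+ * Back-of-the-envelope check of the wrap: with a = TEST_SIZE = 20480
+ * and b = SIZE_MAX / TEST_SIZE + 1, the unchecked product a * b equals
+ * TEST_SIZE - (SIZE_MAX % TEST_SIZE) - 1 modulo SIZE_MAX + 1, i.e.
+ * 20480 - 4095 - 1 = 16384 (16K) on both 32-bit and 64-bit.
+ */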
+
+#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
+static void test_ ## func (struct kunit *test, void *arg) \
+{ \
+ volatile size_t a = TEST_SIZE; \
+ volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \
+ void *ptr; \
+ \
+ /* Tiny allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\
+ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \
+ #func " failed regular allocation?!\n"); \
+ free ## want_arg (free_func, arg, ptr); \
+ \
+ /* Wrapped allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
+ a * b); \
+ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \
+ #func " unexpectedly failed bad wrapping?!\n"); \
+ free ## want_arg (free_func, arg, ptr); \
+ \
+ /* Saturated allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
+ array_size(a, b)); \
+ if (ptr) { \
+ KUNIT_FAIL(test, #func " missed saturation!\n"); \
+ free ## want_arg (free_func, arg, ptr); \
+ } \
+}
+
+/*
+ * Allocator uses a trailing node argument --------+ (e.g. kmalloc_node())
+ * Allocator uses the gfp_t argument -----------+ | (e.g. kmalloc())
+ * Allocator uses a special leading argument + | | (e.g. devm_kmalloc())
+ * | | |
+ */
+DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(__vmalloc, vfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0);
+DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0);
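+
+/*
+ * Illustration of the encoding: DEFINE_TEST_ALLOC(kmalloc_node, kfree,
+ * 0, 1, 1) selects alloc011() above, so each allocation becomes
+ * kmalloc_node(size, GFP_KERNEL | __GFP_NOWARN, NUMA_NO_NODE), and
+ * free0() releases it with a plain kfree(ptr).
+ */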
+
+static void overflow_allocation_test(struct kunit *test)
+{
+ struct device *dev;
+ int count = 0;
+
+#define check_allocation_overflow(alloc) do { \
+ count++; \
+ test_ ## alloc(test, dev); \
+} while (0)
+
+ /* Create dummy device for devm_kmalloc()-family tests. */
+ dev = kunit_device_register(test, "overflow-test");
+ KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev),
+ "Cannot register test device\n");
+
+ check_allocation_overflow(kmalloc);
+ check_allocation_overflow(kmalloc_node);
+ check_allocation_overflow(kzalloc);
+ check_allocation_overflow(kzalloc_node);
+ check_allocation_overflow(__vmalloc);
+ check_allocation_overflow(kvmalloc);
+ check_allocation_overflow(kvmalloc_node);
+ check_allocation_overflow(kvzalloc);
+ check_allocation_overflow(kvzalloc_node);
+ check_allocation_overflow(devm_kmalloc);
+ check_allocation_overflow(devm_kzalloc);
+
+ kunit_info(test, "%d allocation overflow tests finished\n", count);
+#undef check_allocation_overflow
+}
+
+struct __test_flex_array {
+ unsigned long flags;
+ size_t count;
+ unsigned long data[];
+};
+
+static void overflow_size_helpers_test(struct kunit *test)
+{
+ /* Make sure struct_size() can be used in a constant expression. */
+ u8 ce_array[struct_size_t(struct __test_flex_array, data, 55)];
+ struct __test_flex_array *obj;
+ int count = 0;
+ int var;
+ volatile int unconst = 0;
+
+ /* Verify constant expression against runtime version. */
+ var = 55;
+ OPTIMIZER_HIDE_VAR(var);
+ KUNIT_EXPECT_EQ(test, sizeof(ce_array), struct_size(obj, data, var));
+
+#define check_one_size_helper(expected, func, args...) do { \
+ size_t _r = func(args); \
+ KUNIT_EXPECT_EQ_MSG(test, _r, expected, \
+ "expected " #func "(" #args ") to return %zu but got %zu instead\n", \
+ (size_t)(expected), _r); \
+ count++; \
+} while (0)
+
+ var = 4;
+ check_one_size_helper(20, size_mul, var++, 5);
+ check_one_size_helper(20, size_mul, 4, var++);
+ check_one_size_helper(0, size_mul, 0, 3);
+ check_one_size_helper(0, size_mul, 3, 0);
+ check_one_size_helper(6, size_mul, 2, 3);
+ check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1);
+ check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3);
+ check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3);
+
+ var = 4;
+ check_one_size_helper(9, size_add, var++, 5);
+ check_one_size_helper(9, size_add, 4, var++);
+ check_one_size_helper(9, size_add, 9, 0);
+ check_one_size_helper(9, size_add, 0, 9);
+ check_one_size_helper(5, size_add, 2, 3);
+ check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1);
+ check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3);
+ check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3);
+
+ var = 4;
+ check_one_size_helper(1, size_sub, var--, 3);
+ check_one_size_helper(1, size_sub, 4, var--);
+ check_one_size_helper(1, size_sub, 3, 2);
+ check_one_size_helper(9, size_sub, 9, 0);
+ check_one_size_helper(SIZE_MAX, size_sub, 9, -3);
+ check_one_size_helper(SIZE_MAX, size_sub, 0, 9);
+ check_one_size_helper(SIZE_MAX, size_sub, 2, 3);
+ check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0);
+ check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10);
+ check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX);
+ check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX);
+ check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1, 1);
+ check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3);
+ check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3);
+
+ var = 4;
+ check_one_size_helper(4 * sizeof(*obj->data),
+ flex_array_size, obj, data, var++);
+ check_one_size_helper(5 * sizeof(*obj->data),
+ flex_array_size, obj, data, var++);
+ check_one_size_helper(0, flex_array_size, obj, data, 0 + unconst);
+ check_one_size_helper(sizeof(*obj->data),
+ flex_array_size, obj, data, 1 + unconst);
+ check_one_size_helper(7 * sizeof(*obj->data),
+ flex_array_size, obj, data, 7 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ flex_array_size, obj, data, -1 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ flex_array_size, obj, data, SIZE_MAX - 4 + unconst);
+
+ var = 4;
+ check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)),
+ struct_size, obj, data, var++);
+ check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)),
+ struct_size, obj, data, var++);
+ check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0 + unconst);
+ check_one_size_helper(sizeof(*obj) + sizeof(*obj->data),
+ struct_size, obj, data, 1 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ struct_size, obj, data, -3 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ struct_size, obj, data, SIZE_MAX - 3 + unconst);
+
+ kunit_info(test, "%d overflow size helper tests finished\n", count);
+#undef check_one_size_helper
+}
+
+static void overflows_type_test(struct kunit *test)
+{
+ int count = 0;
+ unsigned int var;
+
+#define __TEST_OVERFLOWS_TYPE(func, arg1, arg2, of) do { \
+ bool __of = func(arg1, arg2); \
+ KUNIT_EXPECT_EQ_MSG(test, __of, of, \
+ "expected " #func "(" #arg1 ", " #arg2 " to%s overflow\n",\
+ of ? "" : " not"); \
+ count++; \
+} while (0)
+
+/* Args are: first type, second type, value, overflow expected */
+#define TEST_OVERFLOWS_TYPE(__t1, __t2, v, of) do { \
+ __t1 t1 = (v); \
+ __t2 t2; \
+ __TEST_OVERFLOWS_TYPE(__overflows_type, t1, t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type, t1, __t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, __t2, of);\
+} while (0)
+
+ TEST_OVERFLOWS_TYPE(u8, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, u16, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, s8, U8_MAX, true);
+ TEST_OVERFLOWS_TYPE(u8, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, s8, (u8)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u8, s16, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u8, S8_MIN, true);
+ TEST_OVERFLOWS_TYPE(s8, u16, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u16, S8_MIN, true);
+ TEST_OVERFLOWS_TYPE(s8, u32, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u32, S8_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s8, u64, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u64, S8_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s8, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s8, s16, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, s16, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(u16, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, u8, (u16)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, u8, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s8, (u16)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, s8, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s16, (u16)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, s16, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, u32, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s32, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u8, (s16)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s16, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u8, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, u16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u16, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, u32, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u32, S16_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s16, u64, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u64, S16_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s16, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, S16_MAX, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s16, s32, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s32, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(u32, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, u8, (u32)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, u8, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s8, (u32)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, s8, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, u16, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s16, (u32)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, s16, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s32, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s32, (u32)S32_MAX + 1, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(u32, u64, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s64, U32_MAX, false);
+#endif
+ TEST_OVERFLOWS_TYPE(s32, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u8, (s32)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, u8, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u8, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u16, (s32)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, u32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u32, S32_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s32, u64, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u64, S32_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s32, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s32, S32_MIN, false);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s32, s64, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s64, S32_MIN, false);
+ TEST_OVERFLOWS_TYPE(u64, u8, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u8, (u64)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u16, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u16, (u64)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u32, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u32, (u64)U32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u64, U64_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s8, (u64)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s8, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s16, (u64)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s16, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s32, (u64)S32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s32, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s64, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s64, (u64)S64_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u8, (s64)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u16, (s64)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u32, (s64)U32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u64, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s32, S32_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s64, S64_MIN, false);
+#endif
+
+ /* Check for macro side-effects. */
+ var = INT_MAX - 1;
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, true);
+ var = INT_MAX - 1;
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, true);
+
+ kunit_info(test, "%d overflows_type() tests finished\n", count);
+#undef TEST_OVERFLOWS_TYPE
+#undef __TEST_OVERFLOWS_TYPE
+}
+
+static void same_type_test(struct kunit *test)
+{
+ int count = 0;
+ int var;
+
+#define TEST_SAME_TYPE(t1, t2, same) do { \
+ typeof(t1) __t1h = type_max(t1); \
+ typeof(t1) __t1l = type_min(t1); \
+ typeof(t2) __t2h = type_max(t2); \
+ typeof(t2) __t2l = type_min(t2); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1h)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1l)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t1h, t1)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t1l, t1)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2h)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2l)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t2h, t2)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t2l, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1h)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1l)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t1h, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t1l, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2h)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2l)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t2h, t1)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t2l, t1)); \
+} while (0)
+
+#if BITS_PER_LONG == 64
+# define TEST_SAME_TYPE64(base, t, m) TEST_SAME_TYPE(base, t, m)
+#else
+# define TEST_SAME_TYPE64(base, t, m) do { } while (0)
+#endif
+
+#define TEST_TYPE_SETS(base, mu8, mu16, mu32, ms8, ms16, ms32, mu64, ms64) \
+do { \
+ TEST_SAME_TYPE(base, u8, mu8); \
+ TEST_SAME_TYPE(base, u16, mu16); \
+ TEST_SAME_TYPE(base, u32, mu32); \
+ TEST_SAME_TYPE(base, s8, ms8); \
+ TEST_SAME_TYPE(base, s16, ms16); \
+ TEST_SAME_TYPE(base, s32, ms32); \
+ TEST_SAME_TYPE64(base, u64, mu64); \
+ TEST_SAME_TYPE64(base, s64, ms64); \
+} while (0)
+
+ TEST_TYPE_SETS(u8, true, false, false, false, false, false, false, false);
+ TEST_TYPE_SETS(u16, false, true, false, false, false, false, false, false);
+ TEST_TYPE_SETS(u32, false, false, true, false, false, false, false, false);
+ TEST_TYPE_SETS(s8, false, false, false, true, false, false, false, false);
+ TEST_TYPE_SETS(s16, false, false, false, false, true, false, false, false);
+ TEST_TYPE_SETS(s32, false, false, false, false, false, true, false, false);
+#if BITS_PER_LONG == 64
+ TEST_TYPE_SETS(u64, false, false, false, false, false, false, true, false);
+ TEST_TYPE_SETS(s64, false, false, false, false, false, false, false, true);
+#endif
+
+ /* Check for macro side-effects. */
+ var = 4;
+ KUNIT_EXPECT_EQ(test, var, 4);
+ KUNIT_EXPECT_TRUE(test, __same_type(var++, int));
+ KUNIT_EXPECT_EQ(test, var, 4);
+ KUNIT_EXPECT_TRUE(test, __same_type(int, var++));
+ KUNIT_EXPECT_EQ(test, var, 4);
+ KUNIT_EXPECT_TRUE(test, __same_type(var++, var++));
+ KUNIT_EXPECT_EQ(test, var, 4);
+
+ kunit_info(test, "%d __same_type() tests finished\n", count);
+
+#undef TEST_TYPE_SETS
+#undef TEST_SAME_TYPE64
+#undef TEST_SAME_TYPE
+}
+
+static void castable_to_type_test(struct kunit *test)
+{
+ int count = 0;
+
+#define TEST_CASTABLE_TO_TYPE(arg1, arg2, pass) do { \
+ bool __pass = castable_to_type(arg1, arg2); \
+ KUNIT_EXPECT_EQ_MSG(test, __pass, pass, \
+ "expected castable_to_type(" #arg1 ", " #arg2 ") to%s pass\n",\
+ pass ? "" : " not"); \
+ count++; \
+} while (0)
+
+ TEST_CASTABLE_TO_TYPE(16, u8, true);
+ TEST_CASTABLE_TO_TYPE(16, u16, true);
+ TEST_CASTABLE_TO_TYPE(16, u32, true);
+ TEST_CASTABLE_TO_TYPE(16, s8, true);
+ TEST_CASTABLE_TO_TYPE(16, s16, true);
+ TEST_CASTABLE_TO_TYPE(16, s32, true);
+ TEST_CASTABLE_TO_TYPE(-16, s8, true);
+ TEST_CASTABLE_TO_TYPE(-16, s16, true);
+ TEST_CASTABLE_TO_TYPE(-16, s32, true);
+#if BITS_PER_LONG == 64
+ TEST_CASTABLE_TO_TYPE(16, u64, true);
+ TEST_CASTABLE_TO_TYPE(-16, s64, true);
+#endif
+
+#define TEST_CASTABLE_TO_TYPE_VAR(width) do { \
+ u ## width u ## width ## var = 0; \
+ s ## width s ## width ## var = 0; \
+ \
+ /* Constant expressions that fit types. */ \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), s ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), s ## width ## var, true); \
+ /* Constant expressions that do not fit types. */ \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width, false); \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width ## var, false); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width, false); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width ## var, false); \
+ /* Non-constant expression with mismatched type. */ \
+ TEST_CASTABLE_TO_TYPE(s ## width ## var, u ## width, false); \
+ TEST_CASTABLE_TO_TYPE(u ## width ## var, s ## width, false); \
+} while (0)
+
+#define TEST_CASTABLE_TO_TYPE_RANGE(width) do { \
+ unsigned long big = U ## width ## _MAX; \
+ signed long small = S ## width ## _MIN; \
+ u ## width u ## width ## var = 0; \
+ s ## width s ## width ## var = 0; \
+ \
+ /* Constant expression in range. */ \
+ TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width, true); \
+ TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width, true); \
+ TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width ## var, true); \
+ /* Constant expression out of range. */ \
+ TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width, false); \
+ TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width ## var, false); \
+ TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width, false); \
+ TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width ## var, false); \
+ /* Non-constant expression with mismatched type. */ \
+ TEST_CASTABLE_TO_TYPE(big, u ## width, false); \
+ TEST_CASTABLE_TO_TYPE(big, u ## width ## var, false); \
+ TEST_CASTABLE_TO_TYPE(small, s ## width, false); \
+ TEST_CASTABLE_TO_TYPE(small, s ## width ## var, false); \
+} while (0)
+
+ TEST_CASTABLE_TO_TYPE_VAR(8);
+ TEST_CASTABLE_TO_TYPE_VAR(16);
+ TEST_CASTABLE_TO_TYPE_VAR(32);
+#if BITS_PER_LONG == 64
+ TEST_CASTABLE_TO_TYPE_VAR(64);
+#endif
+
+ TEST_CASTABLE_TO_TYPE_RANGE(8);
+ TEST_CASTABLE_TO_TYPE_RANGE(16);
+#if BITS_PER_LONG == 64
+ TEST_CASTABLE_TO_TYPE_RANGE(32);
+#endif
+ kunit_info(test, "%d castable_to_type() tests finished\n", count);
+
+#undef TEST_CASTABLE_TO_TYPE_RANGE
+#undef TEST_CASTABLE_TO_TYPE_VAR
+#undef TEST_CASTABLE_TO_TYPE
+}
+
+struct foo {
+ int a;
+ u32 counter;
+ s16 array[] __counted_by(counter);
+};
+
+struct bar {
+ int a;
+ u32 counter;
+ s16 array[];
+};
+
+static void DEFINE_FLEX_test(struct kunit *test)
+{
+ DEFINE_RAW_FLEX(struct bar, two, array, 2);
+ DEFINE_FLEX(struct foo, eight, array, counter, 8);
+ DEFINE_FLEX(struct foo, empty, array, counter, 0);
+ /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
+ DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
+ int array_size_override = 0;
+
+ KUNIT_EXPECT_EQ(test, sizeof(*two), sizeof(struct bar));
+ KUNIT_EXPECT_EQ(test, __struct_size(two), sizeof(struct bar) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(two), sizeof(struct bar) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(two->array), 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(two->array), 2 * sizeof(s16));
+
+ KUNIT_EXPECT_EQ(test, sizeof(*eight), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(eight), sizeof(struct foo) + 8 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(eight), sizeof(struct foo) + 8 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(eight->array), 8 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(eight->array), 8 * sizeof(s16));
+
+ KUNIT_EXPECT_EQ(test, sizeof(*empty), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __member_size(empty), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(empty->array), 0);
+ KUNIT_EXPECT_EQ(test, __member_size(empty->array), 0);
+
+ /* If __counted_by is not in use, the array size helpers will report the on-stack size. */
+ if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY))
+ array_size_override = 2 * sizeof(s16);
+
+ KUNIT_EXPECT_EQ(test, sizeof(*two_but_zero), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), sizeof(struct foo) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(two_but_zero), sizeof(struct foo) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero->array), array_size_override);
+ KUNIT_EXPECT_EQ(test, __member_size(two_but_zero->array), array_size_override);
+}
+
+static struct kunit_case overflow_test_cases[] = {
+ KUNIT_CASE(u8_u8__u8_overflow_test),
+ KUNIT_CASE(s8_s8__s8_overflow_test),
+ KUNIT_CASE(u16_u16__u16_overflow_test),
+ KUNIT_CASE(s16_s16__s16_overflow_test),
+ KUNIT_CASE(u32_u32__u32_overflow_test),
+ KUNIT_CASE(s32_s32__s32_overflow_test),
+ KUNIT_CASE(u64_u64__u64_overflow_test),
+ KUNIT_CASE(s64_s64__s64_overflow_test),
+ KUNIT_CASE(u32_u32__int_overflow_test),
+ KUNIT_CASE(u32_u32__u8_overflow_test),
+ KUNIT_CASE(u8_u8__int_overflow_test),
+ KUNIT_CASE(int_int__u8_overflow_test),
+ KUNIT_CASE(shift_sane_test),
+ KUNIT_CASE(shift_overflow_test),
+ KUNIT_CASE(shift_truncate_test),
+ KUNIT_CASE(shift_nonsense_test),
+ KUNIT_CASE(overflow_allocation_test),
+ KUNIT_CASE(overflow_size_helpers_test),
+ KUNIT_CASE(overflows_type_test),
+ KUNIT_CASE(same_type_test),
+ KUNIT_CASE(castable_to_type_test),
+ KUNIT_CASE(DEFINE_FLEX_test),
+ {}
+};
+
+static struct kunit_suite overflow_test_suite = {
+ .name = "overflow",
+ .test_cases = overflow_test_cases,
+};
+
+kunit_test_suite(overflow_test_suite);
+
+MODULE_DESCRIPTION("Test cases for arithmetic overflow checks");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/tests/printf_kunit.c b/lib/tests/printf_kunit.c
new file mode 100644
index 000000000000..2c9f6170bacd
--- /dev/null
+++ b/lib/tests/printf_kunit.c
@@ -0,0 +1,803 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for printf facility.
+ */
+
+#include <kunit/test.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+
+#include <linux/bitmap.h>
+#include <linux/dcache.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+
+#include <linux/property.h>
+
+#define BUF_SIZE 256
+#define PAD_SIZE 16
+#define FILL_CHAR '$'
+
+#define NOWARN(option, comment, block) \
+ __diag_push(); \
+ __diag_ignore_all(#option, comment); \
+ block \
+ __diag_pop();
+
+static unsigned int total_tests;
+
+static char *test_buffer;
+static char *alloced_buffer;
+
+static void __printf(7, 0)
+do_test(struct kunit *kunittest, const char *file, const int line, int bufsize, const char *expect,
+ int elen, const char *fmt, va_list ap)
+{
+ va_list aq;
+ int ret, written;
+
+ total_tests++;
+
+ memset(alloced_buffer, FILL_CHAR, BUF_SIZE + 2*PAD_SIZE);
+ va_copy(aq, ap);
+ ret = vsnprintf(test_buffer, bufsize, fmt, aq);
+ va_end(aq);
+
+ if (ret != elen) {
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) returned %d, expected %d\n",
+ file, line, bufsize, fmt, ret, elen);
+ return;
+ }
+
+ if (memchr_inv(alloced_buffer, FILL_CHAR, PAD_SIZE)) {
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote before buffer\n",
+ file, line, bufsize, fmt);
+ return;
+ }
+
+ if (!bufsize) {
+ if (memchr_inv(test_buffer, FILL_CHAR, BUF_SIZE + PAD_SIZE)) {
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n",
+ file, line, fmt);
+ }
+ return;
+ }
+
+ written = min(bufsize-1, elen);
+ if (test_buffer[written]) {
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) did not nul-terminate buffer\n",
+ file, line, bufsize, fmt);
+ return;
+ }
+
+ if (memchr_inv(test_buffer + written + 1, FILL_CHAR, bufsize - (written + 1))) {
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n",
+ file, line, bufsize, fmt);
+ return;
+ }
+
+ if (memchr_inv(test_buffer + bufsize, FILL_CHAR, BUF_SIZE + PAD_SIZE - bufsize)) {
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n",
+ file, line, bufsize, fmt);
+ return;
+ }
+
+ if (memcmp(test_buffer, expect, written)) {
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n",
+ file, line, bufsize, fmt, test_buffer, written, expect);
+ return;
+ }
+}
+
+static void __printf(6, 7)
+__test(struct kunit *kunittest, const char *file, const int line, const char *expect, int elen,
+ const char *fmt, ...)
+{
+ va_list ap;
+ int rand;
+ char *p;
+
+ if (elen >= BUF_SIZE) {
+ KUNIT_FAIL(kunittest,
+ "%s:%d: error in test suite: expected length (%d) >= BUF_SIZE (%d). fmt=\"%s\"\n",
+ file, line, elen, BUF_SIZE, fmt);
+ return;
+ }
+
+ va_start(ap, fmt);
+
+ /*
+ * Every fmt+args is subjected to four tests: Three where we
+ * tell vsnprintf varying buffer sizes (plenty, not quite
+ * enough and 0), and then we also test that kvasprintf would
+ * be able to print it as expected.
+ */
+ do_test(kunittest, file, line, BUF_SIZE, expect, elen, fmt, ap);
+ rand = get_random_u32_inclusive(1, elen + 1);
+ /* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
+ do_test(kunittest, file, line, rand, expect, elen, fmt, ap);
+ do_test(kunittest, file, line, 0, expect, elen, fmt, ap);
+
+ p = kvasprintf(GFP_KERNEL, fmt, ap);
+ if (p) {
+ total_tests++;
+ if (memcmp(p, expect, elen+1)) {
+ KUNIT_FAIL(kunittest,
+ "%s:%d: kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n",
+ file, line, fmt, p, expect);
+ }
+ kfree(p);
+ }
+ va_end(ap);
+}
+
+#define test(expect, fmt, ...) \
+ __test(kunittest, __FILE__, __LINE__, expect, strlen(expect), fmt, ##__VA_ARGS__)
+
+static void
+test_basic(struct kunit *kunittest)
+{
+ /* Work around annoying "warning: zero-length gnu_printf format string". */
+ char nul = '\0';
+
+ test("", &nul);
+ test("100%", "100%%");
+ test("xxx%yyy", "xxx%cyyy", '%');
+ __test(kunittest, __FILE__, __LINE__, "xxx\0yyy", 7, "xxx%cyyy", '\0');
+}
+
+static void
+test_number(struct kunit *kunittest)
+{
+ test("0x1234abcd ", "%#-12x", 0x1234abcd);
+ test(" 0x1234abcd", "%#12x", 0x1234abcd);
+ test("0|001| 12|+123| 1234|-123|-1234", "%d|%03d|%3d|%+d|% d|%+d|% d", 0, 1, 12, 123, 1234, -123, -1234);
+ NOWARN(-Wformat, "Intentionally test narrowing conversion specifiers.", {
+ test("0|1|1|128|255", "%hhu|%hhu|%hhu|%hhu|%hhu", 0, 1, 257, 128, -1);
+ test("0|1|1|-128|-1", "%hhd|%hhd|%hhd|%hhd|%hhd", 0, 1, 257, 128, -1);
+ test("2015122420151225", "%ho%ho%#ho", 1037, 5282, -11627);
+ })
+ /*
+ * POSIX/C99: »The result of converting zero with an explicit
+ * precision of zero shall be no characters.« Hence the output
+ * from the below test should really be "00|0||| ". However,
+ * the kernel's printf also produces a single 0 in that
+ * case. This test case simply documents the current
+ * behaviour.
+ */
+ test("00|0|0|0|0", "%.2d|%.1d|%.0d|%.*d|%1.0d", 0, 0, 0, 0, 0, 0);
+}
+
+static void
+test_string(struct kunit *kunittest)
+{
+ test("", "%s%.0s", "", "123");
+ test("ABCD|abc|123", "%s|%.3s|%.*s", "ABCD", "abcdef", 3, "123456");
+ test("1 | 2|3 | 4|5 ", "%-3s|%3s|%-*s|%*s|%*s", "1", "2", 3, "3", 3, "4", -3, "5");
+ test("1234 ", "%-10.4s", "123456");
+ test(" 1234", "%10.4s", "123456");
+ /*
+ * POSIX and C99 say that a negative precision (which is only
+ * possible to pass via a * argument) should be treated as if
+ * the precision wasn't present, and that if the precision is
+ * omitted (as in %.s), the precision should be taken to be
+ * 0. However, the kernel's printf behaves in exactly the
+ * opposite way, treating a negative precision as 0 and
+ * treating an omitted precision specifier as if no precision
+ * was given.
+ *
+ * These test cases document the current behaviour; should
+ * anyone ever feel the need to follow the standards more
+ * closely, this can be revisited.
+ */
+ test(" ", "%4.*s", -5, "123456");
+ test("123456", "%.s", "123456");
+ test("a||", "%.s|%.0s|%.*s", "a", "b", 0, "c");
+ test("a | | ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c");
+}
+
+#define PLAIN_BUF_SIZE 64 /* leave some space so we don't oops */
+
+#if BITS_PER_LONG == 64
+
+#define PTR_WIDTH 16
+#define PTR ((void *)0xffff0123456789abUL)
+#define PTR_STR "ffff0123456789ab"
+#define PTR_VAL_NO_CRNG "(____ptrval____)"
+#define ZEROS "00000000" /* hex 32 zero bits */
+#define ONES "ffffffff" /* hex 32 one bits */
+
+#else
+
+#define PTR_WIDTH 8
+#define PTR ((void *)0x456789ab)
+#define PTR_STR "456789ab"
+#define PTR_VAL_NO_CRNG "(ptrval)"
+#define ZEROS ""
+#define ONES ""
+
+#endif /* BITS_PER_LONG == 64 */
+
+static void
+plain_hash_to_buffer(struct kunit *kunittest, const void *p, char *buf, size_t len)
+{
+ KUNIT_ASSERT_EQ(kunittest, snprintf(buf, len, "%p", p), PTR_WIDTH);
+
+ if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
+ kunit_skip(kunittest,
+ "crng possibly not yet initialized. plain 'p' buffer contains \"%s\"\n",
+ PTR_VAL_NO_CRNG);
+ }
+}
+
+static void
+hash_pointer(struct kunit *kunittest)
+{
+ if (no_hash_pointers)
+ kunit_skip(kunittest, "hash pointers disabled");
+
+ char buf[PLAIN_BUF_SIZE];
+
+ plain_hash_to_buffer(kunittest, PTR, buf, PLAIN_BUF_SIZE);
+
+ /*
+ * The hash of %p is unpredictable, therefore test() cannot be used.
+ *
+ * Instead verify that the first 32 bits are zeros on a 64-bit system
+ * and that the non-hashed value is not printed.
+ */
+
+ KUNIT_EXPECT_MEMEQ(kunittest, buf, ZEROS, strlen(ZEROS));
+ KUNIT_EXPECT_MEMNEQ(kunittest, buf, PTR_STR, PTR_WIDTH);
+}
+
+static void
+test_hashed(struct kunit *kunittest, const char *fmt, const void *p)
+{
+ char buf[PLAIN_BUF_SIZE];
+
+ plain_hash_to_buffer(kunittest, p, buf, PLAIN_BUF_SIZE);
+
+ test(buf, fmt, p);
+}
+
+/*
+ * NULL pointers aren't hashed.
+ */
+static void
+null_pointer(struct kunit *kunittest)
+{
+ test(ZEROS "00000000", "%p", NULL);
+ test(ZEROS "00000000", "%px", NULL);
+ test("(null)", "%pE", NULL);
+}
+
+/*
+ * Error pointers aren't hashed.
+ */
+static void
+error_pointer(struct kunit *kunittest)
+{
+ test(ONES "fffffff5", "%p", ERR_PTR(-11));
+ test(ONES "fffffff5", "%px", ERR_PTR(-11));
+ test("(efault)", "%pE", ERR_PTR(-11));
+}
+
+#define PTR_INVALID ((void *)0x000000ab)
+
+static void
+invalid_pointer(struct kunit *kunittest)
+{
+ test_hashed(kunittest, "%p", PTR_INVALID);
+ test(ZEROS "000000ab", "%px", PTR_INVALID);
+ test("(efault)", "%pE", PTR_INVALID);
+}
+
+static void
+symbol_ptr(struct kunit *kunittest)
+{
+}
+
+static void
+kernel_ptr(struct kunit *kunittest)
+{
+ /* We can't test this without access to kptr_restrict. */
+}
+
+static void
+struct_resource(struct kunit *kunittest)
+{
+ struct resource test_resource = {
+ .start = 0xc0ffee00,
+ .end = 0xc0ffee00,
+ .flags = IORESOURCE_MEM,
+ };
+
+ test("[mem 0xc0ffee00 flags 0x200]",
+ "%pr", &test_resource);
+
+ test_resource = (struct resource) {
+ .start = 0xc0ffee,
+ .end = 0xba5eba11,
+ .flags = IORESOURCE_MEM,
+ };
+ test("[mem 0x00c0ffee-0xba5eba11 flags 0x200]",
+ "%pr", &test_resource);
+
+ test_resource = (struct resource) {
+ .start = 0xba5eba11,
+ .end = 0xc0ffee,
+ .flags = IORESOURCE_MEM,
+ };
+ test("[mem 0xba5eba11-0x00c0ffee flags 0x200]",
+ "%pr", &test_resource);
+
+ test_resource = (struct resource) {
+ .start = 0xba5eba11,
+ .end = 0xba5eca11,
+ .flags = IORESOURCE_MEM,
+ };
+
+ test("[mem 0xba5eba11-0xba5eca11 flags 0x200]",
+ "%pr", &test_resource);
+
+ test_resource = (struct resource) {
+ .start = 0xba11,
+ .end = 0xca10,
+ .flags = IORESOURCE_IO |
+ IORESOURCE_DISABLED |
+ IORESOURCE_UNSET,
+ };
+
+ test("[io size 0x1000 disabled]",
+ "%pR", &test_resource);
+}
+
+static void
+struct_range(struct kunit *kunittest)
+{
+ struct range test_range = DEFINE_RANGE(0xc0ffee00ba5eba11,
+ 0xc0ffee00ba5eba11);
+ test("[range 0xc0ffee00ba5eba11]", "%pra", &test_range);
+
+ test_range = DEFINE_RANGE(0xc0ffee, 0xba5eba11);
+ test("[range 0x0000000000c0ffee-0x00000000ba5eba11]",
+ "%pra", &test_range);
+
+ test_range = DEFINE_RANGE(0xba5eba11, 0xc0ffee);
+ test("[range 0x00000000ba5eba11-0x0000000000c0ffee]",
+ "%pra", &test_range);
+}
+
+static void
+addr(struct kunit *kunittest)
+{
+}
+
+static void
+escaped_str(struct kunit *kunittest)
+{
+}
+
+static void
+hex_string(struct kunit *kunittest)
+{
+ const char buf[3] = {0xc0, 0xff, 0xee};
+
+ test("c0 ff ee|c0:ff:ee|c0-ff-ee|c0ffee",
+ "%3ph|%3phC|%3phD|%3phN", buf, buf, buf, buf);
+ test("c0 ff ee|c0:ff:ee|c0-ff-ee|c0ffee",
+ "%*ph|%*phC|%*phD|%*phN", 3, buf, 3, buf, 3, buf, 3, buf);
+}
+
+static void
+mac(struct kunit *kunittest)
+{
+ const u8 addr[6] = {0x2d, 0x48, 0xd6, 0xfc, 0x7a, 0x05};
+
+ test("2d:48:d6:fc:7a:05", "%pM", addr);
+ test("05:7a:fc:d6:48:2d", "%pMR", addr);
+ test("2d-48-d6-fc-7a-05", "%pMF", addr);
+ test("2d48d6fc7a05", "%pm", addr);
+ test("057afcd6482d", "%pmR", addr);
+}
+
+static void
+ip4(struct kunit *kunittest)
+{
+ struct sockaddr_in sa;
+
+ sa.sin_family = AF_INET;
+ sa.sin_port = cpu_to_be16(12345);
+ sa.sin_addr.s_addr = cpu_to_be32(0x7f000001);
+
+ test("127.000.000.001|127.0.0.1", "%pi4|%pI4", &sa.sin_addr, &sa.sin_addr);
+ test("127.000.000.001|127.0.0.1", "%piS|%pIS", &sa, &sa);
+ sa.sin_addr.s_addr = cpu_to_be32(0x01020304);
+ test("001.002.003.004:12345|1.2.3.4:12345", "%piSp|%pISp", &sa, &sa);
+}
+
+static void
+ip6(struct kunit *kunittest)
+{
+}
+
+static void
+uuid(struct kunit *kunittest)
+{
+ const char uuid[16] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+ 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+
+ test("00010203-0405-0607-0809-0a0b0c0d0e0f", "%pUb", uuid);
+ test("00010203-0405-0607-0809-0A0B0C0D0E0F", "%pUB", uuid);
+ test("03020100-0504-0706-0809-0a0b0c0d0e0f", "%pUl", uuid);
+ test("03020100-0504-0706-0809-0A0B0C0D0E0F", "%pUL", uuid);
+}
+
+static struct dentry test_dentry[4] = {
+ { .d_parent = &test_dentry[0],
+ .d_name = QSTR_INIT(test_dentry[0].d_iname, 3),
+ .d_iname = "foo" },
+ { .d_parent = &test_dentry[0],
+ .d_name = QSTR_INIT(test_dentry[1].d_iname, 5),
+ .d_iname = "bravo" },
+ { .d_parent = &test_dentry[1],
+ .d_name = QSTR_INIT(test_dentry[2].d_iname, 4),
+ .d_iname = "alfa" },
+ { .d_parent = &test_dentry[2],
+ .d_name = QSTR_INIT(test_dentry[3].d_iname, 5),
+ .d_iname = "romeo" },
+};
+
+static void
+dentry(struct kunit *kunittest)
+{
+ test("foo", "%pd", &test_dentry[0]);
+ test("foo", "%pd2", &test_dentry[0]);
+
+ test("(null)", "%pd", NULL);
+ test("(efault)", "%pd", PTR_INVALID);
+ test("(null)", "%pD", NULL);
+ test("(efault)", "%pD", PTR_INVALID);
+
+ test("romeo", "%pd", &test_dentry[3]);
+ test("alfa/romeo", "%pd2", &test_dentry[3]);
+ test("bravo/alfa/romeo", "%pd3", &test_dentry[3]);
+ test("/bravo/alfa/romeo", "%pd4", &test_dentry[3]);
+ test("/bravo/alfa", "%pd4", &test_dentry[2]);
+
+ test("bravo/alfa |bravo/alfa ", "%-12pd2|%*pd2", &test_dentry[2], -12, &test_dentry[2]);
+ test(" bravo/alfa| bravo/alfa", "%12pd2|%*pd2", &test_dentry[2], 12, &test_dentry[2]);
+}
+
+static void
+struct_va_format(struct kunit *kunittest)
+{
+}
+
+static void
+time_and_date(struct kunit *kunittest)
+{
+ /* 1543210543 */
+ const struct rtc_time tm = {
+ .tm_sec = 43,
+ .tm_min = 35,
+ .tm_hour = 5,
+ .tm_mday = 26,
+ .tm_mon = 10,
+ .tm_year = 118,
+ };
+ /* 2019-01-04T15:32:23 */
+ time64_t t = 1546615943;
+
+ test("(%pt?)", "%pt", &tm);
+ test("2018-11-26T05:35:43", "%ptR", &tm);
+ test("0118-10-26T05:35:43", "%ptRr", &tm);
+ test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm);
+ test("05:35:43|0118-10-26", "%ptRtr|%ptRdr", &tm, &tm);
+ test("05:35:43|2018-11-26", "%ptRttr|%ptRdtr", &tm, &tm);
+ test("05:35:43 tr|2018-11-26 tr", "%ptRt tr|%ptRd tr", &tm, &tm);
+
+ test("2019-01-04T15:32:23", "%ptT", &t);
+ test("0119-00-04T15:32:23", "%ptTr", &t);
+ test("15:32:23|2019-01-04", "%ptTt|%ptTd", &t, &t);
+ test("15:32:23|0119-00-04", "%ptTtr|%ptTdr", &t, &t);
+
+ test("2019-01-04 15:32:23", "%ptTs", &t);
+ test("0119-00-04 15:32:23", "%ptTsr", &t);
+ test("15:32:23|2019-01-04", "%ptTts|%ptTds", &t, &t);
+ test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t);
+}
+
+static void
+struct_clk(struct kunit *kunittest)
+{
+}
+
+static void
+large_bitmap(struct kunit *kunittest)
+{
+ const int nbits = 1 << 16;
+ unsigned long *bits = bitmap_zalloc(nbits, GFP_KERNEL);
+ if (!bits)
+ return;
+
+ bitmap_set(bits, 1, 20);
+ bitmap_set(bits, 60000, 15);
+ test("1-20,60000-60014", "%*pbl", nbits, bits);
+ bitmap_free(bits);
+}
+
+static void
+bitmap(struct kunit *kunittest)
+{
+ DECLARE_BITMAP(bits, 20);
+ const int primes[] = {2,3,5,7,11,13,17,19};
+ int i;
+
+ bitmap_zero(bits, 20);
+ test("00000|00000", "%20pb|%*pb", bits, 20, bits);
+ test("|", "%20pbl|%*pbl", bits, 20, bits);
+
+ for (i = 0; i < ARRAY_SIZE(primes); ++i)
+ set_bit(primes[i], bits);
+ test("a28ac|a28ac", "%20pb|%*pb", bits, 20, bits);
+ test("2-3,5,7,11,13,17,19|2-3,5,7,11,13,17,19", "%20pbl|%*pbl", bits, 20, bits);
+
+ bitmap_fill(bits, 20);
+ test("fffff|fffff", "%20pb|%*pb", bits, 20, bits);
+ test("0-19|0-19", "%20pbl|%*pbl", bits, 20, bits);
+
+ large_bitmap(kunittest);
+}
+
+static void
+netdev_features(struct kunit *kunittest)
+{
+}
+
+struct page_flags_test {
+ int width;
+ int shift;
+ int mask;
+ const char *fmt;
+ const char *name;
+};
+
+static const struct page_flags_test pft[] = {
+ {SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
+ "%d", "section"},
+ {NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
+ "%d", "node"},
+ {ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
+ "%d", "zone"},
+ {LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
+ "%#x", "lastcpupid"},
+ {KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
+ "%#x", "kasantag"},
+};
+
+static void
+page_flags_test(struct kunit *kunittest, int section, int node, int zone,
+ int last_cpupid, int kasan_tag, unsigned long flags, const char *name,
+ char *cmp_buf)
+{
+ unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag};
+ unsigned long size;
+ bool append = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(values); i++)
+ flags |= (values[i] & pft[i].mask) << pft[i].shift;
+
+ size = scnprintf(cmp_buf, BUF_SIZE, "%#lx(", flags);
+ if (flags & PAGEFLAGS_MASK) {
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
+ append = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pft); i++) {
+ if (!pft[i].width)
+ continue;
+
+ if (append)
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "|");
+
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s=",
+ pft[i].name);
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt,
+ values[i] & pft[i].mask);
+ append = true;
+ }
+
+ snprintf(cmp_buf + size, BUF_SIZE - size, ")");
+
+ test(cmp_buf, "%pGp", &flags);
+}
+
+static void
+flags(struct kunit *kunittest)
+{
+ unsigned long flags;
+ char *cmp_buffer;
+ gfp_t gfp;
+
+ cmp_buffer = kunit_kmalloc(kunittest, BUF_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(kunittest, cmp_buffer);
+
+ flags = 0;
+ page_flags_test(kunittest, 0, 0, 0, 0, 0, flags, "", cmp_buffer);
+
+ flags = 1UL << NR_PAGEFLAGS;
+ page_flags_test(kunittest, 0, 0, 0, 0, 0, flags, "", cmp_buffer);
+
+ flags |= 1UL << PG_uptodate | 1UL << PG_dirty | 1UL << PG_lru
+ | 1UL << PG_active | 1UL << PG_swapbacked;
+ page_flags_test(kunittest, 1, 1, 1, 0x1fffff, 1, flags,
+ "uptodate|dirty|lru|active|swapbacked",
+ cmp_buffer);
+
+ flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+ test("read|exec|mayread|maywrite|mayexec", "%pGv", &flags);
+
+ gfp = GFP_TRANSHUGE;
+ test("GFP_TRANSHUGE", "%pGg", &gfp);
+
+ gfp = GFP_ATOMIC|__GFP_DMA;
+ test("GFP_ATOMIC|GFP_DMA", "%pGg", &gfp);
+
+ gfp = __GFP_HIGH;
+ test("__GFP_HIGH", "%pGg", &gfp);
+
+ /* Any flags not translated by the table should remain numeric */
+ gfp = ~__GFP_BITS_MASK;
+ snprintf(cmp_buffer, BUF_SIZE, "%#lx", (unsigned long) gfp);
+ test(cmp_buffer, "%pGg", &gfp);
+
+ snprintf(cmp_buffer, BUF_SIZE, "__GFP_HIGH|%#lx",
+ (unsigned long) gfp);
+ gfp |= __GFP_HIGH;
+ test(cmp_buffer, "%pGg", &gfp);
+}
+
+static void fwnode_pointer(struct kunit *kunittest)
+{
+ const struct software_node first = { .name = "first" };
+ const struct software_node second = { .name = "second", .parent = &first };
+ const struct software_node third = { .name = "third", .parent = &second };
+ const struct software_node *group[] = { &first, &second, &third, NULL };
+ const char * const full_name_second = "first/second";
+ const char * const full_name_third = "first/second/third";
+ const char * const second_name = "second";
+ const char * const third_name = "third";
+ int rval;
+
+ rval = software_node_register_node_group(group);
+ if (rval) {
+ kunit_skip(kunittest, "cannot register softnodes; rval %d\n", rval);
+ }
+
+ test(full_name_second, "%pfw", software_node_fwnode(&second));
+ test(full_name_third, "%pfw", software_node_fwnode(&third));
+ test(full_name_third, "%pfwf", software_node_fwnode(&third));
+ test(second_name, "%pfwP", software_node_fwnode(&second));
+ test(third_name, "%pfwP", software_node_fwnode(&third));
+
+ software_node_unregister_node_group(group);
+}
+
+static void fourcc_pointer(struct kunit *kunittest)
+{
+ struct {
+ u32 code;
+ char *str;
+ } const try[] = {
+ { 0x3231564e, "NV12 little-endian (0x3231564e)", },
+ { 0xb231564e, "NV12 big-endian (0xb231564e)", },
+ { 0x10111213, ".... little-endian (0x10111213)", },
+ { 0x20303159, "Y10 little-endian (0x20303159)", },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(try); i++)
+ test(try[i].str, "%p4cc", &try[i].code);
+}
+
+static void
+errptr(struct kunit *kunittest)
+{
+ test("-1234", "%pe", ERR_PTR(-1234));
+
+ /* Check that %pe with a non-ERR_PTR gets treated as ordinary %p. */
+ BUILD_BUG_ON(IS_ERR(PTR));
+ test_hashed(kunittest, "%pe", PTR);
+
+#ifdef CONFIG_SYMBOLIC_ERRNAME
+ test("(-ENOTSOCK)", "(%pe)", ERR_PTR(-ENOTSOCK));
+ test("(-EAGAIN)", "(%pe)", ERR_PTR(-EAGAIN));
+ BUILD_BUG_ON(EAGAIN != EWOULDBLOCK);
+ test("(-EAGAIN)", "(%pe)", ERR_PTR(-EWOULDBLOCK));
+ test("[-EIO ]", "[%-8pe]", ERR_PTR(-EIO));
+ test("[ -EIO]", "[%8pe]", ERR_PTR(-EIO));
+ test("-EPROBE_DEFER", "%pe", ERR_PTR(-EPROBE_DEFER));
+#endif
+}
+
+static int printf_suite_init(struct kunit_suite *suite)
+{
+ total_tests = 0;
+
+ alloced_buffer = kmalloc(BUF_SIZE + 2*PAD_SIZE, GFP_KERNEL);
+ if (!alloced_buffer)
+ return -ENOMEM;
+ test_buffer = alloced_buffer + PAD_SIZE;
+
+ return 0;
+}
+
+static void printf_suite_exit(struct kunit_suite *suite)
+{
+ kfree(alloced_buffer);
+
+ kunit_info(suite, "ran %u tests\n", total_tests);
+}
+
+static struct kunit_case printf_test_cases[] = {
+ KUNIT_CASE(test_basic),
+ KUNIT_CASE(test_number),
+ KUNIT_CASE(test_string),
+ KUNIT_CASE(hash_pointer),
+ KUNIT_CASE(null_pointer),
+ KUNIT_CASE(error_pointer),
+ KUNIT_CASE(invalid_pointer),
+ KUNIT_CASE(symbol_ptr),
+ KUNIT_CASE(kernel_ptr),
+ KUNIT_CASE(struct_resource),
+ KUNIT_CASE(struct_range),
+ KUNIT_CASE(addr),
+ KUNIT_CASE(escaped_str),
+ KUNIT_CASE(hex_string),
+ KUNIT_CASE(mac),
+ KUNIT_CASE(ip4),
+ KUNIT_CASE(ip6),
+ KUNIT_CASE(uuid),
+ KUNIT_CASE(dentry),
+ KUNIT_CASE(struct_va_format),
+ KUNIT_CASE(time_and_date),
+ KUNIT_CASE(struct_clk),
+ KUNIT_CASE(bitmap),
+ KUNIT_CASE(netdev_features),
+ KUNIT_CASE(flags),
+ KUNIT_CASE(errptr),
+ KUNIT_CASE(fwnode_pointer),
+ KUNIT_CASE(fourcc_pointer),
+ {}
+};
+
+static struct kunit_suite printf_test_suite = {
+ .name = "printf",
+ .suite_init = printf_suite_init,
+ .suite_exit = printf_suite_exit,
+ .test_cases = printf_test_cases,
+};
+
+kunit_test_suite(printf_test_suite);
+
+MODULE_AUTHOR("Rasmus Villemoes <linux@rasmusvillemoes.dk>");
+MODULE_DESCRIPTION("Test cases for printf facility");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/scanf_kunit.c b/lib/tests/scanf_kunit.c
new file mode 100644
index 000000000000..e9a36ed80575
--- /dev/null
+++ b/lib/tests/scanf_kunit.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for sscanf facility.
+ */
+
+#include <kunit/test.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/prandom.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#define BUF_SIZE 1024
+
+static char *test_buffer;
+static char *fmt_buffer;
+static struct rnd_state rnd_state;
+
+typedef void (*check_fn)(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap);
+
+static void __scanf(7, 9)
+_test(struct kunit *test, const char *file, const int line, check_fn fn, const void *check_data,
+ const char *string, const char *fmt, int n_args, ...)
+{
+ va_list ap, ap_copy;
+ int ret;
+
+ va_start(ap, n_args);
+ va_copy(ap_copy, ap);
+ ret = vsscanf(string, fmt, ap_copy);
+ va_end(ap_copy);
+
+ if (ret != n_args) {
+ KUNIT_FAIL(test, "%s:%d: vsscanf(\"%s\", \"%s\", ...) returned %d expected %d",
+ file, line, string, fmt, ret, n_args);
+ } else {
+ (*fn)(test, file, line, check_data, string, fmt, n_args, ap);
+ }
+
+ va_end(ap);
+}
+
+#define _check_numbers_template(arg_fmt, expect, str, fmt, n_args, ap) \
+do { \
+ for (; n_args > 0; n_args--, expect++) { \
+ typeof(*expect) got = *va_arg(ap, typeof(expect)); \
+ if (got != *expect) { \
+ KUNIT_FAIL(test, \
+ "%s:%d: vsscanf(\"%s\", \"%s\", ...) expected " arg_fmt " got " arg_fmt, \
+ file, line, str, fmt, *expect, got); \
+ return; \
+ } \
+ } \
+} while (0)
+
+static void check_ull(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
+{
+ const unsigned long long *pval = check_data;
+
+ _check_numbers_template("%llu", pval, string, fmt, n_args, ap);
+}
+
+static void check_ll(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
+{
+ const long long *pval = check_data;
+
+ _check_numbers_template("%lld", pval, string, fmt, n_args, ap);
+}
+
+static void check_ulong(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
+{
+ const unsigned long *pval = check_data;
+
+ _check_numbers_template("%lu", pval, string, fmt, n_args, ap);
+}
+
+static void check_long(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
+{
+ const long *pval = check_data;
+
+ _check_numbers_template("%ld", pval, string, fmt, n_args, ap);
+}
+
+static void check_uint(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
+{
+ const unsigned int *pval = check_data;
+
+ _check_numbers_template("%u", pval, string, fmt, n_args, ap);
+}
+
+static void check_int(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
+{
+ const int *pval = check_data;
+
+ _check_numbers_template("%d", pval, string, fmt, n_args, ap);
+}
+
+static void check_ushort(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
+{
+ const unsigned short *pval = check_data;
+
+ _check_numbers_template("%hu", pval, string, fmt, n_args, ap);
+}
+
+static void check_short(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
+{
+ const short *pval = check_data;
+
+ _check_numbers_template("%hd", pval, string, fmt, n_args, ap);
+}
+
+static void check_uchar(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
+{
+ const unsigned char *pval = check_data;
+
+ _check_numbers_template("%hhu", pval, string, fmt, n_args, ap);
+}
+
+static void check_char(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
+{
+ const signed char *pval = check_data;
+
+ _check_numbers_template("%hhd", pval, string, fmt, n_args, ap);
+}
+
+/* Selection of interesting numbers to test, copied from test-kstrtox.c */
+static const unsigned long long numbers[] = {
+ 0x0ULL,
+ 0x1ULL,
+ 0x7fULL,
+ 0x80ULL,
+ 0x81ULL,
+ 0xffULL,
+ 0x100ULL,
+ 0x101ULL,
+ 0x7fffULL,
+ 0x8000ULL,
+ 0x8001ULL,
+ 0xffffULL,
+ 0x10000ULL,
+ 0x10001ULL,
+ 0x7fffffffULL,
+ 0x80000000ULL,
+ 0x80000001ULL,
+ 0xffffffffULL,
+ 0x100000000ULL,
+ 0x100000001ULL,
+ 0x7fffffffffffffffULL,
+ 0x8000000000000000ULL,
+ 0x8000000000000001ULL,
+ 0xfffffffffffffffeULL,
+ 0xffffffffffffffffULL,
+};
+
+#define value_representable_in_type(T, val) \
+(is_signed_type(T) \
+ ? ((long long)(val) >= type_min(T)) && ((long long)(val) <= type_max(T)) \
+ : ((unsigned long long)(val) <= type_max(T)))
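+/*
+ * For example (illustrative): 0x80 is representable in unsigned char but
+ * not in signed char, while -0x80 is representable only in signed char.
+ */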
+
+
+#define test_one_number(T, gen_fmt, scan_fmt, val, fn) \
+do { \
+ const T expect_val = (T)(val); \
+ T result = ~expect_val; /* should be overwritten */ \
+ \
+ snprintf(test_buffer, BUF_SIZE, gen_fmt, expect_val); \
+ _test(test, __FILE__, __LINE__, fn, &expect_val, test_buffer, "%" scan_fmt, 1, &result);\
+} while (0)
+
+#define simple_numbers_loop(T, gen_fmt, scan_fmt, fn) \
+do { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(numbers); i++) { \
+ if (value_representable_in_type(T, numbers[i])) \
+ test_one_number(T, gen_fmt, scan_fmt, \
+ numbers[i], fn); \
+ \
+ if (value_representable_in_type(T, -numbers[i])) \
+ test_one_number(T, gen_fmt, scan_fmt, \
+ -numbers[i], fn); \
+ } \
+} while (0)
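+
+/*
+ * Worked example (sketch): test_one_number(int, "%d", "d", -5, check_int)
+ * prints "-5" into test_buffer with snprintf() and scans it back with a
+ * "%d" conversion, expecting -5 in the result.
+ */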
+
+static void numbers_simple(struct kunit *test)
+{
+ simple_numbers_loop(unsigned long long, "%llu", "llu", check_ull);
+ simple_numbers_loop(long long, "%lld", "lld", check_ll);
+ simple_numbers_loop(long long, "%lld", "lli", check_ll);
+ simple_numbers_loop(unsigned long long, "%llx", "llx", check_ull);
+ simple_numbers_loop(long long, "%llx", "llx", check_ll);
+ simple_numbers_loop(long long, "0x%llx", "lli", check_ll);
+ simple_numbers_loop(unsigned long long, "0x%llx", "llx", check_ull);
+ simple_numbers_loop(long long, "0x%llx", "llx", check_ll);
+
+ simple_numbers_loop(unsigned long, "%lu", "lu", check_ulong);
+ simple_numbers_loop(long, "%ld", "ld", check_long);
+ simple_numbers_loop(long, "%ld", "li", check_long);
+ simple_numbers_loop(unsigned long, "%lx", "lx", check_ulong);
+ simple_numbers_loop(long, "%lx", "lx", check_long);
+ simple_numbers_loop(long, "0x%lx", "li", check_long);
+ simple_numbers_loop(unsigned long, "0x%lx", "lx", check_ulong);
+ simple_numbers_loop(long, "0x%lx", "lx", check_long);
+
+ simple_numbers_loop(unsigned int, "%u", "u", check_uint);
+ simple_numbers_loop(int, "%d", "d", check_int);
+ simple_numbers_loop(int, "%d", "i", check_int);
+ simple_numbers_loop(unsigned int, "%x", "x", check_uint);
+ simple_numbers_loop(int, "%x", "x", check_int);
+ simple_numbers_loop(int, "0x%x", "i", check_int);
+ simple_numbers_loop(unsigned int, "0x%x", "x", check_uint);
+ simple_numbers_loop(int, "0x%x", "x", check_int);
+
+ simple_numbers_loop(unsigned short, "%hu", "hu", check_ushort);
+ simple_numbers_loop(short, "%hd", "hd", check_short);
+ simple_numbers_loop(short, "%hd", "hi", check_short);
+ simple_numbers_loop(unsigned short, "%hx", "hx", check_ushort);
+ simple_numbers_loop(short, "%hx", "hx", check_short);
+ simple_numbers_loop(short, "0x%hx", "hi", check_short);
+ simple_numbers_loop(unsigned short, "0x%hx", "hx", check_ushort);
+ simple_numbers_loop(short, "0x%hx", "hx", check_short);
+
+ simple_numbers_loop(unsigned char, "%hhu", "hhu", check_uchar);
+ simple_numbers_loop(signed char, "%hhd", "hhd", check_char);
+ simple_numbers_loop(signed char, "%hhd", "hhi", check_char);
+ simple_numbers_loop(unsigned char, "%hhx", "hhx", check_uchar);
+ simple_numbers_loop(signed char, "%hhx", "hhx", check_char);
+ simple_numbers_loop(signed char, "0x%hhx", "hhi", check_char);
+ simple_numbers_loop(unsigned char, "0x%hhx", "hhx", check_uchar);
+ simple_numbers_loop(signed char, "0x%hhx", "hhx", check_char);
+}
+
+/*
+ * This gives a better variety of number "lengths" in a small sample than
+ * the raw prandom*() functions (it is not mathematically rigorous!).
+ * Variability of length and value matters more than perfect randomness.
+ */
+static u32 next_test_random(u32 max_bits)
+{
+ u32 n_bits = hweight32(prandom_u32_state(&rnd_state)) % (max_bits + 1);
+
+ return prandom_u32_state(&rnd_state) & GENMASK(n_bits, 0);
+}
+
+static unsigned long long next_test_random_ull(void)
+{
+ u32 rand1 = prandom_u32_state(&rnd_state);
+ u32 n_bits = (hweight32(rand1) * 3) % 64;
+ u64 val = (u64)prandom_u32_state(&rnd_state) * rand1;
+
+ return val & GENMASK_ULL(n_bits, 0);
+}
+
+#define random_for_type(T) \
+ ((T)(sizeof(T) <= sizeof(u32) \
+ ? next_test_random(BITS_PER_TYPE(T)) \
+ : next_test_random_ull()))
+
+/*
+ * Define a pattern of negative and positive numbers to ensure we get
+ * some of both within the small number of samples in a test string.
+ */
+#define NEGATIVES_PATTERN 0x3246 /* 00110010 01000110 */
+
+#define fill_random_array(arr) \
+do { \
+ unsigned int neg_pattern = NEGATIVES_PATTERN; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(arr); i++, neg_pattern >>= 1) { \
+ (arr)[i] = random_for_type(typeof((arr)[0])); \
+ if (is_signed_type(typeof((arr)[0])) && (neg_pattern & 1)) \
+ (arr)[i] = -(arr)[i]; \
+ } \
+} while (0)
+
+/*
+ * Convenience wrapper around snprintf() to append at buf_pos in buf,
+ * updating buf_pos and returning the number of characters appended.
+ * On error, buf_pos is left unchanged and the return value is 0.
+ */
+static int __printf(4, 5)
+append_fmt(char *buf, int *buf_pos, int buf_len, const char *val_fmt, ...)
+{
+ va_list ap;
+ int field_len;
+
+ va_start(ap, val_fmt);
+ field_len = vsnprintf(buf + *buf_pos, buf_len - *buf_pos, val_fmt, ap);
+ va_end(ap);
+
+ if (field_len < 0)
+ field_len = 0;
+
+ *buf_pos += field_len;
+
+ return field_len;
+}
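+
+/*
+ * Usage sketch (hypothetical values): two successive calls appending
+ * "0x1f" and then ",7" leave the buffer holding "0x1f,7" with *buf_pos
+ * advanced past both fields.
+ */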
+
+/*
+ * Convenience function to append the field delimiter string
+ * to both the value string and format string buffers.
+ */
+static void append_delim(char *str_buf, int *str_buf_pos, int str_buf_len,
+ char *fmt_buf, int *fmt_buf_pos, int fmt_buf_len,
+ const char *delim_str)
+{
+ append_fmt(str_buf, str_buf_pos, str_buf_len, delim_str);
+ append_fmt(fmt_buf, fmt_buf_pos, fmt_buf_len, delim_str);
+}
+
+#define test_array_8(fn, check_data, string, fmt, arr) \
+do { \
+ BUILD_BUG_ON(ARRAY_SIZE(arr) != 8); \
+ _test(test, __FILE__, __LINE__, fn, check_data, string, fmt, 8, \
+ &(arr)[0], &(arr)[1], &(arr)[2], &(arr)[3], \
+ &(arr)[4], &(arr)[5], &(arr)[6], &(arr)[7]); \
+} while (0)
+
+#define numbers_list_8(T, gen_fmt, field_sep, scan_fmt, fn) \
+do { \
+ int i, pos = 0, fmt_pos = 0; \
+ T expect[8], result[8]; \
+ \
+ fill_random_array(expect); \
+ \
+ for (i = 0; i < ARRAY_SIZE(expect); i++) { \
+ if (i != 0) \
+ append_delim(test_buffer, &pos, BUF_SIZE, \
+ fmt_buffer, &fmt_pos, BUF_SIZE, \
+ field_sep); \
+ \
+ append_fmt(test_buffer, &pos, BUF_SIZE, gen_fmt, expect[i]); \
+ append_fmt(fmt_buffer, &fmt_pos, BUF_SIZE, "%%%s", scan_fmt); \
+ } \
+ \
+ test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
+} while (0)
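+
+/*
+ * Worked example (sketch): with T = int, gen_fmt "%d", scan_fmt "d" and a
+ * ":" delimiter, one run might build test_buffer = "12:-7:..." alongside
+ * fmt_buffer = "%d:%d:..." and then scan all eight values back with a
+ * single vsscanf() call inside _test().
+ */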
+
+#define numbers_list_fix_width(T, gen_fmt, field_sep, width, scan_fmt, fn) \
+do { \
+ char full_fmt[16]; \
+ \
+ snprintf(full_fmt, sizeof(full_fmt), "%u%s", width, scan_fmt); \
+ numbers_list_8(T, gen_fmt, field_sep, full_fmt, fn); \
+} while (0)
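+
+/* E.g. width 20 with scan_fmt "llu" yields a per-field format of "%20llu". */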
+
+#define numbers_list_val_width(T, gen_fmt, field_sep, scan_fmt, fn) \
+do { \
+ int i, val_len, pos = 0, fmt_pos = 0; \
+ T expect[8], result[8]; \
+ \
+ fill_random_array(expect); \
+ \
+ for (i = 0; i < ARRAY_SIZE(expect); i++) { \
+ if (i != 0) \
+ append_delim(test_buffer, &pos, BUF_SIZE, \
+ fmt_buffer, &fmt_pos, BUF_SIZE, field_sep);\
+ \
+ val_len = append_fmt(test_buffer, &pos, BUF_SIZE, gen_fmt, \
+ expect[i]); \
+ append_fmt(fmt_buffer, &fmt_pos, BUF_SIZE, \
+ "%%%u%s", val_len, scan_fmt); \
+ } \
+ \
+ test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
+} while (0)
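+
+/* E.g. the value -7 prints as "-7" (length 2), giving a field format of "%2d". */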
+
+static void numbers_list_ll(struct kunit *test, const char *delim)
+{
+ numbers_list_8(unsigned long long, "%llu", delim, "llu", check_ull);
+ numbers_list_8(long long, "%lld", delim, "lld", check_ll);
+ numbers_list_8(long long, "%lld", delim, "lli", check_ll);
+ numbers_list_8(unsigned long long, "%llx", delim, "llx", check_ull);
+ numbers_list_8(unsigned long long, "0x%llx", delim, "llx", check_ull);
+ numbers_list_8(long long, "0x%llx", delim, "lli", check_ll);
+}
+
+static void numbers_list_l(struct kunit *test, const char *delim)
+{
+ numbers_list_8(unsigned long, "%lu", delim, "lu", check_ulong);
+ numbers_list_8(long, "%ld", delim, "ld", check_long);
+ numbers_list_8(long, "%ld", delim, "li", check_long);
+ numbers_list_8(unsigned long, "%lx", delim, "lx", check_ulong);
+ numbers_list_8(unsigned long, "0x%lx", delim, "lx", check_ulong);
+ numbers_list_8(long, "0x%lx", delim, "li", check_long);
+}
+
+static void numbers_list_d(struct kunit *test, const char *delim)
+{
+ numbers_list_8(unsigned int, "%u", delim, "u", check_uint);
+ numbers_list_8(int, "%d", delim, "d", check_int);
+ numbers_list_8(int, "%d", delim, "i", check_int);
+ numbers_list_8(unsigned int, "%x", delim, "x", check_uint);
+ numbers_list_8(unsigned int, "0x%x", delim, "x", check_uint);
+ numbers_list_8(int, "0x%x", delim, "i", check_int);
+}
+
+static void numbers_list_h(struct kunit *test, const char *delim)
+{
+ numbers_list_8(unsigned short, "%hu", delim, "hu", check_ushort);
+ numbers_list_8(short, "%hd", delim, "hd", check_short);
+ numbers_list_8(short, "%hd", delim, "hi", check_short);
+ numbers_list_8(unsigned short, "%hx", delim, "hx", check_ushort);
+ numbers_list_8(unsigned short, "0x%hx", delim, "hx", check_ushort);
+ numbers_list_8(short, "0x%hx", delim, "hi", check_short);
+}
+
+static void numbers_list_hh(struct kunit *test, const char *delim)
+{
+ numbers_list_8(unsigned char, "%hhu", delim, "hhu", check_uchar);
+ numbers_list_8(signed char, "%hhd", delim, "hhd", check_char);
+ numbers_list_8(signed char, "%hhd", delim, "hhi", check_char);
+ numbers_list_8(unsigned char, "%hhx", delim, "hhx", check_uchar);
+ numbers_list_8(unsigned char, "0x%hhx", delim, "hhx", check_uchar);
+ numbers_list_8(signed char, "0x%hhx", delim, "hhi", check_char);
+}
+
+static void numbers_list(struct kunit *test)
+{
+ const char * const *param = test->param_value;
+ const char *delim = *param;
+
+ numbers_list_ll(test, delim);
+ numbers_list_l(test, delim);
+ numbers_list_d(test, delim);
+ numbers_list_h(test, delim);
+ numbers_list_hh(test, delim);
+}
+
+static void numbers_list_field_width_ll(struct kunit *test, const char *delim)
+{
+ numbers_list_fix_width(unsigned long long, "%llu", delim, 20, "llu", check_ull);
+ numbers_list_fix_width(long long, "%lld", delim, 20, "lld", check_ll);
+ numbers_list_fix_width(long long, "%lld", delim, 20, "lli", check_ll);
+ numbers_list_fix_width(unsigned long long, "%llx", delim, 16, "llx", check_ull);
+ numbers_list_fix_width(unsigned long long, "0x%llx", delim, 18, "llx", check_ull);
+ numbers_list_fix_width(long long, "0x%llx", delim, 18, "lli", check_ll);
+}
+
+static void numbers_list_field_width_l(struct kunit *test, const char *delim)
+{
+#if BITS_PER_LONG == 64
+ numbers_list_fix_width(unsigned long, "%lu", delim, 20, "lu", check_ulong);
+ numbers_list_fix_width(long, "%ld", delim, 20, "ld", check_long);
+ numbers_list_fix_width(long, "%ld", delim, 20, "li", check_long);
+ numbers_list_fix_width(unsigned long, "%lx", delim, 16, "lx", check_ulong);
+ numbers_list_fix_width(unsigned long, "0x%lx", delim, 18, "lx", check_ulong);
+ numbers_list_fix_width(long, "0x%lx", delim, 18, "li", check_long);
+#else
+ numbers_list_fix_width(unsigned long, "%lu", delim, 10, "lu", check_ulong);
+ numbers_list_fix_width(long, "%ld", delim, 11, "ld", check_long);
+ numbers_list_fix_width(long, "%ld", delim, 11, "li", check_long);
+ numbers_list_fix_width(unsigned long, "%lx", delim, 8, "lx", check_ulong);
+ numbers_list_fix_width(unsigned long, "0x%lx", delim, 10, "lx", check_ulong);
+ numbers_list_fix_width(long, "0x%lx", delim, 10, "li", check_long);
+#endif
+}
+
+static void numbers_list_field_width_d(struct kunit *test, const char *delim)
+{
+ numbers_list_fix_width(unsigned int, "%u", delim, 10, "u", check_uint);
+ numbers_list_fix_width(int, "%d", delim, 11, "d", check_int);
+ numbers_list_fix_width(int, "%d", delim, 11, "i", check_int);
+ numbers_list_fix_width(unsigned int, "%x", delim, 8, "x", check_uint);
+ numbers_list_fix_width(unsigned int, "0x%x", delim, 10, "x", check_uint);
+ numbers_list_fix_width(int, "0x%x", delim, 10, "i", check_int);
+}
+
+static void numbers_list_field_width_h(struct kunit *test, const char *delim)
+{
+ numbers_list_fix_width(unsigned short, "%hu", delim, 5, "hu", check_ushort);
+ numbers_list_fix_width(short, "%hd", delim, 6, "hd", check_short);
+ numbers_list_fix_width(short, "%hd", delim, 6, "hi", check_short);
+ numbers_list_fix_width(unsigned short, "%hx", delim, 4, "hx", check_ushort);
+ numbers_list_fix_width(unsigned short, "0x%hx", delim, 6, "hx", check_ushort);
+ numbers_list_fix_width(short, "0x%hx", delim, 6, "hi", check_short);
+}
+
+static void numbers_list_field_width_hh(struct kunit *test, const char *delim)
+{
+ numbers_list_fix_width(unsigned char, "%hhu", delim, 3, "hhu", check_uchar);
+ numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhd", check_char);
+ numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhi", check_char);
+ numbers_list_fix_width(unsigned char, "%hhx", delim, 2, "hhx", check_uchar);
+ numbers_list_fix_width(unsigned char, "0x%hhx", delim, 4, "hhx", check_uchar);
+ numbers_list_fix_width(signed char, "0x%hhx", delim, 4, "hhi", check_char);
+}
+
+/*
+ * List of numbers separated by delim. Each field width specifier is the
+ * maximum possible digits for the given type and base.
+ */
+static void numbers_list_field_width_typemax(struct kunit *test)
+{
+ const char * const *param = test->param_value;
+ const char *delim = *param;
+
+ numbers_list_field_width_ll(test, delim);
+ numbers_list_field_width_l(test, delim);
+ numbers_list_field_width_d(test, delim);
+ numbers_list_field_width_h(test, delim);
+ numbers_list_field_width_hh(test, delim);
+}
+
+static void numbers_list_field_width_val_ll(struct kunit *test, const char *delim)
+{
+ numbers_list_val_width(unsigned long long, "%llu", delim, "llu", check_ull);
+ numbers_list_val_width(long long, "%lld", delim, "lld", check_ll);
+ numbers_list_val_width(long long, "%lld", delim, "lli", check_ll);
+ numbers_list_val_width(unsigned long long, "%llx", delim, "llx", check_ull);
+ numbers_list_val_width(unsigned long long, "0x%llx", delim, "llx", check_ull);
+ numbers_list_val_width(long long, "0x%llx", delim, "lli", check_ll);
+}
+
+static void numbers_list_field_width_val_l(struct kunit *test, const char *delim)
+{
+ numbers_list_val_width(unsigned long, "%lu", delim, "lu", check_ulong);
+ numbers_list_val_width(long, "%ld", delim, "ld", check_long);
+ numbers_list_val_width(long, "%ld", delim, "li", check_long);
+ numbers_list_val_width(unsigned long, "%lx", delim, "lx", check_ulong);
+ numbers_list_val_width(unsigned long, "0x%lx", delim, "lx", check_ulong);
+ numbers_list_val_width(long, "0x%lx", delim, "li", check_long);
+}
+
+static void numbers_list_field_width_val_d(struct kunit *test, const char *delim)
+{
+ numbers_list_val_width(unsigned int, "%u", delim, "u", check_uint);
+ numbers_list_val_width(int, "%d", delim, "d", check_int);
+ numbers_list_val_width(int, "%d", delim, "i", check_int);
+ numbers_list_val_width(unsigned int, "%x", delim, "x", check_uint);
+ numbers_list_val_width(unsigned int, "0x%x", delim, "x", check_uint);
+ numbers_list_val_width(int, "0x%x", delim, "i", check_int);
+}
+
+static void numbers_list_field_width_val_h(struct kunit *test, const char *delim)
+{
+ numbers_list_val_width(unsigned short, "%hu", delim, "hu", check_ushort);
+ numbers_list_val_width(short, "%hd", delim, "hd", check_short);
+ numbers_list_val_width(short, "%hd", delim, "hi", check_short);
+ numbers_list_val_width(unsigned short, "%hx", delim, "hx", check_ushort);
+ numbers_list_val_width(unsigned short, "0x%hx", delim, "hx", check_ushort);
+ numbers_list_val_width(short, "0x%hx", delim, "hi", check_short);
+}
+
+static void numbers_list_field_width_val_hh(struct kunit *test, const char *delim)
+{
+ numbers_list_val_width(unsigned char, "%hhu", delim, "hhu", check_uchar);
+ numbers_list_val_width(signed char, "%hhd", delim, "hhd", check_char);
+ numbers_list_val_width(signed char, "%hhd", delim, "hhi", check_char);
+ numbers_list_val_width(unsigned char, "%hhx", delim, "hhx", check_uchar);
+ numbers_list_val_width(unsigned char, "0x%hhx", delim, "hhx", check_uchar);
+ numbers_list_val_width(signed char, "0x%hhx", delim, "hhi", check_char);
+}
+
+/*
+ * List of numbers separated by delim. Each field width specifier is the
+ * exact length of the corresponding value digits in the string being scanned.
+ */
+static void numbers_list_field_width_val_width(struct kunit *test)
+{
+ const char * const *param = test->param_value;
+ const char *delim = *param;
+
+ numbers_list_field_width_val_ll(test, delim);
+ numbers_list_field_width_val_l(test, delim);
+ numbers_list_field_width_val_d(test, delim);
+ numbers_list_field_width_val_h(test, delim);
+ numbers_list_field_width_val_hh(test, delim);
+}
+
+/*
+ * Slice a continuous string of digits without field delimiters, containing
+ * numbers of varying length, using the field width to extract each group
+ * of digits. For example, the hex values c0,3,bf01,303 would have the
+ * string representation "c03bf01303" and would be extracted with
+ * "%2x%1x%4x%3x".
+ */
+static void numbers_slice(struct kunit *test)
+{
+ const char *delim = "";
+
+ KUNIT_ASSERT_PTR_EQ(test, test->param_value, NULL);
+ test->param_value = &delim;
+
+ numbers_list_field_width_val_width(test);
+}
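+
+/*
+ * The example from the comment above, written out directly (illustrative
+ * only, not part of the suite):
+ *
+ *	unsigned int a, b, c, d;
+ *	int n = sscanf("c03bf01303", "%2x%1x%4x%3x", &a, &b, &c, &d);
+ *
+ * would yield n == 4 with a == 0xc0, b == 0x3, c == 0xbf01, d == 0x303.
+ */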
+
+#define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn) \
+do { \
+ const T expect[2] = { expect0, expect1 }; \
+ T result[2] = { (T)~expect[0], (T)~expect[1] }; \
+ \
+ _test(test, __FILE__, __LINE__, fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]);\
+} while (0)
+
+/*
+ * Number prefix is >= field width.
+ * Expected behaviour is derived from testing userland sscanf.
+ */
+static void numbers_prefix_overflow(struct kunit *test)
+{
+ /*
+ * Negative decimal with a field of width 1, should quit scanning
+ * and return 0.
+ */
+ test_number_prefix(long long, "-1 1", "%1lld %lld", 0, 0, 0, check_ll);
+ test_number_prefix(long, "-1 1", "%1ld %ld", 0, 0, 0, check_long);
+ test_number_prefix(int, "-1 1", "%1d %d", 0, 0, 0, check_int);
+ test_number_prefix(short, "-1 1", "%1hd %hd", 0, 0, 0, check_short);
+ test_number_prefix(signed char, "-1 1", "%1hhd %hhd", 0, 0, 0, check_char);
+
+ test_number_prefix(long long, "-1 1", "%1lli %lli", 0, 0, 0, check_ll);
+ test_number_prefix(long, "-1 1", "%1li %li", 0, 0, 0, check_long);
+ test_number_prefix(int, "-1 1", "%1i %i", 0, 0, 0, check_int);
+ test_number_prefix(short, "-1 1", "%1hi %hi", 0, 0, 0, check_short);
+ test_number_prefix(signed char, "-1 1", "%1hhi %hhi", 0, 0, 0, check_char);
+
+	/*
+	 * 0x prefix in a field of width 1: 0 is a valid digit, so it should
+	 * convert. The next field scan starts at the 'x', which isn't a
+	 * digit, so the scan quits with one field converted.
+	 */
+ test_number_prefix(unsigned long long, "0xA7", "%1llx%llx", 0, 0, 1, check_ull);
+ test_number_prefix(unsigned long, "0xA7", "%1lx%lx", 0, 0, 1, check_ulong);
+ test_number_prefix(unsigned int, "0xA7", "%1x%x", 0, 0, 1, check_uint);
+ test_number_prefix(unsigned short, "0xA7", "%1hx%hx", 0, 0, 1, check_ushort);
+ test_number_prefix(unsigned char, "0xA7", "%1hhx%hhx", 0, 0, 1, check_uchar);
+ test_number_prefix(long long, "0xA7", "%1lli%llx", 0, 0, 1, check_ll);
+ test_number_prefix(long, "0xA7", "%1li%lx", 0, 0, 1, check_long);
+ test_number_prefix(int, "0xA7", "%1i%x", 0, 0, 1, check_int);
+ test_number_prefix(short, "0xA7", "%1hi%hx", 0, 0, 1, check_short);
+ test_number_prefix(char, "0xA7", "%1hhi%hhx", 0, 0, 1, check_char);
+
+ /*
+ * 0x prefix in a field of width 2 using %x conversion: first field
+ * converts to 0. Next field scan starts at the character after "0x".
+ * Both fields will convert.
+ */
+ test_number_prefix(unsigned long long, "0xA7", "%2llx%llx", 0, 0xa7, 2, check_ull);
+ test_number_prefix(unsigned long, "0xA7", "%2lx%lx", 0, 0xa7, 2, check_ulong);
+ test_number_prefix(unsigned int, "0xA7", "%2x%x", 0, 0xa7, 2, check_uint);
+ test_number_prefix(unsigned short, "0xA7", "%2hx%hx", 0, 0xa7, 2, check_ushort);
+ test_number_prefix(unsigned char, "0xA7", "%2hhx%hhx", 0, 0xa7, 2, check_uchar);
+
+	/*
+	 * 0x prefix in a field of width 2 using %i conversion: first field
+	 * converts to 0. Next field scan starts at the character after "0x",
+	 * which will convert if it can be interpreted as decimal, but will
+	 * fail if it contains any hex digits (since there is no 0x prefix).
+	 */
+ test_number_prefix(long long, "0x67", "%2lli%lli", 0, 67, 2, check_ll);
+ test_number_prefix(long, "0x67", "%2li%li", 0, 67, 2, check_long);
+ test_number_prefix(int, "0x67", "%2i%i", 0, 67, 2, check_int);
+ test_number_prefix(short, "0x67", "%2hi%hi", 0, 67, 2, check_short);
+ test_number_prefix(char, "0x67", "%2hhi%hhi", 0, 67, 2, check_char);
+
+ test_number_prefix(long long, "0xA7", "%2lli%lli", 0, 0, 1, check_ll);
+ test_number_prefix(long, "0xA7", "%2li%li", 0, 0, 1, check_long);
+ test_number_prefix(int, "0xA7", "%2i%i", 0, 0, 1, check_int);
+ test_number_prefix(short, "0xA7", "%2hi%hi", 0, 0, 1, check_short);
+ test_number_prefix(char, "0xA7", "%2hhi%hhi", 0, 0, 1, check_char);
+}
+
+#define _test_simple_strtoxx(T, fn, gen_fmt, expect, base) \
+do { \
+ T got; \
+ char *endp; \
+ int len; \
+ \
+ len = snprintf(test_buffer, BUF_SIZE, gen_fmt, expect); \
+ got = (fn)(test_buffer, &endp, base); \
+ if (got != (expect)) { \
+ KUNIT_FAIL(test, #fn "(\"%s\", %d): got " gen_fmt " expected " gen_fmt, \
+ test_buffer, base, got, expect); \
+ } else if (endp != test_buffer + len) { \
+		KUNIT_FAIL(test, #fn "(\"%s\", %d) startp=0x%px got endp=0x%px expected 0x%px", \
+			   test_buffer, base, test_buffer,			\
+			   endp, test_buffer + len);				\
+ } \
+} while (0)
+
+#define test_simple_strtoxx(T, fn, gen_fmt, base) \
+do { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(numbers); i++) { \
+ _test_simple_strtoxx(T, fn, gen_fmt, (T)numbers[i], base); \
+ \
+ if (is_signed_type(T)) \
+ _test_simple_strtoxx(T, fn, gen_fmt, \
+ -(T)numbers[i], base); \
+ } \
+} while (0)
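+
+/*
+ * For reference, a minimal sketch of what the cases below exercise
+ * (hypothetical input):
+ *
+ *	char *end;
+ *	unsigned long long v = simple_strtoull("0x1a2b", &end, 0);
+ *
+ * expects v == 0x1a2b with end pointing at the terminating NUL.
+ */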
+
+static void test_simple_strtoull(struct kunit *test)
+{
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 10);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 0);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llx", 16);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 16);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 0);
+}
+
+static void test_simple_strtoll(struct kunit *test)
+{
+ test_simple_strtoxx(long long, simple_strtoll, "%lld", 10);
+ test_simple_strtoxx(long long, simple_strtoll, "%lld", 0);
+ test_simple_strtoxx(long long, simple_strtoll, "%llx", 16);
+ test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 16);
+ test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 0);
+}
+
+static void test_simple_strtoul(struct kunit *test)
+{
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 10);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 0);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lx", 16);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 16);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 0);
+}
+
+static void test_simple_strtol(struct kunit *test)
+{
+ test_simple_strtoxx(long, simple_strtol, "%ld", 10);
+ test_simple_strtoxx(long, simple_strtol, "%ld", 0);
+ test_simple_strtoxx(long, simple_strtol, "%lx", 16);
+ test_simple_strtoxx(long, simple_strtol, "0x%lx", 16);
+ test_simple_strtoxx(long, simple_strtol, "0x%lx", 0);
+}
+
+/* Selection of common delimiters/separators between numbers in a string. */
+static const char * const number_delimiters[] = {
+ " ", ":", ",", "-", "/",
+};
+
+static void number_delimiter_param_desc(const char * const *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "\"%s\"", *param);
+}
+
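+/*
+ * KUNIT_ARRAY_PARAM() generates number_delimiters_gen_params(), used by
+ * the parameterised cases below to run once per delimiter.
+ */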
+KUNIT_ARRAY_PARAM(number_delimiters, number_delimiters, number_delimiter_param_desc);
+
+static struct kunit_case scanf_test_cases[] = {
+ KUNIT_CASE(numbers_simple),
+ /* String with multiple numbers separated by delimiter. */
+ KUNIT_CASE_PARAM(numbers_list, number_delimiters_gen_params),
+ /* Field width may be longer than actual field digits. */
+ KUNIT_CASE_PARAM(numbers_list_field_width_typemax, number_delimiters_gen_params),
+ /* Each field width exactly length of actual field digits. */
+ KUNIT_CASE_PARAM(numbers_list_field_width_val_width, number_delimiters_gen_params),
+ /* Slice continuous sequence of digits using field widths. */
+ KUNIT_CASE(numbers_slice),
+ KUNIT_CASE(numbers_prefix_overflow),
+
+ KUNIT_CASE(test_simple_strtoull),
+ KUNIT_CASE(test_simple_strtoll),
+ KUNIT_CASE(test_simple_strtoul),
+ KUNIT_CASE(test_simple_strtol),
+ {}
+};
+
+static int scanf_suite_init(struct kunit_suite *suite)
+{
+ test_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!test_buffer)
+ return -ENOMEM;
+
+ fmt_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!fmt_buffer) {
+ kfree(test_buffer);
+ return -ENOMEM;
+ }
+
+ prandom_seed_state(&rnd_state, 3141592653589793238ULL);
+
+ return 0;
+}
+
+static void scanf_suite_exit(struct kunit_suite *suite)
+{
+ kfree(fmt_buffer);
+ kfree(test_buffer);
+}
+
+static struct kunit_suite scanf_test_suite = {
+ .name = "scanf",
+ .suite_init = scanf_suite_init,
+ .suite_exit = scanf_suite_exit,
+ .test_cases = scanf_test_cases,
+};
+
+kunit_test_suite(scanf_test_suite);
+
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_DESCRIPTION("Test cases for sscanf facility");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/tests/siphash_kunit.c b/lib/tests/siphash_kunit.c
new file mode 100644
index 000000000000..26bd4e8dc03e
--- /dev/null
+++ b/lib/tests/siphash_kunit.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * Test cases for siphash.c
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation covers SipHash2-4 as a secure PRF and
+ * HalfSipHash1-3/SipHash1-3 as insecure PRFs suitable only for
+ * hashtables.
+ */
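+
+/*
+ * A minimal usage sketch of the API under test (illustrative only;
+ * 'data' and 'data_len' are hypothetical):
+ *
+ *	siphash_key_t key;
+ *	u64 h;
+ *
+ *	get_random_bytes(&key, sizeof(key));
+ *	h = siphash(data, data_len, &key);
+ */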
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/siphash.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+
+/* Test vectors taken from reference source available at:
+ * https://github.com/veorq/SipHash
+ */
+
+static const siphash_key_t test_key_siphash =
+ {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u64 test_vectors_siphash[64] = {
+ 0x726fdb47dd0e0e31ULL, 0x74f839c593dc67fdULL, 0x0d6c8009d9a94f5aULL,
+ 0x85676696d7fb7e2dULL, 0xcf2794e0277187b7ULL, 0x18765564cd99a68dULL,
+ 0xcbc9466e58fee3ceULL, 0xab0200f58b01d137ULL, 0x93f5f5799a932462ULL,
+ 0x9e0082df0ba9e4b0ULL, 0x7a5dbbc594ddb9f3ULL, 0xf4b32f46226bada7ULL,
+ 0x751e8fbc860ee5fbULL, 0x14ea5627c0843d90ULL, 0xf723ca908e7af2eeULL,
+ 0xa129ca6149be45e5ULL, 0x3f2acc7f57c29bdbULL, 0x699ae9f52cbe4794ULL,
+ 0x4bc1b3f0968dd39cULL, 0xbb6dc91da77961bdULL, 0xbed65cf21aa2ee98ULL,
+ 0xd0f2cbb02e3b67c7ULL, 0x93536795e3a33e88ULL, 0xa80c038ccd5ccec8ULL,
+ 0xb8ad50c6f649af94ULL, 0xbce192de8a85b8eaULL, 0x17d835b85bbb15f3ULL,
+ 0x2f2e6163076bcfadULL, 0xde4daaaca71dc9a5ULL, 0xa6a2506687956571ULL,
+ 0xad87a3535c49ef28ULL, 0x32d892fad841c342ULL, 0x7127512f72f27cceULL,
+ 0xa7f32346f95978e3ULL, 0x12e0b01abb051238ULL, 0x15e034d40fa197aeULL,
+ 0x314dffbe0815a3b4ULL, 0x027990f029623981ULL, 0xcadcd4e59ef40c4dULL,
+ 0x9abfd8766a33735cULL, 0x0e3ea96b5304a7d0ULL, 0xad0c42d6fc585992ULL,
+ 0x187306c89bc215a9ULL, 0xd4a60abcf3792b95ULL, 0xf935451de4f21df2ULL,
+ 0xa9538f0419755787ULL, 0xdb9acddff56ca510ULL, 0xd06c98cd5c0975ebULL,
+ 0xe612a3cb9ecba951ULL, 0xc766e62cfcadaf96ULL, 0xee64435a9752fe72ULL,
+ 0xa192d576b245165aULL, 0x0a8787bf8ecb74b2ULL, 0x81b3e73d20b49b6fULL,
+ 0x7fa8220ba3b2eceaULL, 0x245731c13ca42499ULL, 0xb78dbfaf3a8d83bdULL,
+ 0xea1ad565322a1a0bULL, 0x60e61c23a3795013ULL, 0x6606d7e446282b93ULL,
+ 0x6ca4ecb15c5f91e1ULL, 0x9f626da15c9625f3ULL, 0xe51b38608ef25f57ULL,
+ 0x958a324ceb064572ULL
+};
+
+#if BITS_PER_LONG == 64
+static const hsiphash_key_t test_key_hsiphash =
+ {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u32 test_vectors_hsiphash[64] = {
+ 0x050fc4dcU, 0x7d57ca93U, 0x4dc7d44dU,
+ 0xe7ddf7fbU, 0x88d38328U, 0x49533b67U,
+ 0xc59f22a7U, 0x9bb11140U, 0x8d299a8eU,
+ 0x6c063de4U, 0x92ff097fU, 0xf94dc352U,
+ 0x57b4d9a2U, 0x1229ffa7U, 0xc0f95d34U,
+ 0x2a519956U, 0x7d908b66U, 0x63dbd80cU,
+ 0xb473e63eU, 0x8d297d1cU, 0xa6cce040U,
+ 0x2b45f844U, 0xa320872eU, 0xdae6c123U,
+ 0x67349c8cU, 0x705b0979U, 0xca9913a5U,
+ 0x4ade3b35U, 0xef6cd00dU, 0x4ab1e1f4U,
+ 0x43c5e663U, 0x8c21d1bcU, 0x16a7b60dU,
+ 0x7a8ff9bfU, 0x1f2a753eU, 0xbf186b91U,
+ 0xada26206U, 0xa3c33057U, 0xae3a36a1U,
+ 0x7b108392U, 0x99e41531U, 0x3f1ad944U,
+ 0xc8138825U, 0xc28949a6U, 0xfaf8876bU,
+ 0x9f042196U, 0x68b1d623U, 0x8b5114fdU,
+ 0xdf074c46U, 0x12cc86b3U, 0x0a52098fU,
+ 0x9d292f9aU, 0xa2f41f12U, 0x43a71ed0U,
+ 0x73f0bce6U, 0x70a7e980U, 0x243c6d75U,
+ 0xfdb71513U, 0xa67d8a08U, 0xb7e8f148U,
+ 0xf7a644eeU, 0x0f1837f2U, 0x4b6694e0U,
+ 0xb7bbb3a8U
+};
+#else
+static const hsiphash_key_t test_key_hsiphash =
+ {{ 0x03020100U, 0x07060504U }};
+
+static const u32 test_vectors_hsiphash[64] = {
+ 0x5814c896U, 0xe7e864caU, 0xbc4b0e30U,
+ 0x01539939U, 0x7e059ea6U, 0x88e3d89bU,
+ 0xa0080b65U, 0x9d38d9d6U, 0x577999b1U,
+ 0xc839caedU, 0xe4fa32cfU, 0x959246eeU,
+ 0x6b28096cU, 0x66dd9cd6U, 0x16658a7cU,
+ 0xd0257b04U, 0x8b31d501U, 0x2b1cd04bU,
+ 0x06712339U, 0x522aca67U, 0x911bb605U,
+ 0x90a65f0eU, 0xf826ef7bU, 0x62512debU,
+ 0x57150ad7U, 0x5d473507U, 0x1ec47442U,
+ 0xab64afd3U, 0x0a4100d0U, 0x6d2ce652U,
+ 0x2331b6a3U, 0x08d8791aU, 0xbc6dda8dU,
+ 0xe0f6c934U, 0xb0652033U, 0x9b9851ccU,
+ 0x7c46fb7fU, 0x732ba8cbU, 0xf142997aU,
+ 0xfcc9aa1bU, 0x05327eb2U, 0xe110131cU,
+ 0xf9e5e7c0U, 0xa7d708a6U, 0x11795ab1U,
+ 0x65671619U, 0x9f5fff91U, 0xd89c5267U,
+ 0x007783ebU, 0x95766243U, 0xab639262U,
+ 0x9c7e1390U, 0xc368dda6U, 0x38ddc455U,
+ 0xfa13d379U, 0x979ea4e8U, 0x53ecd77eU,
+ 0x2ee80657U, 0x33dbb66aU, 0xae3f0577U,
+ 0x88b4c4ccU, 0x3e7f480bU, 0x74c1ebf8U,
+ 0x87178304U
+};
+#endif
+
+#define chk(hash, vector, fmt...) \
+ KUNIT_EXPECT_EQ_MSG(test, hash, vector, fmt)
+
+static void siphash_test(struct kunit *test)
+{
+ u8 in[64] __aligned(SIPHASH_ALIGNMENT);
+ u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT);
+ u8 i;
+
+ for (i = 0; i < 64; ++i) {
+ in[i] = i;
+ in_unaligned[i + 1] = i;
+ chk(siphash(in, i, &test_key_siphash),
+ test_vectors_siphash[i],
+ "siphash self-test aligned %u: FAIL", i + 1);
+ chk(siphash(in_unaligned + 1, i, &test_key_siphash),
+ test_vectors_siphash[i],
+ "siphash self-test unaligned %u: FAIL", i + 1);
+ chk(hsiphash(in, i, &test_key_hsiphash),
+ test_vectors_hsiphash[i],
+ "hsiphash self-test aligned %u: FAIL", i + 1);
+ chk(hsiphash(in_unaligned + 1, i, &test_key_hsiphash),
+ test_vectors_hsiphash[i],
+ "hsiphash self-test unaligned %u: FAIL", i + 1);
+ }
+ chk(siphash_1u64(0x0706050403020100ULL, &test_key_siphash),
+ test_vectors_siphash[8],
+ "siphash self-test 1u64: FAIL");
+ chk(siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ &test_key_siphash),
+ test_vectors_siphash[16],
+ "siphash self-test 2u64: FAIL");
+ chk(siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ 0x1716151413121110ULL, &test_key_siphash),
+ test_vectors_siphash[24],
+ "siphash self-test 3u64: FAIL");
+ chk(siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ 0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL,
+ &test_key_siphash),
+ test_vectors_siphash[32],
+ "siphash self-test 4u64: FAIL");
+ chk(siphash_1u32(0x03020100U, &test_key_siphash),
+ test_vectors_siphash[4],
+ "siphash self-test 1u32: FAIL");
+ chk(siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash),
+ test_vectors_siphash[8],
+ "siphash self-test 2u32: FAIL");
+ chk(siphash_3u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, &test_key_siphash),
+ test_vectors_siphash[12],
+ "siphash self-test 3u32: FAIL");
+ chk(siphash_4u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash),
+ test_vectors_siphash[16],
+ "siphash self-test 4u32: FAIL");
+ chk(hsiphash_1u32(0x03020100U, &test_key_hsiphash),
+ test_vectors_hsiphash[4],
+ "hsiphash self-test 1u32: FAIL");
+ chk(hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash),
+ test_vectors_hsiphash[8],
+ "hsiphash self-test 2u32: FAIL");
+ chk(hsiphash_3u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, &test_key_hsiphash),
+ test_vectors_hsiphash[12],
+ "hsiphash self-test 3u32: FAIL");
+ chk(hsiphash_4u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash),
+ test_vectors_hsiphash[16],
+ "hsiphash self-test 4u32: FAIL");
+}
+
+static struct kunit_case siphash_test_cases[] = {
+ KUNIT_CASE(siphash_test),
+ {}
+};
+
+static struct kunit_suite siphash_test_suite = {
+ .name = "siphash",
+ .test_cases = siphash_test_cases,
+};
+
+kunit_test_suite(siphash_test_suite);
+
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
+MODULE_DESCRIPTION("Test cases for siphash.c");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/tests/slub_kunit.c b/lib/tests/slub_kunit.c
new file mode 100644
index 000000000000..d47c472b0520
--- /dev/null
+++ b/lib/tests/slub_kunit.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/delay.h>
+#include "../mm/slab.h"
+
+static struct kunit_resource resource;
+static int slab_errors;
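+/*
+ * SLUB's debug checks look up the "slab_errors" named KUnit resource
+ * (registered in test_init() below) and bump this counter instead of
+ * tainting the kernel while a test is running.
+ */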
+
+/*
+ * Wrapper for kmem_cache_create() that drops the 'align' and 'ctor'
+ * parameters and sets the SLAB_SKIP_KFENCE flag, so that objects never
+ * come from the kfence pool, where a corruption could be caught by both
+ * our test and the kfence sanity checks.
+ */
+static struct kmem_cache *test_kmem_cache_create(const char *name,
+ unsigned int size, slab_flags_t flags)
+{
+ struct kmem_cache *s = kmem_cache_create(name, size, 0,
+ (flags | SLAB_NO_USER_FLAGS), NULL);
+ s->flags |= SLAB_SKIP_KFENCE;
+ return s;
+}
+
+static void test_clobber_zone(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
+ SLAB_RED_ZONE);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kasan_disable_current();
+ p[64] = 0x12;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_free(s, p);
+ kmem_cache_destroy(s);
+}
+
+#ifndef CONFIG_KASAN
+static void test_next_pointer(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
+ 64, SLAB_POISON);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+ unsigned long tmp;
+ unsigned long *ptr_addr;
+
+ kmem_cache_free(s, p);
+
+ ptr_addr = (unsigned long *)(p + s->offset);
+ tmp = *ptr_addr;
+ p[s->offset] = ~p[s->offset];
+
+	/*
+	 * Expecting three errors: one for the corrupted freechain, one for
+	 * the wrong count of objects in use, and a third for fixing the
+	 * broken cache.
+	 */
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 3, slab_errors);
+
+	/*
+	 * Try to repair the corrupted freepointer.
+	 * Still expecting two errors: the first for the wrong count of
+	 * objects in use, the second for fixing the broken cache.
+	 */
+ *ptr_addr = tmp;
+ slab_errors = 0;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ /*
+ * Previous validation repaired the count of objects in use.
+ * Now expecting no error.
+ */
+ slab_errors = 0;
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+
+static void test_first_word(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
+ 64, SLAB_POISON);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_free(s, p);
+ *p = 0x78;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+
+static void test_clobber_50th_byte(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
+ 64, SLAB_POISON);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_free(s, p);
+ p[50] = 0x9a;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+#endif
+
+static void test_clobber_redzone_free(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
+ SLAB_RED_ZONE);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kasan_disable_current();
+ kmem_cache_free(s, p);
+ p[64] = 0xab;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_destroy(s);
+}
+
+static void test_kmalloc_redzone_access(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
+ SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
+ u8 *p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 18));
+
+ kasan_disable_current();
+
+ /* Suppress the -Warray-bounds warning */
+ OPTIMIZER_HIDE_VAR(p);
+ p[18] = 0xab;
+ p[19] = 0xab;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_free(s, p);
+ kmem_cache_destroy(s);
+}
+
+struct test_kfree_rcu_struct {
+ struct rcu_head rcu;
+};
+
+static void test_kfree_rcu(struct kunit *test)
+{
+ struct kmem_cache *s;
+ struct test_kfree_rcu_struct *p;
+
+ if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
+ kunit_skip(test, "can't do kfree_rcu() when test is built-in");
+
+ s = test_kmem_cache_create("TestSlub_kfree_rcu",
+ sizeof(struct test_kfree_rcu_struct),
+ SLAB_NO_MERGE);
+ p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kfree_rcu(p, rcu);
+ kmem_cache_destroy(s);
+
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+}
+
+struct cache_destroy_work {
+ struct work_struct work;
+ struct kmem_cache *s;
+};
+
+static void cache_destroy_workfn(struct work_struct *w)
+{
+ struct cache_destroy_work *cdw;
+
+ cdw = container_of(w, struct cache_destroy_work, work);
+ kmem_cache_destroy(cdw->s);
+}
+
+#define KMEM_CACHE_DESTROY_NR 10
+
+static void test_kfree_rcu_wq_destroy(struct kunit *test)
+{
+ struct test_kfree_rcu_struct *p;
+ struct cache_destroy_work cdw;
+ struct workqueue_struct *wq;
+ struct kmem_cache *s;
+ unsigned int delay;
+ int i;
+
+ if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
+ kunit_skip(test, "can't do kfree_rcu() when test is built-in");
+
+ INIT_WORK_ONSTACK(&cdw.work, cache_destroy_workfn);
+ wq = alloc_workqueue("test_kfree_rcu_destroy_wq",
+ WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+
+ if (!wq)
+ kunit_skip(test, "failed to alloc wq");
+
+ for (i = 0; i < KMEM_CACHE_DESTROY_NR; i++) {
+ s = test_kmem_cache_create("TestSlub_kfree_rcu_wq_destroy",
+ sizeof(struct test_kfree_rcu_struct),
+ SLAB_NO_MERGE);
+
+ if (!s)
+ kunit_skip(test, "failed to create cache");
+
+ delay = get_random_u8();
+ p = kmem_cache_alloc(s, GFP_KERNEL);
+ kfree_rcu(p, rcu);
+
+ cdw.s = s;
+
+ msleep(delay);
+ queue_work(wq, &cdw.work);
+ flush_work(&cdw.work);
+ }
+
+ destroy_workqueue(wq);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+}
+
+static void test_leak_destroy(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
+ 64, SLAB_NO_MERGE);
+ kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_destroy(s);
+
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+}
+
+static void test_krealloc_redzone_zeroing(struct kunit *test)
+{
+ u8 *p;
+ int i;
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_krealloc", 64,
+ SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
+
+ p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 48));
+ memset(p, 0xff, 48);
+
+ kasan_disable_current();
+ OPTIMIZER_HIDE_VAR(p);
+
+ /* Test shrink */
+ p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
+ for (i = 40; i < 64; i++)
+ KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
+
+ /* Test grow within the same 64B kmalloc object */
+ p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
+ for (i = 40; i < 56; i++)
+ KUNIT_EXPECT_EQ(test, p[i], 0);
+ for (i = 56; i < 64; i++)
+ KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+
+ memset(p, 0xff, 56);
+ /* Test grow with allocating a bigger 128B object */
+ p = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);
+ for (i = 0; i < 56; i++)
+ KUNIT_EXPECT_EQ(test, p[i], 0xff);
+ for (i = 56; i < 112; i++)
+ KUNIT_EXPECT_EQ(test, p[i], 0);
+
+ kfree(p);
+ kasan_enable_current();
+ kmem_cache_destroy(s);
+}
+
+static int test_init(struct kunit *test)
+{
+ slab_errors = 0;
+
+ kunit_add_named_resource(test, NULL, NULL, &resource,
+ "slab_errors", &slab_errors);
+ return 0;
+}
+
+static struct kunit_case test_cases[] = {
+ KUNIT_CASE(test_clobber_zone),
+
+#ifndef CONFIG_KASAN
+ KUNIT_CASE(test_next_pointer),
+ KUNIT_CASE(test_first_word),
+ KUNIT_CASE(test_clobber_50th_byte),
+#endif
+
+ KUNIT_CASE(test_clobber_redzone_free),
+ KUNIT_CASE(test_kmalloc_redzone_access),
+ KUNIT_CASE(test_kfree_rcu),
+ KUNIT_CASE(test_kfree_rcu_wq_destroy),
+ KUNIT_CASE(test_leak_destroy),
+ KUNIT_CASE(test_krealloc_redzone_zeroing),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "slub_test",
+ .init = test_init,
+ .test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/stackinit_kunit.c b/lib/tests/stackinit_kunit.c
new file mode 100644
index 000000000000..63aa78e6f5c1
--- /dev/null
+++ b/lib/tests/stackinit_kunit.c
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test cases for compiler-based stack variable zeroing via
+ * -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
+ * For example, see:
+ * "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst
+ * ./tools/testing/kunit/kunit.py run stackinit [--raw_output] \
+ * --make_option LLVM=1 \
+ * --kconfig_add CONFIG_INIT_STACK_ALL_ZERO=y
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+/* Exfiltration buffer. */
+#define MAX_VAR_SIZE 128
+static u8 check_buf[MAX_VAR_SIZE];
+
+/* Character array to trigger stack protector in all functions. */
+#define VAR_BUFFER 32
+
+/* Volatile mask to convince compiler to copy memory with 0xff. */
+static volatile u8 forced_mask = 0xff;
+
+/* Location and size tracking to validate fill and test are colocated. */
+static void *fill_start, *target_start;
+static size_t fill_size, target_size;
+
+static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
+ char *needle_start, size_t needle_size)
+{
+ if (needle_start >= haystack_start &&
+ needle_start + needle_size <= haystack_start + haystack_size)
+ return true;
+ return false;
+}
+
+/* Whether the test is expected to fail. */
+#define WANT_SUCCESS 0
+#define XFAIL 1
+
+#define DO_NOTHING_TYPE_SCALAR(var_type) var_type
+#define DO_NOTHING_TYPE_STRING(var_type) void
+#define DO_NOTHING_TYPE_STRUCT(var_type) void
+#define DO_NOTHING_TYPE_UNION(var_type) void
+
+#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr)
+#define DO_NOTHING_RETURN_STRING(ptr) /**/
+#define DO_NOTHING_RETURN_STRUCT(ptr) /**/
+#define DO_NOTHING_RETURN_UNION(ptr) /**/
+
+#define DO_NOTHING_CALL_SCALAR(var, name) \
+ (var) = do_nothing_ ## name(&(var))
+#define DO_NOTHING_CALL_STRING(var, name) \
+ do_nothing_ ## name(var)
+#define DO_NOTHING_CALL_STRUCT(var, name) \
+ do_nothing_ ## name(&(var))
+#define DO_NOTHING_CALL_UNION(var, name) \
+ do_nothing_ ## name(&(var))
+
+#define FETCH_ARG_SCALAR(var) &var
+#define FETCH_ARG_STRING(var) var
+#define FETCH_ARG_STRUCT(var) &var
+#define FETCH_ARG_UNION(var) &var
+
+/*
+ * On m68k, if the leaf function test variable is longer than 8 bytes,
+ * the start of the stack frame moves. 8 is sufficiently large to
+ * test m68k char arrays, but leave it at 16 for other architectures.
+ */
+#ifdef CONFIG_M68K
+#define FILL_SIZE_STRING 8
+#define FILL_SIZE_ARRAY 2
+#else
+#define FILL_SIZE_STRING 16
+#define FILL_SIZE_ARRAY 8
+#endif
+
+#define INIT_CLONE_SCALAR /**/
+#define INIT_CLONE_STRING [FILL_SIZE_STRING]
+#define INIT_CLONE_STRUCT /**/
+#define INIT_CLONE_UNION /**/
+
+#define ZERO_CLONE_SCALAR(zero) memset(&(zero), 0x00, sizeof(zero))
+#define ZERO_CLONE_STRING(zero) memset(&(zero), 0x00, sizeof(zero))
+/*
+ * For the struct, intentionally poison padding to see if it gets
+ * copied out in direct assignments.
+ */
+#define ZERO_CLONE_STRUCT(zero) \
+ do { \
+ memset(&(zero), 0xFF, sizeof(zero)); \
+ zero.one = 0; \
+ zero.two = 0; \
+ zero.three = 0; \
+ zero.four = 0; \
+ } while (0)
+#define ZERO_CLONE_UNION(zero) ZERO_CLONE_STRUCT(zero)
+
+#define INIT_SCALAR_none(var_type) /**/
+#define INIT_SCALAR_zero(var_type) = 0
+
+#define INIT_STRING_none(var_type) [FILL_SIZE_STRING] /**/
+#define INIT_STRING_zero(var_type) [FILL_SIZE_STRING] = { }
+
+#define INIT_STRUCT_none(var_type) /**/
+#define INIT_STRUCT_zero(var_type) = { }
+#define INIT_STRUCT_old_zero(var_type) = { 0 }
+
+
+#define __static_partial { .two = 0, }
+#define __static_all { .one = 0, \
+ .two = 0, \
+ .three = 0, \
+ .four = 0, \
+ }
+#define __dynamic_partial { .two = arg->two, }
+#define __dynamic_all { .one = arg->one, \
+ .two = arg->two, \
+ .three = arg->three, \
+ .four = arg->four, \
+ }
+#define __runtime_partial var.two = 0
+#define __runtime_all var.one = 0; \
+ var.two = 0; \
+ var.three = 0; \
+ var.four = 0
+
+#define INIT_STRUCT_static_partial(var_type) \
+ = __static_partial
+#define INIT_STRUCT_static_all(var_type) \
+ = __static_all
+#define INIT_STRUCT_dynamic_partial(var_type) \
+ = __dynamic_partial
+#define INIT_STRUCT_dynamic_all(var_type) \
+ = __dynamic_all
+#define INIT_STRUCT_runtime_partial(var_type) \
+ ; __runtime_partial
+#define INIT_STRUCT_runtime_all(var_type) \
+ ; __runtime_all
+
+#define INIT_STRUCT_assigned_static_partial(var_type) \
+ ; var = (var_type)__static_partial
+#define INIT_STRUCT_assigned_static_all(var_type) \
+ ; var = (var_type)__static_all
+#define INIT_STRUCT_assigned_dynamic_partial(var_type) \
+ ; var = (var_type)__dynamic_partial
+#define INIT_STRUCT_assigned_dynamic_all(var_type) \
+ ; var = (var_type)__dynamic_all
+
+#define INIT_STRUCT_assigned_copy(var_type) \
+ ; var = *(arg)
+
+/* Union initialization is the same as structs. */
+#define INIT_UNION_none(var_type) INIT_STRUCT_none(var_type)
+#define INIT_UNION_zero(var_type) INIT_STRUCT_zero(var_type)
+#define INIT_UNION_old_zero(var_type) INIT_STRUCT_old_zero(var_type)
+
+#define INIT_UNION_static_partial(var_type) \
+ INIT_STRUCT_static_partial(var_type)
+#define INIT_UNION_static_all(var_type) \
+ INIT_STRUCT_static_all(var_type)
+#define INIT_UNION_dynamic_partial(var_type) \
+ INIT_STRUCT_dynamic_partial(var_type)
+#define INIT_UNION_dynamic_all(var_type) \
+ INIT_STRUCT_dynamic_all(var_type)
+#define INIT_UNION_runtime_partial(var_type) \
+ INIT_STRUCT_runtime_partial(var_type)
+#define INIT_UNION_runtime_all(var_type) \
+ INIT_STRUCT_runtime_all(var_type)
+#define INIT_UNION_assigned_static_partial(var_type) \
+ INIT_STRUCT_assigned_static_partial(var_type)
+#define INIT_UNION_assigned_static_all(var_type) \
+ INIT_STRUCT_assigned_static_all(var_type)
+#define INIT_UNION_assigned_dynamic_partial(var_type) \
+ INIT_STRUCT_assigned_dynamic_partial(var_type)
+#define INIT_UNION_assigned_dynamic_all(var_type) \
+ INIT_STRUCT_assigned_dynamic_all(var_type)
+#define INIT_UNION_assigned_copy(var_type) \
+ INIT_STRUCT_assigned_copy(var_type)
+
+/*
+ * The "did we actually fill the stack?" check value needs
+ * to be neither 0 nor any of the "pattern" bytes. The
+ * pattern bytes are compiler, architecture, and type based,
+ * so we have to pick a value that never appears for those
+ * combinations. Use 0x99 which is not 0xFF, 0xFE, nor 0xAA.
+ */
+#define FILL_BYTE 0x99
+
+/*
+ * Parameters used by the DEFINE_TEST*() macros below:
+ * @name: unique string name for the test
+ * @var_type: type to be tested for zeroing initialization
+ * @which: is this a SCALAR, STRING, or STRUCT type?
+ * @init_level: what kind of initialization is performed (DEFINE_TEST() only)
+ * @xfail: is this test expected to fail?
+ */
+#define DEFINE_TEST_DRIVER(name, var_type, which, xfail) \
+/* KUnit case: reports failure via KUNIT_ASSERT_*(), not a return value. */ \
+static noinline void test_ ## name (struct kunit *test) \
+{ \
+ var_type zero INIT_CLONE_ ## which; \
+ int ignored; \
+ u8 sum = 0, i; \
+ \
+ /* Notice when a new test is larger than expected. */ \
+ BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \
+ \
+ /* Fill clone type with zero for per-field init. */ \
+ ZERO_CLONE_ ## which(zero); \
+ /* Clear entire check buffer for 0xFF overlap test. */ \
+ memset(check_buf, 0x00, sizeof(check_buf)); \
+ /* Fill stack with FILL_BYTE. */ \
+ ignored = leaf_ ##name((unsigned long)&ignored, 1, \
+ FETCH_ARG_ ## which(zero)); \
+ /* Verify all bytes overwritten with FILL_BYTE. */ \
+ for (sum = 0, i = 0; i < target_size; i++) \
+ sum += (check_buf[i] != FILL_BYTE); \
+ /* Clear entire check buffer for later bit tests. */ \
+ memset(check_buf, 0x00, sizeof(check_buf)); \
+ /* Extract stack-defined variable contents. */ \
+ ignored = leaf_ ##name((unsigned long)&ignored, 0, \
+ FETCH_ARG_ ## which(zero)); \
+ /* \
+ * Delay the sum test to here to do as little as \
+ * possible between the two leaf function calls. \
+ */ \
+ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
+ "leaf fill was not 0x%02X!?\n", \
+ FILL_BYTE); \
+ \
+ /* Validate that compiler lined up fill and target. */ \
+ KUNIT_ASSERT_TRUE_MSG(test, \
+ stackinit_range_contains(fill_start, fill_size, \
+ target_start, target_size), \
+ "stackframe was not the same between calls!? " \
+ "(fill %zu wide, target offset by %d)\n", \
+ fill_size, \
+ (int)((ssize_t)(uintptr_t)fill_start - \
+ (ssize_t)(uintptr_t)target_start)); \
+ \
+ /* Validate check region has no FILL_BYTE bytes. */ \
+ for (sum = 0, i = 0; i < target_size; i++) \
+ sum += (check_buf[i] == FILL_BYTE); \
+ \
+ if (sum != 0 && xfail) \
+ kunit_skip(test, \
+ "XFAIL uninit bytes: %d\n", \
+ sum); \
+ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
+ "uninit bytes: %d\n", sum); \
+}
+#define DEFINE_TEST(name, var_type, which, init_level, xfail) \
+/* no-op to force compiler into ignoring "uninitialized" vars */\
+static noinline DO_NOTHING_TYPE_ ## which(var_type) \
+do_nothing_ ## name(var_type *ptr) \
+{ \
+ OPTIMIZER_HIDE_VAR(ptr); \
+ /* Will always be true, but compiler doesn't know. */ \
+ if ((unsigned long)ptr > 0x2) \
+ return DO_NOTHING_RETURN_ ## which(ptr); \
+ else \
+ return DO_NOTHING_RETURN_ ## which(ptr + 1); \
+} \
+static noinline int leaf_ ## name(unsigned long sp, bool fill, \
+ var_type *arg) \
+{ \
+ char buf[VAR_BUFFER]; \
+ var_type var \
+ INIT_ ## which ## _ ## init_level(var_type); \
+ \
+ target_start = &var; \
+ target_size = sizeof(var); \
+ /* \
+ * Keep this buffer around to make sure we've got a \
+ * stack frame of SOME kind... \
+ */ \
+ memset(buf, (char)(sp & 0xff), sizeof(buf)); \
+ /* Fill variable with FILL_BYTE. */ \
+ if (fill) { \
+ fill_start = &var; \
+ fill_size = sizeof(var); \
+ memset(fill_start, \
+ FILL_BYTE & forced_mask, \
+ fill_size); \
+ } \
+ \
+ /* Silence "never initialized" warnings. */ \
+ DO_NOTHING_CALL_ ## which(var, name); \
+ \
+ /* Exfiltrate "var". */ \
+ memcpy(check_buf, target_start, target_size); \
+ \
+ return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \
+} \
+DEFINE_TEST_DRIVER(name, var_type, which, xfail)
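+
+/*
+ * For example (sketch), DEFINE_SCALAR_TEST(u8, zero, ALWAYS_PASS) below
+ * expands through DEFINE_TEST(u8_zero, u8, SCALAR, zero, ALWAYS_PASS)
+ * into do_nothing_u8_zero(), leaf_u8_zero() and the KUnit case
+ * test_u8_zero().
+ */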
+
+/* Structure with no padding. */
+struct test_packed {
+ unsigned long one;
+ unsigned long two;
+ unsigned long three;
+ unsigned long four;
+};
+
+/* Simple structure with padding likely to be covered by compiler. */
+struct test_small_hole {
+ size_t one;
+ char two;
+ /* 3 byte padding hole here. */
+ int three;
+ unsigned long four;
+};
+
+/* Trigger unhandled padding in a structure. */
+struct test_big_hole {
+ u8 one;
+ u8 two;
+ u8 three;
+ /* 61 byte padding hole here. */
+ u8 four __aligned(64);
+} __aligned(64);
+
+struct test_trailing_hole {
+ char *one;
+ char *two;
+ char *three;
+ char four;
+ /* "sizeof(unsigned long) - 1" byte padding hole here. */
+};
+
+/* Test if STRUCTLEAK is clearing structs with __user fields. */
+struct test_user {
+ u8 one;
+ unsigned long two;
+ char __user *three;
+ unsigned long four;
+};
+
+/* No padding: all members are the same size. */
+union test_same_sizes {
+ unsigned long one;
+ unsigned long two;
+ unsigned long three;
+ unsigned long four;
+};
+
+/* Mismatched sizes, with one and two being small */
+union test_small_start {
+ char one:1;
+ char two;
+ short three;
+ unsigned long four;
+ struct big_struct {
+ unsigned long array[FILL_SIZE_ARRAY];
+ } big;
+};
+
+/* Mismatched sizes, with three and four being small */
+union test_small_end {
+ short one;
+ unsigned long two;
+ char three:1;
+ char four;
+};
+
+#define ALWAYS_PASS WANT_SUCCESS
+#define ALWAYS_FAIL XFAIL
+
+#ifdef CONFIG_INIT_STACK_NONE
+# define USER_PASS XFAIL
+# define BYREF_PASS XFAIL
+# define STRONG_PASS XFAIL
+#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)
+# define USER_PASS WANT_SUCCESS
+# define BYREF_PASS XFAIL
+# define STRONG_PASS XFAIL
+#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)
+# define USER_PASS WANT_SUCCESS
+# define BYREF_PASS WANT_SUCCESS
+# define STRONG_PASS XFAIL
+#else
+# define USER_PASS WANT_SUCCESS
+# define BYREF_PASS WANT_SUCCESS
+# define STRONG_PASS WANT_SUCCESS
+#endif
+
+#define DEFINE_SCALAR_TEST(name, init, xfail) \
+ DEFINE_TEST(name ## _ ## init, name, SCALAR, \
+ init, xfail)
+
+#define DEFINE_SCALAR_TESTS(init, xfail) \
+ DEFINE_SCALAR_TEST(u8, init, xfail); \
+ DEFINE_SCALAR_TEST(u16, init, xfail); \
+ DEFINE_SCALAR_TEST(u32, init, xfail); \
+ DEFINE_SCALAR_TEST(u64, init, xfail); \
+ DEFINE_TEST(char_array_ ## init, unsigned char, \
+ STRING, init, xfail)
+
+#define DEFINE_STRUCT_TEST(name, init, xfail) \
+ DEFINE_TEST(name ## _ ## init, \
+ struct test_ ## name, STRUCT, init, \
+ xfail)
+
+#define DEFINE_UNION_TEST(name, init, xfail) \
+ DEFINE_TEST(name ## _ ## init, \
+ union test_ ## name, STRUCT, init, \
+ xfail)
+
+#define DEFINE_STRUCT_TESTS(init, xfail) \
+ DEFINE_STRUCT_TEST(small_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(big_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(trailing_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(packed, init, xfail)
+
+#define DEFINE_STRUCT_INITIALIZER_TESTS(base, xfail) \
+ DEFINE_STRUCT_TESTS(base ## _ ## partial, \
+ xfail); \
+ DEFINE_STRUCT_TESTS(base ## _ ## all, xfail)
+
+#define DEFINE_UNION_INITIALIZER_TESTS(base, xfail) \
+ DEFINE_UNION_TESTS(base ## _ ## partial, \
+ xfail); \
+ DEFINE_UNION_TESTS(base ## _ ## all, xfail)
+
+#define DEFINE_UNION_TESTS(init, xfail)			\
+	DEFINE_UNION_TEST(same_sizes, init, xfail);	\
+	DEFINE_UNION_TEST(small_start, init, xfail);	\
+	DEFINE_UNION_TEST(small_end, init, xfail)
+
+/* These should be fully initialized all the time! */
+DEFINE_SCALAR_TESTS(zero, ALWAYS_PASS);
+DEFINE_STRUCT_TESTS(zero, ALWAYS_PASS);
+DEFINE_STRUCT_TESTS(old_zero, ALWAYS_PASS);
+DEFINE_UNION_TESTS(zero, ALWAYS_PASS);
+DEFINE_UNION_TESTS(old_zero, ALWAYS_PASS);
+/* Struct initializers: padding may be left uninitialized. */
+DEFINE_STRUCT_INITIALIZER_TESTS(static, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(dynamic, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(runtime, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
+DEFINE_STRUCT_TESTS(assigned_copy, ALWAYS_FAIL);
+DEFINE_UNION_INITIALIZER_TESTS(static, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(dynamic, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(runtime, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
+DEFINE_UNION_TESTS(assigned_copy, ALWAYS_FAIL);
+/* No initialization without compiler instrumentation. */
+DEFINE_SCALAR_TESTS(none, STRONG_PASS);
+DEFINE_STRUCT_TESTS(none, BYREF_PASS);
+/* Initialization of members with __user attribute. */
+DEFINE_TEST(user, struct test_user, STRUCT, none, USER_PASS);
+
+/*
+ * Check two uses through a variable declaration outside either path,
+ * which was noticed as a special case in porting earlier stack init
+ * compiler logic.
+ */
+static noinline int __leaf_switch_none(int path, bool fill)
+{
+ switch (path) {
+ /*
+ * This is intentionally unreachable. To silence the
+ * warning, build with -Wno-switch-unreachable
+ */
+ uint64_t var[10];
+
+ case 1:
+ target_start = &var;
+ target_size = sizeof(var);
+ if (fill) {
+ fill_start = &var;
+ fill_size = sizeof(var);
+
+ memset(fill_start, (forced_mask | 0x55) & FILL_BYTE, fill_size);
+ }
+ memcpy(check_buf, target_start, target_size);
+ break;
+ case 2:
+ target_start = &var;
+ target_size = sizeof(var);
+ if (fill) {
+ fill_start = &var;
+ fill_size = sizeof(var);
+
+ memset(fill_start, (forced_mask | 0xaa) & FILL_BYTE, fill_size);
+ }
+ memcpy(check_buf, target_start, target_size);
+ break;
+ default:
+ var[1] = 5;
+ return var[1] & forced_mask;
+ }
+ return 0;
+}
+
+static noinline int leaf_switch_1_none(unsigned long sp, bool fill,
+ uint64_t *arg)
+{
+ return __leaf_switch_none(1, fill);
+}
+
+static noinline int leaf_switch_2_none(unsigned long sp, bool fill,
+ uint64_t *arg)
+{
+ return __leaf_switch_none(2, fill);
+}
+
+/*
+ * These are expected to fail for most configurations because neither
+ * GCC nor Clang have a way to perform initialization of variables in
+ * non-code areas (i.e. in a switch statement before the first "case").
+ * https://llvm.org/pr44916
+ */
+DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL);
+DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
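+
+/*
+ * A minimal sketch of the pattern being exercised (illustrative only,
+ * not part of the test): a declaration placed in a switch body before
+ * the first case label is in scope for every case, but the declaration
+ * itself is never executed, so there is no statement for compiler-based
+ * auto-initialization to attach to:
+ *
+ *	switch (path) {
+ *		uint64_t var[10];	// never "executed", never zeroed
+ *	case 1:
+ *		use(var);		// may observe stale stack data
+ *		break;
+ *	}
+ */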
+
+#define KUNIT_test_scalars(init) \
+ KUNIT_CASE(test_u8_ ## init), \
+ KUNIT_CASE(test_u16_ ## init), \
+ KUNIT_CASE(test_u32_ ## init), \
+ KUNIT_CASE(test_u64_ ## init), \
+ KUNIT_CASE(test_char_array_ ## init)
+
+#define KUNIT_test_structs(init) \
+ KUNIT_CASE(test_small_hole_ ## init), \
+ KUNIT_CASE(test_big_hole_ ## init), \
+ KUNIT_CASE(test_trailing_hole_ ## init),\
+	KUNIT_CASE(test_packed_ ## init)
+
+#define KUNIT_test_unions(init) \
+ KUNIT_CASE(test_same_sizes_ ## init), \
+ KUNIT_CASE(test_small_start_ ## init), \
+	KUNIT_CASE(test_small_end_ ## init)
+
+static struct kunit_case stackinit_test_cases[] = {
+ /* These are explicitly initialized and should always pass. */
+ KUNIT_test_scalars(zero),
+ KUNIT_test_structs(zero),
+ KUNIT_test_structs(old_zero),
+ KUNIT_test_unions(zero),
+ KUNIT_test_unions(old_zero),
+ /* Padding here appears to be accidentally always initialized? */
+ KUNIT_test_structs(dynamic_partial),
+ KUNIT_test_structs(assigned_dynamic_partial),
+ KUNIT_test_unions(dynamic_partial),
+ KUNIT_test_unions(assigned_dynamic_partial),
+ /* Padding initialization depends on compiler behaviors. */
+ KUNIT_test_structs(static_partial),
+ KUNIT_test_structs(static_all),
+ KUNIT_test_structs(dynamic_all),
+ KUNIT_test_structs(runtime_partial),
+ KUNIT_test_structs(runtime_all),
+ KUNIT_test_structs(assigned_static_partial),
+ KUNIT_test_structs(assigned_static_all),
+ KUNIT_test_structs(assigned_dynamic_all),
+ KUNIT_test_unions(static_partial),
+ KUNIT_test_unions(static_all),
+ KUNIT_test_unions(dynamic_all),
+ KUNIT_test_unions(runtime_partial),
+ KUNIT_test_unions(runtime_all),
+ KUNIT_test_unions(assigned_static_partial),
+ KUNIT_test_unions(assigned_static_all),
+ KUNIT_test_unions(assigned_dynamic_all),
+ /* Everything fails this since it effectively performs a memcpy(). */
+ KUNIT_test_structs(assigned_copy),
+ KUNIT_test_unions(assigned_copy),
+ /* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
+ KUNIT_test_scalars(none),
+ KUNIT_CASE(test_switch_1_none),
+ KUNIT_CASE(test_switch_2_none),
+ /* STRUCTLEAK_BYREF should cover from here down. */
+ KUNIT_test_structs(none),
+ /* STRUCTLEAK will only cover this. */
+ KUNIT_CASE(test_user),
+ {}
+};
+
+static struct kunit_suite stackinit_test_suite = {
+ .name = "stackinit",
+ .test_cases = stackinit_test_cases,
+};
+
+kunit_test_suites(&stackinit_test_suite);
+
+MODULE_DESCRIPTION("Test cases for compiler-based stack variable zeroing");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/string_helpers_kunit.c b/lib/tests/string_helpers_kunit.c
new file mode 100644
index 000000000000..c853046183d2
--- /dev/null
+++ b/lib/tests/string_helpers_kunit.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Test cases for lib/string_helpers.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/array_size.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+
+static void test_string_check_buf(struct kunit *test,
+ const char *name, unsigned int flags,
+ char *in, size_t p,
+ char *out_real, size_t q_real,
+ char *out_test, size_t q_test)
+{
+ KUNIT_ASSERT_EQ_MSG(test, q_real, q_test, "name:%s", name);
+ KUNIT_EXPECT_MEMEQ_MSG(test, out_test, out_real, q_test,
+ "name:%s", name);
+}
+
+struct test_string {
+ const char *in;
+ const char *out;
+ unsigned int flags;
+};
+
+static const struct test_string strings[] = {
+ {
+ .in = "\\f\\ \\n\\r\\t\\v",
+ .out = "\f\\ \n\r\t\v",
+ .flags = UNESCAPE_SPACE,
+ },
+ {
+ .in = "\\40\\1\\387\\0064\\05\\040\\8a\\110\\777",
+ .out = " \001\00387\0064\005 \\8aH?7",
+ .flags = UNESCAPE_OCTAL,
+ },
+ {
+ .in = "\\xv\\xa\\x2c\\xD\\x6f2",
+ .out = "\\xv\n,\ro2",
+ .flags = UNESCAPE_HEX,
+ },
+ {
+ .in = "\\h\\\\\\\"\\a\\e\\",
+ .out = "\\h\\\"\a\e\\",
+ .flags = UNESCAPE_SPECIAL,
+ },
+};
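+
+/*
+ * A worked example from the table above: with UNESCAPE_OCTAL, the input
+ * "\40" decodes to the single byte 040 (a space), while sequences that
+ * do not form a valid octal escape, such as "\8a", pass through
+ * unchanged.
+ */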
+
+static void test_string_unescape(struct kunit *test,
+ const char *name, unsigned int flags,
+ bool inplace)
+{
+ int q_real = 256;
+ char *in = kunit_kzalloc(test, q_real, GFP_KERNEL);
+ char *out_test = kunit_kzalloc(test, q_real, GFP_KERNEL);
+ char *out_real = kunit_kzalloc(test, q_real, GFP_KERNEL);
+ int i, p = 0, q_test = 0;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, in);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_real);
+
+ for (i = 0; i < ARRAY_SIZE(strings); i++) {
+ const char *s = strings[i].in;
+ int len = strlen(strings[i].in);
+
+ /* Copy string to in buffer */
+ memcpy(&in[p], s, len);
+ p += len;
+
+ /* Copy expected result for given flags */
+ if (flags & strings[i].flags) {
+ s = strings[i].out;
+ len = strlen(strings[i].out);
+ }
+ memcpy(&out_test[q_test], s, len);
+ q_test += len;
+ }
+ in[p++] = '\0';
+
+ /* Call string_unescape and compare result */
+ if (inplace) {
+ memcpy(out_real, in, p);
+ if (flags == UNESCAPE_ANY)
+ q_real = string_unescape_any_inplace(out_real);
+ else
+ q_real = string_unescape_inplace(out_real, flags);
+ } else if (flags == UNESCAPE_ANY) {
+ q_real = string_unescape_any(in, out_real, q_real);
+ } else {
+ q_real = string_unescape(in, out_real, q_real, flags);
+ }
+
+ test_string_check_buf(test, name, flags, in, p - 1, out_real, q_real,
+ out_test, q_test);
+}
+
+struct test_string_1 {
+ const char *out;
+ unsigned int flags;
+};
+
+#define TEST_STRING_2_MAX_S1 32
+struct test_string_2 {
+ const char *in;
+ struct test_string_1 s1[TEST_STRING_2_MAX_S1];
+};
+
+#define TEST_STRING_2_DICT_0 NULL
+static const struct test_string_2 escape0[] = {{
+ .in = "\f\\ \n\r\t\v",
+ .s1 = {{
+ .out = "\\f\\ \\n\\r\\t\\v",
+ .flags = ESCAPE_SPACE,
+ },{
+ .out = "\\f\\134\\040\\n\\r\\t\\v",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL,
+ },{
+ .out = "\\f\\x5c\\x20\\n\\r\\t\\v",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\\h\\\"\a\e\\",
+ .s1 = {{
+ .out = "\\\\h\\\\\\\"\\a\\e\\\\",
+ .flags = ESCAPE_SPECIAL,
+ },{
+ .out = "\\\\\\150\\\\\\\"\\a\\e\\\\",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
+ },{
+ .out = "\\\\\\x68\\\\\\\"\\a\\e\\\\",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\eb \\C\007\"\x90\r]",
+ .s1 = {{
+ .out = "\eb \\C\007\"\x90\\r]",
+ .flags = ESCAPE_SPACE,
+ },{
+ .out = "\\eb \\\\C\\a\\\"\x90\r]",
+ .flags = ESCAPE_SPECIAL,
+ },{
+ .out = "\\eb \\\\C\\a\\\"\x90\\r]",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL,
+ },{
+ .out = "\\033\\142\\040\\134\\103\\007\\042\\220\\015\\135",
+ .flags = ESCAPE_OCTAL,
+ },{
+ .out = "\\033\\142\\040\\134\\103\\007\\042\\220\\r\\135",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL,
+ },{
+ .out = "\\e\\142\\040\\\\\\103\\a\\\"\\220\\015\\135",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
+ },{
+ .out = "\\e\\142\\040\\\\\\103\\a\\\"\\220\\r\\135",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_OCTAL,
+ },{
+ .out = "\eb \\C\007\"\x90\r]",
+ .flags = ESCAPE_NP,
+ },{
+ .out = "\eb \\C\007\"\x90\\r]",
+ .flags = ESCAPE_SPACE | ESCAPE_NP,
+ },{
+ .out = "\\eb \\C\\a\"\x90\r]",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NP,
+ },{
+ .out = "\\eb \\C\\a\"\x90\\r]",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NP,
+ },{
+ .out = "\\033b \\C\\007\"\\220\\015]",
+ .flags = ESCAPE_OCTAL | ESCAPE_NP,
+ },{
+ .out = "\\033b \\C\\007\"\\220\\r]",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NP,
+ },{
+ .out = "\\eb \\C\\a\"\\220\\r]",
+ .flags = ESCAPE_SPECIAL | ESCAPE_SPACE | ESCAPE_OCTAL |
+ ESCAPE_NP,
+ },{
+ .out = "\\x1bb \\C\\x07\"\\x90\\x0d]",
+ .flags = ESCAPE_NP | ESCAPE_HEX,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\\220\\317\r",
+ .flags = ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\\x90\\xcf\r",
+ .flags = ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
+},{
+ /* terminator */
+}};
+
+#define TEST_STRING_2_DICT_1 "b\\ \t\r\xCF"
+static const struct test_string_2 escape1[] = {{
+ .in = "\f\\ \n\r\t\v",
+ .s1 = {{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_OCTAL,
+ },{
+ .out = "\f\\x5c\\x20\n\\x0d\\x09\v",
+ .flags = ESCAPE_HEX,
+ },{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_ANY | ESCAPE_APPEND,
+ },{
+ .out = "\\014\\134\\040\\012\\015\\011\\013",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\\x0c\\x5c\\x20\\x0a\\x0d\\x09\\x0b",
+ .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
+ },{
+ .out = "\f\\x5c\\x20\n\\x0d\\x09\v",
+ .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\\h\\\"\a\xCF\e\\",
+ .s1 = {{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_OCTAL,
+ },{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_ANY | ESCAPE_APPEND,
+ },{
+ .out = "\\134h\\134\"\\007\\317\\033\\134",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\eb \\C\007\"\x90\r]",
+ .s1 = {{
+ .out = "\e\\142\\040\\134C\007\"\x90\\015]",
+ .flags = ESCAPE_OCTAL,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPACE | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_ANY | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\015",
+ .flags = ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\015",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_ANY | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\x0d",
+ .flags = ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\x0d",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ /* terminator */
+ }}
+},{
+ /* terminator */
+}};
+
+static const struct test_string strings_upper[] = {
+ {
+ .in = "abcdefgh1234567890test",
+ .out = "ABCDEFGH1234567890TEST",
+ },
+ {
+ .in = "abCdeFgH1234567890TesT",
+ .out = "ABCDEFGH1234567890TEST",
+ },
+};
+
+static const struct test_string strings_lower[] = {
+ {
+ .in = "ABCDEFGH1234567890TEST",
+ .out = "abcdefgh1234567890test",
+ },
+ {
+ .in = "abCdeFgH1234567890TesT",
+ .out = "abcdefgh1234567890test",
+ },
+};
+
+static const char *test_string_find_match(const struct test_string_2 *s2,
+ unsigned int flags)
+{
+ const struct test_string_1 *s1 = s2->s1;
+ unsigned int i;
+
+ if (!flags)
+ return s2->in;
+
+ /* Test cases are NULL-aware */
+ flags &= ~ESCAPE_NULL;
+
+	/* ESCAPE_OCTAL takes precedence over ESCAPE_HEX */
+ if (flags & ESCAPE_OCTAL)
+ flags &= ~ESCAPE_HEX;
+
+ for (i = 0; i < TEST_STRING_2_MAX_S1 && s1->out; i++, s1++)
+ if (s1->flags == flags)
+ return s1->out;
+ return NULL;
+}
+
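+/*
+ * Note: like snprintf(), string_escape_mem() with a zero-sized (here
+ * NULL) output buffer writes nothing but still returns the size the
+ * escaped output would occupy, which is what this overflow check
+ * relies on.
+ */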
+static void
+test_string_escape_overflow(struct kunit *test,
+ const char *in, int p, unsigned int flags, const char *esc,
+ int q_test, const char *name)
+{
+ int q_real;
+
+ q_real = string_escape_mem(in, p, NULL, 0, flags, esc);
+ KUNIT_EXPECT_EQ_MSG(test, q_real, q_test, "name:%s: flags:%#x", name, flags);
+}
+
+static void test_string_escape(struct kunit *test, const char *name,
+ const struct test_string_2 *s2,
+ unsigned int flags, const char *esc)
+{
+ size_t out_size = 512;
+ char *out_test = kunit_kzalloc(test, out_size, GFP_KERNEL);
+ char *out_real = kunit_kzalloc(test, out_size, GFP_KERNEL);
+ char *in = kunit_kzalloc(test, 256, GFP_KERNEL);
+ int p = 0, q_test = 0;
+ int q_real;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_real);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, in);
+
+ for (; s2->in; s2++) {
+ const char *out;
+ int len;
+
+ /* NULL injection */
+ if (flags & ESCAPE_NULL) {
+ in[p++] = '\0';
+ /* '\0' passes isascii() test */
+ if (flags & ESCAPE_NA && !(flags & ESCAPE_APPEND && esc)) {
+ out_test[q_test++] = '\0';
+ } else {
+ out_test[q_test++] = '\\';
+ out_test[q_test++] = '0';
+ }
+ }
+
+ /* Don't try strings that have no output */
+ out = test_string_find_match(s2, flags);
+ if (!out)
+ continue;
+
+ /* Copy string to in buffer */
+ len = strlen(s2->in);
+ memcpy(&in[p], s2->in, len);
+ p += len;
+
+ /* Copy expected result for given flags */
+ len = strlen(out);
+ memcpy(&out_test[q_test], out, len);
+ q_test += len;
+ }
+
+ q_real = string_escape_mem(in, p, out_real, out_size, flags, esc);
+
+ test_string_check_buf(test, name, flags, in, p, out_real, q_real, out_test,
+ q_test);
+
+ test_string_escape_overflow(test, in, p, flags, esc, q_test, name);
+}
+
+#define string_get_size_maxbuf 16
+#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \
+ do { \
+ BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \
+ BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \
+ __test_string_get_size(test, (size), (blk_size), (exp_result10), \
+ (exp_result2)); \
+ } while (0)
+
+static void test_string_get_size_check(struct kunit *test,
+ const char *units,
+ const char *exp,
+ char *res,
+ const u64 size,
+ const u64 blk_size)
+{
+ KUNIT_EXPECT_MEMEQ_MSG(test, res, exp, strlen(exp) + 1,
+ "string_get_size(size = %llu, blk_size = %llu, units = %s)",
+ size, blk_size, units);
+}
+
+static void __strchrcut(char *dst, const char *src, const char *cut)
+{
+ const char *from = src;
+ size_t len;
+
+ do {
+ len = strcspn(from, cut);
+ memcpy(dst, from, len);
+ dst += len;
+ from += len;
+ } while (*from++);
+ *dst = '\0';
+}
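+
+/*
+ * Example (illustrative): __strchrcut(dst, "1.10 kB", " B") copies
+ * "1.10k" into dst, dropping every ' ' and 'B'. The tests use this to
+ * derive expected strings for the STRING_UNITS_NO_SPACE/NO_BYTES
+ * variants.
+ */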
+
+static void __test_string_get_size_one(struct kunit *test,
+ const u64 size, const u64 blk_size,
+ const char *exp_result10,
+ const char *exp_result2,
+ enum string_size_units units,
+ const char *cut)
+{
+ char buf10[string_get_size_maxbuf];
+ char buf2[string_get_size_maxbuf];
+ char exp10[string_get_size_maxbuf];
+ char exp2[string_get_size_maxbuf];
+ char prefix10[64];
+ char prefix2[64];
+
+ sprintf(prefix10, "STRING_UNITS_10 [%s]", cut);
+ sprintf(prefix2, "STRING_UNITS_2 [%s]", cut);
+
+ __strchrcut(exp10, exp_result10, cut);
+ __strchrcut(exp2, exp_result2, cut);
+
+ string_get_size(size, blk_size, STRING_UNITS_10 | units, buf10, sizeof(buf10));
+ string_get_size(size, blk_size, STRING_UNITS_2 | units, buf2, sizeof(buf2));
+
+ test_string_get_size_check(test, prefix10, exp10, buf10, size, blk_size);
+ test_string_get_size_check(test, prefix2, exp2, buf2, size, blk_size);
+}
+
+static void __test_string_get_size(struct kunit *test,
+ const u64 size, const u64 blk_size,
+ const char *exp_result10,
+ const char *exp_result2)
+{
+ struct {
+ enum string_size_units units;
+ const char *cut;
+ } get_size_test_cases[] = {
+ { 0, "" },
+ { STRING_UNITS_NO_SPACE, " " },
+ { STRING_UNITS_NO_SPACE | STRING_UNITS_NO_BYTES, " B" },
+ { STRING_UNITS_NO_BYTES, "B" },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(get_size_test_cases); i++)
+ __test_string_get_size_one(test, size, blk_size,
+ exp_result10, exp_result2,
+ get_size_test_cases[i].units,
+ get_size_test_cases[i].cut);
+}
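+
+/*
+ * Worked example for the cases below: 16384 blocks of 512 bytes is
+ * 8,388,608 bytes, which string_get_size() renders as "8.39 MB" in
+ * decimal (10^6) units and as "8.00 MiB" in binary (2^20) units.
+ */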
+
+static void test_get_size(struct kunit *test)
+{
+ /* small values */
+ test_string_get_size_one(0, 512, "0 B", "0 B");
+ test_string_get_size_one(1, 512, "512 B", "512 B");
+ test_string_get_size_one(1100, 1, "1.10 kB", "1.07 KiB");
+
+ /* normal values */
+ test_string_get_size_one(16384, 512, "8.39 MB", "8.00 MiB");
+ test_string_get_size_one(500118192, 512, "256 GB", "238 GiB");
+ test_string_get_size_one(8192, 4096, "33.6 MB", "32.0 MiB");
+
+ /* weird block sizes */
+ test_string_get_size_one(3000, 1900, "5.70 MB", "5.44 MiB");
+
+ /* huge values */
+ test_string_get_size_one(U64_MAX, 4096, "75.6 ZB", "64.0 ZiB");
+ test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
+}
+
+static void test_upper_lower(struct kunit *test)
+{
+ char *dst;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(strings_upper); i++) {
+ const char *s = strings_upper[i].in;
+ int len = strlen(strings_upper[i].in) + 1;
+
+ dst = kmalloc(len, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, dst);
+
+ string_upper(dst, s);
+ KUNIT_EXPECT_STREQ(test, dst, strings_upper[i].out);
+ kfree(dst);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(strings_lower); i++) {
+ const char *s = strings_lower[i].in;
+ int len = strlen(strings_lower[i].in) + 1;
+
+ dst = kmalloc(len, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, dst);
+
+ string_lower(dst, s);
+ KUNIT_EXPECT_STREQ(test, dst, strings_lower[i].out);
+ kfree(dst);
+ }
+}
+
+static void test_unescape(struct kunit *test)
+{
+ unsigned int i;
+
+ for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
+ test_string_unescape(test, "unescape", i, false);
+ test_string_unescape(test, "unescape inplace",
+ get_random_u32_below(UNESCAPE_ALL_MASK + 1), true);
+
+ /* Without dictionary */
+ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
+ test_string_escape(test, "escape 0", escape0, i, TEST_STRING_2_DICT_0);
+
+ /* With dictionary */
+ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
+ test_string_escape(test, "escape 1", escape1, i, TEST_STRING_2_DICT_1);
+}
+
+static struct kunit_case string_helpers_test_cases[] = {
+ KUNIT_CASE(test_get_size),
+ KUNIT_CASE(test_upper_lower),
+ KUNIT_CASE(test_unescape),
+ {}
+};
+
+static struct kunit_suite string_helpers_test_suite = {
+ .name = "string_helpers",
+ .test_cases = string_helpers_test_cases,
+};
+
+kunit_test_suites(&string_helpers_test_suite);
+
+MODULE_DESCRIPTION("Test cases for string helpers module");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/tests/string_kunit.c b/lib/tests/string_kunit.c
new file mode 100644
index 000000000000..0ed7448a26d3
--- /dev/null
+++ b/lib/tests/string_kunit.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for string functions.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#define STRCMP_LARGE_BUF_LEN 2048
+#define STRCMP_CHANGE_POINT 1337
+#define STRCMP_TEST_EXPECT_EQUAL(test, fn, ...) KUNIT_EXPECT_EQ(test, fn(__VA_ARGS__), 0)
+#define STRCMP_TEST_EXPECT_LOWER(test, fn, ...) KUNIT_EXPECT_LT(test, fn(__VA_ARGS__), 0)
+#define STRCMP_TEST_EXPECT_GREATER(test, fn, ...) KUNIT_EXPECT_GT(test, fn(__VA_ARGS__), 0)
+
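+/*
+ * The three memsetXX() tests below share one scheme: poison a
+ * 512-element buffer with 0xa1 bytes, write "j" pattern values starting
+ * at element "i", then verify that every element "k" holds the pattern
+ * inside the window [i, i + j) and untouched poison outside it.
+ */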
+static void string_test_memset16(struct kunit *test)
+{
+ unsigned i, j, k;
+ u16 v, *p;
+
+ p = kunit_kzalloc(test, 256 * 2 * 2, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset16(p + i, 0xb1b2, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ } else if (k < i + j) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2,
+ "i:%d j:%d k:%d", i, j, k);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ }
+ }
+ }
+ }
+}
+
+static void string_test_memset32(struct kunit *test)
+{
+ unsigned i, j, k;
+ u32 v, *p;
+
+ p = kunit_kzalloc(test, 256 * 2 * 4, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset32(p + i, 0xb1b2b3b4, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ } else if (k < i + j) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2b3b4,
+ "i:%d j:%d k:%d", i, j, k);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ }
+ }
+ }
+ }
+}
+
+static void string_test_memset64(struct kunit *test)
+{
+ unsigned i, j, k;
+ u64 v, *p;
+
+ p = kunit_kzalloc(test, 256 * 2 * 8, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1a1a1a1a1ULL,
+ "i:%d j:%d k:%d", i, j, k);
+ } else if (k < i + j) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2b3b4b5b6b7b8ULL,
+ "i:%d j:%d k:%d", i, j, k);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1a1a1a1a1ULL,
+ "i:%d j:%d k:%d", i, j, k);
+ }
+ }
+ }
+ }
+}
+
+static void string_test_strchr(struct kunit *test)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ result = strchr(test_string, test_string[i]);
+ KUNIT_ASSERT_EQ_MSG(test, result - test_string, i,
+ "char:%c", 'a' + i);
+ }
+
+ result = strchr(empty_string, '\0');
+ KUNIT_ASSERT_PTR_EQ(test, result, empty_string);
+
+ result = strchr(empty_string, 'a');
+ KUNIT_ASSERT_NULL(test, result);
+
+ result = strchr(test_string, 'z');
+ KUNIT_ASSERT_NULL(test, result);
+}
+
+static void string_test_strnchr(struct kunit *test)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i, j;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ for (j = 0; j < strlen(test_string) + 2; j++) {
+ result = strnchr(test_string, j, test_string[i]);
+ if (j <= i) {
+ KUNIT_ASSERT_NULL_MSG(test, result,
+ "char:%c i:%d j:%d", 'a' + i, i, j);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, result - test_string, i,
+ "char:%c i:%d j:%d", 'a' + i, i, j);
+ }
+ }
+ }
+
+ result = strnchr(empty_string, 0, '\0');
+ KUNIT_ASSERT_NULL(test, result);
+
+ result = strnchr(empty_string, 1, '\0');
+ KUNIT_ASSERT_PTR_EQ(test, result, empty_string);
+
+ result = strnchr(empty_string, 1, 'a');
+ KUNIT_ASSERT_NULL(test, result);
+
+ result = strnchr(NULL, 0, '\0');
+ KUNIT_ASSERT_NULL(test, result);
+}
+
+static void string_test_strspn(struct kunit *test)
+{
+ static const struct strspn_test {
+ const char str[16];
+ const char accept[16];
+ const char reject[16];
+ unsigned a;
+ unsigned r;
+ } tests[] = {
+ { "foobar", "", "", 0, 6 },
+ { "abba", "abc", "ABBA", 4, 4 },
+ { "abba", "a", "b", 1, 1 },
+ { "", "abc", "abc", 0, 0},
+ };
+ const struct strspn_test *s = tests;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) {
+ KUNIT_ASSERT_EQ_MSG(test, s->a, strspn(s->str, s->accept),
+ "i:%zu", i);
+ KUNIT_ASSERT_EQ_MSG(test, s->r, strcspn(s->str, s->reject),
+ "i:%zu", i);
+ }
+}
+
+static char strcmp_buffer1[STRCMP_LARGE_BUF_LEN];
+static char strcmp_buffer2[STRCMP_LARGE_BUF_LEN];
+
+static void strcmp_fill_buffers(char fill1, char fill2)
+{
+ memset(strcmp_buffer1, fill1, STRCMP_LARGE_BUF_LEN);
+ memset(strcmp_buffer2, fill2, STRCMP_LARGE_BUF_LEN);
+ strcmp_buffer1[STRCMP_LARGE_BUF_LEN - 1] = 0;
+ strcmp_buffer2[STRCMP_LARGE_BUF_LEN - 1] = 0;
+}
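+
+/*
+ * The *_long_strings tests below fill both buffers identically and then
+ * flip the single byte at STRCMP_CHANGE_POINT, so a correct result
+ * requires the comparison to walk at least that far into the 2048-byte
+ * buffers.
+ */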
+
+static void string_test_strcmp(struct kunit *test)
+{
+ /* Equal strings */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "Hello, Kernel!", "Hello, Kernel!");
+ /* First string is lexicographically less than the second */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Hello, KUnit!", "Hello, Kernel!");
+ /* First string is lexicographically larger than the second */
+ STRCMP_TEST_EXPECT_GREATER(test, strcmp, "Hello, Kernel!", "Hello, KUnit!");
+ /* Empty string is always lexicographically less than any non-empty string */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "", "Non-empty string");
+ /* Two empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "", "");
+ /* Compare two strings which have only one char difference */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Abacaba", "Abadaba");
+ /* Compare two strings which have the same prefix*/
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Just a string", "Just a string and something else");
+}
+
+static void string_test_strcmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void string_test_strncmp(struct kunit *test)
+{
+ /* Equal strings */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, KUnit!", "Hello, KUnit!", 13);
+ /* First string is lexicographically less than the second */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Hello, KUnit!", "Hello, Kernel!", 13);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, Kernel!", "Hello, KUnit!", 0);
+ /* Strings with common prefix are equal if count = length of prefix */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Abacaba", "Abadaba", 3);
+ /* Strings with common prefix are not equal when count = length of prefix + 1 */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Abacaba", "Abadaba", 4);
+ /* If one string is a prefix of another, the shorter string is lexicographically smaller */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string and something else"));
+ /*
+ * If one string is a prefix of another, and we check first length
+ * of prefix chars, the result is 'equal'
+ */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string"));
+}
+
+static void string_test_strncmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+ /* the strings are equal up to STRCMP_CHANGE_POINT */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+static void string_test_strcasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "Hello, Kernel!", "HeLLO, KErNeL!");
+ /* Empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "", "");
+	/* Although the ASCII code for 'a' is larger than that for 'B', 'a' < 'B' */
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, "a", "B");
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, "B", "a");
+ /* Special symbols and numbers should be processed correctly */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "-+**.1230ghTTT~^", "-+**.1230Ghttt~^");
+}
+
+static void string_test_strcasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void string_test_strncasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbAcAbA", "Abacaba", strlen("Abacaba"));
+ /* strncasecmp should check 'count' chars only */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbaCaBa", "abaCaDa", 5);
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, "a", "B", 1);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, "B", "a", 1);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "Abacaba", "Not abacaba", 0);
+}
+
+static void string_test_strncasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+/**
+ * strscpy_check() - Run a specific test case.
+ * @test: KUnit test context pointer
+ * @src: Source string, argument to strscpy_pad()
+ * @count: Size of destination buffer, argument to strscpy_pad()
+ * @expected: Expected return value from call to strscpy_pad()
+ * @chars: Number of characters from the src string expected to be
+ * written to the dst buffer.
+ * @terminator:	1 if there should be a terminating null byte, 0 otherwise.
+ * @pad: Number of pad characters expected (in the tail of dst buffer).
+ * (@pad does not include the null terminator byte.)
+ *
+ * Calls strscpy_pad() and verifies the return value and state of the
+ * destination buffer after the call returns.
+ */
+static void strscpy_check(struct kunit *test, char *src, int count,
+ int expected, int chars, int terminator, int pad)
+{
+ int nr_bytes_poison;
+ int max_expected;
+ int max_count;
+ int written;
+ char buf[6];
+ int index, i;
+ const char POISON = 'z';
+
+ KUNIT_ASSERT_TRUE_MSG(test, src != NULL,
+ "null source string not supported");
+
+ memset(buf, POISON, sizeof(buf));
+	/* Future-proof the test suite: validate the args */
+ max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
+ max_expected = count - 1; /* Space for the null */
+
+ KUNIT_ASSERT_LE_MSG(test, count, max_count,
+ "count (%d) is too big (%d) ... aborting", count, max_count);
+ KUNIT_EXPECT_LE_MSG(test, expected, max_expected,
+ "expected (%d) is bigger than can possibly be returned (%d)",
+ expected, max_expected);
+
+ written = strscpy_pad(buf, src, count);
+ KUNIT_ASSERT_EQ(test, written, expected);
+
+ if (count && written == -E2BIG) {
+ KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1),
+ "buffer state invalid for -E2BIG");
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "too big string is not null terminated correctly");
+ }
+
+ for (i = 0; i < chars; i++)
+ KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i],
+ "buf[i]==%c != src[i]==%c", buf[i], src[i]);
+
+ if (terminator)
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "string is not null terminated correctly");
+
+ for (i = 0; i < pad; i++) {
+ index = chars + terminator + i;
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0',
+ "padding missing at index: %d", i);
+ }
+
+ nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
+ for (i = 0; i < nr_bytes_poison; i++) {
+ index = sizeof(buf) - 1 - i; /* Check from the end back */
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON,
+ "poison value missing at index: %d", i);
+ }
+}
+
+static void string_test_strscpy(struct kunit *test)
+{
+ char dest[8];
+
+ /*
+ * strscpy_check() uses a destination buffer of size 6 and needs at
+ * least 2 characters spare (one for null and one to check for
+	 * overflow). This means we should only call strscpy_check()
+	 * with strings up to a maximum of 4 characters long, and 'count'
+	 * should not exceed 4. To test with longer strings, increase
+	 * the buffer size in strscpy_check().
+ */
+
+ /* strscpy_check(test, src, count, expected, chars, terminator, pad) */
+ strscpy_check(test, "a", 0, -E2BIG, 0, 0, 0);
+ strscpy_check(test, "", 0, -E2BIG, 0, 0, 0);
+
+ strscpy_check(test, "a", 1, -E2BIG, 0, 1, 0);
+ strscpy_check(test, "", 1, 0, 0, 1, 0);
+
+ strscpy_check(test, "ab", 2, -E2BIG, 1, 1, 0);
+ strscpy_check(test, "a", 2, 1, 1, 1, 0);
+ strscpy_check(test, "", 2, 0, 0, 1, 1);
+
+ strscpy_check(test, "abc", 3, -E2BIG, 2, 1, 0);
+ strscpy_check(test, "ab", 3, 2, 2, 1, 0);
+ strscpy_check(test, "a", 3, 1, 1, 1, 1);
+ strscpy_check(test, "", 3, 0, 0, 1, 2);
+
+ strscpy_check(test, "abcd", 4, -E2BIG, 3, 1, 0);
+ strscpy_check(test, "abc", 4, 3, 3, 1, 0);
+ strscpy_check(test, "ab", 4, 2, 2, 1, 1);
+ strscpy_check(test, "a", 4, 1, 1, 1, 2);
+ strscpy_check(test, "", 4, 0, 0, 1, 3);
+
+ /* Compile-time-known source strings. */
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG);
+}
+
+static volatile int unconst;
+
+static void string_test_strcat(struct kunit *test)
+{
+ char dest[8];
+
+ /* Destination is terminated. */
+ memset(dest, 0, sizeof(dest));
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy does nothing. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* 4 characters copied in, stops at %NUL. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ KUNIT_EXPECT_EQ(test, dest[5], '\0');
+ /* 2 more characters copied in okay. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+}
+
+static void string_test_strncat(struct kunit *test)
+{
+ char dest[8];
+
+ /* Destination is terminated. */
+ memset(dest, 0, sizeof(dest));
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy of size 0 does nothing. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Empty copy of size 1 does nothing too. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Copy of max 0 characters should do nothing. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+
+ /* 4 characters copied in, even if max is 8. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ KUNIT_EXPECT_EQ(test, dest[5], '\0');
+ KUNIT_EXPECT_EQ(test, dest[6], '\0');
+ /* 2 characters copied in okay, 2 ignored. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+}
+
+static void string_test_strlcat(struct kunit *test)
+{
+ char dest[8] = "";
+ int len = sizeof(dest) + unconst;
+
+ /* Destination is terminated. */
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy is size 0. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Size 1 should keep buffer terminated, report size of source only. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+
+ /* 4 characters copied in. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ /* 2 characters copied in okay, gets to 6 total. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+ /* 2 characters ignored if max size (7) reached. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+	/* Only 1 of 3 characters copied in, now at true max size. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9);
+ KUNIT_EXPECT_STREQ(test, dest, "fourABE");
+ /* Everything else ignored, now at full size. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11);
+ KUNIT_EXPECT_STREQ(test, dest, "fourABE");
+}
+
+static void string_test_strtomem(struct kunit *test)
+{
+ static const char input[sizeof(unsigned long)] = "hi";
+ static const char truncate[] = "this is too long";
+ struct {
+ unsigned long canary1;
+ unsigned char output[sizeof(unsigned long)] __nonstring;
+ unsigned long canary2;
+ } wrap;
+
+ memset(&wrap, 0xFF, sizeof(wrap));
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX,
+ "bad initial canary value");
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX,
+ "bad initial canary value");
+
+ /* Check unpadded copy leaves surroundings untouched. */
+ strtomem(wrap.output, input);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated copy leaves surroundings untouched. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem(wrap.output, truncate);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check padded copy leaves only string padded. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem_pad(wrap.output, input, 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated padded copy has no padding. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem(wrap.output, truncate);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+}
+
+static void string_test_memtostr(struct kunit *test)
+{
+ char nonstring[7] __nonstring = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' };
+ char nonstring_small[3] __nonstring = { 'a', 'b', 'c' };
+ char dest[sizeof(nonstring) + 1];
+
+ /* Copy in a non-NUL-terminated string into exactly right-sized dest. */
+ KUNIT_EXPECT_EQ(test, sizeof(dest), sizeof(nonstring) + 1);
+ memset(dest, 'X', sizeof(dest));
+ memtostr(dest, nonstring);
+ KUNIT_EXPECT_STREQ(test, dest, "abcdefg");
+ memset(dest, 'X', sizeof(dest));
+ memtostr(dest, nonstring_small);
+ KUNIT_EXPECT_STREQ(test, dest, "abc");
+ KUNIT_EXPECT_EQ(test, dest[7], 'X');
+
+ memset(dest, 'X', sizeof(dest));
+ memtostr_pad(dest, nonstring);
+ KUNIT_EXPECT_STREQ(test, dest, "abcdefg");
+ memset(dest, 'X', sizeof(dest));
+ memtostr_pad(dest, nonstring_small);
+ KUNIT_EXPECT_STREQ(test, dest, "abc");
+ KUNIT_EXPECT_EQ(test, dest[7], '\0');
+}
+
+static struct kunit_case string_test_cases[] = {
+ KUNIT_CASE(string_test_memset16),
+ KUNIT_CASE(string_test_memset32),
+ KUNIT_CASE(string_test_memset64),
+ KUNIT_CASE(string_test_strchr),
+ KUNIT_CASE(string_test_strnchr),
+ KUNIT_CASE(string_test_strspn),
+ KUNIT_CASE(string_test_strcmp),
+ KUNIT_CASE(string_test_strcmp_long_strings),
+ KUNIT_CASE(string_test_strncmp),
+ KUNIT_CASE(string_test_strncmp_long_strings),
+ KUNIT_CASE(string_test_strcasecmp),
+ KUNIT_CASE(string_test_strcasecmp_long_strings),
+ KUNIT_CASE(string_test_strncasecmp),
+ KUNIT_CASE(string_test_strncasecmp_long_strings),
+ KUNIT_CASE(string_test_strscpy),
+ KUNIT_CASE(string_test_strcat),
+ KUNIT_CASE(string_test_strncat),
+ KUNIT_CASE(string_test_strlcat),
+ KUNIT_CASE(string_test_strtomem),
+ KUNIT_CASE(string_test_memtostr),
+ {}
+};
+
+static struct kunit_suite string_test_suite = {
+ .name = "string",
+ .test_cases = string_test_cases,
+};
+
+kunit_test_suites(&string_test_suite);
+
+MODULE_DESCRIPTION("Test cases for string functions");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/tests/test_bits.c b/lib/tests/test_bits.c
new file mode 100644
index 000000000000..c7b38d91e1f1
--- /dev/null
+++ b/lib/tests/test_bits.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for functions and macros in bits.h
+ */
+
+#include <kunit/test.h>
+#include <linux/bits.h>
+
+static void genmask_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1ul, GENMASK(0, 0));
+ KUNIT_EXPECT_EQ(test, 3ul, GENMASK(1, 0));
+ KUNIT_EXPECT_EQ(test, 6ul, GENMASK(2, 1));
+ KUNIT_EXPECT_EQ(test, 0xFFFFFFFFul, GENMASK(31, 0));
+
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK(0, 1);
+ GENMASK(0, 10);
+ GENMASK(9, 10);
+#endif
+}
+
+static void genmask_ull_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1ull, GENMASK_ULL(0, 0));
+ KUNIT_EXPECT_EQ(test, 3ull, GENMASK_ULL(1, 0));
+ KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, GENMASK_ULL(39, 21));
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_ULL(63, 0));
+
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK_ULL(0, 1);
+ GENMASK_ULL(0, 10);
+ GENMASK_ULL(9, 10);
+#endif
+}
+
+static void genmask_u128_test(struct kunit *test)
+{
+#ifdef CONFIG_ARCH_SUPPORTS_INT128
+ /* Below 64 bit masks */
+ KUNIT_EXPECT_EQ(test, 0x0000000000000001ull, GENMASK_U128(0, 0));
+ KUNIT_EXPECT_EQ(test, 0x0000000000000003ull, GENMASK_U128(1, 0));
+ KUNIT_EXPECT_EQ(test, 0x0000000000000006ull, GENMASK_U128(2, 1));
+ KUNIT_EXPECT_EQ(test, 0x00000000ffffffffull, GENMASK_U128(31, 0));
+ KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, GENMASK_U128(39, 21));
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_U128(63, 0));
+
+	/* Above 64-bit masks - only the 64-bit portion can be validated at a time */
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_U128(64, 0) >> 1);
+ KUNIT_EXPECT_EQ(test, 0x00000000ffffffffull, GENMASK_U128(81, 50) >> 50);
+ KUNIT_EXPECT_EQ(test, 0x0000000000ffffffull, GENMASK_U128(87, 64) >> 64);
+ KUNIT_EXPECT_EQ(test, 0x0000000000ff0000ull, GENMASK_U128(87, 80) >> 64);
+
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_U128(127, 0) >> 64);
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, (u64)GENMASK_U128(127, 0));
+ KUNIT_EXPECT_EQ(test, 0x0000000000000003ull, GENMASK_U128(127, 126) >> 126);
+ KUNIT_EXPECT_EQ(test, 0x0000000000000001ull, GENMASK_U128(127, 127) >> 127);
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK_U128(0, 1);
+ GENMASK_U128(0, 10);
+ GENMASK_U128(9, 10);
+#endif /* TEST_GENMASK_FAILURES */
+#endif /* CONFIG_ARCH_SUPPORTS_INT128 */
+}
+
+static void genmask_input_check_test(struct kunit *test)
+{
+ unsigned int x, y;
+ int z, w;
+
+ /* Unknown input */
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, 0));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, x));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, y));
+
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, 0));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, z));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, w));
+
+ /* Valid input */
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(1, 1));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(39, 21));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(100, 80));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(110, 65));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(127, 0));
+}
+
+static struct kunit_case bits_test_cases[] = {
+ KUNIT_CASE(genmask_test),
+ KUNIT_CASE(genmask_ull_test),
+ KUNIT_CASE(genmask_u128_test),
+ KUNIT_CASE(genmask_input_check_test),
+ {}
+};
+
+static struct kunit_suite bits_test_suite = {
+ .name = "bits-test",
+ .test_cases = bits_test_cases,
+};
+kunit_test_suite(bits_test_suite);
+
+MODULE_DESCRIPTION("Test cases for functions and macros in bits.h");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/test_fprobe.c b/lib/tests/test_fprobe.c
new file mode 100644
index 000000000000..cf92111b5c79
--- /dev/null
+++ b/lib/tests/test_fprobe.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * test_fprobe.c - simple sanity test for fprobe
+ */
+
+#include <linux/kernel.h>
+#include <linux/fprobe.h>
+#include <linux/random.h>
+#include <kunit/test.h>
+
+#define div_factor 3
+
+static struct kunit *current_test;
+
+static u32 rand1, entry_val, exit_val;
+
+/* Use indirect calls to avoid inlining the target functions */
+static u32 (*target)(u32 value);
+static u32 (*target2)(u32 value);
+static unsigned long target_ip;
+static unsigned long target2_ip;
+static int entry_return_value;
+
+static noinline u32 fprobe_selftest_target(u32 value)
+{
+ return (value / div_factor);
+}
+
+static noinline u32 fprobe_selftest_target2(u32 value)
+{
+ return (value / div_factor) + 1;
+}
+
+static notrace int fp_entry_handler(struct fprobe *fp, unsigned long ip,
+ unsigned long ret_ip,
+ struct ftrace_regs *fregs, void *data)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+	/* This handler may be called for both fprobe_selftest_target and fprobe_selftest_target2 */
+ if (ip != target_ip)
+ KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
+ entry_val = (rand1 / div_factor);
+ if (fp->entry_data_size) {
+ KUNIT_EXPECT_NOT_NULL(current_test, data);
+ if (data)
+ *(u32 *)data = entry_val;
+ } else
+ KUNIT_EXPECT_NULL(current_test, data);
+
+ return entry_return_value;
+}
+
+static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip,
+ unsigned long ret_ip,
+ struct ftrace_regs *fregs, void *data)
+{
+ unsigned long ret = ftrace_regs_get_return_value(fregs);
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ if (ip != target_ip) {
+ KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
+ KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
+ } else
+ KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor));
+ KUNIT_EXPECT_EQ(current_test, entry_val, (rand1 / div_factor));
+ exit_val = entry_val + div_factor;
+ if (fp->entry_data_size) {
+ KUNIT_EXPECT_NOT_NULL(current_test, data);
+ if (data)
+ KUNIT_EXPECT_EQ(current_test, *(u32 *)data, entry_val);
+ } else
+ KUNIT_EXPECT_NULL(current_test, data);
+}
+
+/* Test entry only (no rethook) */
+static void test_fprobe_entry(struct kunit *test)
+{
+ struct fprobe fp_entry = {
+ .entry_handler = fp_entry_handler,
+ };
+
+ current_test = test;
+
+	/* Unregistering before registering should fail. */
+ KUNIT_EXPECT_NE(test, 0, unregister_fprobe(&fp_entry));
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp_entry, "fprobe_selftest_target*", NULL));
+
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, 0, exit_val);
+
+ entry_val = 0;
+ exit_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, 0, exit_val);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp_entry));
+}
+
+static void test_fprobe(struct kunit *test)
+{
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target*", NULL));
+
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ entry_val = 0;
+ exit_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+static void test_fprobe_syms(struct kunit *test)
+{
+ static const char *syms[] = {"fprobe_selftest_target", "fprobe_selftest_target2"};
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe_syms(&fp, syms, 2));
+
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ entry_val = 0;
+ exit_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+/* Test private entry_data */
+static void test_fprobe_data(struct kunit *test)
+{
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ .entry_data_size = sizeof(u32),
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target", NULL));
+
+ target(rand1);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+static void test_fprobe_skip(struct kunit *test)
+{
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target", NULL));
+
+ entry_return_value = 1;
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, 0, exit_val);
+ KUNIT_EXPECT_EQ(test, 0, fp.nmissed);
+ entry_return_value = 0;
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+static unsigned long get_ftrace_location(void *func)
+{
+ unsigned long size, addr = (unsigned long)func;
+
+ if (!kallsyms_lookup_size_offset(addr, &size, NULL) || !size)
+ return 0;
+
+ return ftrace_location_range(addr, addr + size - 1);
+}
+
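+/*
+ * Note: rand1 is drawn from strictly above div_factor, so
+ * rand1 / div_factor is at least 1 and the "entry_val != 0" checks in
+ * the cases above can never see a legitimately zero quotient.
+ */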
+static int fprobe_test_init(struct kunit *test)
+{
+ rand1 = get_random_u32_above(div_factor);
+ target = fprobe_selftest_target;
+ target2 = fprobe_selftest_target2;
+ target_ip = get_ftrace_location(target);
+ target2_ip = get_ftrace_location(target2);
+
+ return 0;
+}
+
+static struct kunit_case fprobe_testcases[] = {
+ KUNIT_CASE(test_fprobe_entry),
+ KUNIT_CASE(test_fprobe),
+ KUNIT_CASE(test_fprobe_syms),
+ KUNIT_CASE(test_fprobe_data),
+ KUNIT_CASE(test_fprobe_skip),
+ {}
+};
+
+static struct kunit_suite fprobe_test_suite = {
+ .name = "fprobe_test",
+ .init = fprobe_test_init,
+ .test_cases = fprobe_testcases,
+};
+
+kunit_test_suites(&fprobe_test_suite);
diff --git a/lib/tests/test_hash.c b/lib/tests/test_hash.c
new file mode 100644
index 000000000000..a7af39662a0a
--- /dev/null
+++ b/lib/tests/test_hash.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for <linux/hash.h> and <linux/stringhash.h>
+ * This just verifies that various ways of computing a hash
+ * produce the same thing and that, where a k-bit hash value is
+ * requested, the result is of the requested size.
+ *
+ * We fill a buffer with a 255-byte null-terminated string,
+ * and use both full_name_hash() and hashlen_string() to hash the
+ * substrings from i to j, where 0 <= i < j < 256.
+ *
+ * The returned values are used to check that __hash_32() and
+ * __hash_32_generic() compute the same thing. Likewise hash_32()
+ * and hash_64().
+ */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/hash.h>
+#include <linux/stringhash.h>
+#include <kunit/test.h>
+
+/* 32-bit XORSHIFT generator. Seed must not be zero. */
+static u32 __attribute_const__
+xorshift(u32 seed)
+{
+ seed ^= seed << 13;
+ seed ^= seed >> 17;
+ seed ^= seed << 5;
+ return seed;
+}
+
+/* Given a non-zero x, returns a non-zero byte. */
+static u8 __attribute_const__
+mod255(u32 x)
+{
+ x = (x & 0xffff) + (x >> 16); /* 1 <= x <= 0x1fffe */
+ x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0x2fd */
+ x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0x100 */
+ x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0xff */
+ return x;
+}
+
+/* Fill the buffer with non-zero bytes. */
+static void fill_buf(char *buf, size_t len, u32 seed)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ seed = xorshift(seed);
+ buf[i] = mod255(seed);
+ }
+}
+
+/* Holds most testing variables for the int test. */
+struct test_hash_params {
+ /* Pointer to integer to be hashed. */
+ unsigned long long *h64;
+ /* Low 32-bits of integer to be hashed. */
+ u32 h0;
+ /* Arch-specific hash result. */
+ u32 h1;
+ /* Generic hash result. */
+ u32 h2;
+ /* ORed hashes of given size (in bits). */
+ u32 (*hash_or)[33];
+};
+
+#ifdef HAVE_ARCH__HASH_32
+static void
+test_int__hash_32(struct kunit *test, struct test_hash_params *params)
+{
+ params->hash_or[1][0] |= params->h2 = __hash_32_generic(params->h0);
+#if HAVE_ARCH__HASH_32 == 1
+ KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
+ "__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
+ params->h0, params->h1, params->h2);
+#endif
+}
+#endif
+
+#ifdef HAVE_ARCH_HASH_64
+static void
+test_int_hash_64(struct kunit *test, struct test_hash_params *params, u32 const *m, int *k)
+{
+ params->h2 = hash_64_generic(*params->h64, *k);
+#if HAVE_ARCH_HASH_64 == 1
+ KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
+ "hash_64(%#llx, %d) = %#x != hash_64_generic() = %#x",
+ *params->h64, *k, params->h1, params->h2);
+#else
+	KUNIT_EXPECT_LE_MSG(test, params->h2, *m,
+			    "hash_64_generic(%#llx, %d) = %#x > %#x",
+			    *params->h64, *k, params->h2, *m);
+#endif
+}
+#endif
+
+/*
+ * Test the various integer hash functions. h64 (or its low-order bits)
+ * is the integer to hash. hash_or accumulates the OR of the hash values,
+ * which are later checked to see that they cover all the requested bits.
+ *
+ * Because these functions (as opposed to the string hashes) are all
+ * inline, the code being tested is actually in the module, and you can
+ * recompile and re-test the module without rebooting.
+ */
+static void
+test_int_hash(struct kunit *test, unsigned long long h64, u32 hash_or[2][33])
+{
+ int k;
+ struct test_hash_params params = { &h64, (u32)h64, 0, 0, hash_or };
+
+	/* Test __hash_32 */
+ hash_or[0][0] |= params.h1 = __hash_32(params.h0);
+#ifdef HAVE_ARCH__HASH_32
+ test_int__hash_32(test, &params);
+#endif
+
+ /* Test k = 1..32 bits */
+ for (k = 1; k <= 32; k++) {
+ u32 const m = ((u32)2 << (k-1)) - 1; /* Low k bits set */
+
+ /* Test hash_32 */
+ hash_or[0][k] |= params.h1 = hash_32(params.h0, k);
+ KUNIT_EXPECT_LE_MSG(test, params.h1, m,
+ "hash_32(%#x, %d) = %#x > %#x",
+ params.h0, k, params.h1, m);
+
+ /* Test hash_64 */
+ hash_or[1][k] |= params.h1 = hash_64(h64, k);
+ KUNIT_EXPECT_LE_MSG(test, params.h1, m,
+ "hash_64(%#llx, %d) = %#x > %#x",
+ h64, k, params.h1, m);
+#ifdef HAVE_ARCH_HASH_64
+ test_int_hash_64(test, &params, &m, &k);
+#endif
+ }
+}
+
+#define SIZE 256 /* Run time is cubic in SIZE */
+
+static void test_string_or(struct kunit *test)
+{
+ char buf[SIZE+1];
+ u32 string_or = 0;
+ int i, j;
+
+ fill_buf(buf, SIZE, 1);
+
+ /* Test every possible non-empty substring in the buffer. */
+ for (j = SIZE; j > 0; --j) {
+ buf[j] = '\0';
+
+ for (i = 0; i <= j; i++) {
+ u32 h0 = full_name_hash(buf+i, buf+i, j-i);
+
+ string_or |= h0;
+ } /* i */
+ } /* j */
+
+ /* The OR of all the hash values should cover all the bits */
+ KUNIT_EXPECT_EQ_MSG(test, string_or, -1u,
+ "OR of all string hash results = %#x != %#x",
+ string_or, -1u);
+}
+
+static void test_hash_or(struct kunit *test)
+{
+ char buf[SIZE+1];
+ u32 hash_or[2][33] = { { 0, } };
+ unsigned long long h64 = 0;
+ int i, j;
+
+ fill_buf(buf, SIZE, 1);
+
+ /* Test every possible non-empty substring in the buffer. */
+ for (j = SIZE; j > 0; --j) {
+ buf[j] = '\0';
+
+ for (i = 0; i <= j; i++) {
+ u64 hashlen = hashlen_string(buf+i, buf+i);
+ u32 h0 = full_name_hash(buf+i, buf+i, j-i);
+
+ /* Check that hashlen_string gets the length right */
+ KUNIT_EXPECT_EQ_MSG(test, hashlen_len(hashlen), j-i,
+ "hashlen_string(%d..%d) returned length %u, expected %d",
+ i, j, hashlen_len(hashlen), j-i);
+ /* Check that the hashes match */
+ KUNIT_EXPECT_EQ_MSG(test, hashlen_hash(hashlen), h0,
+ "hashlen_string(%d..%d) = %08x != full_name_hash() = %08x",
+ i, j, hashlen_hash(hashlen), h0);
+
+ h64 = h64 << 32 | h0; /* For use with hash_64 */
+ test_int_hash(test, h64, hash_or);
+ } /* i */
+ } /* j */
+
+ KUNIT_EXPECT_EQ_MSG(test, hash_or[0][0], -1u,
+ "OR of all __hash_32 results = %#x != %#x",
+ hash_or[0][0], -1u);
+#ifdef HAVE_ARCH__HASH_32
+#if HAVE_ARCH__HASH_32 != 1 /* Test is pointless if results match */
+ KUNIT_EXPECT_EQ_MSG(test, hash_or[1][0], -1u,
+ "OR of all __hash_32_generic results = %#x != %#x",
+ hash_or[1][0], -1u);
+#endif
+#endif
+
+ /* Likewise for all the i-bit hash values */
+ for (i = 1; i <= 32; i++) {
+ u32 const m = ((u32)2 << (i-1)) - 1; /* Low i bits set */
+
+ KUNIT_EXPECT_EQ_MSG(test, hash_or[0][i], m,
+ "OR of all hash_32(%d) results = %#x (%#x expected)",
+ i, hash_or[0][i], m);
+ KUNIT_EXPECT_EQ_MSG(test, hash_or[1][i], m,
+ "OR of all hash_64(%d) results = %#x (%#x expected)",
+ i, hash_or[1][i], m);
+ }
+}
+
+static struct kunit_case hash_test_cases[] __refdata = {
+ KUNIT_CASE(test_string_or),
+ KUNIT_CASE(test_hash_or),
+ {}
+};
+
+static struct kunit_suite hash_test_suite = {
+ .name = "hash",
+ .test_cases = hash_test_cases,
+};
+
+kunit_test_suite(hash_test_suite);
+
+MODULE_DESCRIPTION("Test cases for <linux/hash.h> and <linux/stringhash.h>");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/test_kprobes.c b/lib/tests/test_kprobes.c
new file mode 100644
index 000000000000..b7582010125c
--- /dev/null
+++ b/lib/tests/test_kprobes.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * test_kprobes.c - simple sanity test for k*probes
+ *
+ * Copyright IBM Corp. 2008
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/random.h>
+#include <kunit/test.h>
+
+#define div_factor 3
+
+static u32 rand1, preh_val, posth_val;
+static u32 (*target)(u32 value);
+static u32 (*recursed_target)(u32 value);
+static u32 (*target2)(u32 value);
+static struct kunit *current_test;
+
+static unsigned long (*internal_target)(void);
+static unsigned long (*stacktrace_target)(void);
+static unsigned long (*stacktrace_driver)(void);
+static unsigned long target_return_address[2];
+
+static noinline u32 kprobe_target(u32 value)
+{
+ return (value / div_factor);
+}
+
+static noinline u32 kprobe_recursed_target(u32 value)
+{
+ return (value / div_factor);
+}
+
+static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+
+ preh_val = recursed_target(rand1);
+ return 0;
+}
+
+static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ u32 expval = recursed_target(rand1);
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, preh_val, expval);
+
+ posth_val = preh_val + div_factor;
+}
+
+static struct kprobe kp = {
+ .symbol_name = "kprobe_target",
+ .pre_handler = kp_pre_handler,
+ .post_handler = kp_post_handler
+};
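+
+/*
+ * A sketch of the control flow exercised below (not the kprobes
+ * implementation itself): while kp is registered, target(rand1)
+ * behaves roughly like
+ *
+ *   kp_pre_handler(&kp, regs);      // sets preh_val
+ *   kprobe_target(rand1);           // the probed function body
+ *   kp_post_handler(&kp, regs, 0);  // checks preh_val, sets posth_val
+ *
+ * which is why test_kprobe() only checks that both values became
+ * non-zero.
+ */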
+
+static void test_kprobe(struct kunit *test)
+{
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp));
+ target(rand1);
+ unregister_kprobe(&kp);
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
+}
+
+static noinline u32 kprobe_target2(u32 value)
+{
+ return (value / div_factor) + 1;
+}
+
+static noinline unsigned long kprobe_stacktrace_internal_target(void)
+{
+ if (!target_return_address[0])
+ target_return_address[0] = (unsigned long)__builtin_return_address(0);
+ return target_return_address[0];
+}
+
+static noinline unsigned long kprobe_stacktrace_target(void)
+{
+ if (!target_return_address[1])
+ target_return_address[1] = (unsigned long)__builtin_return_address(0);
+
+ if (internal_target)
+ internal_target();
+
+ return target_return_address[1];
+}
+
+static noinline unsigned long kprobe_stacktrace_driver(void)
+{
+ if (stacktrace_target)
+ stacktrace_target();
+
+	/* This prevents the function from being inlined */
+ return (unsigned long)__builtin_return_address(0);
+}
+
+static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
+{
+ preh_val = (rand1 / div_factor) + 1;
+ return 0;
+}
+
+static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor) + 1);
+ posth_val = preh_val + div_factor;
+}
+
+static struct kprobe kp2 = {
+ .symbol_name = "kprobe_target2",
+ .pre_handler = kp_pre_handler2,
+ .post_handler = kp_post_handler2
+};
+
+static void test_kprobes(struct kunit *test)
+{
+ struct kprobe *kps[2] = {&kp, &kp2};
+
+ current_test = test;
+
+	/* addr and flags should be cleared for reusing the kprobe. */
+ kp.addr = NULL;
+ kp.flags = 0;
+
+ KUNIT_EXPECT_EQ(test, 0, register_kprobes(kps, 2));
+ preh_val = 0;
+ posth_val = 0;
+ target(rand1);
+
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
+
+ preh_val = 0;
+ posth_val = 0;
+ target2(rand1);
+
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
+ unregister_kprobes(kps, 2);
+}
+
+static struct kprobe kp_missed = {
+ .symbol_name = "kprobe_recursed_target",
+ .pre_handler = kp_pre_handler,
+ .post_handler = kp_post_handler,
+};
+
+static void test_kprobe_missed(struct kunit *test)
+{
+ current_test = test;
+ preh_val = 0;
+ posth_val = 0;
+
+ KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp_missed));
+
+ recursed_target(rand1);
+
+ KUNIT_EXPECT_EQ(test, 2, kp_missed.nmissed);
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
+
+ unregister_kprobe(&kp_missed);
+}
+
+#ifdef CONFIG_KRETPROBES
+static u32 krph_val;
+
+static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ krph_val = (rand1 / div_factor);
+ return 0;
+}
+
+static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long ret = regs_return_value(regs);
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, ret, rand1 / div_factor);
+ KUNIT_EXPECT_NE(current_test, krph_val, 0);
+ krph_val = rand1;
+ return 0;
+}
+
+static struct kretprobe rp = {
+ .handler = return_handler,
+ .entry_handler = entry_handler,
+ .kp.symbol_name = "kprobe_target"
+};
+
+static void test_kretprobe(struct kunit *test)
+{
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_kretprobe(&rp));
+ target(rand1);
+ unregister_kretprobe(&rp);
+ KUNIT_EXPECT_EQ(test, krph_val, rand1);
+}
+
+static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long ret = regs_return_value(regs);
+
+ KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
+ KUNIT_EXPECT_NE(current_test, krph_val, 0);
+ krph_val = rand1;
+ return 0;
+}
+
+static struct kretprobe rp2 = {
+ .handler = return_handler2,
+ .entry_handler = entry_handler,
+ .kp.symbol_name = "kprobe_target2"
+};
+
+static void test_kretprobes(struct kunit *test)
+{
+ struct kretprobe *rps[2] = {&rp, &rp2};
+
+ current_test = test;
+	/* addr and flags should be cleared for reusing the kprobe. */
+ rp.kp.addr = NULL;
+ rp.kp.flags = 0;
+ KUNIT_EXPECT_EQ(test, 0, register_kretprobes(rps, 2));
+
+ krph_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_EQ(test, krph_val, rand1);
+
+ krph_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_EQ(test, krph_val, rand1);
+ unregister_kretprobes(rps, 2);
+}
+
+#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+#define STACK_BUF_SIZE 16
+static unsigned long stack_buf[STACK_BUF_SIZE];
+
+static int stacktrace_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long retval = regs_return_value(regs);
+ int i, ret;
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, retval, target_return_address[1]);
+
+ /*
+	 * Test the stacktrace from inside the kretprobe handler; this
+	 * involves the kretprobe trampoline, but the trace must still
+	 * include the correct return address of the target function.
+ */
+ ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+
+ for (i = 0; i < ret; i++) {
+ if (stack_buf[i] == target_return_address[1])
+ break;
+ }
+ KUNIT_EXPECT_NE(current_test, i, ret);
+
+#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
+ /*
+ * Test stacktrace from pt_regs at the return address. Thus the stack
+ * trace must start from the target return address.
+ */
+ ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+ KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
+#endif
+
+ return 0;
+}
+
+static struct kretprobe rp3 = {
+ .handler = stacktrace_return_handler,
+ .kp.symbol_name = "kprobe_stacktrace_target"
+};
+
+static void test_stacktrace_on_kretprobe(struct kunit *test)
+{
+ unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
+
+ current_test = test;
+ rp3.kp.addr = NULL;
+ rp3.kp.flags = 0;
+
+ /*
+	 * Run stacktrace_driver() to record the correct return address
+	 * in stacktrace_target(), and ensure the stacktrace_driver()
+	 * call is not inlined by checking that its return address
+	 * differs from the return address of this function.
+ */
+ KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+
+ KUNIT_ASSERT_EQ(test, 0, register_kretprobe(&rp3));
+ KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+ unregister_kretprobe(&rp3);
+}
+
+static int stacktrace_internal_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long retval = regs_return_value(regs);
+ int i, ret;
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, retval, target_return_address[0]);
+
+ /*
+	 * Test the stacktrace from inside the kretprobe handler for the
+	 * nested case. The unwinder will find the kretprobe_trampoline
+	 * address in place of the return address, and kretprobe must
+	 * resolve it back to the real one.
+ */
+ ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+
+ for (i = 0; i < ret - 1; i++) {
+ if (stack_buf[i] == target_return_address[0]) {
+ KUNIT_EXPECT_EQ(current_test, stack_buf[i + 1], target_return_address[1]);
+ break;
+ }
+ }
+ KUNIT_EXPECT_NE(current_test, i, ret);
+
+#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
+ /* Ditto for the regs version. */
+ ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+ KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[0]);
+ KUNIT_EXPECT_EQ(current_test, stack_buf[1], target_return_address[1]);
+#endif
+
+ return 0;
+}
+
+static struct kretprobe rp4 = {
+ .handler = stacktrace_internal_return_handler,
+ .kp.symbol_name = "kprobe_stacktrace_internal_target"
+};
+
+static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
+{
+ unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
+ struct kretprobe *rps[2] = {&rp3, &rp4};
+
+ current_test = test;
+ rp3.kp.addr = NULL;
+ rp3.kp.flags = 0;
+
+ KUNIT_ASSERT_EQ(test, 0, register_kretprobes(rps, 2));
+ KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+ unregister_kretprobes(rps, 2);
+}
+#endif /* CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE */
+
+#endif /* CONFIG_KRETPROBES */
+
+static int kprobes_test_init(struct kunit *test)
+{
+ target = kprobe_target;
+ target2 = kprobe_target2;
+ recursed_target = kprobe_recursed_target;
+ stacktrace_target = kprobe_stacktrace_target;
+ internal_target = kprobe_stacktrace_internal_target;
+ stacktrace_driver = kprobe_stacktrace_driver;
+ rand1 = get_random_u32_above(div_factor);
+ return 0;
+}
+
+static struct kunit_case kprobes_testcases[] = {
+ KUNIT_CASE(test_kprobe),
+ KUNIT_CASE(test_kprobes),
+ KUNIT_CASE(test_kprobe_missed),
+#ifdef CONFIG_KRETPROBES
+ KUNIT_CASE(test_kretprobe),
+ KUNIT_CASE(test_kretprobes),
+#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ KUNIT_CASE(test_stacktrace_on_kretprobe),
+ KUNIT_CASE(test_stacktrace_on_nested_kretprobe),
+#endif
+#endif
+ {}
+};
+
+static struct kunit_suite kprobes_test_suite = {
+ .name = "kprobes_test",
+ .init = kprobes_test_init,
+ .test_cases = kprobes_testcases,
+};
+
+kunit_test_suites(&kprobes_test_suite);
+
+MODULE_DESCRIPTION("simple sanity test for k*probes");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/test_linear_ranges.c b/lib/tests/test_linear_ranges.c
new file mode 100644
index 000000000000..f482be00f1bc
--- /dev/null
+++ b/lib/tests/test_linear_ranges.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the linear_ranges helper.
+ *
+ * Copyright (C) 2020, ROHM Semiconductors.
+ * Author: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/linear_range.h>
+
+/* First things first. I deeply dislike unit tests. I have seen all hell
+ * break loose when people who think unit tests are "the silver bullet"
+ * for killing bugs get to decide how a company should implement its
+ * testing strategy...
+ *
+ * Believe me, it can get _really_ ridiculous. It is tempting to think
+ * that walking through all the possible execution branches will nail
+ * down 100% of bugs. This can lead to demands for a certain % of "test
+ * coverage" - measured as line coverage. And that is one of the worst
+ * things you can do.
+ *
+ * Ask people to provide line coverage and they will. I've seen clever
+ * tools which generate test cases for the existing functions - and by
+ * default these tools assume the code is correct and just generate
+ * checks which pass when run against the current code base. Run such a
+ * generator and you'll get tests that do not test whether the code is
+ * correct but merely verify that nothing changes. The problem is that
+ * testing working code is pointless. And if it is not working, your
+ * test must not assume it is working. You won't catch any bugs with
+ * such tests. What you can do is generate a huge amount of tests,
+ * especially if you are asked to provide 100% line coverage x_x. So
+ * what do these tests - which are not finding any bugs now - do?
+ *
+ * They add inertia to all future development. I think it was Terry
+ * Pratchett who wrote of someone having the same impact as thick syrup
+ * has on a chronometer. An excessive amount of unit tests has this
+ * effect on development. If you do actually find _any_ bug in the code
+ * in such an environment and try fixing it... chances are you also
+ * need to fix the test cases. On a sunny day you fix one test. But
+ * I've done refactoring which resulted in 500+ broken tests (which had
+ * really zero value other than proving to managers that we do
+ * "quality")...
+ *
+ * That being said - there are situations where unit tests can be
+ * handy. If you have algorithms which take some input and should
+ * produce some output, then you can implement a few carefully
+ * selected, simple test cases for them. I've previously used this for
+ * example for netlink and device-tree data parsing functions. Feed
+ * some example data to the functions and verify the output is as
+ * expected. That won't cover all the cases, but it will show whether
+ * the main logic is sound.
+ *
+ * Here we also do some minor testing. I don't want to go through all
+ * the branches or test more or less obvious things - but I want to see
+ * that the main logic works. And I definitely don't want to add 500+
+ * test cases that break when some simple fix is made x_x. So - let's
+ * add only a few, well selected tests which ensure as much of the
+ * logic is good as possible.
+ */
+
+/*
+ * Test Range 1:
+ * selectors: 2 3 4 5 6
+ * values (5): 10 20 30 40 50
+ *
+ * Test Range 2:
+ * selectors: 7 8 9 10
+ * values (4): 100 150 200 250
+ */
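+
+/*
+ * For reference, a linear_range maps a selector to a value by the
+ * arithmetic the helper documents:
+ *
+ *   value = min + (sel - min_sel) * step
+ *
+ * e.g. in Test Range 1, sel 4 -> 10 + (4 - 2) * 10 = 30.
+ */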
+
+#define RANGE1_MIN 10
+#define RANGE1_MIN_SEL 2
+#define RANGE1_STEP 10
+
+/* 2, 3, 4, 5, 6 */
+static const unsigned int range1_sels[] = { RANGE1_MIN_SEL, RANGE1_MIN_SEL + 1,
+ RANGE1_MIN_SEL + 2,
+ RANGE1_MIN_SEL + 3,
+ RANGE1_MIN_SEL + 4 };
+/* 10, 20, 30, 40, 50 */
+static const unsigned int range1_vals[] = { RANGE1_MIN, RANGE1_MIN +
+ RANGE1_STEP,
+ RANGE1_MIN + RANGE1_STEP * 2,
+ RANGE1_MIN + RANGE1_STEP * 3,
+ RANGE1_MIN + RANGE1_STEP * 4 };
+
+#define RANGE2_MIN 100
+#define RANGE2_MIN_SEL 7
+#define RANGE2_STEP 50
+
+/* 7, 8, 9, 10 */
+static const unsigned int range2_sels[] = { RANGE2_MIN_SEL, RANGE2_MIN_SEL + 1,
+ RANGE2_MIN_SEL + 2,
+ RANGE2_MIN_SEL + 3 };
+/* 100, 150, 200, 250 */
+static const unsigned int range2_vals[] = { RANGE2_MIN, RANGE2_MIN +
+ RANGE2_STEP,
+ RANGE2_MIN + RANGE2_STEP * 2,
+ RANGE2_MIN + RANGE2_STEP * 3 };
+
+#define RANGE1_NUM_VALS (ARRAY_SIZE(range1_vals))
+#define RANGE2_NUM_VALS (ARRAY_SIZE(range2_vals))
+#define RANGE_NUM_VALS (RANGE1_NUM_VALS + RANGE2_NUM_VALS)
+
+#define RANGE1_MAX_SEL (RANGE1_MIN_SEL + RANGE1_NUM_VALS - 1)
+#define RANGE1_MAX_VAL (range1_vals[RANGE1_NUM_VALS - 1])
+
+#define RANGE2_MAX_SEL (RANGE2_MIN_SEL + RANGE2_NUM_VALS - 1)
+#define RANGE2_MAX_VAL (range2_vals[RANGE2_NUM_VALS - 1])
+
+#define SMALLEST_SEL RANGE1_MIN_SEL
+#define SMALLEST_VAL RANGE1_MIN
+
+static struct linear_range testr[] = {
+ LINEAR_RANGE(RANGE1_MIN, RANGE1_MIN_SEL, RANGE1_MAX_SEL, RANGE1_STEP),
+ LINEAR_RANGE(RANGE2_MIN, RANGE2_MIN_SEL, RANGE2_MAX_SEL, RANGE2_STEP),
+};
+
+static void range_test_get_value(struct kunit *test)
+{
+ int ret, i;
+ unsigned int sel, val;
+
+ for (i = 0; i < RANGE1_NUM_VALS; i++) {
+ sel = range1_sels[i];
+ ret = linear_range_get_value_array(&testr[0], 2, sel, &val);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, val, range1_vals[i]);
+ }
+ for (i = 0; i < RANGE2_NUM_VALS; i++) {
+ sel = range2_sels[i];
+ ret = linear_range_get_value_array(&testr[0], 2, sel, &val);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, val, range2_vals[i]);
+ }
+ ret = linear_range_get_value_array(&testr[0], 2, sel + 1, &val);
+ KUNIT_EXPECT_NE(test, 0, ret);
+}
+
+static void range_test_get_selector_high(struct kunit *test)
+{
+ int ret, i;
+ unsigned int sel;
+ bool found;
+
+ for (i = 0; i < RANGE1_NUM_VALS; i++) {
+ ret = linear_range_get_selector_high(&testr[0], range1_vals[i],
+ &sel, &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range1_sels[i]);
+ KUNIT_EXPECT_TRUE(test, found);
+ }
+
+ ret = linear_range_get_selector_high(&testr[0], RANGE1_MAX_VAL + 1,
+ &sel, &found);
+ KUNIT_EXPECT_LE(test, ret, 0);
+
+ ret = linear_range_get_selector_high(&testr[0], RANGE1_MIN - 1,
+ &sel, &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_FALSE(test, found);
+ KUNIT_EXPECT_EQ(test, sel, range1_sels[0]);
+}
+
+static void range_test_get_value_amount(struct kunit *test)
+{
+ int ret;
+
+ ret = linear_range_values_in_range_array(&testr[0], 2);
+ KUNIT_EXPECT_EQ(test, (int)RANGE_NUM_VALS, ret);
+}
+
+static void range_test_get_selector_low(struct kunit *test)
+{
+ int i, ret;
+ unsigned int sel;
+ bool found;
+
+ for (i = 0; i < RANGE1_NUM_VALS; i++) {
+ ret = linear_range_get_selector_low_array(&testr[0], 2,
+ range1_vals[i], &sel,
+ &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range1_sels[i]);
+ KUNIT_EXPECT_TRUE(test, found);
+ }
+ for (i = 0; i < RANGE2_NUM_VALS; i++) {
+ ret = linear_range_get_selector_low_array(&testr[0], 2,
+ range2_vals[i], &sel,
+ &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range2_sels[i]);
+ KUNIT_EXPECT_TRUE(test, found);
+ }
+
+ /*
+	 * Seek a value greater than the range max => get_selector_*_low
+	 * should return 0 (Ok) but set found to false, as the value is
+	 * not in the range.
+ */
+ ret = linear_range_get_selector_low_array(&testr[0], 2,
+ range2_vals[RANGE2_NUM_VALS - 1] + 1,
+ &sel, &found);
+
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range2_sels[RANGE2_NUM_VALS - 1]);
+ KUNIT_EXPECT_FALSE(test, found);
+}
+
+static struct kunit_case range_test_cases[] = {
+ KUNIT_CASE(range_test_get_value_amount),
+ KUNIT_CASE(range_test_get_selector_high),
+ KUNIT_CASE(range_test_get_selector_low),
+ KUNIT_CASE(range_test_get_value),
+ {},
+};
+
+static struct kunit_suite range_test_module = {
+ .name = "linear-ranges-test",
+ .test_cases = range_test_cases,
+};
+
+kunit_test_suites(&range_test_module);
+
+MODULE_DESCRIPTION("KUnit test for the linear_ranges helper");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/test_list_sort.c b/lib/tests/test_list_sort.c
new file mode 100644
index 000000000000..30879abc8a42
--- /dev/null
+++ b/lib/tests/test_list_sort.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <kunit/test.h>
+
+#include <linux/kernel.h>
+#include <linux/list_sort.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+
+/*
+ * The pattern of set bits in the list length determines which cases
+ * are hit in list_sort().
+ */
+#define TEST_LIST_LEN (512+128+2) /* not including head */
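+/* 512 + 128 + 2 = 642 = 0b1010000010 */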
+
+#define TEST_POISON1 0xDEADBEEF
+#define TEST_POISON2 0xA324354C
+
+struct debug_el {
+ unsigned int poison1;
+ struct list_head list;
+ unsigned int poison2;
+ int value;
+ unsigned int serial;
+};
+
+static void check(struct kunit *test, struct debug_el *ela, struct debug_el *elb)
+{
+ struct debug_el **elts = test->priv;
+
+ KUNIT_EXPECT_LT_MSG(test, ela->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
+ KUNIT_EXPECT_LT_MSG(test, elb->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, elts[ela->serial], ela, "phantom element");
+ KUNIT_EXPECT_PTR_EQ_MSG(test, elts[elb->serial], elb, "phantom element");
+
+ KUNIT_EXPECT_EQ_MSG(test, ela->poison1, TEST_POISON1, "bad poison");
+ KUNIT_EXPECT_EQ_MSG(test, ela->poison2, TEST_POISON2, "bad poison");
+
+ KUNIT_EXPECT_EQ_MSG(test, elb->poison1, TEST_POISON1, "bad poison");
+ KUNIT_EXPECT_EQ_MSG(test, elb->poison2, TEST_POISON2, "bad poison");
+}
+
+/* `priv` is the test pointer so check() can fail the test if the list is invalid. */
+static int cmp(void *priv, const struct list_head *a, const struct list_head *b)
+{
+ struct debug_el *ela, *elb;
+
+ ela = container_of(a, struct debug_el, list);
+ elb = container_of(b, struct debug_el, list);
+
+ check(priv, ela, elb);
+ return ela->value - elb->value;
+}
+
+static void list_sort_test(struct kunit *test)
+{
+ int i, count = 1;
+ struct debug_el *el, **elts;
+ struct list_head *cur;
+ LIST_HEAD(head);
+
+ elts = kunit_kcalloc(test, TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elts);
+ test->priv = elts;
+
+ for (i = 0; i < TEST_LIST_LEN; i++) {
+ el = kunit_kmalloc(test, sizeof(*el), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el);
+
+ /* force some equivalencies */
+ el->value = get_random_u32_below(TEST_LIST_LEN / 3);
+ el->serial = i;
+ el->poison1 = TEST_POISON1;
+ el->poison2 = TEST_POISON2;
+ elts[i] = el;
+ list_add_tail(&el->list, &head);
+ }
+
+ list_sort(test, &head, cmp);
+
+ for (cur = head.next; cur->next != &head; cur = cur->next) {
+ struct debug_el *el1;
+ int cmp_result;
+
+ KUNIT_ASSERT_PTR_EQ_MSG(test, cur->next->prev, cur,
+ "list is corrupted");
+
+ cmp_result = cmp(test, cur, cur->next);
+ KUNIT_ASSERT_LE_MSG(test, cmp_result, 0, "list is not sorted");
+
+ el = container_of(cur, struct debug_el, list);
+ el1 = container_of(cur->next, struct debug_el, list);
+ if (cmp_result == 0) {
+ KUNIT_ASSERT_LE_MSG(test, el->serial, el1->serial,
+ "order of equivalent elements not preserved");
+ }
+
+ check(test, el, el1);
+ count++;
+ }
+ KUNIT_EXPECT_PTR_EQ_MSG(test, head.prev, cur, "list is corrupted");
+
+ KUNIT_EXPECT_EQ_MSG(test, count, TEST_LIST_LEN,
+ "list length changed after sorting!");
+}
+
+static struct kunit_case list_sort_cases[] = {
+ KUNIT_CASE(list_sort_test),
+ {}
+};
+
+static struct kunit_suite list_sort_suite = {
+ .name = "list_sort",
+ .test_cases = list_sort_cases,
+};
+
+kunit_test_suites(&list_sort_suite);
+
+MODULE_DESCRIPTION("list_sort() KUnit test suite");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/test_sort.c b/lib/tests/test_sort.c
new file mode 100644
index 000000000000..cd4a338d1153
--- /dev/null
+++ b/lib/tests/test_sort.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+/* a simple regression test for sort() */
+
+#define TEST_LEN 1000
+
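+/*
+ * Plain subtraction is a safe comparison here only because the values
+ * generated below are bounded by the modulus 6599, so the difference
+ * cannot overflow an int.
+ */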
+static int cmpint(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static void test_sort(struct kunit *test)
+{
+ int *a, i, r = 1;
+
+ a = kunit_kmalloc_array(test, TEST_LEN, sizeof(*a), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a);
+
+ for (i = 0; i < TEST_LEN; i++) {
+ r = (r * 725861) % 6599;
+ a[i] = r;
+ }
+
+ sort(a, TEST_LEN, sizeof(*a), cmpint, NULL);
+
+ for (i = 0; i < TEST_LEN - 1; i++)
+ KUNIT_ASSERT_LE(test, a[i], a[i + 1]);
+
+ r = 48;
+
+ for (i = 0; i < TEST_LEN - 1; i++) {
+ r = (r * 725861) % 6599;
+ a[i] = r;
+ }
+
+ sort(a, TEST_LEN - 1, sizeof(*a), cmpint, NULL);
+
+ for (i = 0; i < TEST_LEN - 2; i++)
+ KUNIT_ASSERT_LE(test, a[i], a[i + 1]);
+}
+
+static struct kunit_case sort_test_cases[] = {
+ KUNIT_CASE(test_sort),
+ {}
+};
+
+static struct kunit_suite sort_test_suite = {
+ .name = "lib_sort",
+ .test_cases = sort_test_cases,
+};
+
+kunit_test_suites(&sort_test_suite);
+
+MODULE_DESCRIPTION("sort() KUnit test suite");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/usercopy_kunit.c b/lib/tests/usercopy_kunit.c
new file mode 100644
index 000000000000..77fa00a13df7
--- /dev/null
+++ b/lib/tests/usercopy_kunit.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel module for testing copy_to/from_user infrastructure.
+ *
+ * Copyright 2013 Google Inc. All Rights Reserved
+ *
+ * Authors:
+ * Kees Cook <keescook@chromium.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <kunit/test.h>
+
+/*
+ * Several 32-bit architectures support 64-bit {get,put}_user() calls.
+ * As there doesn't appear to be anything that can safely determine
+ * their capability at compile-time, we just have to opt-out certain archs.
+ */
+#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
+ !defined(CONFIG_M68K) && \
+ !defined(CONFIG_MICROBLAZE) && \
+ !defined(CONFIG_NIOS2) && \
+ !defined(CONFIG_PPC32) && \
+ !defined(CONFIG_SUPERH))
+# define TEST_U64
+#endif
+
+struct usercopy_test_priv {
+ char *kmem;
+ char __user *umem;
+ size_t size;
+};
+
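+/*
+ * memchr_inv() returns a pointer to the first byte that differs from
+ * the given pattern (0x0 here), or NULL when all bytes match, so this
+ * is true exactly when the whole region is zeroed.
+ */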
+static bool is_zeroed(void *from, size_t size)
+{
+ return memchr_inv(from, 0x0, size) == NULL;
+}
+
+/* Test usage of check_nonzero_user(). */
+static void usercopy_test_check_nonzero_user(struct kunit *test)
+{
+ size_t start, end, i, zero_start, zero_end;
+ struct usercopy_test_priv *priv = test->priv;
+ char __user *umem = priv->umem;
+ char *kmem = priv->kmem;
+ size_t size = priv->size;
+
+ KUNIT_ASSERT_GE_MSG(test, size, 2 * PAGE_SIZE, "buffer too small");
+
+ /*
+ * We want to cross a page boundary to exercise the code more
+ * effectively. We also don't want to make the size we scan too large,
+ * otherwise the test can take a long time and cause soft lockups. So
+ * scan a 1024 byte region across the page boundary.
+ */
+ size = 1024;
+ start = PAGE_SIZE - (size / 2);
+
+ kmem += start;
+ umem += start;
+
+ zero_start = size / 4;
+ zero_end = size - zero_start;
+
+ /*
+ * We conduct a series of check_nonzero_user() tests on a block of
+ * memory with the following byte-pattern (trying every possible
+ * [start,end] pair):
+ *
+ * [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
+ *
+ * And we verify that check_nonzero_user() acts identically to
+ * memchr_inv().
+ */
+
+ memset(kmem, 0x0, size);
+ for (i = 1; i < zero_start; i += 2)
+ kmem[i] = 0xff;
+ for (i = zero_end; i < size; i += 2)
+ kmem[i] = 0xff;
+
+ KUNIT_EXPECT_EQ_MSG(test, copy_to_user(umem, kmem, size), 0,
+ "legitimate copy_to_user failed");
+
+ for (start = 0; start <= size; start++) {
+ for (end = start; end <= size; end++) {
+ size_t len = end - start;
+ int retval = check_zeroed_user(umem + start, len);
+ int expected = is_zeroed(kmem + start, len);
+
+ KUNIT_ASSERT_EQ_MSG(test, retval, expected,
+ "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
+ retval, expected, start, end);
+ }
+ }
+}
+
+/* Test usage of copy_struct_from_user(). */
+static void usercopy_test_copy_struct_from_user(struct kunit *test)
+{
+ char *umem_src = NULL, *expected = NULL;
+ struct usercopy_test_priv *priv = test->priv;
+ char __user *umem = priv->umem;
+ char *kmem = priv->kmem;
+ size_t size = priv->size;
+ size_t ksize, usize;
+
+ umem_src = kunit_kmalloc(test, size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, umem_src);
+
+ expected = kunit_kmalloc(test, size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected);
+
+ /* Fill umem with a fixed byte pattern. */
+ memset(umem_src, 0x3e, size);
+ KUNIT_ASSERT_EQ_MSG(test, copy_to_user(umem, umem_src, size), 0,
+ "legitimate copy_to_user failed");
+
+ /* Check basic case -- (usize == ksize). */
+ ksize = size;
+ usize = size;
+
+ memcpy(expected, umem_src, ksize);
+
+ memset(kmem, 0x0, size);
+ KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), 0,
+ "copy_struct_from_user(usize == ksize) failed");
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem, expected, ksize,
+ "copy_struct_from_user(usize == ksize) gives unexpected copy");
+
+ /* Old userspace case -- (usize < ksize). */
+ ksize = size;
+ usize = size / 2;
+
+ memcpy(expected, umem_src, usize);
+ memset(expected + usize, 0x0, ksize - usize);
+
+ memset(kmem, 0x0, size);
+ KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), 0,
+ "copy_struct_from_user(usize < ksize) failed");
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem, expected, ksize,
+ "copy_struct_from_user(usize < ksize) gives unexpected copy");
+
+ /* New userspace (-E2BIG) case -- (usize > ksize). */
+ ksize = size / 2;
+ usize = size;
+
+ memset(kmem, 0x0, size);
+ KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), -E2BIG,
+ "copy_struct_from_user(usize > ksize) didn't give E2BIG");
+
+ /* New userspace (success) case -- (usize > ksize). */
+ ksize = size / 2;
+ usize = size;
+
+ memcpy(expected, umem_src, ksize);
+ KUNIT_EXPECT_EQ_MSG(test, clear_user(umem + ksize, usize - ksize), 0,
+ "legitimate clear_user failed");
+
+ memset(kmem, 0x0, size);
+ KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), 0,
+ "copy_struct_from_user(usize > ksize) failed");
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem, expected, ksize,
+ "copy_struct_from_user(usize > ksize) gives unexpected copy");
+}
+
+/*
+ * Legitimate usage: none of these copies should fail.
+ */
+static void usercopy_test_valid(struct kunit *test)
+{
+ struct usercopy_test_priv *priv = test->priv;
+ char __user *usermem = priv->umem;
+ char *kmem = priv->kmem;
+
+ memset(kmem, 0x3a, PAGE_SIZE * 2);
+ KUNIT_EXPECT_EQ_MSG(test, 0, copy_to_user(usermem, kmem, PAGE_SIZE),
+ "legitimate copy_to_user failed");
+ memset(kmem, 0x0, PAGE_SIZE);
+ KUNIT_EXPECT_EQ_MSG(test, 0, copy_from_user(kmem, usermem, PAGE_SIZE),
+ "legitimate copy_from_user failed");
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem, kmem + PAGE_SIZE, PAGE_SIZE,
+ "legitimate usercopy failed to copy data");
+
+#define test_legit(size, check) \
+ do { \
+ size val_##size = (check); \
+ KUNIT_EXPECT_EQ_MSG(test, 0, \
+ put_user(val_##size, (size __user *)usermem), \
+ "legitimate put_user (" #size ") failed"); \
+ val_##size = 0; \
+ KUNIT_EXPECT_EQ_MSG(test, 0, \
+ get_user(val_##size, (size __user *)usermem), \
+ "legitimate get_user (" #size ") failed"); \
+ KUNIT_EXPECT_EQ_MSG(test, val_##size, check, \
+ "legitimate get_user (" #size ") failed to do copy"); \
+ } while (0)
+
+ test_legit(u8, 0x5a);
+ test_legit(u16, 0x5a5b);
+ test_legit(u32, 0x5a5b5c5d);
+#ifdef TEST_U64
+ test_legit(u64, 0x5a5b5c5d6a6b6c6d);
+#endif
+#undef test_legit
+}
+
+/*
+ * Invalid usage: none of these copies should succeed.
+ */
+static void usercopy_test_invalid(struct kunit *test)
+{
+ struct usercopy_test_priv *priv = test->priv;
+ char __user *usermem = priv->umem;
+ char *bad_usermem = (char *)usermem;
+ char *kmem = priv->kmem;
+ u64 *kmem_u64 = (u64 *)kmem;
+
+ if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) ||
+ !IS_ENABLED(CONFIG_MMU)) {
+ kunit_skip(test, "Testing for kernel/userspace address confusion is only sensible on architectures with a shared address space");
+ return;
+ }
+
+ /* Prepare kernel memory with check values. */
+ memset(kmem, 0x5a, PAGE_SIZE);
+ memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);
+
+ /* Reject kernel-to-kernel copies through copy_from_user(). */
+ KUNIT_EXPECT_NE_MSG(test, copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
+ PAGE_SIZE), 0,
+ "illegal all-kernel copy_from_user passed");
+
+ /* Destination half of buffer should have been zeroed. */
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem + PAGE_SIZE, kmem, PAGE_SIZE,
+ "zeroing failure for illegal all-kernel copy_from_user");
+
+#if 0
+ /*
+ * When running with SMAP/PAN/etc, this will Oops the kernel
+ * due to the zeroing of userspace memory on failure. This needs
+ * to be tested in LKDTM instead, since this test module does not
+ * expect to explode.
+ */
+ KUNIT_EXPECT_NE_MSG(test, copy_from_user(bad_usermem, (char __user *)kmem,
+ PAGE_SIZE), 0,
+ "illegal reversed copy_from_user passed");
+#endif
+ KUNIT_EXPECT_NE_MSG(test, copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
+ PAGE_SIZE), 0,
+ "illegal all-kernel copy_to_user passed");
+
+ KUNIT_EXPECT_NE_MSG(test, copy_to_user((char __user *)kmem, bad_usermem,
+ PAGE_SIZE), 0,
+ "illegal reversed copy_to_user passed");
+
+#define test_illegal(size, check) \
+ do { \
+ size val_##size = (check); \
+ /* get_user() */ \
+ KUNIT_EXPECT_NE_MSG(test, get_user(val_##size, (size __user *)kmem), 0, \
+ "illegal get_user (" #size ") passed"); \
+ KUNIT_EXPECT_EQ_MSG(test, val_##size, 0, \
+ "zeroing failure for illegal get_user (" #size ")"); \
+ /* put_user() */ \
+ *kmem_u64 = 0xF09FA4AFF09FA4AF; \
+ KUNIT_EXPECT_NE_MSG(test, put_user(val_##size, (size __user *)kmem), 0, \
+ "illegal put_user (" #size ") passed"); \
+ KUNIT_EXPECT_EQ_MSG(test, *kmem_u64, 0xF09FA4AFF09FA4AF, \
+ "illegal put_user (" #size ") wrote to kernel memory!"); \
+ } while (0)
+
+ test_illegal(u8, 0x5a);
+ test_illegal(u16, 0x5a5b);
+ test_illegal(u32, 0x5a5b5c5d);
+#ifdef TEST_U64
+ test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
+#endif
+#undef test_illegal
+}
+
+static int usercopy_test_init(struct kunit *test)
+{
+ struct usercopy_test_priv *priv;
+ unsigned long user_addr;
+
+ if (!IS_ENABLED(CONFIG_MMU)) {
+ kunit_skip(test, "Userspace allocation testing not available on non-MMU systems");
+ return 0;
+ }
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ test->priv = priv;
+ priv->size = PAGE_SIZE * 2;
+
+ priv->kmem = kunit_kmalloc(test, priv->size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->kmem);
+
+ user_addr = kunit_vm_mmap(test, NULL, 0, priv->size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ KUNIT_ASSERT_NE_MSG(test, user_addr, 0,
+ "Could not create userspace mm");
+ KUNIT_ASSERT_LT_MSG(test, user_addr, (unsigned long)TASK_SIZE,
+ "Failed to allocate user memory");
+ priv->umem = (char __user *)user_addr;
+
+ return 0;
+}
+
+static struct kunit_case usercopy_test_cases[] = {
+ KUNIT_CASE(usercopy_test_valid),
+ KUNIT_CASE(usercopy_test_invalid),
+ KUNIT_CASE(usercopy_test_check_nonzero_user),
+ KUNIT_CASE(usercopy_test_copy_struct_from_user),
+ {}
+};
+
+static struct kunit_suite usercopy_test_suite = {
+ .name = "usercopy",
+ .init = usercopy_test_init,
+ .test_cases = usercopy_test_cases,
+};
+
+kunit_test_suites(&usercopy_test_suite);
+MODULE_AUTHOR("Kees Cook <kees@kernel.org>");
+MODULE_DESCRIPTION("Kernel module for testing copy_to/from_user infrastructure");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/util_macros_kunit.c b/lib/tests/util_macros_kunit.c
new file mode 100644
index 000000000000..94cc9f0de50a
--- /dev/null
+++ b/lib/tests/util_macros_kunit.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for util_macros.h helpers.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/util_macros.h>
+
+#define FIND_CLOSEST_RANGE_CHECK(from, to, array, exp_idx) \
+{ \
+ int i; \
+ for (i = from; i <= to; i++) { \
+ int found = find_closest(i, array, ARRAY_SIZE(array)); \
+ KUNIT_ASSERT_EQ(ctx, exp_idx, found); \
+ } \
+}
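+
+/*
+ * Illustrative example of the semantics checked below: for
+ * ina226_avg_tab = { 1, 4, 16, ... }, 10 sits exactly on the midpoint
+ * of 4 and 16 and maps to the lower entry (index 1), while 11 maps to
+ * 16 (index 2). This matches the (3, 10) -> 1 and (11, 40) -> 2 check
+ * ranges below.
+ */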
+
+static void test_find_closest(struct kunit *ctx)
+{
+ /* This will test a few arrays that are found in drivers */
+ static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
+ static const unsigned int ad7616_oversampling_avail[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128,
+ };
+ static u32 wd_timeout_table[] = { 2, 4, 6, 8, 16, 32, 48, 64 };
+ static int array_prog1a[] = { 1, 2, 3, 4, 5 };
+ static u32 array_prog1b[] = { 2, 3, 4, 5, 6 };
+ static int array_prog1mix[] = { -2, -1, 0, 1, 2 };
+ static int array_prog2a[] = { 1, 3, 5, 7 };
+ static u32 array_prog2b[] = { 2, 4, 6, 8 };
+ static int array_prog3a[] = { 1, 4, 7, 10 };
+ static u32 array_prog3b[] = { 2, 5, 8, 11 };
+ static int array_prog4a[] = { 1, 5, 9, 13 };
+ static u32 array_prog4b[] = { 2, 6, 10, 14 };
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 2, ina226_avg_tab, 0);
+ FIND_CLOSEST_RANGE_CHECK(3, 10, ina226_avg_tab, 1);
+ FIND_CLOSEST_RANGE_CHECK(11, 40, ina226_avg_tab, 2);
+ FIND_CLOSEST_RANGE_CHECK(41, 96, ina226_avg_tab, 3);
+ FIND_CLOSEST_RANGE_CHECK(97, 192, ina226_avg_tab, 4);
+ FIND_CLOSEST_RANGE_CHECK(193, 384, ina226_avg_tab, 5);
+ FIND_CLOSEST_RANGE_CHECK(385, 768, ina226_avg_tab, 6);
+ FIND_CLOSEST_RANGE_CHECK(769, 2048, ina226_avg_tab, 7);
+
+ /* The array that found the bug that caused this kunit to exist */
+ FIND_CLOSEST_RANGE_CHECK(-3, 1, ad7616_oversampling_avail, 0);
+ FIND_CLOSEST_RANGE_CHECK(2, 3, ad7616_oversampling_avail, 1);
+ FIND_CLOSEST_RANGE_CHECK(4, 6, ad7616_oversampling_avail, 2);
+ FIND_CLOSEST_RANGE_CHECK(7, 12, ad7616_oversampling_avail, 3);
+ FIND_CLOSEST_RANGE_CHECK(13, 24, ad7616_oversampling_avail, 4);
+ FIND_CLOSEST_RANGE_CHECK(25, 48, ad7616_oversampling_avail, 5);
+ FIND_CLOSEST_RANGE_CHECK(49, 96, ad7616_oversampling_avail, 6);
+ FIND_CLOSEST_RANGE_CHECK(97, 256, ad7616_oversampling_avail, 7);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 3, wd_timeout_table, 0);
+ FIND_CLOSEST_RANGE_CHECK(4, 5, wd_timeout_table, 1);
+ FIND_CLOSEST_RANGE_CHECK(6, 7, wd_timeout_table, 2);
+ FIND_CLOSEST_RANGE_CHECK(8, 12, wd_timeout_table, 3);
+ FIND_CLOSEST_RANGE_CHECK(13, 24, wd_timeout_table, 4);
+ FIND_CLOSEST_RANGE_CHECK(25, 40, wd_timeout_table, 5);
+ FIND_CLOSEST_RANGE_CHECK(41, 56, wd_timeout_table, 6);
+ FIND_CLOSEST_RANGE_CHECK(57, 128, wd_timeout_table, 7);
+
+ /* One could argue that find_closest() should not be used for monotonic
+ * arrays (like 1,2,3,4,5), but even so, it should work as long as the
+ * array is sorted ascending. */
+ FIND_CLOSEST_RANGE_CHECK(-3, 1, array_prog1a, 0);
+ FIND_CLOSEST_RANGE_CHECK(2, 2, array_prog1a, 1);
+ FIND_CLOSEST_RANGE_CHECK(3, 3, array_prog1a, 2);
+ FIND_CLOSEST_RANGE_CHECK(4, 4, array_prog1a, 3);
+ FIND_CLOSEST_RANGE_CHECK(5, 8, array_prog1a, 4);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 2, array_prog1b, 0);
+ FIND_CLOSEST_RANGE_CHECK(3, 3, array_prog1b, 1);
+ FIND_CLOSEST_RANGE_CHECK(4, 4, array_prog1b, 2);
+ FIND_CLOSEST_RANGE_CHECK(5, 5, array_prog1b, 3);
+ FIND_CLOSEST_RANGE_CHECK(6, 8, array_prog1b, 4);
+
+ FIND_CLOSEST_RANGE_CHECK(-4, -2, array_prog1mix, 0);
+ FIND_CLOSEST_RANGE_CHECK(-1, -1, array_prog1mix, 1);
+ FIND_CLOSEST_RANGE_CHECK(0, 0, array_prog1mix, 2);
+ FIND_CLOSEST_RANGE_CHECK(1, 1, array_prog1mix, 3);
+ FIND_CLOSEST_RANGE_CHECK(2, 5, array_prog1mix, 4);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 2, array_prog2a, 0);
+ FIND_CLOSEST_RANGE_CHECK(3, 4, array_prog2a, 1);
+ FIND_CLOSEST_RANGE_CHECK(5, 6, array_prog2a, 2);
+ FIND_CLOSEST_RANGE_CHECK(7, 10, array_prog2a, 3);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 3, array_prog2b, 0);
+ FIND_CLOSEST_RANGE_CHECK(4, 5, array_prog2b, 1);
+ FIND_CLOSEST_RANGE_CHECK(6, 7, array_prog2b, 2);
+ FIND_CLOSEST_RANGE_CHECK(8, 10, array_prog2b, 3);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 2, array_prog3a, 0);
+ FIND_CLOSEST_RANGE_CHECK(3, 5, array_prog3a, 1);
+ FIND_CLOSEST_RANGE_CHECK(6, 8, array_prog3a, 2);
+ FIND_CLOSEST_RANGE_CHECK(9, 20, array_prog3a, 3);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 3, array_prog3b, 0);
+ FIND_CLOSEST_RANGE_CHECK(4, 6, array_prog3b, 1);
+ FIND_CLOSEST_RANGE_CHECK(7, 9, array_prog3b, 2);
+ FIND_CLOSEST_RANGE_CHECK(10, 20, array_prog3b, 3);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 3, array_prog4a, 0);
+ FIND_CLOSEST_RANGE_CHECK(4, 7, array_prog4a, 1);
+ FIND_CLOSEST_RANGE_CHECK(8, 11, array_prog4a, 2);
+ FIND_CLOSEST_RANGE_CHECK(12, 20, array_prog4a, 3);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 4, array_prog4b, 0);
+ FIND_CLOSEST_RANGE_CHECK(5, 8, array_prog4b, 1);
+ FIND_CLOSEST_RANGE_CHECK(9, 12, array_prog4b, 2);
+ FIND_CLOSEST_RANGE_CHECK(13, 20, array_prog4b, 3);
+}
+
+#define FIND_CLOSEST_DESC_RANGE_CHECK(from, to, array, exp_idx) \
+{ \
+ int i; \
+ for (i = from; i <= to; i++) { \
+ int found = find_closest_descending(i, array, \
+ ARRAY_SIZE(array)); \
+ KUNIT_ASSERT_EQ(ctx, exp_idx, found); \
+ } \
+}
+
+static void test_find_closest_descending(struct kunit *ctx)
+{
+ /* Same arrays as 'test_find_closest' but reversed */
+ static const int ina226_avg_tab[] = { 1024, 512, 256, 128, 64, 16, 4, 1 };
+ static const unsigned int ad7616_oversampling_avail[] = {
+ 128, 64, 32, 16, 8, 4, 2, 1
+ };
+ static u32 wd_timeout_table[] = { 64, 48, 32, 16, 8, 6, 4, 2 };
+ static int array_prog1a[] = { 5, 4, 3, 2, 1 };
+ static u32 array_prog1b[] = { 6, 5, 4, 3, 2 };
+ static int array_prog1mix[] = { 2, 1, 0, -1, -2 };
+ static int array_prog2a[] = { 7, 5, 3, 1 };
+ static u32 array_prog2b[] = { 8, 6, 4, 2 };
+ static int array_prog3a[] = { 10, 7, 4, 1 };
+ static u32 array_prog3b[] = { 11, 8, 5, 2 };
+ static int array_prog4a[] = { 13, 9, 5, 1 };
+ static u32 array_prog4b[] = { 14, 10, 6, 2 };
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 2, ina226_avg_tab, 7);
+ FIND_CLOSEST_DESC_RANGE_CHECK(3, 10, ina226_avg_tab, 6);
+ FIND_CLOSEST_DESC_RANGE_CHECK(11, 40, ina226_avg_tab, 5);
+ FIND_CLOSEST_DESC_RANGE_CHECK(41, 96, ina226_avg_tab, 4);
+ FIND_CLOSEST_DESC_RANGE_CHECK(97, 192, ina226_avg_tab, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(193, 384, ina226_avg_tab, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(385, 768, ina226_avg_tab, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(769, 2048, ina226_avg_tab, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 1, ad7616_oversampling_avail, 7);
+ FIND_CLOSEST_DESC_RANGE_CHECK(2, 3, ad7616_oversampling_avail, 6);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 6, ad7616_oversampling_avail, 5);
+ FIND_CLOSEST_DESC_RANGE_CHECK(7, 12, ad7616_oversampling_avail, 4);
+ FIND_CLOSEST_DESC_RANGE_CHECK(13, 24, ad7616_oversampling_avail, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(25, 48, ad7616_oversampling_avail, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(49, 96, ad7616_oversampling_avail, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(97, 256, ad7616_oversampling_avail, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 3, wd_timeout_table, 7);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 5, wd_timeout_table, 6);
+ FIND_CLOSEST_DESC_RANGE_CHECK(6, 7, wd_timeout_table, 5);
+ FIND_CLOSEST_DESC_RANGE_CHECK(8, 12, wd_timeout_table, 4);
+ FIND_CLOSEST_DESC_RANGE_CHECK(13, 24, wd_timeout_table, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(25, 40, wd_timeout_table, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(41, 56, wd_timeout_table, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(57, 128, wd_timeout_table, 0);
+
+ /* One could argue that find_closest_descending() should not be used
+	 * for monotonic arrays (like 5,4,3,2,1), but even so, it should
+	 * still work as long as the array is sorted descending. */
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 1, array_prog1a, 4);
+ FIND_CLOSEST_DESC_RANGE_CHECK(2, 2, array_prog1a, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(3, 3, array_prog1a, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 4, array_prog1a, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(5, 8, array_prog1a, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 2, array_prog1b, 4);
+ FIND_CLOSEST_DESC_RANGE_CHECK(3, 3, array_prog1b, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 4, array_prog1b, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(5, 5, array_prog1b, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(6, 8, array_prog1b, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-4, -2, array_prog1mix, 4);
+ FIND_CLOSEST_DESC_RANGE_CHECK(-1, -1, array_prog1mix, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(0, 0, array_prog1mix, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(1, 1, array_prog1mix, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(2, 5, array_prog1mix, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 2, array_prog2a, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(3, 4, array_prog2a, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(5, 6, array_prog2a, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(7, 10, array_prog2a, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 3, array_prog2b, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 5, array_prog2b, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(6, 7, array_prog2b, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(8, 10, array_prog2b, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 2, array_prog3a, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(3, 5, array_prog3a, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(6, 8, array_prog3a, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(9, 20, array_prog3a, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 3, array_prog3b, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 6, array_prog3b, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(7, 9, array_prog3b, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(10, 20, array_prog3b, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 3, array_prog4a, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 7, array_prog4a, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(8, 11, array_prog4a, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(12, 20, array_prog4a, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 4, array_prog4b, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(5, 8, array_prog4b, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(9, 12, array_prog4b, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(13, 20, array_prog4b, 0);
+}
+
+static struct kunit_case __refdata util_macros_test_cases[] = {
+ KUNIT_CASE(test_find_closest),
+ KUNIT_CASE(test_find_closest_descending),
+ {}
+};
+
+static struct kunit_suite util_macros_test_suite = {
+ .name = "util_macros.h",
+ .test_cases = util_macros_test_cases,
+};
+
+kunit_test_suites(&util_macros_test_suite);
+
+MODULE_AUTHOR("Alexandru Ardelean <aardelean@baylibre.com>");
+MODULE_DESCRIPTION("Test cases for util_macros.h helpers");
+MODULE_LICENSE("GPL");