author    Petr Mladek <pmladek@suse.com>  2021-11-02 10:39:27 +0100
committer Petr Mladek <pmladek@suse.com>  2021-11-02 10:39:27 +0100
commit    40e64a88dadcfa168914065baf7f035de957bbe0 (patch)
tree      06c8c4a9e6c1b478aa6851794c6a33bec1ce6ec4 /lib
parent    24a1dffbecafeb00d8830985eb7a318e37aabc4e (diff)
parent    6a7ca80f4033c9cf3003625b2ef8b497f4ec44da (diff)
Merge branch 'for-5.16-vsprintf-pgp' into for-linus
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug          |  13
-rw-r--r--  lib/Kconfig.kcsan          |  42
-rw-r--r--  lib/bootconfig.c           |   8
-rw-r--r--  lib/debug_info.c           |   3
-rw-r--r--  lib/dump_stack.c           |   3
-rw-r--r--  lib/iov_iter.c             |   8
-rw-r--r--  lib/kasprintf.c            |   2
-rw-r--r--  lib/kunit/string-stream.h  |   2
-rw-r--r--  lib/kunit/test.c           | 109
-rw-r--r--  lib/logic_iomem.c          |  16
-rw-r--r--  lib/math/Kconfig           |   2
-rw-r--r--  lib/math/rational.c        |   3
-rw-r--r--  lib/scatterlist.c          | 160
-rw-r--r--  lib/sg_pool.c              |   3
-rw-r--r--  lib/test_kasan.c           |  80
-rw-r--r--  lib/test_kasan_module.c    |  20
-rw-r--r--  lib/test_printf.c          |  66
-rw-r--r--  lib/test_scanf.c           |  79
-rw-r--r--  lib/test_sort.c            |  40
-rw-r--r--  lib/test_stackinit.c       | 253
-rw-r--r--  lib/test_vmalloc.c         |   5
-rw-r--r--  lib/ubsan.c                |   3
-rw-r--r--  lib/vsprintf.c             |  12
23 files changed, 657 insertions, 275 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 12b805dabbc9..ed4a31e34098 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -346,7 +346,7 @@ config FRAME_WARN
int "Warn for stack frames larger than"
range 0 8192
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
- default 1280 if (!64BIT && PARISC)
+ default 1536 if (!64BIT && PARISC)
default 1024 if (!64BIT && !PARISC)
default 2048 if 64BIT
help
@@ -365,6 +365,7 @@ config STRIP_ASM_SYMS
config READABLE_ASM
bool "Generate readable assembler code"
depends on DEBUG_KERNEL
+ depends on CC_IS_GCC
help
Disable some compiler optimizations that tend to generate human unreadable
assembler output. This may make the kernel slightly slower, but it helps
@@ -383,6 +384,7 @@ config HEADERS_INSTALL
config DEBUG_SECTION_MISMATCH
bool "Enable full Section mismatch analysis"
+ depends on CC_IS_GCC
help
The section mismatch analysis checks if there are illegal
references from one section to another section.
@@ -1062,7 +1064,6 @@ config HARDLOCKUP_DETECTOR
depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_ARCH
select LOCKUP_DETECTOR
select HARDLOCKUP_DETECTOR_PERF if HAVE_HARDLOCKUP_DETECTOR_PERF
- select HARDLOCKUP_DETECTOR_ARCH if HAVE_HARDLOCKUP_DETECTOR_ARCH
help
Say Y here to enable the kernel to act as a watchdog to detect
hard lockups.
@@ -2059,8 +2060,9 @@ config TEST_MIN_HEAP
If unsure, say N.
config TEST_SORT
- tristate "Array-based sort test"
- depends on DEBUG_KERNEL || m
+ tristate "Array-based sort test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This option enables the self-test function of 'sort()' at boot,
or at module load time.
@@ -2441,8 +2443,7 @@ config SLUB_KUNIT_TEST
config RATIONAL_KUNIT_TEST
tristate "KUnit test for rational.c" if !KUNIT_ALL_TESTS
- depends on KUNIT
- select RATIONAL
+ depends on KUNIT && RATIONAL
default KUNIT_ALL_TESTS
help
This builds the rational math unit test.
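
Both the TEST_SORT and RATIONAL_KUNIT_TEST hunks above follow the standard
KUnit Kconfig idiom: the prompt is hidden once KUNIT_ALL_TESTS is set, and
the test then defaults on. A minimal sketch of the pattern, with
FOO_KUNIT_TEST as a hypothetical entry:

    config FOO_KUNIT_TEST
            tristate "KUnit test for foo" if !KUNIT_ALL_TESTS
            depends on KUNIT
            default KUNIT_ALL_TESTS

Note also that RATIONAL_KUNIT_TEST now depends on RATIONAL rather than
selecting it, matching RATIONAL's change to tristate in lib/math/Kconfig
below.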
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 0440f373248e..e0a93ffdef30 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -40,10 +40,14 @@ menuconfig KCSAN
if KCSAN
-# Compiler capabilities that should not fail the test if they are unavailable.
config CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-compound-read-before-write=1)) || \
(CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-compound-read-before-write=1))
+ help
+ The compiler instruments plain compound read-write operations
+ differently (++, --, +=, -=, |=, &=, etc.), which allows KCSAN to
+ distinguish them from other plain accesses. This is currently
+ supported by Clang 12 or later.
config KCSAN_VERBOSE
bool "Show verbose reports with more information about system state"
@@ -58,9 +62,6 @@ config KCSAN_VERBOSE
generated from any one of them, system stability may suffer due to
deadlocks or recursion. If in doubt, say N.
-config KCSAN_DEBUG
- bool "Debugging of KCSAN internals"
-
config KCSAN_SELFTEST
bool "Perform short selftests on boot"
default y
@@ -149,7 +150,8 @@ config KCSAN_SKIP_WATCH_RANDOMIZE
KCSAN_WATCH_SKIP.
config KCSAN_INTERRUPT_WATCHER
- bool "Interruptible watchers"
+ bool "Interruptible watchers" if !KCSAN_STRICT
+ default KCSAN_STRICT
help
If enabled, a task that set up a watchpoint may be interrupted while
delayed. This option will allow KCSAN to detect races between
@@ -169,13 +171,9 @@ config KCSAN_REPORT_ONCE_IN_MS
reporting to avoid flooding the console with reports. Setting this
to 0 disables rate limiting.
-# The main purpose of the below options is to control reported data races (e.g.
-# in fuzzer configs), and are not expected to be switched frequently by other
-# users. We could turn some of them into boot parameters, but given they should
-# not be switched normally, let's keep them here to simplify configuration.
-#
-# The defaults below are chosen to be very conservative, and may miss certain
-# bugs.
+# The main purpose of the below options is to control reported data races.
+# They are not expected to be switched frequently by non-testers or at
+# runtime. The defaults are chosen to be conservative, and can miss certain bugs.
config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
bool "Report races of unknown origin"
@@ -186,9 +184,17 @@ config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
reported if it was only possible to infer a race due to a data value
change while an access is being delayed on a watchpoint.
+config KCSAN_STRICT
+ bool "Strict data-race checking"
+ help
+ KCSAN will report data races with the strictest possible rules, which
+ closely aligns with the rules defined by the Linux-kernel memory
+ consistency model (LKMM).
+
config KCSAN_REPORT_VALUE_CHANGE_ONLY
bool "Only report races where watcher observed a data value change"
default y
+ depends on !KCSAN_STRICT
help
If enabled and a conflicting write is observed via a watchpoint, but
the data value of the memory location was observed to remain
@@ -197,6 +203,7 @@ config KCSAN_REPORT_VALUE_CHANGE_ONLY
config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC
bool "Assume that plain aligned writes up to word size are atomic"
default y
+ depends on !KCSAN_STRICT
help
Assume that plain aligned writes up to word size are atomic by
default, and also not subject to other unsafe compiler optimizations
@@ -209,6 +216,7 @@ config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC
config KCSAN_IGNORE_ATOMICS
bool "Do not instrument marked atomic accesses"
+ depends on !KCSAN_STRICT
help
Never instrument marked atomic accesses. This option can be used for
additional filtering. Conflicting marked atomic reads and plain
@@ -224,4 +232,14 @@ config KCSAN_IGNORE_ATOMICS
due to two conflicting plain writes will be reported (aligned and
unaligned, if CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n).
+config KCSAN_PERMISSIVE
+ bool "Enable all additional permissive rules"
+ depends on KCSAN_REPORT_VALUE_CHANGE_ONLY
+ help
+ Enable additional permissive rules to ignore certain classes of data
+ races (also see kernel/kcsan/permissive.h). None of the permissive
+ rules imply that such data races are generally safe, but can be used
+ to further reduce reported data races due to data-racy patterns
+ common across the kernel.
+
endif # KCSAN
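
The new CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE help text refers to plain
compound read-modify-write operations. A minimal sketch (with a hypothetical
shared counter) of the kind of race that KCSAN_STRICT, with the permissive
options above disabled, will report:

    static int hits;                /* shared and unannotated */

    /* Two contexts doing a plain ++ race with each other; under
     * KCSAN_STRICT both sides are reportable unless converted to
     * marked accesses, e.g. an atomic_t with atomic_inc(). */
    void handler_a(void) { hits++; }
    void handler_b(void) { hits++; }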
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 927017431fb6..f8419cff1147 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -142,16 +142,16 @@ xbc_node_match_prefix(struct xbc_node *node, const char **prefix)
}
/**
- * xbc_node_find_child() - Find a child node which matches given key
+ * xbc_node_find_subkey() - Find a subkey node which matches given key
* @parent: An XBC node.
* @key: A key string.
*
- * Search a node under @parent which matches @key. The @key can contain
+ * Search a key node under @parent which matches @key. The @key can contain
* several words jointed with '.'. If @parent is NULL, this searches the
* node from whole tree. Return NULL if no node is matched.
*/
struct xbc_node * __init
-xbc_node_find_child(struct xbc_node *parent, const char *key)
+xbc_node_find_subkey(struct xbc_node *parent, const char *key)
{
struct xbc_node *node;
@@ -191,7 +191,7 @@ const char * __init
xbc_node_find_value(struct xbc_node *parent, const char *key,
struct xbc_node **vnode)
{
- struct xbc_node *node = xbc_node_find_child(parent, key);
+ struct xbc_node *node = xbc_node_find_subkey(parent, key);
if (!node || !xbc_node_is_key(node))
return NULL;
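
A hedged usage sketch of the renamed helper, assuming a boot configuration
that actually contains an "ftrace.event.enable" key; a NULL parent searches
the whole tree:

    struct xbc_node *vnode;
    const char *val;

    /* Find the key node itself... */
    struct xbc_node *node = xbc_node_find_subkey(NULL, "ftrace.event");

    /* ...or go straight to a value below it. */
    val = xbc_node_find_value(NULL, "ftrace.event.enable", &vnode);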
diff --git a/lib/debug_info.c b/lib/debug_info.c
index 36daf753293c..cc4723c74af5 100644
--- a/lib/debug_info.c
+++ b/lib/debug_info.c
@@ -5,8 +5,6 @@
* CONFIG_DEBUG_INFO_REDUCED. Please do not add actual code. However,
* adding appropriate #includes is fine.
*/
-#include <stdarg.h>
-
#include <linux/cred.h>
#include <linux/crypto.h>
#include <linux/dcache.h>
@@ -22,6 +20,7 @@
#include <linux/net.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/stdarg.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <net/sock.h>
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index cd3387bb34e5..6b7f1bf6715d 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -89,7 +89,8 @@ static void __dump_stack(const char *log_lvl)
}
/**
- * dump_stack - dump the current task information and its stack trace
+ * dump_stack_lvl - dump the current task information and its stack trace
+ * @log_lvl: log level
*
* Architectures can override this implementation by implementing its own.
*/
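
A one-line usage sketch: dump_stack_lvl() behaves like dump_stack() but lets
the caller choose the console log level:

    dump_stack_lvl(KERN_DEBUG);     /* same trace, debug log level */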
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e23123ae3a13..f2d50d69a6c3 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -672,7 +672,7 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
* _copy_mc_to_iter - copy to iter with source memory error exception handling
* @addr: source kernel address
* @bytes: total transfer length
- * @iter: destination iterator
+ * @i: destination iterator
*
* The pmem driver deploys this for the dax operation
* (dax_copy_to_iter()) for dax reads (bypass page-cache and the
@@ -690,6 +690,8 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
* * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
* Compare to copy_to_iter() where only ITER_IOVEC attempts might return
* a short copy.
+ *
+ * Return: number of bytes copied (may be %0)
*/
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
@@ -744,7 +746,7 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
* _copy_from_iter_flushcache - write destination through cpu cache
* @addr: destination kernel address
* @bytes: total transfer length
- * @iter: source iterator
+ * @i: source iterator
*
* The pmem driver arranges for filesystem-dax to use this facility via
* dax_copy_from_iter() for ensuring that writes to persistent memory
@@ -753,6 +755,8 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
* all iterator types. The _copy_from_iter_nocache() only attempts to
* bypass the cache for the ITER_IOVEC case, and on some archs may use
* instructions that strand dirty-data in the cache.
+ *
+ * Return: number of bytes copied (may be %0)
*/
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
diff --git a/lib/kasprintf.c b/lib/kasprintf.c
index bacf7b83ccf0..cd2f5974ed98 100644
--- a/lib/kasprintf.c
+++ b/lib/kasprintf.c
@@ -5,7 +5,7 @@
* Copyright (C) 1991, 1992 Linus Torvalds
*/
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/lib/kunit/string-stream.h b/lib/kunit/string-stream.h
index 5e94b623454f..43f9508a55b4 100644
--- a/lib/kunit/string-stream.h
+++ b/lib/kunit/string-stream.h
@@ -11,7 +11,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <stdarg.h>
+#include <linux/stdarg.h>
struct string_stream_fragment {
struct kunit *test;
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index d79ecb86ea57..f246b847024e 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -10,6 +10,7 @@
#include <kunit/test-bug.h>
#include <linux/kernel.h>
#include <linux/kref.h>
+#include <linux/moduleparam.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
@@ -52,6 +53,51 @@ EXPORT_SYMBOL_GPL(__kunit_fail_current_test);
#endif
/*
+ * KUnit statistic mode:
+ * 0 - disabled
+ * 1 - only when there is more than one subtest
+ * 2 - enabled
+ */
+static int kunit_stats_enabled = 1;
+module_param_named(stats_enabled, kunit_stats_enabled, int, 0644);
+MODULE_PARM_DESC(stats_enabled,
+ "Print test stats: never (0), only for multiple subtests (1), or always (2)");
+
+struct kunit_result_stats {
+ unsigned long passed;
+ unsigned long skipped;
+ unsigned long failed;
+ unsigned long total;
+};
+
+static bool kunit_should_print_stats(struct kunit_result_stats stats)
+{
+ if (kunit_stats_enabled == 0)
+ return false;
+
+ if (kunit_stats_enabled == 2)
+ return true;
+
+ return (stats.total > 1);
+}
+
+static void kunit_print_test_stats(struct kunit *test,
+ struct kunit_result_stats stats)
+{
+ if (!kunit_should_print_stats(stats))
+ return;
+
+ kunit_log(KERN_INFO, test,
+ KUNIT_SUBTEST_INDENT
+ "# %s: pass:%lu fail:%lu skip:%lu total:%lu",
+ test->name,
+ stats.passed,
+ stats.failed,
+ stats.skipped,
+ stats.total);
+}
+
+/*
* Append formatted message to log, size of which is limited to
* KUNIT_LOG_SIZE bytes (including null terminating byte).
*/
@@ -393,15 +439,69 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
test_case->status = KUNIT_SUCCESS;
}
+static void kunit_print_suite_stats(struct kunit_suite *suite,
+ struct kunit_result_stats suite_stats,
+ struct kunit_result_stats param_stats)
+{
+ if (kunit_should_print_stats(suite_stats)) {
+ kunit_log(KERN_INFO, suite,
+ "# %s: pass:%lu fail:%lu skip:%lu total:%lu",
+ suite->name,
+ suite_stats.passed,
+ suite_stats.failed,
+ suite_stats.skipped,
+ suite_stats.total);
+ }
+
+ if (kunit_should_print_stats(param_stats)) {
+ kunit_log(KERN_INFO, suite,
+ "# Totals: pass:%lu fail:%lu skip:%lu total:%lu",
+ param_stats.passed,
+ param_stats.failed,
+ param_stats.skipped,
+ param_stats.total);
+ }
+}
+
+static void kunit_update_stats(struct kunit_result_stats *stats,
+ enum kunit_status status)
+{
+ switch (status) {
+ case KUNIT_SUCCESS:
+ stats->passed++;
+ break;
+ case KUNIT_SKIPPED:
+ stats->skipped++;
+ break;
+ case KUNIT_FAILURE:
+ stats->failed++;
+ break;
+ }
+
+ stats->total++;
+}
+
+static void kunit_accumulate_stats(struct kunit_result_stats *total,
+ struct kunit_result_stats add)
+{
+ total->passed += add.passed;
+ total->skipped += add.skipped;
+ total->failed += add.failed;
+ total->total += add.total;
+}
+
int kunit_run_tests(struct kunit_suite *suite)
{
char param_desc[KUNIT_PARAM_DESC_SIZE];
struct kunit_case *test_case;
+ struct kunit_result_stats suite_stats = { 0 };
+ struct kunit_result_stats total_stats = { 0 };
kunit_print_subtest_start(suite);
kunit_suite_for_each_test_case(suite, test_case) {
struct kunit test = { .param_value = NULL, .param_index = 0 };
+ struct kunit_result_stats param_stats = { 0 };
test_case->status = KUNIT_SKIPPED;
if (test_case->generate_params) {
@@ -431,14 +531,23 @@ int kunit_run_tests(struct kunit_suite *suite)
test.param_value = test_case->generate_params(test.param_value, param_desc);
test.param_index++;
}
+
+ kunit_update_stats(&param_stats, test.status);
+
} while (test.param_value);
+ kunit_print_test_stats(&test, param_stats);
+
kunit_print_ok_not_ok(&test, true, test_case->status,
kunit_test_case_num(suite, test_case),
test_case->name,
test.status_comment);
+
+ kunit_update_stats(&suite_stats, test_case->status);
+ kunit_accumulate_stats(&total_stats, param_stats);
}
+ kunit_print_suite_stats(suite, suite_stats, total_stats);
kunit_print_subtest_end(suite);
return 0;
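
The new stats_enabled parameter is settable at boot for built-in KUnit
(kunit.stats_enabled=2) or at module load time (modprobe kunit
stats_enabled=2). With stats forced on, the TAP log gains lines shaped by
the format strings above; the suite name and counts below are illustrative
only:

    # example_suite: pass:3 fail:0 skip:1 total:4
    # Totals: pass:3 fail:0 skip:1 total:4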
diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c
index b76b92dd0f1f..9bdfde0c0f86 100644
--- a/lib/logic_iomem.c
+++ b/lib/logic_iomem.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/logic_iomem.h>
+#include <asm/io.h>
struct logic_iomem_region {
const struct resource *res;
@@ -78,7 +79,7 @@ static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
static void real_iounmap(void __iomem *addr)
{
WARN(1, "invalid iounmap for addr 0x%llx\n",
- (unsigned long long)addr);
+ (unsigned long long __force)addr);
}
#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
@@ -172,14 +173,15 @@ EXPORT_SYMBOL(iounmap);
static u##sz real_raw_read ## op(const volatile void __iomem *addr) \
{ \
WARN(1, "Invalid read" #op " at address %llx\n", \
- (unsigned long long)addr); \
+ (unsigned long long __force)addr); \
return (u ## sz)~0ULL; \
} \
\
-void real_raw_write ## op(u ## sz val, volatile void __iomem *addr) \
+static void real_raw_write ## op(u ## sz val, \
+ volatile void __iomem *addr) \
{ \
WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \
- (unsigned long long)val, (unsigned long long)addr); \
+ (unsigned long long)val, (unsigned long long __force)addr);\
} \
MAKE_FALLBACK(b, 8);
@@ -192,14 +194,14 @@ MAKE_FALLBACK(q, 64);
static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
{
WARN(1, "Invalid memset_io at address 0x%llx\n",
- (unsigned long long)addr);
+ (unsigned long long __force)addr);
}
static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
size_t size)
{
WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
- (unsigned long long)addr);
+ (unsigned long long __force)addr);
memset(buffer, 0xff, size);
}
@@ -208,7 +210,7 @@ static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
size_t size)
{
WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
- (unsigned long long)addr);
+ (unsigned long long __force)addr);
}
#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
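
The recurring change in this file adds __force so that sparse accepts
converting an __iomem cookie into a plain integer for logging. A minimal
sketch of the pattern:

    static void log_bad_access(const volatile void __iomem *addr)
    {
            /* Without __force, sparse warns about dropping __iomem. */
            pr_warn("bad access at 0x%llx\n",
                    (unsigned long long __force)addr);
    }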
diff --git a/lib/math/Kconfig b/lib/math/Kconfig
index f19bc9734fa7..0634b428d0cb 100644
--- a/lib/math/Kconfig
+++ b/lib/math/Kconfig
@@ -14,4 +14,4 @@ config PRIME_NUMBERS
If unsure, say N.
config RATIONAL
- bool
+ tristate
diff --git a/lib/math/rational.c b/lib/math/rational.c
index c0ab51d8fbb9..ec59d426ea63 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/minmax.h>
#include <linux/limits.h>
+#include <linux/module.h>
/*
* calculate best rational approximation for a given fraction
@@ -106,3 +107,5 @@ void rational_best_approximation(
}
EXPORT_SYMBOL(rational_best_approximation);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 27efa6178153..abb3432ed744 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -182,6 +182,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
* @nents_first_chunk: Number of entries in the (preallocated) first
* scatterlist chunk, 0 means no such preallocated first chunk
* @free_fn: Free function
+ * @num_ents: Number of entries in the table
*
* Description:
* Free an sg table previously allocated and setup with
@@ -190,7 +191,8 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
*
**/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
- unsigned int nents_first_chunk, sg_free_fn *free_fn)
+ unsigned int nents_first_chunk, sg_free_fn *free_fn,
+ unsigned int num_ents)
{
struct scatterlist *sgl, *next;
unsigned curr_max_ents = nents_first_chunk ?: max_ents;
@@ -199,8 +201,8 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
return;
sgl = table->sgl;
- while (table->orig_nents) {
- unsigned int alloc_size = table->orig_nents;
+ while (num_ents) {
+ unsigned int alloc_size = num_ents;
unsigned int sg_size;
/*
@@ -218,7 +220,7 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
next = NULL;
}
- table->orig_nents -= sg_size;
+ num_ents -= sg_size;
if (nents_first_chunk)
nents_first_chunk = 0;
else
@@ -232,13 +234,27 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
EXPORT_SYMBOL(__sg_free_table);
/**
+ * sg_free_append_table - Free a previously allocated append sg table.
+ * @table: The mapped sg append table header
+ *
+ **/
+void sg_free_append_table(struct sg_append_table *table)
+{
+ __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, false, sg_kfree,
+ table->total_nents);
+}
+EXPORT_SYMBOL(sg_free_append_table);
+
+
+/**
* sg_free_table - Free a previously allocated sg table
* @table: The mapped sg table header
*
**/
void sg_free_table(struct sg_table *table)
{
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree,
+ table->orig_nents);
}
EXPORT_SYMBOL(sg_free_table);
@@ -359,13 +375,12 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
NULL, 0, gfp_mask, sg_kmalloc);
if (unlikely(ret))
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
-
+ sg_free_table(table);
return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
-static struct scatterlist *get_next_sg(struct sg_table *table,
+static struct scatterlist *get_next_sg(struct sg_append_table *table,
struct scatterlist *cur,
unsigned long needed_sges,
gfp_t gfp_mask)
@@ -386,54 +401,52 @@ static struct scatterlist *get_next_sg(struct sg_table *table,
return ERR_PTR(-ENOMEM);
sg_init_table(new_sg, alloc_size);
if (cur) {
+ table->total_nents += alloc_size - 1;
__sg_chain(next_sg, new_sg);
- table->orig_nents += alloc_size - 1;
} else {
- table->sgl = new_sg;
- table->orig_nents = alloc_size;
- table->nents = 0;
+ table->sgt.sgl = new_sg;
+ table->total_nents = alloc_size;
}
return new_sg;
}
/**
- * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
- * an array of pages
- * @sgt: The sg table header to use
- * @pages: Pointer to an array of page pointers
- * @n_pages: Number of pages in the pages array
+ * sg_alloc_append_table_from_pages - Allocate and initialize an append sg
+ * table from an array of pages
+ * @sgt_append: The sg append table to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
* @offset: Offset from start of the first page to the start of a buffer
* @size: Number of valid bytes in the buffer (after offset)
* @max_segment: Maximum size of a scatterlist element in bytes
- * @prv: Last populated sge in sgt
* @left_pages: Left pages caller have to set after this call
* @gfp_mask: GFP allocation mask
*
* Description:
- * If @prv is NULL, allocate and initialize an sg table from a list of pages,
- * else reuse the scatterlist passed in at @prv.
- * Contiguous ranges of the pages are squashed into a single scatterlist
- * entry up to the maximum size specified in @max_segment. A user may
- * provide an offset at a start and a size of valid data in a buffer
- * specified by the page array.
+ * In the first call it allocates and initializes an sg table from a list
+ * of pages; later calls reuse the scatterlist from @sgt_append. Contiguous
+ * ranges of the pages are squashed into a single scatterlist entry up to
+ * the maximum size specified in @max_segment. A user may provide an offset
+ * at a start and a size of valid data in a buffer specified by the page
+ * array. The returned sg table is released by sg_free_append_table().
*
* Returns:
- * Last SGE in sgt on success, PTR_ERR on otherwise.
- * The allocation in @sgt must be released by sg_free_table.
+ * 0 on success, negative error on failure
*
* Notes:
* If this function returns non-0 (eg failure), the caller must call
- * sg_free_table() to cleanup any leftover allocations.
+ * sg_free_append_table() to cleanup any leftover allocations.
+ *
+ * In the first call, sgt_append must be initialized.
*/
-struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
+int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
struct page **pages, unsigned int n_pages, unsigned int offset,
unsigned long size, unsigned int max_segment,
- struct scatterlist *prv, unsigned int left_pages,
- gfp_t gfp_mask)
+ unsigned int left_pages, gfp_t gfp_mask)
{
unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
unsigned int added_nents = 0;
- struct scatterlist *s = prv;
+ struct scatterlist *s = sgt_append->prv;
/*
* The algorithm below requires max_segment to be aligned to PAGE_SIZE
@@ -441,25 +454,26 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
*/
max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
if (WARN_ON(max_segment < PAGE_SIZE))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
- if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && prv)
- return ERR_PTR(-EOPNOTSUPP);
+ if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && sgt_append->prv)
+ return -EOPNOTSUPP;
- if (prv) {
- unsigned long paddr = (page_to_pfn(sg_page(prv)) * PAGE_SIZE +
- prv->offset + prv->length) /
- PAGE_SIZE;
+ if (sgt_append->prv) {
+ unsigned long paddr =
+ (page_to_pfn(sg_page(sgt_append->prv)) * PAGE_SIZE +
+ sgt_append->prv->offset + sgt_append->prv->length) /
+ PAGE_SIZE;
if (WARN_ON(offset))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/* Merge contiguous pages into the last SG */
- prv_len = prv->length;
+ prv_len = sgt_append->prv->length;
while (n_pages && page_to_pfn(pages[0]) == paddr) {
- if (prv->length + PAGE_SIZE > max_segment)
+ if (sgt_append->prv->length + PAGE_SIZE > max_segment)
break;
- prv->length += PAGE_SIZE;
+ sgt_append->prv->length += PAGE_SIZE;
paddr++;
pages++;
n_pages--;
@@ -496,15 +510,16 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
}
/* Pass how many chunks might be left */
- s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask);
+ s = get_next_sg(sgt_append, s, chunks - i + left_pages,
+ gfp_mask);
if (IS_ERR(s)) {
/*
* Adjust entry length to be as before function was
* called.
*/
- if (prv)
- prv->length = prv_len;
- return s;
+ if (sgt_append->prv)
+ sgt_append->prv->length = prv_len;
+ return PTR_ERR(s);
}
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
sg_set_page(s, pages[cur_page],
@@ -514,42 +529,58 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
offset = 0;
cur_page = j;
}
- sgt->nents += added_nents;
+ sgt_append->sgt.nents += added_nents;
+ sgt_append->sgt.orig_nents = sgt_append->sgt.nents;
+ sgt_append->prv = s;
out:
if (!left_pages)
sg_mark_end(s);
- return s;
+ return 0;
}
-EXPORT_SYMBOL(__sg_alloc_table_from_pages);
+EXPORT_SYMBOL(sg_alloc_append_table_from_pages);
/**
- * sg_alloc_table_from_pages - Allocate and initialize an sg table from
- * an array of pages
+ * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from
+ * an array of pages and given maximum
+ * segment.
* @sgt: The sg table header to use
* @pages: Pointer to an array of page pointers
* @n_pages: Number of pages in the pages array
* @offset: Offset from start of the first page to the start of a buffer
* @size: Number of valid bytes in the buffer (after offset)
+ * @max_segment: Maximum size of a scatterlist element in bytes
* @gfp_mask: GFP allocation mask
*
* Description:
* Allocate and initialize an sg table from a list of pages. Contiguous
- * ranges of the pages are squashed into a single scatterlist node. A user
- * may provide an offset at a start and a size of valid data in a buffer
- * specified by the page array. The returned sg table is released by
- * sg_free_table.
+ * ranges of the pages are squashed into a single scatterlist node up to the
+ * maximum size specified in @max_segment. A user may provide an offset at a
+ * start and a size of valid data in a buffer specified by the page array.
*
- * Returns:
+ * The returned sg table is released by sg_free_table.
+ *
+ * Returns:
* 0 on success, negative error on failure
*/
-int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
- unsigned int n_pages, unsigned int offset,
- unsigned long size, gfp_t gfp_mask)
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+ unsigned int n_pages, unsigned int offset,
+ unsigned long size, unsigned int max_segment,
+ gfp_t gfp_mask)
{
- return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages,
- offset, size, UINT_MAX, NULL, 0, gfp_mask));
+ struct sg_append_table append = {};
+ int err;
+
+ err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset,
+ size, max_segment, 0, gfp_mask);
+ if (err) {
+ sg_free_append_table(&append);
+ return err;
+ }
+ memcpy(sgt, &append.sgt, sizeof(*sgt));
+ WARN_ON(append.total_nents != sgt->orig_nents);
+ return 0;
}
-EXPORT_SYMBOL(sg_alloc_table_from_pages);
+EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);
#ifdef CONFIG_SGL_ALLOC
@@ -887,9 +918,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
miter->__offset += miter->consumed;
miter->__remaining -= miter->consumed;
- if ((miter->__flags & SG_MITER_TO_SG) &&
- !PageSlab(miter->page))
- flush_kernel_dcache_page(miter->page);
+ if (miter->__flags & SG_MITER_TO_SG)
+ flush_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
WARN_ON_ONCE(preemptible());
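
A hedged usage sketch of the new append API replacing
__sg_alloc_table_from_pages(), which now returns 0/-errno instead of an SGE
pointer. Here pages is assumed to hold n_pages pinned pages with size valid
bytes at offset 0:

    struct sg_append_table append = {};
    int err;

    err = sg_alloc_append_table_from_pages(&append, pages, n_pages, 0,
                                           size, UINT_MAX, 0, GFP_KERNEL);
    if (err) {
            sg_free_append_table(&append);  /* required on failure too */
            return err;
    }

    /* ... use append.sgt like an ordinary struct sg_table ... */

    sg_free_append_table(&append);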
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index db29e5c1f790..a0b1a52cd6f7 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -90,7 +90,8 @@ void sg_free_table_chained(struct sg_table *table,
if (nents_first_chunk == 1)
nents_first_chunk = 0;
- __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free);
+ __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free,
+ table->orig_nents);
}
EXPORT_SYMBOL_GPL(sg_free_table_chained);
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 8be9d4b3b259..8835e0784578 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -120,12 +120,28 @@ static void kasan_test_exit(struct kunit *test)
static void kmalloc_oob_right(struct kunit *test)
{
char *ptr;
- size_t size = 123;
+ size_t size = 128 - KASAN_GRANULE_SIZE - 5;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
+ /*
+ * An unaligned access past the requested kmalloc size.
+ * Only generic KASAN can precisely detect these.
+ */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
+
+ /*
+ * An aligned access into the first out-of-bounds granule that falls
+ * within the aligned kmalloc object.
+ */
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');
+
+ /* Out-of-bounds access past the aligned kmalloc object. */
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
+ ptr[size + KASAN_GRANULE_SIZE + 5]);
+
kfree(ptr);
}
@@ -149,7 +165,7 @@ static void kmalloc_node_oob_right(struct kunit *test)
ptr = kmalloc_node(size, GFP_KERNEL, 0);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
kfree(ptr);
}
@@ -185,7 +201,7 @@ static void kmalloc_pagealloc_uaf(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
static void kmalloc_pagealloc_invalid_free(struct kunit *test)
@@ -219,7 +235,7 @@ static void pagealloc_oob_right(struct kunit *test)
ptr = page_address(pages);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
free_pages((unsigned long)ptr, order);
}
@@ -234,7 +250,7 @@ static void pagealloc_uaf(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
free_pages((unsigned long)ptr, order);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
static void kmalloc_large_oob_right(struct kunit *test)
@@ -410,64 +426,70 @@ static void kmalloc_uaf_16(struct kunit *test)
kfree(ptr1);
}
+/*
+ * Note: in the memset tests below, the written range touches both valid and
+ * invalid memory. This makes sure that the instrumentation does not only check
+ * the starting address but the whole range.
+ */
+
static void kmalloc_oob_memset_2(struct kunit *test)
{
char *ptr;
- size_t size = 8;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
kfree(ptr);
}
static void kmalloc_oob_memset_4(struct kunit *test)
{
char *ptr;
- size_t size = 8;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
kfree(ptr);
}
-
static void kmalloc_oob_memset_8(struct kunit *test)
{
char *ptr;
- size_t size = 8;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
kfree(ptr);
}
static void kmalloc_oob_memset_16(struct kunit *test)
{
char *ptr;
- size_t size = 16;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
kfree(ptr);
}
static void kmalloc_oob_in_memset(struct kunit *test)
{
char *ptr;
- size_t size = 666;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ memset(ptr, 0, size + KASAN_GRANULE_SIZE));
kfree(ptr);
}
@@ -477,11 +499,17 @@ static void kmalloc_memmove_invalid_size(struct kunit *test)
size_t size = 64;
volatile size_t invalid_size = -2;
+ /*
+ * Hardware tag-based mode doesn't check memmove for negative size.
+ * As a result, this test introduces a side-effect memory corruption,
+ * which can result in a crash.
+ */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
+
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset((char *)ptr, 0, 64);
-
KUNIT_EXPECT_KASAN_FAIL(test,
memmove((char *)ptr, (char *)ptr + 4, invalid_size));
kfree(ptr);
@@ -496,7 +524,7 @@ static void kmalloc_uaf(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}
static void kmalloc_uaf_memset(struct kunit *test)
@@ -504,6 +532,12 @@ static void kmalloc_uaf_memset(struct kunit *test)
char *ptr;
size_t size = 33;
+ /*
+ * Only generic KASAN uses quarantine, which is required to avoid a
+ * kernel memory corruption this test causes.
+ */
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -535,7 +569,7 @@ again:
goto again;
}
- KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
kfree(ptr2);
@@ -682,7 +716,7 @@ static void ksize_unpoisons_memory(struct kunit *test)
ptr[size] = 'x';
/* This one must. */
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
kfree(ptr);
}
@@ -701,8 +735,8 @@ static void ksize_uaf(struct kunit *test)
kfree(ptr);
KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
- KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}
static void kasan_stack_oob(struct kunit *test)
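
Many hunks above replace dummy writes with volatile reads. The pattern
matters: a read through a volatile pointer cannot be optimized away, yet
unlike a write it leaves the freed or out-of-bounds memory intact should
KASAN ever miss the access. Illustrative shape:

    char *ptr = kmalloc(size, GFP_KERNEL);

    kfree(ptr);
    /* Read, don't write: reported as a UAF, harmless if missed. */
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);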
diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c
index f1017f345d6c..7ebf433edef3 100644
--- a/lib/test_kasan_module.c
+++ b/lib/test_kasan_module.c
@@ -15,13 +15,11 @@
#include "../mm/kasan/kasan.h"
-#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
-
static noinline void __init copy_user_test(void)
{
char *kmem;
char __user *usermem;
- size_t size = 10;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
int __maybe_unused unused;
kmem = kmalloc(size, GFP_KERNEL);
@@ -38,25 +36,25 @@ static noinline void __init copy_user_test(void)
}
pr_info("out-of-bounds in copy_from_user()\n");
- unused = copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
+ unused = copy_from_user(kmem, usermem, size + 1);
pr_info("out-of-bounds in copy_to_user()\n");
- unused = copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
+ unused = copy_to_user(usermem, kmem, size + 1);
pr_info("out-of-bounds in __copy_from_user()\n");
- unused = __copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
+ unused = __copy_from_user(kmem, usermem, size + 1);
pr_info("out-of-bounds in __copy_to_user()\n");
- unused = __copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
+ unused = __copy_to_user(usermem, kmem, size + 1);
pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
- unused = __copy_from_user_inatomic(kmem, usermem, size + 1 + OOB_TAG_OFF);
+ unused = __copy_from_user_inatomic(kmem, usermem, size + 1);
pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
- unused = __copy_to_user_inatomic(usermem, kmem, size + 1 + OOB_TAG_OFF);
+ unused = __copy_to_user_inatomic(usermem, kmem, size + 1);
pr_info("out-of-bounds in strncpy_from_user()\n");
- unused = strncpy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
+ unused = strncpy_from_user(kmem, usermem, size + 1);
vm_munmap((unsigned long)usermem, PAGE_SIZE);
kfree(kmem);
@@ -73,7 +71,7 @@ static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp)
struct kasan_rcu_info, rcu);
kfree(fp);
- fp->i = 1;
+ ((volatile struct kasan_rcu_info *)fp)->i;
}
static noinline void __init kasan_rcu_uaf(void)
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 8ac71aee46af..07309c45f327 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -586,70 +586,59 @@ struct page_flags_test {
int width;
int shift;
int mask;
- unsigned long value;
const char *fmt;
const char *name;
};
-static struct page_flags_test pft[] = {
+static const struct page_flags_test pft[] = {
{SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
- 0, "%d", "section"},
+ "%d", "section"},
{NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
- 0, "%d", "node"},
+ "%d", "node"},
{ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
- 0, "%d", "zone"},
+ "%d", "zone"},
{LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
- 0, "%#x", "lastcpupid"},
+ "%#x", "lastcpupid"},
{KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
- 0, "%#x", "kasantag"},
+ "%#x", "kasantag"},
};
static void __init
page_flags_test(int section, int node, int zone, int last_cpupid,
- int kasan_tag, int flags, const char *name, char *cmp_buf)
+ int kasan_tag, unsigned long flags, const char *name,
+ char *cmp_buf)
{
unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag};
- unsigned long page_flags = 0;
- unsigned long size = 0;
+ unsigned long size;
bool append = false;
int i;
- flags &= BIT(NR_PAGEFLAGS) - 1;
- if (flags) {
- page_flags |= flags;
- snprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
- size = strlen(cmp_buf);
-#if SECTIONS_WIDTH || NODES_WIDTH || ZONES_WIDTH || \
- LAST_CPUPID_WIDTH || KASAN_TAG_WIDTH
- /* Other information also included in page flags */
- snprintf(cmp_buf + size, BUF_SIZE - size, "|");
- size = strlen(cmp_buf);
-#endif
- }
+ for (i = 0; i < ARRAY_SIZE(values); i++)
+ flags |= (values[i] & pft[i].mask) << pft[i].shift;
- /* Set the test value */
- for (i = 0; i < ARRAY_SIZE(pft); i++)
- pft[i].value = values[i];
+ size = scnprintf(cmp_buf, BUF_SIZE, "%#lx(", flags);
+ if (flags & PAGEFLAGS_MASK) {
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
+ append = true;
+ }
for (i = 0; i < ARRAY_SIZE(pft); i++) {
if (!pft[i].width)
continue;
- if (append) {
- snprintf(cmp_buf + size, BUF_SIZE - size, "|");
- size = strlen(cmp_buf);
- }
+ if (append)
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "|");
- page_flags |= (pft[i].value & pft[i].mask) << pft[i].shift;
- snprintf(cmp_buf + size, BUF_SIZE - size, "%s=", pft[i].name);
- size = strlen(cmp_buf);
- snprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt,
- pft[i].value & pft[i].mask);
- size = strlen(cmp_buf);
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s=",
+ pft[i].name);
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt,
+ values[i] & pft[i].mask);
append = true;
}
- test(cmp_buf, "%pGp", &page_flags);
+ snprintf(cmp_buf + size, BUF_SIZE - size, ")");
+
+ test(cmp_buf, "%pGp", &flags);
}
static void __init
@@ -675,9 +664,8 @@ flags(void)
"uptodate|dirty|lru|active|swapbacked",
cmp_buffer);
- flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC
- | VM_DENYWRITE;
- test("read|exec|mayread|maywrite|mayexec|denywrite", "%pGv", &flags);
+ flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+ test("read|exec|mayread|maywrite|mayexec", "%pGv", &flags);
gfp = GFP_TRANSHUGE;
test("GFP_TRANSHUGE", "%pGg", &gfp);
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
index abae88848972..b620cf7de503 100644
--- a/lib/test_scanf.c
+++ b/lib/test_scanf.c
@@ -398,7 +398,7 @@ do { \
test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
} while (0)
-static void __init numbers_list(const char *delim)
+static void __init numbers_list_ll(const char *delim)
{
numbers_list_8(unsigned long long, "%llu", delim, "llu", check_ull);
numbers_list_8(long long, "%lld", delim, "lld", check_ll);
@@ -406,28 +406,40 @@ static void __init numbers_list(const char *delim)
numbers_list_8(unsigned long long, "%llx", delim, "llx", check_ull);
numbers_list_8(unsigned long long, "0x%llx", delim, "llx", check_ull);
numbers_list_8(long long, "0x%llx", delim, "lli", check_ll);
+}
+static void __init numbers_list_l(const char *delim)
+{
numbers_list_8(unsigned long, "%lu", delim, "lu", check_ulong);
numbers_list_8(long, "%ld", delim, "ld", check_long);
numbers_list_8(long, "%ld", delim, "li", check_long);
numbers_list_8(unsigned long, "%lx", delim, "lx", check_ulong);
numbers_list_8(unsigned long, "0x%lx", delim, "lx", check_ulong);
numbers_list_8(long, "0x%lx", delim, "li", check_long);
+}
+static void __init numbers_list_d(const char *delim)
+{
numbers_list_8(unsigned int, "%u", delim, "u", check_uint);
numbers_list_8(int, "%d", delim, "d", check_int);
numbers_list_8(int, "%d", delim, "i", check_int);
numbers_list_8(unsigned int, "%x", delim, "x", check_uint);
numbers_list_8(unsigned int, "0x%x", delim, "x", check_uint);
numbers_list_8(int, "0x%x", delim, "i", check_int);
+}
+static void __init numbers_list_h(const char *delim)
+{
numbers_list_8(unsigned short, "%hu", delim, "hu", check_ushort);
numbers_list_8(short, "%hd", delim, "hd", check_short);
numbers_list_8(short, "%hd", delim, "hi", check_short);
numbers_list_8(unsigned short, "%hx", delim, "hx", check_ushort);
numbers_list_8(unsigned short, "0x%hx", delim, "hx", check_ushort);
numbers_list_8(short, "0x%hx", delim, "hi", check_short);
+}
+static void __init numbers_list_hh(const char *delim)
+{
numbers_list_8(unsigned char, "%hhu", delim, "hhu", check_uchar);
numbers_list_8(signed char, "%hhd", delim, "hhd", check_char);
numbers_list_8(signed char, "%hhd", delim, "hhi", check_char);
@@ -436,11 +448,16 @@ static void __init numbers_list(const char *delim)
numbers_list_8(signed char, "0x%hhx", delim, "hhi", check_char);
}
-/*
- * List of numbers separated by delim. Each field width specifier is the
- * maximum possible digits for the given type and base.
- */
-static void __init numbers_list_field_width_typemax(const char *delim)
+static void __init numbers_list(const char *delim)
+{
+ numbers_list_ll(delim);
+ numbers_list_l(delim);
+ numbers_list_d(delim);
+ numbers_list_h(delim);
+ numbers_list_hh(delim);
+}
+
+static void __init numbers_list_field_width_ll(const char *delim)
{
numbers_list_fix_width(unsigned long long, "%llu", delim, 20, "llu", check_ull);
numbers_list_fix_width(long long, "%lld", delim, 20, "lld", check_ll);
@@ -448,7 +465,10 @@ static void __init numbers_list_field_width_typemax(const char *delim)
numbers_list_fix_width(unsigned long long, "%llx", delim, 16, "llx", check_ull);
numbers_list_fix_width(unsigned long long, "0x%llx", delim, 18, "llx", check_ull);
numbers_list_fix_width(long long, "0x%llx", delim, 18, "lli", check_ll);
+}
+static void __init numbers_list_field_width_l(const char *delim)
+{
#if BITS_PER_LONG == 64
numbers_list_fix_width(unsigned long, "%lu", delim, 20, "lu", check_ulong);
numbers_list_fix_width(long, "%ld", delim, 20, "ld", check_long);
@@ -464,21 +484,30 @@ static void __init numbers_list_field_width_typemax(const char *delim)
numbers_list_fix_width(unsigned long, "0x%lx", delim, 10, "lx", check_ulong);
numbers_list_fix_width(long, "0x%lx", delim, 10, "li", check_long);
#endif
+}
+static void __init numbers_list_field_width_d(const char *delim)
+{
numbers_list_fix_width(unsigned int, "%u", delim, 10, "u", check_uint);
numbers_list_fix_width(int, "%d", delim, 11, "d", check_int);
numbers_list_fix_width(int, "%d", delim, 11, "i", check_int);
numbers_list_fix_width(unsigned int, "%x", delim, 8, "x", check_uint);
numbers_list_fix_width(unsigned int, "0x%x", delim, 10, "x", check_uint);
numbers_list_fix_width(int, "0x%x", delim, 10, "i", check_int);
+}
+static void __init numbers_list_field_width_h(const char *delim)
+{
numbers_list_fix_width(unsigned short, "%hu", delim, 5, "hu", check_ushort);
numbers_list_fix_width(short, "%hd", delim, 6, "hd", check_short);
numbers_list_fix_width(short, "%hd", delim, 6, "hi", check_short);
numbers_list_fix_width(unsigned short, "%hx", delim, 4, "hx", check_ushort);
numbers_list_fix_width(unsigned short, "0x%hx", delim, 6, "hx", check_ushort);
numbers_list_fix_width(short, "0x%hx", delim, 6, "hi", check_short);
+}
+static void __init numbers_list_field_width_hh(const char *delim)
+{
numbers_list_fix_width(unsigned char, "%hhu", delim, 3, "hhu", check_uchar);
numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhd", check_char);
numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhi", check_char);
@@ -489,9 +518,18 @@ static void __init numbers_list_field_width_typemax(const char *delim)
/*
* List of numbers separated by delim. Each field width specifier is the
- * exact length of the corresponding value digits in the string being scanned.
+ * maximum possible digits for the given type and base.
*/
-static void __init numbers_list_field_width_val_width(const char *delim)
+static void __init numbers_list_field_width_typemax(const char *delim)
+{
+ numbers_list_field_width_ll(delim);
+ numbers_list_field_width_l(delim);
+ numbers_list_field_width_d(delim);
+ numbers_list_field_width_h(delim);
+ numbers_list_field_width_hh(delim);
+}
+
+static void __init numbers_list_field_width_val_ll(const char *delim)
{
numbers_list_val_width(unsigned long long, "%llu", delim, "llu", check_ull);
numbers_list_val_width(long long, "%lld", delim, "lld", check_ll);
@@ -499,28 +537,40 @@ static void __init numbers_list_field_width_val_width(const char *delim)
numbers_list_val_width(unsigned long long, "%llx", delim, "llx", check_ull);
numbers_list_val_width(unsigned long long, "0x%llx", delim, "llx", check_ull);
numbers_list_val_width(long long, "0x%llx", delim, "lli", check_ll);
+}
+static void __init numbers_list_field_width_val_l(const char *delim)
+{
numbers_list_val_width(unsigned long, "%lu", delim, "lu", check_ulong);
numbers_list_val_width(long, "%ld", delim, "ld", check_long);
numbers_list_val_width(long, "%ld", delim, "li", check_long);
numbers_list_val_width(unsigned long, "%lx", delim, "lx", check_ulong);
numbers_list_val_width(unsigned long, "0x%lx", delim, "lx", check_ulong);
numbers_list_val_width(long, "0x%lx", delim, "li", check_long);
+}
+static void __init numbers_list_field_width_val_d(const char *delim)
+{
numbers_list_val_width(unsigned int, "%u", delim, "u", check_uint);
numbers_list_val_width(int, "%d", delim, "d", check_int);
numbers_list_val_width(int, "%d", delim, "i", check_int);
numbers_list_val_width(unsigned int, "%x", delim, "x", check_uint);
numbers_list_val_width(unsigned int, "0x%x", delim, "x", check_uint);
numbers_list_val_width(int, "0x%x", delim, "i", check_int);
+}
+static void __init numbers_list_field_width_val_h(const char *delim)
+{
numbers_list_val_width(unsigned short, "%hu", delim, "hu", check_ushort);
numbers_list_val_width(short, "%hd", delim, "hd", check_short);
numbers_list_val_width(short, "%hd", delim, "hi", check_short);
numbers_list_val_width(unsigned short, "%hx", delim, "hx", check_ushort);
numbers_list_val_width(unsigned short, "0x%hx", delim, "hx", check_ushort);
numbers_list_val_width(short, "0x%hx", delim, "hi", check_short);
+}
+static void __init numbers_list_field_width_val_hh(const char *delim)
+{
numbers_list_val_width(unsigned char, "%hhu", delim, "hhu", check_uchar);
numbers_list_val_width(signed char, "%hhd", delim, "hhd", check_char);
numbers_list_val_width(signed char, "%hhd", delim, "hhi", check_char);
@@ -530,6 +580,19 @@ static void __init numbers_list_field_width_val_width(const char *delim)
}
/*
+ * List of numbers separated by delim. Each field width specifier is the
+ * exact length of the corresponding value digits in the string being scanned.
+ */
+static void __init numbers_list_field_width_val_width(const char *delim)
+{
+ numbers_list_field_width_val_ll(delim);
+ numbers_list_field_width_val_l(delim);
+ numbers_list_field_width_val_d(delim);
+ numbers_list_field_width_val_h(delim);
+ numbers_list_field_width_val_hh(delim);
+}
+
+/*
* Slice a continuous string of digits without field delimiters, containing
* numbers of varying length, using the field width to extract each group
* of digits. For example the hex values c0,3,bf01,303 would have a
diff --git a/lib/test_sort.c b/lib/test_sort.c
index 52edbe10f2e5..be02e3a098cf 100644
--- a/lib/test_sort.c
+++ b/lib/test_sort.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -7,18 +10,17 @@
#define TEST_LEN 1000
-static int __init cmpint(const void *a, const void *b)
+static int cmpint(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
-static int __init test_sort_init(void)
+static void test_sort(struct kunit *test)
{
- int *a, i, r = 1, err = -ENOMEM;
+ int *a, i, r = 1;
- a = kmalloc_array(TEST_LEN, sizeof(*a), GFP_KERNEL);
- if (!a)
- return err;
+ a = kunit_kmalloc_array(test, TEST_LEN, sizeof(*a), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a);
for (i = 0; i < TEST_LEN; i++) {
r = (r * 725861) % 6599;
@@ -27,24 +29,20 @@ static int __init test_sort_init(void)
sort(a, TEST_LEN, sizeof(*a), cmpint, NULL);
- err = -EINVAL;
for (i = 0; i < TEST_LEN-1; i++)
- if (a[i] > a[i+1]) {
- pr_err("test has failed\n");
- goto exit;
- }
- err = 0;
- pr_info("test passed\n");
-exit:
- kfree(a);
- return err;
+ KUNIT_ASSERT_LE(test, a[i], a[i + 1]);
}
-static void __exit test_sort_exit(void)
-{
-}
+static struct kunit_case sort_test_cases[] = {
+ KUNIT_CASE(test_sort),
+ {}
+};
+
+static struct kunit_suite sort_test_suite = {
+ .name = "lib_sort",
+ .test_cases = sort_test_cases,
+};
-module_init(test_sort_init);
-module_exit(test_sort_exit);
+kunit_test_suites(&sort_test_suite);
MODULE_LICENSE("GPL");
diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
index f93b1e145ada..a3c74e6a21ff 100644
--- a/lib/test_stackinit.c
+++ b/lib/test_stackinit.c
@@ -1,8 +1,13 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Test cases for compiler-based stack variable zeroing via future
- * compiler flags or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
+ * Test cases for compiler-based stack variable zeroing via
+ * -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
+ *
+ * External build example:
+ * clang -O2 -Wall -ftrivial-auto-var-init=pattern \
+ * -o test_stackinit test_stackinit.c
*/
+#ifdef __KERNEL__
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
@@ -10,6 +15,63 @@
#include <linux/module.h>
#include <linux/string.h>
+#else
+
+/* Userspace headers. */
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <sys/types.h>
+
+/* Linux kernel-ism stubs for stand-alone userspace build. */
+#define KBUILD_MODNAME "stackinit"
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__)
+#define __init /**/
+#define __exit /**/
+#define __user /**/
+#define noinline __attribute__((__noinline__))
+#define __aligned(x) __attribute__((__aligned__(x)))
+#ifdef __clang__
+# define __compiletime_error(message) /**/
+#else
+# define __compiletime_error(message) __attribute__((__error__(message)))
+#endif
+#define __compiletime_assert(condition, msg, prefix, suffix) \
+ do { \
+ extern void prefix ## suffix(void) __compiletime_error(msg); \
+ if (!(condition)) \
+ prefix ## suffix(); \
+ } while (0)
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+ __compiletime_assert(condition, msg, prefix, suffix)
+#define compiletime_assert(condition, msg) \
+ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
+#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
+#define BUILD_BUG_ON(condition) \
+ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+#define module_init(func) static int (*do_init)(void) = func
+#define module_exit(func) static void (*do_exit)(void) = func
+#define MODULE_LICENSE(str) int main(void) { \
+ int rc; \
+ /* License: str */ \
+ rc = do_init(); \
+ if (rc == 0) \
+ do_exit(); \
+ return rc; \
+ }
+
+#endif /* __KERNEL__ */
+
/* Exfiltration buffer. */
#define MAX_VAR_SIZE 128
static u8 check_buf[MAX_VAR_SIZE];
@@ -33,6 +95,10 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
return false;
}
+/* Whether the test is expected to fail. */
+#define WANT_SUCCESS 0
+#define XFAIL 1
+
#define DO_NOTHING_TYPE_SCALAR(var_type) var_type
#define DO_NOTHING_TYPE_STRING(var_type) void
#define DO_NOTHING_TYPE_STRUCT(var_type) void
@@ -58,34 +124,73 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
#define INIT_CLONE_STRING [FILL_SIZE_STRING]
#define INIT_CLONE_STRUCT /**/
-#define INIT_SCALAR_none /**/
-#define INIT_SCALAR_zero = 0
+#define ZERO_CLONE_SCALAR(zero) memset(&(zero), 0x00, sizeof(zero))
+#define ZERO_CLONE_STRING(zero) memset(&(zero), 0x00, sizeof(zero))
+/*
+ * For the struct, intentionally poison padding to see if it gets
+ * copied out in direct assignments.
+ */
+#define ZERO_CLONE_STRUCT(zero) \
+ do { \
+ memset(&(zero), 0xFF, sizeof(zero)); \
+ zero.one = 0; \
+ zero.two = 0; \
+ zero.three = 0; \
+ zero.four = 0; \
+ } while (0)
+
+#define INIT_SCALAR_none(var_type) /**/
+#define INIT_SCALAR_zero(var_type) = 0
-#define INIT_STRING_none [FILL_SIZE_STRING] /**/
-#define INIT_STRING_zero [FILL_SIZE_STRING] = { }
+#define INIT_STRING_none(var_type) [FILL_SIZE_STRING] /**/
+#define INIT_STRING_zero(var_type) [FILL_SIZE_STRING] = { }
-#define INIT_STRUCT_none /**/
-#define INIT_STRUCT_zero = { }
-#define INIT_STRUCT_static_partial = { .two = 0, }
-#define INIT_STRUCT_static_all = { .one = arg->one, \
- .two = arg->two, \
- .three = arg->three, \
- .four = arg->four, \
+#define INIT_STRUCT_none(var_type) /**/
+#define INIT_STRUCT_zero(var_type) = { }
+
+
+#define __static_partial { .two = 0, }
+#define __static_all { .one = 0, \
+ .two = 0, \
+ .three = 0, \
+ .four = 0, \
}
-#define INIT_STRUCT_dynamic_partial = { .two = arg->two, }
-#define INIT_STRUCT_dynamic_all = { .one = arg->one, \
- .two = arg->two, \
- .three = arg->three, \
- .four = arg->four, \
+#define __dynamic_partial { .two = arg->two, }
+#define __dynamic_all { .one = arg->one, \
+ .two = arg->two, \
+ .three = arg->three, \
+ .four = arg->four, \
}
-#define INIT_STRUCT_runtime_partial ; \
- var.two = 0
-#define INIT_STRUCT_runtime_all ; \
- var.one = 0; \
+#define __runtime_partial var.two = 0
+#define __runtime_all var.one = 0; \
var.two = 0; \
var.three = 0; \
- memset(&var.four, 0, \
- sizeof(var.four))
+ var.four = 0
+
+#define INIT_STRUCT_static_partial(var_type) \
+ = __static_partial
+#define INIT_STRUCT_static_all(var_type) \
+ = __static_all
+#define INIT_STRUCT_dynamic_partial(var_type) \
+ = __dynamic_partial
+#define INIT_STRUCT_dynamic_all(var_type) \
+ = __dynamic_all
+#define INIT_STRUCT_runtime_partial(var_type) \
+ ; __runtime_partial
+#define INIT_STRUCT_runtime_all(var_type) \
+ ; __runtime_all
+
+#define INIT_STRUCT_assigned_static_partial(var_type) \
+ ; var = (var_type)__static_partial
+#define INIT_STRUCT_assigned_static_all(var_type) \
+ ; var = (var_type)__static_all
+#define INIT_STRUCT_assigned_dynamic_partial(var_type) \
+ ; var = (var_type)__dynamic_partial
+#define INIT_STRUCT_assigned_dynamic_all(var_type) \
+ ; var = (var_type)__dynamic_all
+
+#define INIT_STRUCT_assigned_copy(var_type) \
+ ; var = *(arg)
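[Editor's note, not part of the patch: the difference between the classic and the new "assigned" families is where the initializer lands. Expanding the leaf_*() declaration "var_type var INIT_...(var_type);" for struct test_packed as an example:

	/* INIT_STRUCT_static_partial: initializer at the declaration */
	struct test_packed a = { .two = 0, };

	/* INIT_STRUCT_assigned_static_partial: uninitialized declaration,
	 * then a compound-literal assignment */
	struct test_packed b;
	b = (struct test_packed){ .two = 0, };

	/* INIT_STRUCT_assigned_copy: whole-struct copy, padding and all,
	 * which is why it is declared XFAIL below */
	b = *arg;
]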
/*
* @name: unique string name for the test
@@ -106,7 +211,7 @@ static noinline __init int test_ ## name (void) \
BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \
\
/* Fill clone type with zero for per-field init. */ \
- memset(&zero, 0x00, sizeof(zero)); \
+ ZERO_CLONE_ ## which(zero); \
/* Clear entire check buffer for 0xFF overlap test. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Fill stack with 0xFF. */ \
@@ -149,7 +254,7 @@ static noinline __init int test_ ## name (void) \
return (xfail) ? 0 : 1; \
} \
}
-#define DEFINE_TEST(name, var_type, which, init_level) \
+#define DEFINE_TEST(name, var_type, which, init_level, xfail) \
/* no-op to force compiler into ignoring "uninitialized" vars */\
static noinline __init DO_NOTHING_TYPE_ ## which(var_type) \
do_nothing_ ## name(var_type *ptr) \
@@ -165,7 +270,8 @@ static noinline __init int leaf_ ## name(unsigned long sp, \
var_type *arg) \
{ \
char buf[VAR_BUFFER]; \
- var_type var INIT_ ## which ## _ ## init_level; \
+ var_type var \
+ INIT_ ## which ## _ ## init_level(var_type); \
\
target_start = &var; \
target_size = sizeof(var); \
@@ -191,7 +297,7 @@ static noinline __init int leaf_ ## name(unsigned long sp, \
\
return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \
} \
-DEFINE_TEST_DRIVER(name, var_type, which, 0)
+DEFINE_TEST_DRIVER(name, var_type, which, xfail)
/* Structure with no padding. */
struct test_packed {
@@ -210,18 +316,13 @@ struct test_small_hole {
unsigned long four;
};
-/* Try to trigger unhandled padding in a structure. */
-struct test_aligned {
- u32 internal1;
- u64 internal2;
-} __aligned(64);
-
+/* Trigger unhandled padding in a structure. */
struct test_big_hole {
u8 one;
u8 two;
u8 three;
/* 61 byte padding hole here. */
- struct test_aligned four;
+ u8 four __aligned(64);
} __aligned(64);
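[Editor's note, not part of the patch: with the nested struct gone, the 64-byte alignment now hangs directly off the one-byte member, so 'four' lands at offset 64 and bytes 3..63 form the hole the comment describes. A sanity check one could drop in, illustrative and assuming a 64-bit target:

	BUILD_BUG_ON(offsetof(struct test_big_hole, four) != 64);
	BUILD_BUG_ON(sizeof(struct test_big_hole) != 128);
]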
struct test_trailing_hole {
@@ -240,42 +341,50 @@ struct test_user {
unsigned long four;
};
-#define DEFINE_SCALAR_TEST(name, init) \
- DEFINE_TEST(name ## _ ## init, name, SCALAR, init)
+#define DEFINE_SCALAR_TEST(name, init, xfail) \
+ DEFINE_TEST(name ## _ ## init, name, SCALAR, \
+ init, xfail)
-#define DEFINE_SCALAR_TESTS(init) \
- DEFINE_SCALAR_TEST(u8, init); \
- DEFINE_SCALAR_TEST(u16, init); \
- DEFINE_SCALAR_TEST(u32, init); \
- DEFINE_SCALAR_TEST(u64, init); \
- DEFINE_TEST(char_array_ ## init, unsigned char, STRING, init)
+#define DEFINE_SCALAR_TESTS(init, xfail) \
+ DEFINE_SCALAR_TEST(u8, init, xfail); \
+ DEFINE_SCALAR_TEST(u16, init, xfail); \
+ DEFINE_SCALAR_TEST(u32, init, xfail); \
+ DEFINE_SCALAR_TEST(u64, init, xfail); \
+ DEFINE_TEST(char_array_ ## init, unsigned char, \
+ STRING, init, xfail)
-#define DEFINE_STRUCT_TEST(name, init) \
+#define DEFINE_STRUCT_TEST(name, init, xfail) \
DEFINE_TEST(name ## _ ## init, \
- struct test_ ## name, STRUCT, init)
+ struct test_ ## name, STRUCT, init, \
+ xfail)
+
+#define DEFINE_STRUCT_TESTS(init, xfail) \
+ DEFINE_STRUCT_TEST(small_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(big_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(trailing_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(packed, init, xfail)
-#define DEFINE_STRUCT_TESTS(init) \
- DEFINE_STRUCT_TEST(small_hole, init); \
- DEFINE_STRUCT_TEST(big_hole, init); \
- DEFINE_STRUCT_TEST(trailing_hole, init); \
- DEFINE_STRUCT_TEST(packed, init)
+#define DEFINE_STRUCT_INITIALIZER_TESTS(base) \
+ DEFINE_STRUCT_TESTS(base ## _ ## partial, \
+ WANT_SUCCESS); \
+ DEFINE_STRUCT_TESTS(base ## _ ## all, \
+ WANT_SUCCESS)
/* These should be fully initialized all the time! */
-DEFINE_SCALAR_TESTS(zero);
-DEFINE_STRUCT_TESTS(zero);
-/* Static initialization: padding may be left uninitialized. */
-DEFINE_STRUCT_TESTS(static_partial);
-DEFINE_STRUCT_TESTS(static_all);
-/* Dynamic initialization: padding may be left uninitialized. */
-DEFINE_STRUCT_TESTS(dynamic_partial);
-DEFINE_STRUCT_TESTS(dynamic_all);
-/* Runtime initialization: padding may be left uninitialized. */
-DEFINE_STRUCT_TESTS(runtime_partial);
-DEFINE_STRUCT_TESTS(runtime_all);
+DEFINE_SCALAR_TESTS(zero, WANT_SUCCESS);
+DEFINE_STRUCT_TESTS(zero, WANT_SUCCESS);
+/* Struct initializers: padding may be left uninitialized. */
+DEFINE_STRUCT_INITIALIZER_TESTS(static);
+DEFINE_STRUCT_INITIALIZER_TESTS(dynamic);
+DEFINE_STRUCT_INITIALIZER_TESTS(runtime);
+DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static);
+DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic);
+DEFINE_STRUCT_TESTS(assigned_copy, XFAIL);
/* No initialization without compiler instrumentation. */
-DEFINE_SCALAR_TESTS(none);
-DEFINE_STRUCT_TESTS(none);
-DEFINE_TEST(user, struct test_user, STRUCT, none);
+DEFINE_SCALAR_TESTS(none, WANT_SUCCESS);
+DEFINE_STRUCT_TESTS(none, WANT_SUCCESS);
+/* Initialization of members with __user attribute. */
+DEFINE_TEST(user, struct test_user, STRUCT, none, WANT_SUCCESS);
/*
* Check two uses through a variable declaration outside either path,
@@ -285,6 +394,10 @@ DEFINE_TEST(user, struct test_user, STRUCT, none);
static int noinline __leaf_switch_none(int path, bool fill)
{
switch (path) {
+ /*
+	 * This declaration is intentionally unreachable. To silence
+	 * the warning, build with -Wno-switch-unreachable.
+ */
uint64_t var;
case 1:
@@ -334,8 +447,8 @@ static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill,
* non-code areas (i.e. in a switch statement before the first "case").
* https://bugs.llvm.org/show_bug.cgi?id=44916
*/
-DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, 1);
-DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, 1);
+DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, XFAIL);
+DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, XFAIL);
static int __init test_stackinit_init(void)
{
@@ -361,12 +474,18 @@ static int __init test_stackinit_init(void)
test_structs(zero);
/* Padding here appears to be accidentally always initialized? */
test_structs(dynamic_partial);
+ test_structs(assigned_dynamic_partial);
/* Padding initialization depends on compiler behaviors. */
test_structs(static_partial);
test_structs(static_all);
test_structs(dynamic_all);
test_structs(runtime_partial);
test_structs(runtime_all);
+ test_structs(assigned_static_partial);
+ test_structs(assigned_static_all);
+ test_structs(assigned_dynamic_all);
+ /* Everything fails this since it effectively performs a memcpy(). */
+ test_structs(assigned_copy);
/* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
test_scalars(none);
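[Editor's note, not part of the patch: stepping back, the whole test reduces to one trick: fill the stack with 0xFF, declare a variable in a fresh frame, and see which of its bytes survive. A compressed, userspace-runnable sketch of the idea; names are hypothetical, and since frames are not guaranteed to overlap, treat it as an illustration rather than a reliable detector:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct demo { uint8_t a; uint32_t b; };	/* 3-byte hole after 'a' */

	static uint8_t snapshot[sizeof(struct demo)];

	static void __attribute__((noinline)) fill_stack(void)
	{
		uint8_t poison[64];

		memset(poison, 0xFF, sizeof(poison));
		__asm__ volatile("" : : "r"(poison) : "memory"); /* keep stores */
	}

	static void __attribute__((noinline)) leaf(void)
	{
		struct demo var = { .b = 0 };	/* members zeroed, padding not */

		memcpy(snapshot, &var, sizeof(var));
	}

	int main(void)
	{
		size_t i;

		fill_stack();
		leaf();	/* with luck, reuses the frame fill_stack() poisoned */
		for (i = 0; i < sizeof(snapshot); i++)
			if (snapshot[i] == 0xFF)
				printf("byte %zu may be uninitialized\n", i);
		return 0;
	}
]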
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 01e9543de566..e14993bc84d2 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -35,6 +35,9 @@ __param(int, test_repeat_count, 1,
__param(int, test_loop_count, 1000000,
"Set test loop counter");
+__param(int, nr_pages, 0,
+	"Set number of pages for fix_size_alloc_test() (default: 1)");
+
__param(int, run_test_mask, INT_MAX,
"Set tests specified in the mask.\n\n"
"\t\tid: 1, name: fix_size_alloc_test\n"
@@ -262,7 +265,7 @@ static int fix_size_alloc_test(void)
int i;
for (i = 0; i < test_loop_count; i++) {
- ptr = vmalloc(3 * PAGE_SIZE);
+	ptr = vmalloc((nr_pages > 0 ? nr_pages : 1) * PAGE_SIZE);
if (!ptr)
return -1;
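[Editor's note, not part of the patch: with the new parameter, the fixed-size allocation test can be pointed at arbitrary sizes instead of the old hard-coded 3 pages. Per the run_test_mask table above, id 1 selects fix_size_alloc_test, so a large-allocation run would look something like the following; the exact invocation is illustrative:

	modprobe test_vmalloc nr_pages=512 run_test_mask=1
]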
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 26229973049d..bdc380ff5d5c 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
+#include <kunit/test-bug.h>
#include "ubsan.h"
@@ -141,6 +142,8 @@ static void ubsan_prologue(struct source_location *loc, const char *reason)
"========================================\n");
pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name,
loc->line & LINE_MASK, loc->column & COLUMN_MASK);
+
+ kunit_fail_current_test("%s in %s", reason, loc->file_name);
}
static void ubsan_epilogue(void)
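[Editor's note, not part of the patch: kunit_fail_current_test() is a no-op unless KUnit is built in and a test is actually running; when one is, any UBSAN splat it provokes now fails that test instead of only landing in the log. A hypothetical KUnit case exercising the hook; suite and names are mine, and it assumes the matching CONFIG_UBSAN_* check is enabled:

	#include <kunit/test.h>
	#include <linux/limits.h>

	static void ubsan_demo(struct kunit *test)
	{
		volatile int i = INT_MAX;

		i++;	/* signed overflow: UBSAN reports it, and the new
			 * hook marks this test case as failed */
	}

	static struct kunit_case ubsan_demo_cases[] = {
		KUNIT_CASE(ubsan_demo),
		{}
	};

	static struct kunit_suite ubsan_demo_suite = {
		.name = "ubsan-demo",
		.test_cases = ubsan_demo_cases,
	};
	kunit_test_suite(ubsan_demo_suite);
]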
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 1173930ed9d3..f90f91d83920 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -17,7 +17,7 @@
* - scnprintf and vscnprintf
*/
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/build_bug.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
@@ -2020,10 +2020,15 @@ static const struct page_flags_fields pff[] = {
static
char *format_page_flags(char *buf, char *end, unsigned long flags)
{
- unsigned long main_flags = flags & (BIT(NR_PAGEFLAGS) - 1);
+ unsigned long main_flags = flags & PAGEFLAGS_MASK;
bool append = false;
int i;
+ buf = number(buf, end, flags, default_flag_spec);
+ if (buf < end)
+ *buf = '(';
+ buf++;
+
/* Page flags from the main area. */
if (main_flags) {
buf = format_flags(buf, end, main_flags, pageflag_names);
@@ -2052,6 +2057,9 @@ char *format_page_flags(char *buf, char *end, unsigned long flags)
append = true;
}
+ if (buf < end)
+ *buf = ')';
+ buf++;
return buf;
}
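[Editor's note, not part of the patch: taken together, the two hunks prepend the raw hex value and wrap the symbolic names in parentheses, using the usual vsnprintf truncation idiom (write only while buf < end, but always advance buf so the caller learns the full length). The net effect on %pGp output is roughly a change from "slab|head" to "0x17ffffc0010200(slab|head|...)"; the value shown is illustrative. In use:

	/* %pGp takes a pointer to the page's flags word */
	printk(KERN_INFO "page flags: %pGp\n", &page->flags);
]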