From eb32f9f990d974a7878a8792cee9bb821720c24b Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 7 Jun 2021 14:56:47 +0200 Subject: kcsan: Improve some Kconfig comments Improve comment for CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE. Also shorten the comment above the "strictness" configuration options. Acked-by: Mark Rutland Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- lib/Kconfig.kcsan | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 0440f373248e..6152fbd5cbb4 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -40,10 +40,14 @@ menuconfig KCSAN if KCSAN -# Compiler capabilities that should not fail the test if they are unavailable. config CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-compound-read-before-write=1)) || \ (CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-compound-read-before-write=1)) + help + The compiler instruments plain compound read-write operations + differently (++, --, +=, -=, |=, &=, etc.), which allows KCSAN to + distinguish them from other plain accesses. This is currently + supported by Clang 12 or later. config KCSAN_VERBOSE bool "Show verbose reports with more information about system state" @@ -169,13 +173,9 @@ config KCSAN_REPORT_ONCE_IN_MS reporting to avoid flooding the console with reports. Setting this to 0 disables rate limiting. -# The main purpose of the below options is to control reported data races (e.g. -# in fuzzer configs), and are not expected to be switched frequently by other -# users. We could turn some of them into boot parameters, but given they should -# not be switched normally, let's keep them here to simplify configuration. -# -# The defaults below are chosen to be very conservative, and may miss certain -# bugs. +# The main purpose of the below options is to control reported data races, and +# are not expected to be switched frequently by non-testers or at runtime. +# The defaults are chosen to be conservative, and can miss certain bugs. config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN bool "Report races of unknown origin" -- cgit v1.2.3 From a7a73697360ea81244eea550138b8f614348860c Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 7 Jun 2021 14:56:48 +0200 Subject: kcsan: Remove CONFIG_KCSAN_DEBUG By this point CONFIG_KCSAN_DEBUG is pretty useless, as the system just isn't usable with it due to spamming console (I imagine a randconfig test robot will run into this sooner or later). Remove it. Back in 2019 I used it occasionally to record traces of watchpoints and verify the encoding is correct, but these days we have proper tests. If something similar is needed in future, just add it back ad-hoc. Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- kernel/kcsan/core.c | 9 --------- lib/Kconfig.kcsan | 3 --- 2 files changed, 12 deletions(-) (limited to 'lib') diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 26709ea65c71..d92977ede7e1 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -479,15 +479,6 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) break; /* ignore; we do not diff the values */ } - if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) { - kcsan_disable_current(); - pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n", - is_write ? 
"write" : "read", size, ptr, - watchpoint_slot((unsigned long)ptr), - encode_watchpoint((unsigned long)ptr, size, is_write)); - kcsan_enable_current(); - } - /* * Delay this thread, to increase probability of observing a racy * conflicting access. diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 6152fbd5cbb4..5304f211f81f 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -62,9 +62,6 @@ config KCSAN_VERBOSE generated from any one of them, system stability may suffer due to deadlocks or recursion. If in doubt, say N. -config KCSAN_DEBUG - bool "Debugging of KCSAN internals" - config KCSAN_SELFTEST bool "Perform short selftests on boot" default y -- cgit v1.2.3 From e675d2533a74acfa95c62e7bb088335866f44fd2 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 7 Jun 2021 14:56:49 +0200 Subject: kcsan: Introduce CONFIG_KCSAN_STRICT Add a simpler Kconfig variable to configure KCSAN's "strict" mode. This makes it simpler in documentation or messages to suggest just a single configuration option to select the strictest checking mode (vs. currently having to list several options). Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- Documentation/dev-tools/kcsan.rst | 4 ++++ lib/Kconfig.kcsan | 10 ++++++++++ 2 files changed, 14 insertions(+) (limited to 'lib') diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst index 6a600cf8430b..69dc9c502ccc 100644 --- a/Documentation/dev-tools/kcsan.rst +++ b/Documentation/dev-tools/kcsan.rst @@ -127,6 +127,10 @@ Kconfig options: causes KCSAN to not report data races due to conflicts where the only plain accesses are aligned writes up to word size. +To use the strictest possible rules, select ``CONFIG_KCSAN_STRICT=y``, which +configures KCSAN to follow the Linux-kernel memory consistency model (LKMM) as +closely as possible. + DebugFS interface ~~~~~~~~~~~~~~~~~ diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 5304f211f81f..c76fbb3ee09e 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -183,9 +183,17 @@ config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN reported if it was only possible to infer a race due to a data value change while an access is being delayed on a watchpoint. +config KCSAN_STRICT + bool "Strict data-race checking" + help + KCSAN will report data races with the strictest possible rules, which + closely aligns with the rules defined by the Linux-kernel memory + consistency model (LKMM). + config KCSAN_REPORT_VALUE_CHANGE_ONLY bool "Only report races where watcher observed a data value change" default y + depends on !KCSAN_STRICT help If enabled and a conflicting write is observed via a watchpoint, but the data value of the memory location was observed to remain @@ -194,6 +202,7 @@ config KCSAN_REPORT_VALUE_CHANGE_ONLY config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC bool "Assume that plain aligned writes up to word size are atomic" default y + depends on !KCSAN_STRICT help Assume that plain aligned writes up to word size are atomic by default, and also not subject to other unsafe compiler optimizations @@ -206,6 +215,7 @@ config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC config KCSAN_IGNORE_ATOMICS bool "Do not instrument marked atomic accesses" + depends on !KCSAN_STRICT help Never instrument marked atomic accesses. This option can be used for additional filtering. 
Conflicting marked atomic reads and plain -- cgit v1.2.3 From 49f72d5358dd3c0d28bcd2232c513000b15480f0 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 7 Jun 2021 14:56:51 +0200 Subject: kcsan: Rework atomic.h into permissive.h Rework atomic.h into permissive.h to better reflect its purpose, and introduce kcsan_ignore_address() and kcsan_ignore_data_race(). Introduce CONFIG_KCSAN_PERMISSIVE and update the stub functions in preparation for subsequent changes. As before, developers who choose to use KCSAN in "strict" mode will see all data races and are not affected. Furthermore, by relying on the value-change filter logic for kcsan_ignore_data_race(), even if the permissive rules are enabled, the opt-outs in report.c:skip_report() override them (such as for RCU-related functions by default). The option CONFIG_KCSAN_PERMISSIVE is disabled by default, so that the documented default behaviour of KCSAN does not change. Instead, like CONFIG_KCSAN_IGNORE_ATOMICS, the option needs to be explicitly opted in. Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- Documentation/dev-tools/kcsan.rst | 8 +++++++ kernel/kcsan/atomic.h | 23 ------------------- kernel/kcsan/core.c | 33 +++++++++++++++++++-------- kernel/kcsan/permissive.h | 47 +++++++++++++++++++++++++++++++++++++++ lib/Kconfig.kcsan | 10 +++++++++ 5 files changed, 89 insertions(+), 32 deletions(-) delete mode 100644 kernel/kcsan/atomic.h create mode 100644 kernel/kcsan/permissive.h (limited to 'lib') diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst index 69dc9c502ccc..7db43c7c09b8 100644 --- a/Documentation/dev-tools/kcsan.rst +++ b/Documentation/dev-tools/kcsan.rst @@ -127,6 +127,14 @@ Kconfig options: causes KCSAN to not report data races due to conflicts where the only plain accesses are aligned writes up to word size. +* ``CONFIG_KCSAN_PERMISSIVE``: Enable additional permissive rules to ignore + certain classes of common data races. Unlike the above, the rules are more + complex involving value-change patterns, access type, and address. This + option depends on ``CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=y``. For details + please see the ``kernel/kcsan/permissive.h``. Testers and maintainers that + only focus on reports from specific subsystems and not the whole kernel are + recommended to disable this option. + To use the strictest possible rules, select ``CONFIG_KCSAN_STRICT=y``, which configures KCSAN to follow the Linux-kernel memory consistency model (LKMM) as closely as possible. diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h deleted file mode 100644 index 530ae1bda8e7..000000000000 --- a/kernel/kcsan/atomic.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Rules for implicitly atomic memory accesses. - * - * Copyright (C) 2019, Google LLC. - */ - -#ifndef _KERNEL_KCSAN_ATOMIC_H -#define _KERNEL_KCSAN_ATOMIC_H - -#include - -/* - * Special rules for certain memory where concurrent conflicting accesses are - * common, however, the current convention is to not mark them; returns true if - * access to @ptr should be considered atomic. Called from slow-path. 
- */ -static bool kcsan_is_atomic_special(const volatile void *ptr) -{ - return false; -} - -#endif /* _KERNEL_KCSAN_ATOMIC_H */ diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 906100923b88..439edb9dcbb1 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -20,9 +20,9 @@ #include #include -#include "atomic.h" #include "encoding.h" #include "kcsan.h" +#include "permissive.h" static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE); unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK; @@ -353,6 +353,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, atomic_long_t *watchpoint, long encoded_watchpoint) { + const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0; struct kcsan_ctx *ctx = get_ctx(); unsigned long flags; bool consumed; @@ -374,6 +375,16 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, if (ctx->access_mask) return; + /* + * If the other thread does not want to ignore the access, and there was + * a value change as a result of this thread's operation, we will still + * generate a report of unknown origin. + * + * Use CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n to filter. + */ + if (!is_assert && kcsan_ignore_address(ptr)) + return; + /* * Consuming the watchpoint must be guarded by kcsan_is_enabled() to * avoid erroneously triggering reports if the context is disabled. @@ -396,7 +407,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]); } - if ((type & KCSAN_ACCESS_ASSERT) != 0) + if (is_assert) atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]); else atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]); @@ -427,12 +438,10 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) goto out; /* - * Special atomic rules: unlikely to be true, so we check them here in - * the slow-path, and not in the fast-path in is_atomic(). Call after - * kcsan_is_enabled(), as we may access memory that is not yet - * initialized during early boot. + * Check to-ignore addresses after kcsan_is_enabled(), as we may access + * memory that is not yet initialized during early boot. */ - if (!is_assert && kcsan_is_atomic_special(ptr)) + if (!is_assert && kcsan_ignore_address(ptr)) goto out; if (!check_encodable((unsigned long)ptr, size)) { @@ -518,8 +527,14 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (access_mask) diff &= access_mask; - /* Were we able to observe a value-change? */ - if (diff != 0) + /* + * Check if we observed a value change. + * + * Also check if the data race should be ignored (the rules depend on + * non-zero diff); if it is to be ignored, the below rules for + * KCSAN_VALUE_CHANGE_MAYBE apply. + */ + if (diff && !kcsan_ignore_data_race(size, type, old, new, diff)) value_change = KCSAN_VALUE_CHANGE_TRUE; /* Check if this access raced with another. */ diff --git a/kernel/kcsan/permissive.h b/kernel/kcsan/permissive.h new file mode 100644 index 000000000000..f90e30800c11 --- /dev/null +++ b/kernel/kcsan/permissive.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Special rules for ignoring entire classes of data-racy memory accesses. None + * of the rules here imply that such data races are generally safe! + * + * All rules in this file can be configured via CONFIG_KCSAN_PERMISSIVE. Keep + * them separate from core code to make it easier to audit. + * + * Copyright (C) 2019, Google LLC. 
+ */ + +#ifndef _KERNEL_KCSAN_PERMISSIVE_H +#define _KERNEL_KCSAN_PERMISSIVE_H + +#include + +/* + * Access ignore rules based on address. + */ +static __always_inline bool kcsan_ignore_address(const volatile void *ptr) +{ + if (!IS_ENABLED(CONFIG_KCSAN_PERMISSIVE)) + return false; + + return false; +} + +/* + * Data race ignore rules based on access type and value change patterns. + */ +static bool +kcsan_ignore_data_race(size_t size, int type, u64 old, u64 new, u64 diff) +{ + if (!IS_ENABLED(CONFIG_KCSAN_PERMISSIVE)) + return false; + + /* + * Rules here are only for plain read accesses, so that we still report + * data races between plain read-write accesses. + */ + if (type || size > sizeof(long)) + return false; + + return false; +} + +#endif /* _KERNEL_KCSAN_PERMISSIVE_H */ diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index c76fbb3ee09e..26f03c754d39 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -231,4 +231,14 @@ config KCSAN_IGNORE_ATOMICS due to two conflicting plain writes will be reported (aligned and unaligned, if CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n). +config KCSAN_PERMISSIVE + bool "Enable all additional permissive rules" + depends on KCSAN_REPORT_VALUE_CHANGE_ONLY + help + Enable additional permissive rules to ignore certain classes of data + races (also see kernel/kcsan/permissive.h). None of the permissive + rules imply that such data races are generally safe, but can be used + to further reduce reported data races due to data-racy patterns + common across the kernel. + endif # KCSAN -- cgit v1.2.3 From e04938042d77addc7f41d983aebea125cddbed33 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 15 Jun 2021 20:39:38 +0200 Subject: kcsan: Make strict mode imply interruptible watchers If CONFIG_KCSAN_STRICT=y, select CONFIG_KCSAN_INTERRUPT_WATCHER as well. With interruptible watchers, we'll also report same-CPU data races; if we requested strict mode, we might as well show these, too. Suggested-by: Paul E. McKenney Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- lib/Kconfig.kcsan | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 26f03c754d39..e0a93ffdef30 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -150,7 +150,8 @@ config KCSAN_SKIP_WATCH_RANDOMIZE KCSAN_WATCH_SKIP. config KCSAN_INTERRUPT_WATCHER - bool "Interruptible watchers" + bool "Interruptible watchers" if !KCSAN_STRICT + default KCSAN_STRICT help If enabled, a task that set up a watchpoint may be interrupted while delayed. This option will allow KCSAN to detect races between -- cgit v1.2.3 From f9398f15605a50110bf570aaa361163a85113dd1 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 23 Jul 2021 15:19:31 -0700 Subject: lib/test_stackinit: Fix static initializer test The static initializer test got accidentally converted to a dynamic initializer. Fix this and retain the giant padding hole without using an aligned struct member. 
Fixes: 50ceaa95ea09 ("lib: Introduce test_stackinit module") Cc: Ard Biesheuvel Cc: stable@vger.kernel.org Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20210723221933.3431999-2-keescook@chromium.org --- lib/test_stackinit.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) (limited to 'lib') diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c index f93b1e145ada..16b1d3a3a497 100644 --- a/lib/test_stackinit.c +++ b/lib/test_stackinit.c @@ -67,10 +67,10 @@ static bool range_contains(char *haystack_start, size_t haystack_size, #define INIT_STRUCT_none /**/ #define INIT_STRUCT_zero = { } #define INIT_STRUCT_static_partial = { .two = 0, } -#define INIT_STRUCT_static_all = { .one = arg->one, \ - .two = arg->two, \ - .three = arg->three, \ - .four = arg->four, \ +#define INIT_STRUCT_static_all = { .one = 0, \ + .two = 0, \ + .three = 0, \ + .four = 0, \ } #define INIT_STRUCT_dynamic_partial = { .two = arg->two, } #define INIT_STRUCT_dynamic_all = { .one = arg->one, \ @@ -84,8 +84,7 @@ static bool range_contains(char *haystack_start, size_t haystack_size, var.one = 0; \ var.two = 0; \ var.three = 0; \ - memset(&var.four, 0, \ - sizeof(var.four)) + var.four = 0 /* * @name: unique string name for the test @@ -210,18 +209,13 @@ struct test_small_hole { unsigned long four; }; -/* Try to trigger unhandled padding in a structure. */ -struct test_aligned { - u32 internal1; - u64 internal2; -} __aligned(64); - +/* Trigger unhandled padding in a structure. */ struct test_big_hole { u8 one; u8 two; u8 three; /* 61 byte padding hole here. */ - struct test_aligned four; + u8 four __aligned(64); } __aligned(64); struct test_trailing_hole { -- cgit v1.2.3 From 1195505f5de2adfee5f277433f0b35498467cc64 Mon Sep 17 00:00:00 2001 From: Uriel Guajardo Date: Fri, 2 Apr 2021 14:21:31 -0700 Subject: kunit: ubsan integration Integrates UBSAN into the KUnit testing framework. It fails KUnit tests whenever it reports undefined behavior. When CONFIG_KUNIT=n, nothing is printed or even formatted, so this has no behavioral impact outside of tests. kunit_fail_current_test() effectively does a pr_err() as well, so there's some slight duplication, but it also ensures an error is recorded in the debugfs entry for the running KUnit test. Print a shorter version of the message to make it less spammy. Co-developed-by: Daniel Latypov Signed-off-by: Daniel Latypov Signed-off-by: Uriel Guajardo Reviewed-by: Alan Maguire Signed-off-by: Shuah Khan --- lib/ubsan.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'lib') diff --git a/lib/ubsan.c b/lib/ubsan.c index 26229973049d..bdc380ff5d5c 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "ubsan.h" @@ -141,6 +142,8 @@ static void ubsan_prologue(struct source_location *loc, const char *reason) "========================================\n"); pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name, loc->line & LINE_MASK, loc->column & COLUMN_MASK); + + kunit_fail_current_test("%s in %s", reason, loc->file_name); } static void ubsan_epilogue(void) -- cgit v1.2.3 From acd8e8407b8fcc3229d6d8558cac338bea801aed Mon Sep 17 00:00:00 2001 From: David Gow Date: Tue, 3 Aug 2021 22:08:08 -0700 Subject: kunit: Print test statistics on failure When a number of tests fail, it can be useful to get higher-level statistics of how many tests are failing (or how many parameters are failing in parameterised tests), and in what cases or suites. 
This is already done by some non-KUnit tests, so add support for automatically generating these for KUnit tests. This change adds a 'kunit.stats_enabled' switch which has three values: - 0: No stats are printed (current behaviour) - 1: Stats are printed only for tests/suites with more than one subtest (new default) - 2: Always print test statistics For parameterised tests, the summary line looks as follows: " # inode_test_xtimestamp_decoding: pass:16 fail:0 skip:0 total:16" For test suites, there are two lines looking like this: "# ext4_inode_test: pass:1 fail:0 skip:0 total:1" "# Totals: pass:16 fail:0 skip:0 total:16" The first line gives the number of direct subtests, the second "Totals" line is the accumulated sum of all tests and test parameters. This format is based on the one used by kselftest[1]. [1]: https://elixir.bootlin.com/linux/latest/source/tools/testing/selftests/kselftest.h#L109 Signed-off-by: David Gow Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- lib/kunit/test.c | 109 ++++++++++++++++++++++++++++++++++++ tools/testing/kunit/kunit_parser.py | 2 +- 2 files changed, 110 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/kunit/test.c b/lib/kunit/test.c index d79ecb86ea57..f246b847024e 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -51,6 +52,51 @@ void __kunit_fail_current_test(const char *file, int line, const char *fmt, ...) EXPORT_SYMBOL_GPL(__kunit_fail_current_test); #endif +/* + * KUnit statistic mode: + * 0 - disabled + * 1 - only when there is more than one subtest + * 2 - enabled + */ +static int kunit_stats_enabled = 1; +module_param_named(stats_enabled, kunit_stats_enabled, int, 0644); +MODULE_PARM_DESC(stats_enabled, + "Print test stats: never (0), only for multiple subtests (1), or always (2)"); + +struct kunit_result_stats { + unsigned long passed; + unsigned long skipped; + unsigned long failed; + unsigned long total; +}; + +static bool kunit_should_print_stats(struct kunit_result_stats stats) +{ + if (kunit_stats_enabled == 0) + return false; + + if (kunit_stats_enabled == 2) + return true; + + return (stats.total > 1); +} + +static void kunit_print_test_stats(struct kunit *test, + struct kunit_result_stats stats) +{ + if (!kunit_should_print_stats(stats)) + return; + + kunit_log(KERN_INFO, test, + KUNIT_SUBTEST_INDENT + "# %s: pass:%lu fail:%lu skip:%lu total:%lu", + test->name, + stats.passed, + stats.failed, + stats.skipped, + stats.total); +} + /* * Append formatted message to log, size of which is limited to * KUNIT_LOG_SIZE bytes (including null terminating byte). 
@@ -393,15 +439,69 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite, test_case->status = KUNIT_SUCCESS; } +static void kunit_print_suite_stats(struct kunit_suite *suite, + struct kunit_result_stats suite_stats, + struct kunit_result_stats param_stats) +{ + if (kunit_should_print_stats(suite_stats)) { + kunit_log(KERN_INFO, suite, + "# %s: pass:%lu fail:%lu skip:%lu total:%lu", + suite->name, + suite_stats.passed, + suite_stats.failed, + suite_stats.skipped, + suite_stats.total); + } + + if (kunit_should_print_stats(param_stats)) { + kunit_log(KERN_INFO, suite, + "# Totals: pass:%lu fail:%lu skip:%lu total:%lu", + param_stats.passed, + param_stats.failed, + param_stats.skipped, + param_stats.total); + } +} + +static void kunit_update_stats(struct kunit_result_stats *stats, + enum kunit_status status) +{ + switch (status) { + case KUNIT_SUCCESS: + stats->passed++; + break; + case KUNIT_SKIPPED: + stats->skipped++; + break; + case KUNIT_FAILURE: + stats->failed++; + break; + } + + stats->total++; +} + +static void kunit_accumulate_stats(struct kunit_result_stats *total, + struct kunit_result_stats add) +{ + total->passed += add.passed; + total->skipped += add.skipped; + total->failed += add.failed; + total->total += add.total; +} + int kunit_run_tests(struct kunit_suite *suite) { char param_desc[KUNIT_PARAM_DESC_SIZE]; struct kunit_case *test_case; + struct kunit_result_stats suite_stats = { 0 }; + struct kunit_result_stats total_stats = { 0 }; kunit_print_subtest_start(suite); kunit_suite_for_each_test_case(suite, test_case) { struct kunit test = { .param_value = NULL, .param_index = 0 }; + struct kunit_result_stats param_stats = { 0 }; test_case->status = KUNIT_SKIPPED; if (test_case->generate_params) { @@ -431,14 +531,23 @@ int kunit_run_tests(struct kunit_suite *suite) test.param_value = test_case->generate_params(test.param_value, param_desc); test.param_index++; } + + kunit_update_stats(¶m_stats, test.status); + } while (test.param_value); + kunit_print_test_stats(&test, param_stats); + kunit_print_ok_not_ok(&test, true, test_case->status, kunit_test_case_num(suite, test_case), test_case->name, test.status_comment); + + kunit_update_stats(&suite_stats, test_case->status); + kunit_accumulate_stats(&total_stats, param_stats); } + kunit_print_suite_stats(suite, suite_stats, total_stats); kunit_print_subtest_end(suite); return 0; diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index 84938fefbac0..6310a641b151 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -133,7 +133,7 @@ def print_log(log) -> None: for m in log: print_with_timestamp(m) -TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*#).*$') +TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*# (Subtest:|.*: kunit test case crashed!)).*$') def consume_non_diagnostic(lines: LineStream) -> None: while lines and not TAP_ENTRIES.match(lines.peek()): -- cgit v1.2.3 From c0891ac15f0428ffa81b2e818d416bdf3cb74ab6 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 2 Aug 2021 23:40:32 +0300 Subject: isystem: ship and use stdarg.h Ship minimal stdarg.h (1 type, 4 macros) as . stdarg.h is the only userspace header commonly used in the kernel. GPL 2 version of can be extracted from http://archive.debian.org/debian/pool/main/g/gcc-4.2/gcc-4.2_4.2.4.orig.tar.gz Signed-off-by: Alexey Dobriyan Acked-by: Rafael J. 
Wysocki Acked-by: Ard Biesheuvel Signed-off-by: Masahiro Yamada --- arch/parisc/kernel/firmware.c | 2 +- arch/powerpc/kernel/prom_init.c | 2 +- arch/powerpc/kernel/rtas.c | 2 +- arch/powerpc/kernel/udbg.c | 2 +- arch/s390/boot/pgm_check_info.c | 2 +- arch/x86/boot/boot.h | 2 +- drivers/firmware/efi/libstub/efi-stub-helper.c | 2 +- drivers/firmware/efi/libstub/vsprintf.c | 2 +- drivers/gpu/drm/amd/display/dc/dc_helper.c | 2 +- drivers/gpu/drm/drm_print.c | 2 +- drivers/isdn/capi/capiutil.c | 2 +- drivers/macintosh/via-cuda.c | 2 +- drivers/macintosh/via-pmu.c | 2 +- .../media/atomisp/pci/hive_isp_css_include/print_support.h | 2 +- drivers/staging/media/atomisp/pci/ia_css_env.h | 2 +- .../media/atomisp/pci/runtime/debug/interface/ia_css_debug.h | 2 +- drivers/staging/media/atomisp/pci/sh_css_internal.h | 2 +- fs/befs/debug.c | 2 +- fs/reiserfs/prints.c | 2 +- fs/ufs/super.c | 2 +- include/acpi/platform/acgcc.h | 2 +- include/linux/kernel.h | 2 +- include/linux/printk.h | 2 +- include/linux/stdarg.h | 11 +++++++++++ include/linux/string.h | 2 +- lib/debug_info.c | 3 +-- lib/kasprintf.c | 2 +- lib/kunit/string-stream.h | 2 +- lib/vsprintf.c | 2 +- mm/kfence/report.c | 2 +- net/batman-adv/log.c | 2 +- 31 files changed, 41 insertions(+), 31 deletions(-) create mode 100644 include/linux/stdarg.h (limited to 'lib') diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 665b70086685..7034227dbdf3 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -51,7 +51,7 @@ * prumpf 991016 */ -#include +#include #include #include diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index a5bf355ce1d6..10664633f7e3 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -14,7 +14,7 @@ /* we cannot use FORTIFY as it brings in new symbols */ #define __NO_FORTIFY -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 99f2cce635fb..ff80bbad22a5 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -7,7 +7,7 @@ * Copyright (C) 2001 IBM. 
*/ -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index 01595e8cafe7..b1544b2f6321 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -5,7 +5,7 @@ * c 2001 PPC 64 Team, IBM Corp */ -#include +#include #include #include #include diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c index 3a46abed2549..b7d8dd88bbf2 100644 --- a/arch/s390/boot/pgm_check_info.c +++ b/arch/s390/boot/pgm_check_info.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include #include #include @@ -8,7 +9,6 @@ #include #include #include -#include #include "boot.h" const char hex_asc[] = "0123456789abcdef"; diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index ca866f1cca2e..34c9dbb6a47d 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -18,7 +18,7 @@ #ifndef __ASSEMBLY__ -#include +#include #include #include #include diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index ae87dded989d..d489bdc645fe 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -7,7 +7,7 @@ * Copyright 2011 Intel Corporation; author Matt Fleming */ -#include +#include #include #include diff --git a/drivers/firmware/efi/libstub/vsprintf.c b/drivers/firmware/efi/libstub/vsprintf.c index 1088e288c04d..71c71c222346 100644 --- a/drivers/firmware/efi/libstub/vsprintf.c +++ b/drivers/firmware/efi/libstub/vsprintf.c @@ -10,7 +10,7 @@ * Oh, it's a waste of space, but oh-so-yummy for debugging. */ -#include +#include #include #include diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index a612ba6dc389..ab6bc5d79012 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -28,9 +28,9 @@ */ #include +#include #include "dm_services.h" -#include #include "dc.h" #include "dc_dmub_srv.h" diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c index 111b932cf2a9..f783d4963d4b 100644 --- a/drivers/gpu/drm/drm_print.c +++ b/drivers/gpu/drm/drm_print.c @@ -25,7 +25,7 @@ #define DEBUG /* for pr_debug() */ -#include +#include #include #include diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c index f26bf3c66d7e..d7ae42edc4a8 100644 --- a/drivers/isdn/capi/capiutil.c +++ b/drivers/isdn/capi/capiutil.c @@ -379,7 +379,7 @@ static char *pnames[] = /*2f */ "Useruserdata" }; -#include +#include /*-------------------------------------------------------*/ static _cdebbuf *bufprint(_cdebbuf *cdb, char *fmt, ...) diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c index 3581abfb0c6a..cd267392289c 100644 --- a/drivers/macintosh/via-cuda.c +++ b/drivers/macintosh/via-cuda.c @@ -9,7 +9,7 @@ * * Copyright (C) 1996 Paul Mackerras. */ -#include +#include #include #include #include diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 4bdd4c45e7a7..4b98bc26a94b 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -18,7 +18,7 @@ * a sleep or a freq. 
switch * */ -#include +#include #include #include #include diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h index 540b405cc0f7..a3c7f3de6d17 100644 --- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h +++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h @@ -16,7 +16,7 @@ #ifndef __PRINT_SUPPORT_H_INCLUDED__ #define __PRINT_SUPPORT_H_INCLUDED__ -#include +#include extern int (*sh_css_printf)(const char *fmt, va_list args); /* depends on host supplied print function in ia_css_init() */ diff --git a/drivers/staging/media/atomisp/pci/ia_css_env.h b/drivers/staging/media/atomisp/pci/ia_css_env.h index 6b38723b27cd..3b89bbd837a0 100644 --- a/drivers/staging/media/atomisp/pci/ia_css_env.h +++ b/drivers/staging/media/atomisp/pci/ia_css_env.h @@ -17,7 +17,7 @@ #define __IA_CSS_ENV_H #include -#include /* va_list */ +#include /* va_list */ #include "ia_css_types.h" #include "ia_css_acc_types.h" diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h index 5e6e7447ae00..e37ef4232c55 100644 --- a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h +++ b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h @@ -19,7 +19,7 @@ /*! \file */ #include -#include +#include #include "ia_css_types.h" #include "ia_css_binary.h" #include "ia_css_frame_public.h" diff --git a/drivers/staging/media/atomisp/pci/sh_css_internal.h b/drivers/staging/media/atomisp/pci/sh_css_internal.h index 3c669ec79b68..496faa7297a5 100644 --- a/drivers/staging/media/atomisp/pci/sh_css_internal.h +++ b/drivers/staging/media/atomisp/pci/sh_css_internal.h @@ -20,7 +20,7 @@ #include #include #include -#include +#include #if !defined(ISP2401) #include "input_formatter.h" diff --git a/fs/befs/debug.c b/fs/befs/debug.c index eb7bd6c692c7..02fa66fb82c2 100644 --- a/fs/befs/debug.c +++ b/fs/befs/debug.c @@ -14,7 +14,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #ifdef __KERNEL__ -#include +#include #include #include #include diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index 500f2000eb41..30319dc33c18 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c @@ -8,7 +8,7 @@ #include #include -#include +#include static char error_buf[1024]; static char fmt_buf[1024]; diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 74028b5a7b0a..00a01471ea05 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -70,7 +70,7 @@ #include #include -#include +#include #include diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h index f6656be81760..fb172a03a753 100644 --- a/include/acpi/platform/acgcc.h +++ b/include/acpi/platform/acgcc.h @@ -22,7 +22,7 @@ typedef __builtin_va_list va_list; #define va_arg(v, l) __builtin_va_arg(v, l) #define va_copy(d, s) __builtin_va_copy(d, s) #else -#include +#include #endif #endif diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 1b2f0a7e00d6..2776423a587e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -2,7 +2,7 @@ #ifndef _LINUX_KERNEL_H #define _LINUX_KERNEL_H -#include +#include #include #include #include diff --git a/include/linux/printk.h b/include/linux/printk.h index e834d78f0478..9f3f29ea348e 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -2,7 +2,7 @@ #ifndef __KERNEL_PRINTK__ #define __KERNEL_PRINTK__ -#include +#include 
#include #include #include diff --git a/include/linux/stdarg.h b/include/linux/stdarg.h new file mode 100644 index 000000000000..c8dc7f4f390c --- /dev/null +++ b/include/linux/stdarg.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#ifndef _LINUX_STDARG_H +#define _LINUX_STDARG_H + +typedef __builtin_va_list va_list; +#define va_start(v, l) __builtin_va_start(v, l) +#define va_end(v) __builtin_va_end(v) +#define va_arg(v, T) __builtin_va_arg(v, T) +#define va_copy(d, s) __builtin_va_copy(d, s) + +#endif diff --git a/include/linux/string.h b/include/linux/string.h index b48d2d28e0b1..5e96d656be7a 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -6,7 +6,7 @@ #include /* for size_t */ #include /* for NULL */ #include /* for E2BIG */ -#include +#include #include extern char *strndup_user(const char __user *, long); diff --git a/lib/debug_info.c b/lib/debug_info.c index 36daf753293c..cc4723c74af5 100644 --- a/lib/debug_info.c +++ b/lib/debug_info.c @@ -5,8 +5,6 @@ * CONFIG_DEBUG_INFO_REDUCED. Please do not add actual code. However, * adding appropriate #includes is fine. */ -#include - #include #include #include @@ -22,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/lib/kasprintf.c b/lib/kasprintf.c index bacf7b83ccf0..cd2f5974ed98 100644 --- a/lib/kasprintf.c +++ b/lib/kasprintf.c @@ -5,7 +5,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds */ -#include +#include #include #include #include diff --git a/lib/kunit/string-stream.h b/lib/kunit/string-stream.h index 5e94b623454f..43f9508a55b4 100644 --- a/lib/kunit/string-stream.h +++ b/lib/kunit/string-stream.h @@ -11,7 +11,7 @@ #include #include -#include +#include struct string_stream_fragment { struct kunit *test; diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 26c83943748a..3bcb7be03f93 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -17,7 +17,7 @@ * - scnprintf and vscnprintf */ -#include +#include #include #include #include diff --git a/mm/kfence/report.c b/mm/kfence/report.c index 2a319c21c939..4b891dd75650 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -5,7 +5,7 @@ * Copyright (C) 2020, Google LLC. */ -#include +#include #include #include diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c index f0e5d1429662..7a93a1e94c40 100644 --- a/net/batman-adv/log.c +++ b/net/batman-adv/log.c @@ -7,7 +7,7 @@ #include "log.h" #include "main.h" -#include +#include #include "trace.h" -- cgit v1.2.3 From 1e2cd3084fff19e12bdf3c83ac1a8d64ef11aa63 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 23 Jul 2021 15:19:32 -0700 Subject: lib/test_stackinit: Allow building stand-alone Especially now that GCC is developing the -ftrivial-auto-var-init option[1], it's helpful to have a stand-alone userspace test for stack variable initialization. Relicense to GPLv2+ (I am the only author), provide stand-alone kernel macro stubs, and update comments for clarity. 
[1] https://gcc.gnu.org/pipermail/gcc-patches/2021-July/575198.html Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20210723221933.3431999-3-keescook@chromium.org --- lib/test_stackinit.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 69 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c index 16b1d3a3a497..70bcc1e5112c 100644 --- a/lib/test_stackinit.c +++ b/lib/test_stackinit.c @@ -1,8 +1,13 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* - * Test cases for compiler-based stack variable zeroing via future - * compiler flags or CONFIG_GCC_PLUGIN_STRUCTLEAK*. + * Test cases for compiler-based stack variable zeroing via + * -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*. + * + * External build example: + * clang -O2 -Wall -ftrivial-auto-var-init=pattern \ + * -o test_stackinit test_stackinit.c */ +#ifdef __KERNEL__ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include @@ -10,6 +15,63 @@ #include #include +#else + +/* Userspace headers. */ +#include +#include +#include +#include +#include +#include + +/* Linux kernel-ism stubs for stand-alone userspace build. */ +#define KBUILD_MODNAME "stackinit" +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info(fmt, ...) fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__) +#define __init /**/ +#define __exit /**/ +#define __user /**/ +#define noinline __attribute__((__noinline__)) +#define __aligned(x) __attribute__((__aligned__(x))) +#ifdef __clang__ +# define __compiletime_error(message) /**/ +#else +# define __compiletime_error(message) __attribute__((__error__(message))) +#endif +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (!(condition)) \ + prefix ## suffix(); \ + } while (0) +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +#define module_init(func) static int (*do_init)(void) = func +#define module_exit(func) static void (*do_exit)(void) = func +#define MODULE_LICENSE(str) int main(void) { \ + int rc; \ + /* License: str */ \ + rc = do_init(); \ + if (rc == 0) \ + do_exit(); \ + return rc; \ + } + +#endif /* __KERNEL__ */ + /* Exfiltration buffer. */ #define MAX_VAR_SIZE 128 static u8 check_buf[MAX_VAR_SIZE]; @@ -279,6 +341,10 @@ DEFINE_TEST(user, struct test_user, STRUCT, none); static int noinline __leaf_switch_none(int path, bool fill) { switch (path) { + /* + * This is intentionally unreachable. To silence the + * warning, build with -Wno-switch-unreachable + */ uint64_t var; case 1: -- cgit v1.2.3 From a8fc576d4af2f23a87a586424252df97f0ad0b06 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 23 Jul 2021 15:19:33 -0700 Subject: lib/test_stackinit: Add assigned initializers Add whole-variable assignments of cast static initializers. 
These appear to currently behave like the direct initializers, but best to check them too. For example: struct test_big_hole var; var = (struct test_big_hole){ .one = arg->one, .two= arg->two, .three = arg->three, .four = arg->four }; Additionally adds a test for whole-object assignment, which is expected to fail since it usually falls back to a memcpy(): var = *arg; Suggested-by: Arnd Bergmann Link: https://lore.kernel.org/lkml/CAK8P3a20SEoYCrp3jOK32oZc9OkiPv+1KTjNZ2GxLbHpY4WexQ@mail.gmail.com Cc: Ard Biesheuvel Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20210723221933.3431999-4-keescook@chromium.org --- lib/test_stackinit.c | 169 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 114 insertions(+), 55 deletions(-) (limited to 'lib') diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c index 70bcc1e5112c..a3c74e6a21ff 100644 --- a/lib/test_stackinit.c +++ b/lib/test_stackinit.c @@ -95,6 +95,10 @@ static bool range_contains(char *haystack_start, size_t haystack_size, return false; } +/* Whether the test is expected to fail. */ +#define WANT_SUCCESS 0 +#define XFAIL 1 + #define DO_NOTHING_TYPE_SCALAR(var_type) var_type #define DO_NOTHING_TYPE_STRING(var_type) void #define DO_NOTHING_TYPE_STRUCT(var_type) void @@ -120,34 +124,74 @@ static bool range_contains(char *haystack_start, size_t haystack_size, #define INIT_CLONE_STRING [FILL_SIZE_STRING] #define INIT_CLONE_STRUCT /**/ -#define INIT_SCALAR_none /**/ -#define INIT_SCALAR_zero = 0 +#define ZERO_CLONE_SCALAR(zero) memset(&(zero), 0x00, sizeof(zero)) +#define ZERO_CLONE_STRING(zero) memset(&(zero), 0x00, sizeof(zero)) +/* + * For the struct, intentionally poison padding to see if it gets + * copied out in direct assignments. + * */ +#define ZERO_CLONE_STRUCT(zero) \ + do { \ + memset(&(zero), 0xFF, sizeof(zero)); \ + zero.one = 0; \ + zero.two = 0; \ + zero.three = 0; \ + zero.four = 0; \ + } while (0) + +#define INIT_SCALAR_none(var_type) /**/ +#define INIT_SCALAR_zero(var_type) = 0 -#define INIT_STRING_none [FILL_SIZE_STRING] /**/ -#define INIT_STRING_zero [FILL_SIZE_STRING] = { } +#define INIT_STRING_none(var_type) [FILL_SIZE_STRING] /**/ +#define INIT_STRING_zero(var_type) [FILL_SIZE_STRING] = { } -#define INIT_STRUCT_none /**/ -#define INIT_STRUCT_zero = { } -#define INIT_STRUCT_static_partial = { .two = 0, } -#define INIT_STRUCT_static_all = { .one = 0, \ - .two = 0, \ - .three = 0, \ - .four = 0, \ +#define INIT_STRUCT_none(var_type) /**/ +#define INIT_STRUCT_zero(var_type) = { } + + +#define __static_partial { .two = 0, } +#define __static_all { .one = 0, \ + .two = 0, \ + .three = 0, \ + .four = 0, \ } -#define INIT_STRUCT_dynamic_partial = { .two = arg->two, } -#define INIT_STRUCT_dynamic_all = { .one = arg->one, \ - .two = arg->two, \ - .three = arg->three, \ - .four = arg->four, \ +#define __dynamic_partial { .two = arg->two, } +#define __dynamic_all { .one = arg->one, \ + .two = arg->two, \ + .three = arg->three, \ + .four = arg->four, \ } -#define INIT_STRUCT_runtime_partial ; \ - var.two = 0 -#define INIT_STRUCT_runtime_all ; \ - var.one = 0; \ +#define __runtime_partial var.two = 0 +#define __runtime_all var.one = 0; \ var.two = 0; \ var.three = 0; \ var.four = 0 +#define INIT_STRUCT_static_partial(var_type) \ + = __static_partial +#define INIT_STRUCT_static_all(var_type) \ + = __static_all +#define INIT_STRUCT_dynamic_partial(var_type) \ + = __dynamic_partial +#define INIT_STRUCT_dynamic_all(var_type) \ + = __dynamic_all +#define INIT_STRUCT_runtime_partial(var_type) \ + ; 
__runtime_partial +#define INIT_STRUCT_runtime_all(var_type) \ + ; __runtime_all + +#define INIT_STRUCT_assigned_static_partial(var_type) \ + ; var = (var_type)__static_partial +#define INIT_STRUCT_assigned_static_all(var_type) \ + ; var = (var_type)__static_all +#define INIT_STRUCT_assigned_dynamic_partial(var_type) \ + ; var = (var_type)__dynamic_partial +#define INIT_STRUCT_assigned_dynamic_all(var_type) \ + ; var = (var_type)__dynamic_all + +#define INIT_STRUCT_assigned_copy(var_type) \ + ; var = *(arg) + /* * @name: unique string name for the test * @var_type: type to be tested for zeroing initialization @@ -167,7 +211,7 @@ static noinline __init int test_ ## name (void) \ BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \ \ /* Fill clone type with zero for per-field init. */ \ - memset(&zero, 0x00, sizeof(zero)); \ + ZERO_CLONE_ ## which(zero); \ /* Clear entire check buffer for 0xFF overlap test. */ \ memset(check_buf, 0x00, sizeof(check_buf)); \ /* Fill stack with 0xFF. */ \ @@ -210,7 +254,7 @@ static noinline __init int test_ ## name (void) \ return (xfail) ? 0 : 1; \ } \ } -#define DEFINE_TEST(name, var_type, which, init_level) \ +#define DEFINE_TEST(name, var_type, which, init_level, xfail) \ /* no-op to force compiler into ignoring "uninitialized" vars */\ static noinline __init DO_NOTHING_TYPE_ ## which(var_type) \ do_nothing_ ## name(var_type *ptr) \ @@ -226,7 +270,8 @@ static noinline __init int leaf_ ## name(unsigned long sp, \ var_type *arg) \ { \ char buf[VAR_BUFFER]; \ - var_type var INIT_ ## which ## _ ## init_level; \ + var_type var \ + INIT_ ## which ## _ ## init_level(var_type); \ \ target_start = &var; \ target_size = sizeof(var); \ @@ -252,7 +297,7 @@ static noinline __init int leaf_ ## name(unsigned long sp, \ \ return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \ } \ -DEFINE_TEST_DRIVER(name, var_type, which, 0) +DEFINE_TEST_DRIVER(name, var_type, which, xfail) /* Structure with no padding. 
*/ struct test_packed { @@ -296,42 +341,50 @@ struct test_user { unsigned long four; }; -#define DEFINE_SCALAR_TEST(name, init) \ - DEFINE_TEST(name ## _ ## init, name, SCALAR, init) +#define DEFINE_SCALAR_TEST(name, init, xfail) \ + DEFINE_TEST(name ## _ ## init, name, SCALAR, \ + init, xfail) -#define DEFINE_SCALAR_TESTS(init) \ - DEFINE_SCALAR_TEST(u8, init); \ - DEFINE_SCALAR_TEST(u16, init); \ - DEFINE_SCALAR_TEST(u32, init); \ - DEFINE_SCALAR_TEST(u64, init); \ - DEFINE_TEST(char_array_ ## init, unsigned char, STRING, init) +#define DEFINE_SCALAR_TESTS(init, xfail) \ + DEFINE_SCALAR_TEST(u8, init, xfail); \ + DEFINE_SCALAR_TEST(u16, init, xfail); \ + DEFINE_SCALAR_TEST(u32, init, xfail); \ + DEFINE_SCALAR_TEST(u64, init, xfail); \ + DEFINE_TEST(char_array_ ## init, unsigned char, \ + STRING, init, xfail) -#define DEFINE_STRUCT_TEST(name, init) \ +#define DEFINE_STRUCT_TEST(name, init, xfail) \ DEFINE_TEST(name ## _ ## init, \ - struct test_ ## name, STRUCT, init) + struct test_ ## name, STRUCT, init, \ + xfail) + +#define DEFINE_STRUCT_TESTS(init, xfail) \ + DEFINE_STRUCT_TEST(small_hole, init, xfail); \ + DEFINE_STRUCT_TEST(big_hole, init, xfail); \ + DEFINE_STRUCT_TEST(trailing_hole, init, xfail); \ + DEFINE_STRUCT_TEST(packed, init, xfail) -#define DEFINE_STRUCT_TESTS(init) \ - DEFINE_STRUCT_TEST(small_hole, init); \ - DEFINE_STRUCT_TEST(big_hole, init); \ - DEFINE_STRUCT_TEST(trailing_hole, init); \ - DEFINE_STRUCT_TEST(packed, init) +#define DEFINE_STRUCT_INITIALIZER_TESTS(base) \ + DEFINE_STRUCT_TESTS(base ## _ ## partial, \ + WANT_SUCCESS); \ + DEFINE_STRUCT_TESTS(base ## _ ## all, \ + WANT_SUCCESS) /* These should be fully initialized all the time! */ -DEFINE_SCALAR_TESTS(zero); -DEFINE_STRUCT_TESTS(zero); -/* Static initialization: padding may be left uninitialized. */ -DEFINE_STRUCT_TESTS(static_partial); -DEFINE_STRUCT_TESTS(static_all); -/* Dynamic initialization: padding may be left uninitialized. */ -DEFINE_STRUCT_TESTS(dynamic_partial); -DEFINE_STRUCT_TESTS(dynamic_all); -/* Runtime initialization: padding may be left uninitialized. */ -DEFINE_STRUCT_TESTS(runtime_partial); -DEFINE_STRUCT_TESTS(runtime_all); +DEFINE_SCALAR_TESTS(zero, WANT_SUCCESS); +DEFINE_STRUCT_TESTS(zero, WANT_SUCCESS); +/* Struct initializers: padding may be left uninitialized. */ +DEFINE_STRUCT_INITIALIZER_TESTS(static); +DEFINE_STRUCT_INITIALIZER_TESTS(dynamic); +DEFINE_STRUCT_INITIALIZER_TESTS(runtime); +DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static); +DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic); +DEFINE_STRUCT_TESTS(assigned_copy, XFAIL); /* No initialization without compiler instrumentation. */ -DEFINE_SCALAR_TESTS(none); -DEFINE_STRUCT_TESTS(none); -DEFINE_TEST(user, struct test_user, STRUCT, none); +DEFINE_SCALAR_TESTS(none, WANT_SUCCESS); +DEFINE_STRUCT_TESTS(none, WANT_SUCCESS); +/* Initialization of members with __user attribute. */ +DEFINE_TEST(user, struct test_user, STRUCT, none, WANT_SUCCESS); /* * Check two uses through a variable declaration outside either path, @@ -394,8 +447,8 @@ static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill, * non-code areas (i.e. in a switch statement before the first "case"). 
* https://bugs.llvm.org/show_bug.cgi?id=44916 */ -DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, 1); -DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, 1); +DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, XFAIL); +DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, XFAIL); static int __init test_stackinit_init(void) { @@ -421,12 +474,18 @@ static int __init test_stackinit_init(void) test_structs(zero); /* Padding here appears to be accidentally always initialized? */ test_structs(dynamic_partial); + test_structs(assigned_dynamic_partial); /* Padding initialization depends on compiler behaviors. */ test_structs(static_partial); test_structs(static_all); test_structs(dynamic_all); test_structs(runtime_partial); test_structs(runtime_all); + test_structs(assigned_static_partial); + test_structs(assigned_static_all); + test_structs(assigned_dynamic_all); + /* Everything fails this since it effectively performs a memcpy(). */ + test_structs(assigned_copy); /* STRUCTLEAK_BYREF_ALL should cover everything from here down. */ test_scalars(none); -- cgit v1.2.3 From 90e7a6de62781c27d6a111fccfb19b807f9b6887 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 24 Aug 2021 17:25:29 +0300 Subject: lib/scatterlist: Provide a dedicated function to support table append RDMA is the only in-kernel user that uses __sg_alloc_table_from_pages to append pages dynamically. In the next patch. That mode will be extended and that function will get more parameters. So separate it into a unique function to make such change more clear. Link: https://lore.kernel.org/r/20210824142531.3877007-2-maorg@nvidia.com Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/gpu/drm/drm_prime.c | 13 +++++----- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 11 +++----- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 14 ++++------- drivers/infiniband/core/umem.c | 4 +-- include/linux/scatterlist.h | 39 ++++++++++++++++++++++++++--- lib/scatterlist.c | 36 ++++++++++++++------------ tools/testing/scatterlist/main.c | 25 ++++++++++++------ 7 files changed, 90 insertions(+), 52 deletions(-) (limited to 'lib') diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 2a54f86856af..cf3278041f9c 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -807,8 +807,8 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, struct page **pages, unsigned int nr_pages) { struct sg_table *sg; - struct scatterlist *sge; size_t max_segment = 0; + int err; sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); if (!sg) @@ -818,13 +818,12 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, max_segment = dma_max_mapping_size(dev->dev); if (max_segment == 0) max_segment = UINT_MAX; - sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, - max_segment, - NULL, 0, GFP_KERNEL); - if (IS_ERR(sge)) { + err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0, + nr_pages << PAGE_SHIFT, + max_segment, GFP_KERNEL); + if (err) { kfree(sg); - sg = ERR_CAST(sge); + sg = ERR_PTR(err); } return sg; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 7487bab11f0b..458f797a9e1e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -133,7 +133,6 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) unsigned int max_segment = i915_sg_segment_size(); struct sg_table *st; unsigned int 
sg_page_sizes; - struct scatterlist *sg; struct page **pvec; int ret; @@ -153,13 +152,11 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) spin_unlock(&i915->mm.notifier_lock); alloc_table: - sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0, - num_pages << PAGE_SHIFT, max_segment, - NULL, 0, GFP_KERNEL); - if (IS_ERR(sg)) { - ret = PTR_ERR(sg); + ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0, + num_pages << PAGE_SHIFT, + max_segment, GFP_KERNEL); + if (ret) goto err; - } ret = i915_gem_gtt_prepare_pages(obj, st); if (ret) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 0488042fb287..fc372d2e52a1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -363,7 +363,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) int ret = 0; static size_t sgl_size; static size_t sgt_size; - struct scatterlist *sg; if (vmw_tt->mapped) return 0; @@ -386,15 +385,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) if (unlikely(ret != 0)) return ret; - sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages, - vsgt->num_pages, 0, - (unsigned long) vsgt->num_pages << PAGE_SHIFT, - dma_get_max_seg_size(dev_priv->drm.dev), - NULL, 0, GFP_KERNEL); - if (IS_ERR(sg)) { - ret = PTR_ERR(sg); + ret = sg_alloc_table_from_pages_segment( + &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0, + (unsigned long)vsgt->num_pages << PAGE_SHIFT, + dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL); + if (ret) goto out_sg_alloc_fail; - } if (vsgt->num_pages > vmw_tt->sgt.orig_nents) { uint64_t over_alloc = diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 0eb40025075f..b741758e528f 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -226,8 +226,8 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, cur_base += ret * PAGE_SIZE; npages -= ret; - sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret, - 0, ret << PAGE_SHIFT, + sg = sg_alloc_append_table_from_pages(&umem->sg_head, page_list, + ret, 0, ret << PAGE_SHIFT, ib_dma_max_seg_size(device), sg, npages, GFP_KERNEL); umem->sg_nents = umem->sg_head.nents; diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index ecf87484814f..5c700f2a0d18 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -285,14 +285,45 @@ void sg_free_table(struct sg_table *); int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); -struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, +struct scatterlist *sg_alloc_append_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, unsigned int max_segment, struct scatterlist *prv, unsigned int left_pages, gfp_t gfp_mask); -int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, - unsigned int n_pages, unsigned int offset, - unsigned long size, gfp_t gfp_mask); +int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, + unsigned int n_pages, unsigned int offset, + unsigned long size, + unsigned int max_segment, gfp_t gfp_mask); + +/** + * sg_alloc_table_from_pages - Allocate and initialize an sg table from + * an array of pages + * @sgt: The sg table header to use + * @pages: Pointer to an array of 
page pointers + * @n_pages: Number of pages in the pages array + * @offset: Offset from start of the first page to the start of a buffer + * @size: Number of valid bytes in the buffer (after offset) + * @gfp_mask: GFP allocation mask + * + * Description: + * Allocate and initialize an sg table from a list of pages. Contiguous + * ranges of the pages are squashed into a single scatterlist node. A user + * may provide an offset at a start and a size of valid data in a buffer + * specified by the page array. The returned sg table is released by + * sg_free_table. + * + * Returns: + * 0 on success, negative error on failure + */ +static inline int sg_alloc_table_from_pages(struct sg_table *sgt, + struct page **pages, + unsigned int n_pages, + unsigned int offset, + unsigned long size, gfp_t gfp_mask) +{ + return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset, + size, UINT_MAX, gfp_mask); +} #ifdef CONFIG_SGL_ALLOC struct scatterlist *sgl_alloc_order(unsigned long long length, diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 27efa6178153..611c63d4a958 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -397,7 +397,7 @@ static struct scatterlist *get_next_sg(struct sg_table *table, } /** - * __sg_alloc_table_from_pages - Allocate and initialize an sg table from + * sg_alloc_append_table_from_pages - Allocate and initialize an sg table from * an array of pages * @sgt: The sg table header to use * @pages: Pointer to an array of page pointers @@ -425,7 +425,7 @@ static struct scatterlist *get_next_sg(struct sg_table *table, * If this function returns non-0 (eg failure), the caller must call * sg_free_table() to cleanup any leftover allocations. */ -struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, +struct scatterlist *sg_alloc_append_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, unsigned int max_segment, struct scatterlist *prv, unsigned int left_pages, @@ -520,36 +520,40 @@ out: sg_mark_end(s); return s; } -EXPORT_SYMBOL(__sg_alloc_table_from_pages); +EXPORT_SYMBOL(sg_alloc_append_table_from_pages); /** - * sg_alloc_table_from_pages - Allocate and initialize an sg table from - * an array of pages + * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from + * an array of pages and given maximum + * segment. * @sgt: The sg table header to use * @pages: Pointer to an array of page pointers * @n_pages: Number of pages in the pages array * @offset: Offset from start of the first page to the start of a buffer * @size: Number of valid bytes in the buffer (after offset) + * @max_segment: Maximum size of a scatterlist element in bytes * @gfp_mask: GFP allocation mask * * Description: * Allocate and initialize an sg table from a list of pages. Contiguous - * ranges of the pages are squashed into a single scatterlist node. A user - * may provide an offset at a start and a size of valid data in a buffer - * specified by the page array. The returned sg table is released by - * sg_free_table. + * ranges of the pages are squashed into a single scatterlist node up to the + * maximum size specified in @max_segment. A user may provide an offset at a + * start and a size of valid data in a buffer specified by the page array. * - * Returns: + * The returned sg table is released by sg_free_table. 
+ * + * Returns: * 0 on success, negative error on failure */ -int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, - unsigned int n_pages, unsigned int offset, - unsigned long size, gfp_t gfp_mask) +int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, + unsigned int n_pages, unsigned int offset, + unsigned long size, unsigned int max_segment, + gfp_t gfp_mask) { - return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages, - offset, size, UINT_MAX, NULL, 0, gfp_mask)); + return PTR_ERR_OR_ZERO(sg_alloc_append_table_from_pages(sgt, pages, + n_pages, offset, size, max_segment, NULL, 0, gfp_mask)); } -EXPORT_SYMBOL(sg_alloc_table_from_pages); +EXPORT_SYMBOL(sg_alloc_table_from_pages_segment); #ifdef CONFIG_SGL_ALLOC diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c index 652254754b4c..c2ff9179c2cc 100644 --- a/tools/testing/scatterlist/main.c +++ b/tools/testing/scatterlist/main.c @@ -87,28 +87,39 @@ int main(void) int left_pages = test->pfn_app ? test->num_pages : 0; struct page *pages[MAX_PAGES]; struct sg_table st; - struct scatterlist *sg; + struct scatterlist *sg = NULL; + int ret; set_pages(pages, test->pfn, test->num_pages); - sg = __sg_alloc_table_from_pages(&st, pages, test->num_pages, 0, - test->size, test->max_seg, NULL, left_pages, GFP_KERNEL); - assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret); + if (test->pfn_app) { + sg = sg_alloc_append_table_from_pages( + &st, pages, test->num_pages, 0, test->size, + test->max_seg, NULL, left_pages, GFP_KERNEL); + assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret); + } else { + ret = sg_alloc_table_from_pages_segment( + &st, pages, test->num_pages, 0, test->size, + test->max_seg, GFP_KERNEL); + assert(ret == test->alloc_ret); + } if (test->alloc_ret) continue; if (test->pfn_app) { set_pages(pages, test->pfn_app, test->num_pages); - sg = __sg_alloc_table_from_pages(&st, pages, test->num_pages, 0, - test->size, test->max_seg, sg, 0, GFP_KERNEL); + sg = sg_alloc_append_table_from_pages( + &st, pages, test->num_pages, 0, test->size, + test->max_seg, sg, 0, GFP_KERNEL); assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret); } VALIDATE(st.nents == test->expected_segments, &st, test); if (!test->pfn_app) - VALIDATE(st.orig_nents == test->expected_segments, &st, test); + VALIDATE(st.orig_nents == test->expected_segments, &st, + test); sg_free_table(&st); } -- cgit v1.2.3 From 3e302dbc6774a27edaea39a1d5107f0c12e35cf2 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 24 Aug 2021 17:25:30 +0300 Subject: lib/scatterlist: Fix wrong update of orig_nents orig_nents should represent the number of entries with pages, but __sg_alloc_table_from_pages sets orig_nents as the number of total entries in the table. This is wrong when the API is used for dynamic allocation where not all the table entries are mapped with pages. It wasn't observed until now, since RDMA umem who uses this API in the dynamic form doesn't use orig_nents implicit or explicit by the scatterlist APIs. Fix it by changing the append API to track the SG append table state and have an API to free the append table according to the total number of entries in the table. Now all APIs set orig_nents as number of enries with pages. 
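For reference, the caller pattern the append API now expects looks roughly like
the sketch below (a minimal sketch only: the append table must start out
zero-initialized, left_pages tells the helper how many pages will still be
appended in later calls, and fill_page_array(), pages, npages and max_segment
are illustrative placeholders rather than part of the API):

	struct sg_append_table append = {};
	int ret;

	while (npages) {
		/* fill_page_array() stands in for pinning/collecting pages */
		unsigned int n = fill_page_array(pages, npages);

		npages -= n;
		ret = sg_alloc_append_table_from_pages(&append, pages, n, 0,
						       n << PAGE_SHIFT,
						       max_segment, npages,
						       GFP_KERNEL);
		if (ret) {
			sg_free_append_table(&append);
			return ret;
		}
	}
	/* append.sgt.nents/orig_nents now count only entries that hold pages */
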
Fixes: 07da1223ec93 ("lib/scatterlist: Add support in dynamic allocation of SG table from pages") Link: https://lore.kernel.org/r/20210824142531.3877007-3-maorg@nvidia.com Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/umem.c | 34 ++++++----- include/linux/scatterlist.h | 19 ++++-- include/rdma/ib_umem.h | 1 + lib/scatterlist.c | 127 ++++++++++++++++++++++++--------------- lib/sg_pool.c | 3 +- tools/testing/scatterlist/main.c | 45 +++++++------- 6 files changed, 136 insertions(+), 93 deletions(-) (limited to 'lib') diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index b741758e528f..42481e7a72e8 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -59,7 +59,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d unpin_user_page_range_dirty_lock(sg_page(sg), DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty); - sg_free_table(&umem->sg_head); + sg_free_append_table(&umem->sgt_append); } /** @@ -155,8 +155,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, unsigned long dma_attr = 0; struct mm_struct *mm; unsigned long npages; - int ret; - struct scatterlist *sg = NULL; + int pinned, ret; unsigned int gup_flags = FOLL_WRITE; /* @@ -216,28 +215,33 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, while (npages) { cond_resched(); - ret = pin_user_pages_fast(cur_base, + pinned = pin_user_pages_fast(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof(struct page *)), gup_flags | FOLL_LONGTERM, page_list); - if (ret < 0) + if (pinned < 0) { + ret = pinned; goto umem_release; + } - cur_base += ret * PAGE_SIZE; - npages -= ret; - sg = sg_alloc_append_table_from_pages(&umem->sg_head, page_list, - ret, 0, ret << PAGE_SHIFT, - ib_dma_max_seg_size(device), sg, npages, - GFP_KERNEL); - umem->sg_nents = umem->sg_head.nents; - if (IS_ERR(sg)) { - unpin_user_pages_dirty_lock(page_list, ret, 0); - ret = PTR_ERR(sg); + cur_base += pinned * PAGE_SIZE; + npages -= pinned; + ret = sg_alloc_append_table_from_pages( + &umem->sgt_append, page_list, pinned, 0, + pinned << PAGE_SHIFT, ib_dma_max_seg_size(device), + npages, GFP_KERNEL); + umem->sg_nents = umem->sgt_append.sgt.nents; + if (ret) { + memcpy(&umem->sg_head.sgl, &umem->sgt_append.sgt, + sizeof(umem->sgt_append.sgt)); + unpin_user_pages_dirty_lock(page_list, pinned, 0); goto umem_release; } } + memcpy(&umem->sg_head.sgl, &umem->sgt_append.sgt, + sizeof(umem->sgt_append.sgt)); if (access & IB_ACCESS_RELAXED_ORDERING) dma_attr |= DMA_ATTR_WEAK_ORDERING; diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 5c700f2a0d18..266754a55327 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -39,6 +39,12 @@ struct sg_table { unsigned int orig_nents; /* original size of list */ }; +struct sg_append_table { + struct sg_table sgt; /* The scatter list table */ + struct scatterlist *prv; /* last populated sge in the table */ + unsigned int total_nents; /* Total entries in the table */ +}; + /* * Notes on SG table design. 
* @@ -280,16 +286,17 @@ typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); typedef void (sg_free_fn)(struct scatterlist *, unsigned int); void __sg_free_table(struct sg_table *, unsigned int, unsigned int, - sg_free_fn *); + sg_free_fn *, unsigned int); void sg_free_table(struct sg_table *); +void sg_free_append_table(struct sg_append_table *sgt); int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); -struct scatterlist *sg_alloc_append_table_from_pages(struct sg_table *sgt, - struct page **pages, unsigned int n_pages, unsigned int offset, - unsigned long size, unsigned int max_segment, - struct scatterlist *prv, unsigned int left_pages, - gfp_t gfp_mask); +int sg_alloc_append_table_from_pages(struct sg_append_table *sgt, + struct page **pages, unsigned int n_pages, + unsigned int offset, unsigned long size, + unsigned int max_segment, + unsigned int left_pages, gfp_t gfp_mask); int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 676c57f5ca80..33cb23b2ee3c 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -26,6 +26,7 @@ struct ib_umem { u32 is_odp : 1; u32 is_dmabuf : 1; struct work_struct work; + struct sg_append_table sgt_append; struct sg_table sg_head; int nmap; unsigned int sg_nents; diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 611c63d4a958..f4b1ff78ab2d 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -182,6 +182,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents) * @nents_first_chunk: Number of entries int the (preallocated) first * scatterlist chunk, 0 means no such preallocated first chunk * @free_fn: Free function + * @num_ents: Number of entries in the table * * Description: * Free an sg table previously allocated and setup with @@ -190,7 +191,8 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents) * **/ void __sg_free_table(struct sg_table *table, unsigned int max_ents, - unsigned int nents_first_chunk, sg_free_fn *free_fn) + unsigned int nents_first_chunk, sg_free_fn *free_fn, + unsigned int num_ents) { struct scatterlist *sgl, *next; unsigned curr_max_ents = nents_first_chunk ?: max_ents; @@ -199,8 +201,8 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents, return; sgl = table->sgl; - while (table->orig_nents) { - unsigned int alloc_size = table->orig_nents; + while (num_ents) { + unsigned int alloc_size = num_ents; unsigned int sg_size; /* @@ -218,7 +220,7 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents, next = NULL; } - table->orig_nents -= sg_size; + num_ents -= sg_size; if (nents_first_chunk) nents_first_chunk = 0; else @@ -231,6 +233,19 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents, } EXPORT_SYMBOL(__sg_free_table); +/** + * sg_free_append_table - Free a previously allocated append sg table. 
+ * @table: The mapped sg append table header + * + **/ +void sg_free_append_table(struct sg_append_table *table) +{ + __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, false, sg_kfree, + table->total_nents); +} +EXPORT_SYMBOL(sg_free_append_table); + + /** * sg_free_table - Free a previously allocated sg table * @table: The mapped sg table header @@ -238,7 +253,8 @@ EXPORT_SYMBOL(__sg_free_table); **/ void sg_free_table(struct sg_table *table) { - __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree); + __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree, + table->orig_nents); } EXPORT_SYMBOL(sg_free_table); @@ -359,13 +375,12 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC, NULL, 0, gfp_mask, sg_kmalloc); if (unlikely(ret)) - __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree); - + sg_free_table(table); return ret; } EXPORT_SYMBOL(sg_alloc_table); -static struct scatterlist *get_next_sg(struct sg_table *table, +static struct scatterlist *get_next_sg(struct sg_append_table *table, struct scatterlist *cur, unsigned long needed_sges, gfp_t gfp_mask) @@ -386,54 +401,52 @@ static struct scatterlist *get_next_sg(struct sg_table *table, return ERR_PTR(-ENOMEM); sg_init_table(new_sg, alloc_size); if (cur) { + table->total_nents += alloc_size - 1; __sg_chain(next_sg, new_sg); - table->orig_nents += alloc_size - 1; } else { - table->sgl = new_sg; - table->orig_nents = alloc_size; - table->nents = 0; + table->sgt.sgl = new_sg; + table->total_nents = alloc_size; } return new_sg; } /** - * sg_alloc_append_table_from_pages - Allocate and initialize an sg table from - * an array of pages - * @sgt: The sg table header to use - * @pages: Pointer to an array of page pointers - * @n_pages: Number of pages in the pages array + * sg_alloc_append_table_from_pages - Allocate and initialize an append sg + * table from an array of pages + * @sgt_append: The sg append table to use + * @pages: Pointer to an array of page pointers + * @n_pages: Number of pages in the pages array * @offset: Offset from start of the first page to the start of a buffer * @size: Number of valid bytes in the buffer (after offset) * @max_segment: Maximum size of a scatterlist element in bytes - * @prv: Last populated sge in sgt * @left_pages: Left pages caller have to set after this call * @gfp_mask: GFP allocation mask * * Description: - * If @prv is NULL, allocate and initialize an sg table from a list of pages, - * else reuse the scatterlist passed in at @prv. - * Contiguous ranges of the pages are squashed into a single scatterlist - * entry up to the maximum size specified in @max_segment. A user may - * provide an offset at a start and a size of valid data in a buffer - * specified by the page array. + * In the first call it allocate and initialize an sg table from a list of + * pages, else reuse the scatterlist from sgt_append. Contiguous ranges of + * the pages are squashed into a single scatterlist entry up to the maximum + * size specified in @max_segment. A user may provide an offset at a start + * and a size of valid data in a buffer specified by the page array. The + * returned sg table is released by sg_free_append_table * * Returns: - * Last SGE in sgt on success, PTR_ERR on otherwise. - * The allocation in @sgt must be released by sg_free_table. 
+ * 0 on success, negative error on failure * * Notes: * If this function returns non-0 (eg failure), the caller must call - * sg_free_table() to cleanup any leftover allocations. + * sg_free_append_table() to cleanup any leftover allocations. + * + * In the fist call, sgt_append must by initialized. */ -struct scatterlist *sg_alloc_append_table_from_pages(struct sg_table *sgt, +int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, unsigned int max_segment, - struct scatterlist *prv, unsigned int left_pages, - gfp_t gfp_mask) + unsigned int left_pages, gfp_t gfp_mask) { unsigned int chunks, cur_page, seg_len, i, prv_len = 0; unsigned int added_nents = 0; - struct scatterlist *s = prv; + struct scatterlist *s = sgt_append->prv; /* * The algorithm below requires max_segment to be aligned to PAGE_SIZE @@ -441,25 +454,26 @@ struct scatterlist *sg_alloc_append_table_from_pages(struct sg_table *sgt, */ max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE); if (WARN_ON(max_segment < PAGE_SIZE)) - return ERR_PTR(-EINVAL); + return -EINVAL; - if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && prv) - return ERR_PTR(-EOPNOTSUPP); + if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && sgt_append->prv) + return -EOPNOTSUPP; - if (prv) { - unsigned long paddr = (page_to_pfn(sg_page(prv)) * PAGE_SIZE + - prv->offset + prv->length) / - PAGE_SIZE; + if (sgt_append->prv) { + unsigned long paddr = + (page_to_pfn(sg_page(sgt_append->prv)) * PAGE_SIZE + + sgt_append->prv->offset + sgt_append->prv->length) / + PAGE_SIZE; if (WARN_ON(offset)) - return ERR_PTR(-EINVAL); + return -EINVAL; /* Merge contiguous pages into the last SG */ - prv_len = prv->length; + prv_len = sgt_append->prv->length; while (n_pages && page_to_pfn(pages[0]) == paddr) { - if (prv->length + PAGE_SIZE > max_segment) + if (sgt_append->prv->length + PAGE_SIZE > max_segment) break; - prv->length += PAGE_SIZE; + sgt_append->prv->length += PAGE_SIZE; paddr++; pages++; n_pages--; @@ -496,15 +510,16 @@ struct scatterlist *sg_alloc_append_table_from_pages(struct sg_table *sgt, } /* Pass how many chunks might be left */ - s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask); + s = get_next_sg(sgt_append, s, chunks - i + left_pages, + gfp_mask); if (IS_ERR(s)) { /* * Adjust entry length to be as before function was * called. 
*/ - if (prv) - prv->length = prv_len; - return s; + if (sgt_append->prv) + sgt_append->prv->length = prv_len; + return PTR_ERR(s); } chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset; sg_set_page(s, pages[cur_page], @@ -514,11 +529,13 @@ struct scatterlist *sg_alloc_append_table_from_pages(struct sg_table *sgt, offset = 0; cur_page = j; } - sgt->nents += added_nents; + sgt_append->sgt.nents += added_nents; + sgt_append->sgt.orig_nents = sgt_append->sgt.nents; + sgt_append->prv = s; out: if (!left_pages) sg_mark_end(s); - return s; + return 0; } EXPORT_SYMBOL(sg_alloc_append_table_from_pages); @@ -550,8 +567,18 @@ int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, unsigned long size, unsigned int max_segment, gfp_t gfp_mask) { - return PTR_ERR_OR_ZERO(sg_alloc_append_table_from_pages(sgt, pages, - n_pages, offset, size, max_segment, NULL, 0, gfp_mask)); + struct sg_append_table append = {}; + int err; + + err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset, + size, max_segment, 0, gfp_mask); + if (err) { + sg_free_append_table(&append); + return err; + } + memcpy(sgt, &append.sgt, sizeof(*sgt)); + WARN_ON(append.total_nents != sgt->orig_nents); + return 0; } EXPORT_SYMBOL(sg_alloc_table_from_pages_segment); diff --git a/lib/sg_pool.c b/lib/sg_pool.c index db29e5c1f790..a0b1a52cd6f7 100644 --- a/lib/sg_pool.c +++ b/lib/sg_pool.c @@ -90,7 +90,8 @@ void sg_free_table_chained(struct sg_table *table, if (nents_first_chunk == 1) nents_first_chunk = 0; - __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free); + __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free, + table->orig_nents); } EXPORT_SYMBOL_GPL(sg_free_table_chained); diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c index c2ff9179c2cc..08465a701cd5 100644 --- a/tools/testing/scatterlist/main.c +++ b/tools/testing/scatterlist/main.c @@ -85,43 +85,46 @@ int main(void) for (i = 0, test = tests; test->expected_segments; test++, i++) { int left_pages = test->pfn_app ? 
test->num_pages : 0; + struct sg_append_table append = {}; struct page *pages[MAX_PAGES]; - struct sg_table st; - struct scatterlist *sg = NULL; int ret; set_pages(pages, test->pfn, test->num_pages); - if (test->pfn_app) { - sg = sg_alloc_append_table_from_pages( - &st, pages, test->num_pages, 0, test->size, - test->max_seg, NULL, left_pages, GFP_KERNEL); - assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret); - } else { + if (test->pfn_app) + ret = sg_alloc_append_table_from_pages( + &append, pages, test->num_pages, 0, test->size, + test->max_seg, left_pages, GFP_KERNEL); + else ret = sg_alloc_table_from_pages_segment( - &st, pages, test->num_pages, 0, test->size, - test->max_seg, GFP_KERNEL); - assert(ret == test->alloc_ret); - } + &append.sgt, pages, test->num_pages, 0, + test->size, test->max_seg, GFP_KERNEL); + + assert(ret == test->alloc_ret); if (test->alloc_ret) continue; if (test->pfn_app) { set_pages(pages, test->pfn_app, test->num_pages); - sg = sg_alloc_append_table_from_pages( - &st, pages, test->num_pages, 0, test->size, - test->max_seg, sg, 0, GFP_KERNEL); + ret = sg_alloc_append_table_from_pages( + &append, pages, test->num_pages, 0, test->size, + test->max_seg, 0, GFP_KERNEL); - assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret); + assert(ret == test->alloc_ret); } - VALIDATE(st.nents == test->expected_segments, &st, test); + VALIDATE(append.sgt.nents == test->expected_segments, + &append.sgt, test); if (!test->pfn_app) - VALIDATE(st.orig_nents == test->expected_segments, &st, - test); - - sg_free_table(&st); + VALIDATE(append.sgt.orig_nents == + test->expected_segments, + &append.sgt, test); + + if (test->pfn_app) + sg_free_append_table(&append); + else + sg_free_table(&append.sgt); } assert(i == (sizeof(tests) / sizeof(tests[0])) - 1); -- cgit v1.2.3 From 68fdb64485014802152725a4aec63296847f740e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 25 Jun 2021 10:34:35 +0200 Subject: lib/logic_iomem: fix sparse warnings A couple of sparse warnings happened here due to casts on the prints, a missing static and a missing include. Fix all of them. 
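The cast-related warnings are the usual sparse address-space complaints when an
__iomem pointer is cast to a plain integer for printing; roughly (the exact
message wording may differ between sparse versions):

	/* warning: cast removes address space '__iomem' of expression */
	WARN(1, "invalid iounmap for addr 0x%llx\n", (unsigned long long)addr);

	/* clean: __force tells sparse the address-space cast is intentional */
	WARN(1, "invalid iounmap for addr 0x%llx\n",
	     (unsigned long long __force)addr);
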
Reported-by: kernel test robot Fixes: ca2e334232b6 ("lib: add iomem emulation (logic_iomem)") Signed-off-by: Johannes Berg Acked-By: Anton Ivanov Signed-off-by: Richard Weinberger --- lib/logic_iomem.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'lib') diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c index b76b92dd0f1f..9bdfde0c0f86 100644 --- a/lib/logic_iomem.c +++ b/lib/logic_iomem.c @@ -6,6 +6,7 @@ #include #include #include +#include struct logic_iomem_region { const struct resource *res; @@ -78,7 +79,7 @@ static void __iomem *real_ioremap(phys_addr_t offset, size_t size) static void real_iounmap(void __iomem *addr) { WARN(1, "invalid iounmap for addr 0x%llx\n", - (unsigned long long)addr); + (unsigned long long __force)addr); } #endif /* CONFIG_LOGIC_IOMEM_FALLBACK */ @@ -172,14 +173,15 @@ EXPORT_SYMBOL(iounmap); static u##sz real_raw_read ## op(const volatile void __iomem *addr) \ { \ WARN(1, "Invalid read" #op " at address %llx\n", \ - (unsigned long long)addr); \ + (unsigned long long __force)addr); \ return (u ## sz)~0ULL; \ } \ \ -void real_raw_write ## op(u ## sz val, volatile void __iomem *addr) \ +static void real_raw_write ## op(u ## sz val, \ + volatile void __iomem *addr) \ { \ WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \ - (unsigned long long)val, (unsigned long long)addr); \ + (unsigned long long)val, (unsigned long long __force)addr);\ } \ MAKE_FALLBACK(b, 8); @@ -192,14 +194,14 @@ MAKE_FALLBACK(q, 64); static void real_memset_io(volatile void __iomem *addr, int value, size_t size) { WARN(1, "Invalid memset_io at address 0x%llx\n", - (unsigned long long)addr); + (unsigned long long __force)addr); } static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr, size_t size) { WARN(1, "Invalid memcpy_fromio at address 0x%llx\n", - (unsigned long long)addr); + (unsigned long long __force)addr); memset(buffer, 0xff, size); } @@ -208,7 +210,7 @@ static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size) { WARN(1, "Invalid memcpy_toio at address 0x%llx\n", - (unsigned long long)addr); + (unsigned long long __force)addr); } #endif /* CONFIG_LOGIC_IOMEM_FALLBACK */ -- cgit v1.2.3 From 55b70eed81cba1331773d4aaf5cba2bb07475cd8 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Wed, 7 Jul 2021 15:38:08 +0200 Subject: parisc: Increase size of gcc stack frame check parisc uses much bigger frames than other architectures, so increase the stack frame check value to avoid compiler warnings. 
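For context, CONFIG_FRAME_WARN is what the top-level Makefile feeds into the
compiler's -Wframe-larger-than= option, so after this change a 32-bit parisc
build only emits "the frame size of N bytes is larger than ..." warnings once a
function's stack frame exceeds 1536 bytes rather than 1280.
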
Cc: Arnd Bergmann Cc: Abd-Alrhman Masalkhi Cc: Christoph Hellwig Signed-off-by: Helge Deller --- lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5ddd575159fb..153c5a86aefa 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -346,7 +346,7 @@ config FRAME_WARN int "Warn for stack frames larger than" range 0 8192 default 2048 if GCC_PLUGIN_LATENT_ENTROPY - default 1280 if (!64BIT && PARISC) + default 1536 if (!64BIT && PARISC) default 1024 if (!64BIT && !PARISC) default 2048 if 64BIT help -- cgit v1.2.3 From 7d73c3e9c51400d3e0e755488050804e4d44737a Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Mon, 16 Aug 2021 13:25:01 -0700 Subject: Makefile: remove stale cc-option checks cc-option, cc-option-yn, and cc-disable-warning all invoke the compiler during build time, and can slow down the build when these checks become stale for our supported compilers, whose minimally supported versions increases over time. See Documentation/process/changes.rst for the current supported minimal versions (GCC 4.9+, clang 10.0.1+). Compiler version support for these flags may be verified on godbolt.org. The following flags are GCC only and supported since at least GCC 4.9. Remove cc-option and cc-disable-warning tests. * -fno-tree-loop-im * -Wno-maybe-uninitialized * -fno-reorder-blocks * -fno-ipa-cp-clone * -fno-partial-inlining * -femit-struct-debug-baseonly * -fno-inline-functions-called-once * -fconserve-stack The following flags are supported by all supported versions of GCC and Clang. Remove their cc-option, cc-option-yn, and cc-disable-warning tests. * -fno-delete-null-pointer-checks * -fno-var-tracking * -Wno-array-bounds The following configs are made dependent on GCC, since they use GCC specific flags. * READABLE_ASM * DEBUG_SECTION_MISMATCH -mfentry was not supported by s390-linux-gnu-gcc until gcc-9+, add a comment. --param=allow-store-data-races=0 was renamed to -fno-allow-store-data-races in the GCC 10 release; add a comment. -Wmaybe-uninitialized (GCC specific) was being added for CONFIG_GCOV, then again unconditionally; add it only once. Also, base RETPOLINE_CFLAGS and RETPOLINE_VDSO_CFLAGS on CONFIC_CC_IS_* then remove cc-option tests for Clang. Link: https://github.com/ClangBuiltLinux/linux/issues/1436 Acked-by: Miguel Ojeda Reviewed-by: Nathan Chancellor Signed-off-by: Nick Desaulniers Signed-off-by: Masahiro Yamada --- Makefile | 50 +++++++++++++++++++++++++++++++------------------- lib/Kconfig.debug | 2 ++ 2 files changed, 33 insertions(+), 19 deletions(-) (limited to 'lib') diff --git a/Makefile b/Makefile index 891866af0787..ce5a297ecd7c 100644 --- a/Makefile +++ b/Makefile @@ -669,9 +669,10 @@ endif # KBUILD_EXTMOD # Defaults to vmlinux, but the arch makefile usually adds further targets all: vmlinux -CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \ - $(call cc-option,-fno-tree-loop-im) \ - $(call cc-disable-warning,maybe-uninitialized,) +CFLAGS_GCOV := -fprofile-arcs -ftest-coverage +ifdef CONFIG_CC_IS_GCC +CFLAGS_GCOV += -fno-tree-loop-im +endif export CFLAGS_GCOV # The arch Makefiles can override CC_FLAGS_FTRACE. We may also append it later. 
@@ -679,12 +680,14 @@ ifdef CONFIG_FUNCTION_TRACER CC_FLAGS_FTRACE := -pg endif -RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register -RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register -RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk -RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline -RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) -RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG))) +ifdef CONFIG_CC_IS_GCC +RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) +RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register) +endif +ifdef CONFIG_CC_IS_CLANG +RETPOLINE_CFLAGS := -mretpoline-external-thunk +RETPOLINE_VDSO_CFLAGS := -mretpoline +endif export RETPOLINE_CFLAGS export RETPOLINE_VDSO_CFLAGS @@ -737,7 +740,7 @@ include/config/auto.conf: endif # may-sync-config endif # need-config -KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) +KBUILD_CFLAGS += -fno-delete-null-pointer-checks KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) @@ -752,17 +755,19 @@ KBUILD_CFLAGS += -Os endif # Tell gcc to never replace conditional load with a non-conditional one +ifdef CONFIG_CC_IS_GCC +# gcc-10 renamed --param=allow-store-data-races=0 to +# -fno-allow-store-data-races. KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races) +endif ifdef CONFIG_READABLE_ASM # Disable optimizations that make assembler listings hard to read. # reorder blocks reorders the control in the function # ipa clone creates specialized cloned functions # partial inlining inlines only parts of functions -KBUILD_CFLAGS += $(call cc-option,-fno-reorder-blocks,) \ - $(call cc-option,-fno-ipa-cp-clone,) \ - $(call cc-option,-fno-partial-inlining) +KBUILD_CFLAGS += -fno-reorder-blocks -fno-ipa-cp-clone -fno-partial-inlining endif ifneq ($(CONFIG_FRAME_WARN),0) @@ -854,8 +859,10 @@ DEBUG_CFLAGS += -gdwarf-$(dwarf-version-y) endif ifdef CONFIG_DEBUG_INFO_REDUCED -DEBUG_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \ - $(call cc-option,-fno-var-tracking) +DEBUG_CFLAGS += -fno-var-tracking +ifdef CONFIG_CC_IS_GCC +DEBUG_CFLAGS += -femit-struct-debug-baseonly +endif endif ifdef CONFIG_DEBUG_INFO_COMPRESSED @@ -889,6 +896,7 @@ ifdef CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT endif endif ifdef CONFIG_HAVE_FENTRY + # s390-linux-gnu-gcc did not support -mfentry until gcc-9. 
ifeq ($(call cc-option-yn, -mfentry),y) CC_FLAGS_FTRACE += -mfentry CC_FLAGS_USING += -DCC_USING_FENTRY @@ -901,7 +909,7 @@ endif # We trigger additional mismatches with less inlining ifdef CONFIG_DEBUG_SECTION_MISMATCH -KBUILD_CFLAGS += $(call cc-option, -fno-inline-functions-called-once) +KBUILD_CFLAGS += -fno-inline-functions-called-once endif ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION @@ -980,14 +988,16 @@ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) # We'll want to enable this eventually, but it's not going away for 5.7 at least KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) -KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) +KBUILD_CFLAGS += -Wno-array-bounds KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow) # Another good warning that we'll want to enable eventually KBUILD_CFLAGS += $(call cc-disable-warning, restrict) # Enabled with W=2, disabled by default as noisy -KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) +ifdef CONFIG_CC_IS_GCC +KBUILD_CFLAGS += -Wno-maybe-uninitialized +endif # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += -fno-strict-overflow @@ -996,7 +1006,9 @@ KBUILD_CFLAGS += -fno-strict-overflow KBUILD_CFLAGS += -fno-stack-check # conserve stack if available -KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) +ifdef CONFIG_CC_IS_GCC +KBUILD_CFLAGS += -fconserve-stack +endif # Prohibit date/time macros, which would make the build non-deterministic KBUILD_CFLAGS += -Werror=date-time diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5ddd575159fb..7d3d29203a72 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -365,6 +365,7 @@ config STRIP_ASM_SYMS config READABLE_ASM bool "Generate readable assembler code" depends on DEBUG_KERNEL + depends on CC_IS_GCC help Disable some compiler optimizations that tend to generate human unreadable assembler output. This may make the kernel slightly slower, but it helps @@ -383,6 +384,7 @@ config HEADERS_INSTALL config DEBUG_SECTION_MISMATCH bool "Enable full Section mismatch analysis" + depends on CC_IS_GCC help The section mismatch analysis checks if there are illegal references from one section to another section. -- cgit v1.2.3 From 8d0920bde5eb8ec7e567939b85e65a0596c8580d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 22 Apr 2021 12:08:20 +0200 Subject: mm: remove VM_DENYWRITE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All in-tree users of MAP_DENYWRITE are gone. MAP_DENYWRITE cannot be set from user space, so all users are gone; let's remove it. Acked-by: "Eric W. 
Biederman" Acked-by: Christian König Signed-off-by: David Hildenbrand --- fs/proc/task_mmu.c | 1 - include/linux/mm.h | 1 - include/linux/mman.h | 1 - include/trace/events/mmflags.h | 1 - kernel/events/core.c | 2 -- kernel/fork.c | 3 --- lib/test_printf.c | 5 ++--- mm/mmap.c | 27 +++------------------------ 8 files changed, 5 insertions(+), 36 deletions(-) (limited to 'lib') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index eb97468dfe4c..cf25be3e0321 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -619,7 +619,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) [ilog2(VM_MAYSHARE)] = "ms", [ilog2(VM_GROWSDOWN)] = "gd", [ilog2(VM_PFNMAP)] = "pf", - [ilog2(VM_DENYWRITE)] = "dw", [ilog2(VM_LOCKED)] = "lo", [ilog2(VM_IO)] = "io", [ilog2(VM_SEQ_READ)] = "sr", diff --git a/include/linux/mm.h b/include/linux/mm.h index 56b1cd41db61..257995f62e83 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -281,7 +281,6 @@ extern unsigned int kobjsize(const void *objp); #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ #define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */ #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ -#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ #define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */ #define VM_LOCKED 0x00002000 diff --git a/include/linux/mman.h b/include/linux/mman.h index ebb09a964272..bd9aadda047b 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -153,7 +153,6 @@ static inline unsigned long calc_vm_flag_bits(unsigned long flags) { return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | - _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | arch_calc_vm_flag_bits(flags); diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index f160484afc5c..0b53e855c4ac 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -165,7 +165,6 @@ IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison") {VM_UFFD_MISSING, "uffd_missing" }, \ IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \ {VM_PFNMAP, "pfnmap" }, \ - {VM_DENYWRITE, "denywrite" }, \ {VM_UFFD_WP, "uffd_wp" }, \ {VM_LOCKED, "locked" }, \ {VM_IO, "io" }, \ diff --git a/kernel/events/core.c b/kernel/events/core.c index 1cb1f9b8392e..19767bb9933c 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -8307,8 +8307,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) else flags = MAP_PRIVATE; - if (vma->vm_flags & VM_DENYWRITE) - flags |= MAP_DENYWRITE; if (vma->vm_flags & VM_LOCKED) flags |= MAP_LOCKED; if (is_vm_hugetlb_page(vma)) diff --git a/kernel/fork.c b/kernel/fork.c index 7677f897ecb6..feef1057081d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -570,12 +570,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); file = tmp->vm_file; if (file) { - struct inode *inode = file_inode(file); struct address_space *mapping = file->f_mapping; get_file(file); - if (tmp->vm_flags & VM_DENYWRITE) - put_write_access(inode); i_mmap_lock_write(mapping); if (tmp->vm_flags & VM_SHARED) mapping_allow_writable(mapping); diff --git a/lib/test_printf.c b/lib/test_printf.c index 8ac71aee46af..8a48b61c3763 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -675,9 +675,8 @@ flags(void) 
"uptodate|dirty|lru|active|swapbacked", cmp_buffer); - flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC - | VM_DENYWRITE; - test("read|exec|mayread|maywrite|mayexec|denywrite", "%pGv", &flags); + flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + test("read|exec|mayread|maywrite|mayexec", "%pGv", &flags); gfp = GFP_TRANSHUGE; test("GFP_TRANSHUGE", "%pGg", &gfp); diff --git a/mm/mmap.c b/mm/mmap.c index ca54d36d203a..589dc1dc13db 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -148,8 +148,6 @@ void vma_set_page_prot(struct vm_area_struct *vma) static void __remove_shared_vm_struct(struct vm_area_struct *vma, struct file *file, struct address_space *mapping) { - if (vma->vm_flags & VM_DENYWRITE) - allow_write_access(file); if (vma->vm_flags & VM_SHARED) mapping_unmap_writable(mapping); @@ -666,8 +664,6 @@ static void __vma_link_file(struct vm_area_struct *vma) if (file) { struct address_space *mapping = file->f_mapping; - if (vma->vm_flags & VM_DENYWRITE) - put_write_access(file_inode(file)); if (vma->vm_flags & VM_SHARED) mapping_allow_writable(mapping); @@ -1788,22 +1784,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vma->vm_pgoff = pgoff; if (file) { - if (vm_flags & VM_DENYWRITE) { - error = deny_write_access(file); - if (error) - goto free_vma; - } if (vm_flags & VM_SHARED) { error = mapping_map_writable(file->f_mapping); if (error) - goto allow_write_and_free_vma; + goto free_vma; } - /* ->mmap() can change vma->vm_file, but must guarantee that - * vma_link() below can deny write-access if VM_DENYWRITE is set - * and map writably if VM_SHARED is set. This usually means the - * new file must not have been exposed to user-space, yet. - */ vma->vm_file = get_file(file); error = call_mmap(file, vma); if (error) @@ -1860,13 +1846,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vma_link(mm, vma, prev, rb_link, rb_parent); /* Once vma denies write, undo our temporary denial count */ - if (file) { unmap_writable: - if (vm_flags & VM_SHARED) - mapping_unmap_writable(file->f_mapping); - if (vm_flags & VM_DENYWRITE) - allow_write_access(file); - } + if (file && vm_flags & VM_SHARED) + mapping_unmap_writable(file->f_mapping); file = vma->vm_file; out: perf_event_mmap(vma); @@ -1906,9 +1888,6 @@ unmap_and_free_vma: charged = 0; if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); -allow_write_and_free_vma: - if (vm_flags & VM_DENYWRITE) - allow_write_access(file); free_vma: vm_area_free(vma); unacct_error: -- cgit v1.2.3 From 0e84f5dbf8d6c33d685c45300da55bafd5dd786e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 2 Sep 2021 14:56:33 -0700 Subject: scatterlist: replace flush_kernel_dcache_page with flush_dcache_page Pages used in scatterlist can be mapped page cache pages (and often are), so we must use flush_dcache_page here instead of the more limited flush_kernel_dcache_page that is intended for highmem pages only. Also remove the PageSlab check given that page_mapping_file as used by the flush_dcache_page implementations already contains that check. Link: https://lkml.kernel.org/r/20210712060928.4161649-5-hch@lst.de Signed-off-by: Christoph Hellwig Acked-by: Linus Torvalds Cc: Alex Shi Cc: Geoff Levand Cc: Greentime Hu Cc: Guo Ren Cc: Helge Deller Cc: "James E.J. 
Bottomley" Cc: Nick Hu Cc: Paul Cercueil Cc: Rich Felker Cc: Russell King Cc: Thomas Bogendoerfer Cc: Ulf Hansson Cc: Vincent Chen Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/scatterlist.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 27efa6178153..627aa84f8bbd 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -887,9 +887,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter) miter->__offset += miter->consumed; miter->__remaining -= miter->consumed; - if ((miter->__flags & SG_MITER_TO_SG) && - !PageSlab(miter->page)) - flush_kernel_dcache_page(miter->page); + if (miter->__flags & SG_MITER_TO_SG) + flush_dcache_page(miter->page); if (miter->__flags & SG_MITER_ATOMIC) { WARN_ON_ONCE(preemptible()); -- cgit v1.2.3 From f8bcbecfb6b48c026e2205679c063d1f16f5a2c0 Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Thu, 2 Sep 2021 14:57:23 -0700 Subject: lib/test_vmalloc.c: add a new 'nr_pages' parameter In order to simulate different fixed sizes for vmalloc allocation introduce a new parameter that sets number of pages to be allocated for the "fix_size_alloc_test" test. By default 1 page is used unless a different number is specified over the new parameter. Link: https://lkml.kernel.org/r/20210710194151.21370-1-urezki@gmail.com Signed-off-by: Uladzislau Rezki (Sony) Cc: Mel Gorman Cc: Christoph Hellwig Cc: Matthew Wilcox Cc: Nicholas Piggin Cc: Hillf Danton Cc: Michal Hocko Cc: Oleksiy Avramchenko Cc: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_vmalloc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index 01e9543de566..e14993bc84d2 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -35,6 +35,9 @@ __param(int, test_repeat_count, 1, __param(int, test_loop_count, 1000000, "Set test loop counter"); +__param(int, nr_pages, 0, + "Set number of pages for fix_size_alloc_test(default: 1)"); + __param(int, run_test_mask, INT_MAX, "Set tests specified in the mask.\n\n" "\t\tid: 1, name: fix_size_alloc_test\n" @@ -262,7 +265,7 @@ static int fix_size_alloc_test(void) int i; for (i = 0; i < test_loop_count; i++) { - ptr = vmalloc(3 * PAGE_SIZE); + ptr = vmalloc((nr_pages > 0 ? nr_pages:1) * PAGE_SIZE); if (!ptr) return -1; -- cgit v1.2.3 From ab512805710fa0e4ec6b61fee8a52d044a060009 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 2 Sep 2021 14:57:32 -0700 Subject: kasan: test: rework kmalloc_oob_right Patch series "kasan: test: avoid crashing the kernel with HW_TAGS", v2. KASAN tests do out-of-bounds and use-after-free accesses. Running the tests works fine for the GENERIC mode, as it uses qurantine and redzones. But the HW_TAGS mode uses neither, and running the tests might crash the kernel. Rework the tests to avoid corrupting kernel memory. This patch (of 8): Rework kmalloc_oob_right() to do these bad access checks: 1. An unaligned access one byte past the requested kmalloc size (can only be detected by KASAN_GENERIC). 2. An aligned access into the first out-of-bounds granule that falls within the aligned kmalloc object. 3. Out-of-bounds access past the aligned kmalloc object. Test #3 deliberately uses a read access to avoid corrupting memory. Otherwise, this test might lead to crashes with the HW_TAGS mode, as it neither uses quarantine nor redzones. 
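To make the three checks concrete with the generic mode's 8-byte granules:
size = 128 - 8 - 5 = 115, so the allocation comes from kmalloc-128 and bytes
115..119 of the last, partially used granule are poisoned only at byte
granularity. ptr[size] hits that unaligned slack (detectable only by generic
KASAN), ptr[size + 5] = ptr[120] hits the first fully out-of-bounds granule
that still lies within the 128-byte slab object, and
ptr[size + KASAN_GRANULE_SIZE + 5] = ptr[128] is past the object altogether,
which is why that last access is a read rather than a write.
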
Link: https://lkml.kernel.org/r/cover.1628779805.git.andreyknvl@gmail.com Link: https://lkml.kernel.org/r/474aa8b7b538c6737a4c6d0090350af2e1776bef.1628779805.git.andreyknvl@gmail.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Alexander Potapenko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_kasan.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 8f7b0b2f6e11..1bc3cdd2957f 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -122,12 +122,28 @@ static void kasan_test_exit(struct kunit *test) static void kmalloc_oob_right(struct kunit *test) { char *ptr; - size_t size = 123; + size_t size = 128 - KASAN_GRANULE_SIZE - 5; ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x'); + /* + * An unaligned access past the requested kmalloc size. + * Only generic KASAN can precisely detect these. + */ + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) + KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x'); + + /* + * An aligned access into the first out-of-bounds granule that falls + * within the aligned kmalloc object. + */ + KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y'); + + /* Out-of-bounds access past the aligned kmalloc object. */ + KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = + ptr[size + KASAN_GRANULE_SIZE + 5]); + kfree(ptr); } -- cgit v1.2.3 From 8fbad19bdcb4b9be8131536e5bb9616ab2e4eeb3 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 2 Sep 2021 14:57:35 -0700 Subject: kasan: test: avoid writing invalid memory Multiple KASAN tests do writes past the allocated objects or writes to freed memory. Turn these writes into reads to avoid corrupting memory. Otherwise, these tests might lead to crashes with the HW_TAGS mode, as it neither uses quarantine nor redzones. 
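The reads are done through a volatile-qualified pointer so the compiler cannot
discard them as dead loads while the instrumentation still sees a real 1-byte
access, e.g.:

	/* read instead of write: must not corrupt the freed object */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);

A bare ptr[0]; whose value is unused could otherwise be optimized away before
it ever reaches memory.
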
Link: https://lkml.kernel.org/r/c3cd2a383e757e27dd9131635fc7d09a48a49cf9.1628779805.git.andreyknvl@gmail.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_kasan.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'lib') diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 1bc3cdd2957f..c82a82eb5393 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -167,7 +167,7 @@ static void kmalloc_node_oob_right(struct kunit *test) ptr = kmalloc_node(size, GFP_KERNEL, 0); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0); + KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]); kfree(ptr); } @@ -203,7 +203,7 @@ static void kmalloc_pagealloc_uaf(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); kfree(ptr); - KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0); + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); } static void kmalloc_pagealloc_invalid_free(struct kunit *test) @@ -237,7 +237,7 @@ static void pagealloc_oob_right(struct kunit *test) ptr = page_address(pages); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0); + KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]); free_pages((unsigned long)ptr, order); } @@ -252,7 +252,7 @@ static void pagealloc_uaf(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); free_pages((unsigned long)ptr, order); - KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0); + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); } static void kmalloc_large_oob_right(struct kunit *test) @@ -514,7 +514,7 @@ static void kmalloc_uaf(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); kfree(ptr); - KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x'); + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]); } static void kmalloc_uaf_memset(struct kunit *test) @@ -553,7 +553,7 @@ again: goto again; } - KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x'); + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]); KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2); kfree(ptr2); @@ -700,7 +700,7 @@ static void ksize_unpoisons_memory(struct kunit *test) ptr[size] = 'x'; /* This one must. */ - KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y'); + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]); kfree(ptr); } -- cgit v1.2.3 From 555999a009aacd90ea51a6690e8eb2a5d0427edc Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 2 Sep 2021 14:57:38 -0700 Subject: kasan: test: avoid corrupting memory via memset kmalloc_oob_memset_*() tests do writes past the allocated objects. As the result, they corrupt memory, which might lead to crashes with the HW_TAGS mode, as it neither uses quarantine nor redzones. Adjust the tests to only write memory within the aligned kmalloc objects. Also add a comment mentioning that memset tests are designed to touch both valid and invalid memory. 
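As a worked example with the generic mode's 8-byte granule: size = 128 - 8 =
120, so memset(ptr + size - 1, 0, 2) writes byte 119 (valid) and byte 120 (the
first out-of-bounds byte). The write therefore crosses the boundary KASAN must
flag, yet stays within the object's 128-byte slab storage, so no neighbouring
object is corrupted by running the test.
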
Link: https://lkml.kernel.org/r/64fd457668a16e7b58d094f14a165f9d5170c5a9.1628779805.git.andreyknvl@gmail.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_kasan.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) (limited to 'lib') diff --git a/lib/test_kasan.c b/lib/test_kasan.c index c82a82eb5393..db73bc9e3fa2 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -428,64 +428,70 @@ static void kmalloc_uaf_16(struct kunit *test) kfree(ptr1); } +/* + * Note: in the memset tests below, the written range touches both valid and + * invalid memory. This makes sure that the instrumentation does not only check + * the starting address but the whole range. + */ + static void kmalloc_oob_memset_2(struct kunit *test) { char *ptr; - size_t size = 8; + size_t size = 128 - KASAN_GRANULE_SIZE; ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2)); + KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2)); kfree(ptr); } static void kmalloc_oob_memset_4(struct kunit *test) { char *ptr; - size_t size = 8; + size_t size = 128 - KASAN_GRANULE_SIZE; ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4)); + KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4)); kfree(ptr); } - static void kmalloc_oob_memset_8(struct kunit *test) { char *ptr; - size_t size = 8; + size_t size = 128 - KASAN_GRANULE_SIZE; ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8)); + KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8)); kfree(ptr); } static void kmalloc_oob_memset_16(struct kunit *test) { char *ptr; - size_t size = 16; + size_t size = 128 - KASAN_GRANULE_SIZE; ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16)); + KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16)); kfree(ptr); } static void kmalloc_oob_in_memset(struct kunit *test) { char *ptr; - size_t size = 666; + size_t size = 128 - KASAN_GRANULE_SIZE; ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); - KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF)); + KUNIT_EXPECT_KASAN_FAIL(test, + memset(ptr, 0, size + KASAN_GRANULE_SIZE)); kfree(ptr); } -- cgit v1.2.3 From 1b0668be62cfa394903bb368641c80533bf42d5a Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 2 Sep 2021 14:57:41 -0700 Subject: kasan: test: disable kmalloc_memmove_invalid_size for HW_TAGS The HW_TAGS mode doesn't check memmove for negative size. As a result, the kmalloc_memmove_invalid_size test corrupts memory, which can result in a crash. Disable this test with HW_TAGS KASAN. 
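The failing case is the test's volatile size_t invalid_size = -2: since size_t
is unsigned, the value wraps to an enormous length, and
memmove(ptr, ptr + 4, invalid_size) asks for a copy of nearly the whole address
space. Generic KASAN's memmove hook checks the requested range up front and
reports before anything is copied; the hardware tag-based mode performs no such
software size check, so the copy itself starts overwriting memory and can bring
the kernel down.
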
Link: https://lkml.kernel.org/r/088733a06ac21eba29aa85b6f769d2abd74f9638.1628779805.git.andreyknvl@gmail.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_kasan.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/test_kasan.c b/lib/test_kasan.c index db73bc9e3fa2..1f533a7346d9 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -501,11 +501,17 @@ static void kmalloc_memmove_invalid_size(struct kunit *test) size_t size = 64; volatile size_t invalid_size = -2; + /* + * Hardware tag-based mode doesn't check memmove for negative size. + * As a result, this test introduces a side-effect memory corruption, + * which can result in a crash. + */ + KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS); + ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); memset((char *)ptr, 0, 64); - KUNIT_EXPECT_KASAN_FAIL(test, memmove((char *)ptr, (char *)ptr + 4, invalid_size)); kfree(ptr); -- cgit v1.2.3 From 25b12a58e848459ae2dbf2e7d318ef168bd1c5e2 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 2 Sep 2021 14:57:44 -0700 Subject: kasan: test: only do kmalloc_uaf_memset for generic mode kmalloc_uaf_memset() writes to freed memory, which is only safe with the GENERIC mode (as it uses quarantine). For other modes, this test corrupts kernel memory, which might result in a crash. Only enable kmalloc_uaf_memset() for the GENERIC mode. Link: https://lkml.kernel.org/r/2e1c87b607b1292556cde3cab2764f108542b60c.1628779805.git.andreyknvl@gmail.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_kasan.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'lib') diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 1f533a7346d9..1dcba6dbfc97 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -534,6 +534,12 @@ static void kmalloc_uaf_memset(struct kunit *test) char *ptr; size_t size = 33; + /* + * Only generic KASAN uses quarantine, which is required to avoid a + * kernel memory corruption this test causes. + */ + KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); + ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); -- cgit v1.2.3 From b38fcca339dbcf680c9e43054502608fabc81508 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 2 Sep 2021 14:57:47 -0700 Subject: kasan: test: clean up ksize_uaf Some KASAN tests use global variables to store function returns values so that the compiler doesn't optimize away these functions. ksize_uaf() doesn't call any functions, so it doesn't need to use kasan_int_result. Use volatile accesses instead, to be consistent with other similar tests. 
Link: https://lkml.kernel.org/r/a1fc34faca4650f4a6e4dfb3f8d8d82c82eb953a.1628779805.git.andreyknvl@gmail.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_kasan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 1dcba6dbfc97..30f2cde96e81 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -737,8 +737,8 @@ static void ksize_uaf(struct kunit *test) kfree(ptr); KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr)); - KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr); - KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size)); + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); + KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]); } static void kasan_stack_oob(struct kunit *test) -- cgit v1.2.3 From 756e5a47a5ddf0caa3708f922385a92af9d330b5 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 2 Sep 2021 14:57:50 -0700 Subject: kasan: test: avoid corrupting memory in copy_user_test copy_user_test() does writes past the allocated object. As the result, it corrupts kernel memory, which might lead to crashes with the HW_TAGS mode, as it neither uses quarantine nor redzones. (Technically, this test can't yet be enabled with the HW_TAGS mode, but this will be implemented in the future.) Adjust the test to only write memory within the aligned kmalloc object. Link: https://lkml.kernel.org/r/19bf3a5112ee65b7db88dc731643b657b816c5e8.1628779805.git.andreyknvl@gmail.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_kasan_module.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'lib') diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c index f1017f345d6c..fa73b9df0be4 100644 --- a/lib/test_kasan_module.c +++ b/lib/test_kasan_module.c @@ -15,13 +15,11 @@ #include "../mm/kasan/kasan.h" -#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 
0 : KASAN_GRANULE_SIZE) - static noinline void __init copy_user_test(void) { char *kmem; char __user *usermem; - size_t size = 10; + size_t size = 128 - KASAN_GRANULE_SIZE; int __maybe_unused unused; kmem = kmalloc(size, GFP_KERNEL); @@ -38,25 +36,25 @@ static noinline void __init copy_user_test(void) } pr_info("out-of-bounds in copy_from_user()\n"); - unused = copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF); + unused = copy_from_user(kmem, usermem, size + 1); pr_info("out-of-bounds in copy_to_user()\n"); - unused = copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF); + unused = copy_to_user(usermem, kmem, size + 1); pr_info("out-of-bounds in __copy_from_user()\n"); - unused = __copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF); + unused = __copy_from_user(kmem, usermem, size + 1); pr_info("out-of-bounds in __copy_to_user()\n"); - unused = __copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF); + unused = __copy_to_user(usermem, kmem, size + 1); pr_info("out-of-bounds in __copy_from_user_inatomic()\n"); - unused = __copy_from_user_inatomic(kmem, usermem, size + 1 + OOB_TAG_OFF); + unused = __copy_from_user_inatomic(kmem, usermem, size + 1); pr_info("out-of-bounds in __copy_to_user_inatomic()\n"); - unused = __copy_to_user_inatomic(usermem, kmem, size + 1 + OOB_TAG_OFF); + unused = __copy_to_user_inatomic(usermem, kmem, size + 1); pr_info("out-of-bounds in strncpy_from_user()\n"); - unused = strncpy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF); + unused = strncpy_from_user(kmem, usermem, size + 1); vm_munmap((unsigned long)usermem, PAGE_SIZE); kfree(kmem); -- cgit v1.2.3 From f16de0bcdb55bf18e2533ca625f3e4b4952f254c Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 2 Sep 2021 14:57:53 -0700 Subject: kasan: test: avoid corrupting memory in kasan_rcu_uaf kasan_rcu_uaf() writes to freed memory via kasan_rcu_reclaim(), which is only safe with the GENERIC mode (as it uses quarantine). For other modes, this test corrupts kernel memory, which might result in a crash. Turn the write into a read. Link: https://lkml.kernel.org/r/b6f2c3bf712d2457c783fa59498225b66a634f62.1628779805.git.andreyknvl@gmail.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_kasan_module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c index fa73b9df0be4..7ebf433edef3 100644 --- a/lib/test_kasan_module.c +++ b/lib/test_kasan_module.c @@ -71,7 +71,7 @@ static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp) struct kasan_rcu_info, rcu); kfree(fp); - fp->i = 1; + ((volatile struct kasan_rcu_info *)fp)->i; } static noinline void __init kasan_rcu_uaf(void) -- cgit v1.2.3 From ba7b1f861086d760ef1032915fe7c809a191434e Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 6 Sep 2021 11:04:03 -0700 Subject: lib/test_scanf: split up number parsing test routines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It turns out that gcc has real trouble merging all the temporary on-stack buffer allocation. So despite the fact that their lifetimes do not overlap, gcc will allocate stack for all of them when they have different types. Which they do in the number scanning test routines. This is unfortunate in general, but with lots of test-cases in one function, it becomes a real problem. 
gcc will allocate a huge stack frame for no actual good reason. We have tried to counteract this tendency of gcc not merging stack slots (see "-fconserve-stack"), but that has limited effect (and should be on by default these days, iirc). So with all the debug options enabled on an i386 allmodconfig build, we end up with overly big stack frames, and the resulting stack frame size warnings (now errors): lib/test_scanf.c: In function ‘numbers_list_field_width_val_width’: lib/test_scanf.c:530:1: error: the frame size of 2088 bytes is larger than 2048 bytes [-Werror=frame-larger-than=] 530 | } | ^ lib/test_scanf.c: In function ‘numbers_list_field_width_typemax’: lib/test_scanf.c:488:1: error: the frame size of 2568 bytes is larger than 2048 bytes [-Werror=frame-larger-than=] 488 | } | ^ lib/test_scanf.c: In function ‘numbers_list’: lib/test_scanf.c:437:1: error: the frame size of 2088 bytes is larger than 2048 bytes [-Werror=frame-larger-than=] 437 | } | ^ In this particular case, the reasonably straightforward solution is to just split out the test routines into multiple more targeted versions. That way we don't have one huge stack, but several smaller ones, and they aren't active all at the same time. Signed-off-by: Linus Torvalds --- lib/test_scanf.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 71 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/test_scanf.c b/lib/test_scanf.c index abae88848972..b620cf7de503 100644 --- a/lib/test_scanf.c +++ b/lib/test_scanf.c @@ -398,7 +398,7 @@ do { \ test_array_8(fn, expect, test_buffer, fmt_buffer, result); \ } while (0) -static void __init numbers_list(const char *delim) +static void __init numbers_list_ll(const char *delim) { numbers_list_8(unsigned long long, "%llu", delim, "llu", check_ull); numbers_list_8(long long, "%lld", delim, "lld", check_ll); @@ -406,28 +406,40 @@ static void __init numbers_list(const char *delim) numbers_list_8(unsigned long long, "%llx", delim, "llx", check_ull); numbers_list_8(unsigned long long, "0x%llx", delim, "llx", check_ull); numbers_list_8(long long, "0x%llx", delim, "lli", check_ll); +} +static void __init numbers_list_l(const char *delim) +{ numbers_list_8(unsigned long, "%lu", delim, "lu", check_ulong); numbers_list_8(long, "%ld", delim, "ld", check_long); numbers_list_8(long, "%ld", delim, "li", check_long); numbers_list_8(unsigned long, "%lx", delim, "lx", check_ulong); numbers_list_8(unsigned long, "0x%lx", delim, "lx", check_ulong); numbers_list_8(long, "0x%lx", delim, "li", check_long); +} +static void __init numbers_list_d(const char *delim) +{ numbers_list_8(unsigned int, "%u", delim, "u", check_uint); numbers_list_8(int, "%d", delim, "d", check_int); numbers_list_8(int, "%d", delim, "i", check_int); numbers_list_8(unsigned int, "%x", delim, "x", check_uint); numbers_list_8(unsigned int, "0x%x", delim, "x", check_uint); numbers_list_8(int, "0x%x", delim, "i", check_int); +} +static void __init numbers_list_h(const char *delim) +{ numbers_list_8(unsigned short, "%hu", delim, "hu", check_ushort); numbers_list_8(short, "%hd", delim, "hd", check_short); numbers_list_8(short, "%hd", delim, "hi", check_short); numbers_list_8(unsigned short, "%hx", delim, "hx", check_ushort); numbers_list_8(unsigned short, "0x%hx", delim, "hx", check_ushort); numbers_list_8(short, "0x%hx", delim, "hi", check_short); +} +static void __init numbers_list_hh(const char *delim) +{ numbers_list_8(unsigned char, "%hhu", delim, "hhu", check_uchar); numbers_list_8(signed char, "%hhd", 
delim, "hhd", check_char); numbers_list_8(signed char, "%hhd", delim, "hhi", check_char); @@ -436,11 +448,16 @@ static void __init numbers_list(const char *delim) numbers_list_8(signed char, "0x%hhx", delim, "hhi", check_char); } -/* - * List of numbers separated by delim. Each field width specifier is the - * maximum possible digits for the given type and base. - */ -static void __init numbers_list_field_width_typemax(const char *delim) +static void __init numbers_list(const char *delim) +{ + numbers_list_ll(delim); + numbers_list_l(delim); + numbers_list_d(delim); + numbers_list_h(delim); + numbers_list_hh(delim); +} + +static void __init numbers_list_field_width_ll(const char *delim) { numbers_list_fix_width(unsigned long long, "%llu", delim, 20, "llu", check_ull); numbers_list_fix_width(long long, "%lld", delim, 20, "lld", check_ll); @@ -448,7 +465,10 @@ static void __init numbers_list_field_width_typemax(const char *delim) numbers_list_fix_width(unsigned long long, "%llx", delim, 16, "llx", check_ull); numbers_list_fix_width(unsigned long long, "0x%llx", delim, 18, "llx", check_ull); numbers_list_fix_width(long long, "0x%llx", delim, 18, "lli", check_ll); +} +static void __init numbers_list_field_width_l(const char *delim) +{ #if BITS_PER_LONG == 64 numbers_list_fix_width(unsigned long, "%lu", delim, 20, "lu", check_ulong); numbers_list_fix_width(long, "%ld", delim, 20, "ld", check_long); @@ -464,21 +484,30 @@ static void __init numbers_list_field_width_typemax(const char *delim) numbers_list_fix_width(unsigned long, "0x%lx", delim, 10, "lx", check_ulong); numbers_list_fix_width(long, "0x%lx", delim, 10, "li", check_long); #endif +} +static void __init numbers_list_field_width_d(const char *delim) +{ numbers_list_fix_width(unsigned int, "%u", delim, 10, "u", check_uint); numbers_list_fix_width(int, "%d", delim, 11, "d", check_int); numbers_list_fix_width(int, "%d", delim, 11, "i", check_int); numbers_list_fix_width(unsigned int, "%x", delim, 8, "x", check_uint); numbers_list_fix_width(unsigned int, "0x%x", delim, 10, "x", check_uint); numbers_list_fix_width(int, "0x%x", delim, 10, "i", check_int); +} +static void __init numbers_list_field_width_h(const char *delim) +{ numbers_list_fix_width(unsigned short, "%hu", delim, 5, "hu", check_ushort); numbers_list_fix_width(short, "%hd", delim, 6, "hd", check_short); numbers_list_fix_width(short, "%hd", delim, 6, "hi", check_short); numbers_list_fix_width(unsigned short, "%hx", delim, 4, "hx", check_ushort); numbers_list_fix_width(unsigned short, "0x%hx", delim, 6, "hx", check_ushort); numbers_list_fix_width(short, "0x%hx", delim, 6, "hi", check_short); +} +static void __init numbers_list_field_width_hh(const char *delim) +{ numbers_list_fix_width(unsigned char, "%hhu", delim, 3, "hhu", check_uchar); numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhd", check_char); numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhi", check_char); @@ -489,9 +518,18 @@ static void __init numbers_list_field_width_typemax(const char *delim) /* * List of numbers separated by delim. Each field width specifier is the - * exact length of the corresponding value digits in the string being scanned. + * maximum possible digits for the given type and base. 
*/ -static void __init numbers_list_field_width_val_width(const char *delim) +static void __init numbers_list_field_width_typemax(const char *delim) +{ + numbers_list_field_width_ll(delim); + numbers_list_field_width_l(delim); + numbers_list_field_width_d(delim); + numbers_list_field_width_h(delim); + numbers_list_field_width_hh(delim); +} + +static void __init numbers_list_field_width_val_ll(const char *delim) { numbers_list_val_width(unsigned long long, "%llu", delim, "llu", check_ull); numbers_list_val_width(long long, "%lld", delim, "lld", check_ll); @@ -499,28 +537,40 @@ static void __init numbers_list_field_width_val_width(const char *delim) numbers_list_val_width(unsigned long long, "%llx", delim, "llx", check_ull); numbers_list_val_width(unsigned long long, "0x%llx", delim, "llx", check_ull); numbers_list_val_width(long long, "0x%llx", delim, "lli", check_ll); +} +static void __init numbers_list_field_width_val_l(const char *delim) +{ numbers_list_val_width(unsigned long, "%lu", delim, "lu", check_ulong); numbers_list_val_width(long, "%ld", delim, "ld", check_long); numbers_list_val_width(long, "%ld", delim, "li", check_long); numbers_list_val_width(unsigned long, "%lx", delim, "lx", check_ulong); numbers_list_val_width(unsigned long, "0x%lx", delim, "lx", check_ulong); numbers_list_val_width(long, "0x%lx", delim, "li", check_long); +} +static void __init numbers_list_field_width_val_d(const char *delim) +{ numbers_list_val_width(unsigned int, "%u", delim, "u", check_uint); numbers_list_val_width(int, "%d", delim, "d", check_int); numbers_list_val_width(int, "%d", delim, "i", check_int); numbers_list_val_width(unsigned int, "%x", delim, "x", check_uint); numbers_list_val_width(unsigned int, "0x%x", delim, "x", check_uint); numbers_list_val_width(int, "0x%x", delim, "i", check_int); +} +static void __init numbers_list_field_width_val_h(const char *delim) +{ numbers_list_val_width(unsigned short, "%hu", delim, "hu", check_ushort); numbers_list_val_width(short, "%hd", delim, "hd", check_short); numbers_list_val_width(short, "%hd", delim, "hi", check_short); numbers_list_val_width(unsigned short, "%hx", delim, "hx", check_ushort); numbers_list_val_width(unsigned short, "0x%hx", delim, "hx", check_ushort); numbers_list_val_width(short, "0x%hx", delim, "hi", check_short); +} +static void __init numbers_list_field_width_val_hh(const char *delim) +{ numbers_list_val_width(unsigned char, "%hhu", delim, "hhu", check_uchar); numbers_list_val_width(signed char, "%hhd", delim, "hhd", check_char); numbers_list_val_width(signed char, "%hhd", delim, "hhi", check_char); @@ -529,6 +579,19 @@ static void __init numbers_list_field_width_val_width(const char *delim) numbers_list_val_width(signed char, "0x%hhx", delim, "hhi", check_char); } +/* + * List of numbers separated by delim. Each field width specifier is the + * exact length of the corresponding value digits in the string being scanned. 
+ */ +static void __init numbers_list_field_width_val_width(const char *delim) +{ + numbers_list_field_width_val_ll(delim); + numbers_list_field_width_val_l(delim); + numbers_list_field_width_val_d(delim); + numbers_list_field_width_val_h(delim); + numbers_list_field_width_val_hh(delim); +} + /* * Slice a continuous string of digits without field delimiters, containing * numbers of varying length, using the field width to extract each group -- cgit v1.2.3 From 41c961b9013ee9b6d0491f6926df546e37964b1f Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 7 Sep 2021 19:56:15 -0700 Subject: mm: introduce PAGEFLAGS_MASK to replace ((1UL << NR_PAGEFLAGS) - 1) Instead of hard-coding ((1UL << NR_PAGEFLAGS) - 1) everywhere, introducing PAGEFLAGS_MASK to make the code clear to get the page flags. Link: https://lkml.kernel.org/r/20210819150712.59948-1-songmuchun@bytedance.com Signed-off-by: Muchun Song Reviewed-by: Roman Gushchin Acked-by: Johannes Weiner Reviewed-by: Shakeel Butt Cc: Michal Hocko Cc: Vladimir Davydov Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 4 +++- include/trace/events/page_ref.h | 4 ++-- lib/test_printf.c | 2 +- lib/vsprintf.c | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 5922031ffab6..6b8d66965145 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -178,6 +178,8 @@ enum pageflags { PG_reported = PG_uptodate, }; +#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1) + #ifndef __GENERATING_BOUNDS_H static inline unsigned long _compound_head(const struct page *page) @@ -859,7 +861,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) * alloc-free cycle to prevent from reusing the page. 
*/ #define PAGE_FLAGS_CHECK_AT_PREP \ - (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) + (PAGEFLAGS_MASK & ~__PG_HWPOISON) #define PAGE_FLAGS_PRIVATE \ (1UL << PG_private | 1UL << PG_private_2) diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h index 5d2ea93956ce..8a99c1cd417b 100644 --- a/include/trace/events/page_ref.h +++ b/include/trace/events/page_ref.h @@ -38,7 +38,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_template, TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d", __entry->pfn, - show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)), + show_page_flags(__entry->flags & PAGEFLAGS_MASK), __entry->count, __entry->mapcount, __entry->mapping, __entry->mt, __entry->val) @@ -88,7 +88,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_and_test_template, TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d ret=%d", __entry->pfn, - show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)), + show_page_flags(__entry->flags & PAGEFLAGS_MASK), __entry->count, __entry->mapcount, __entry->mapping, __entry->mt, __entry->val, __entry->ret) diff --git a/lib/test_printf.c b/lib/test_printf.c index 8ac71aee46af..ec69953cf80c 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -614,7 +614,7 @@ page_flags_test(int section, int node, int zone, int last_cpupid, bool append = false; int i; - flags &= BIT(NR_PAGEFLAGS) - 1; + flags &= PAGEFLAGS_MASK; if (flags) { page_flags |= flags; snprintf(cmp_buf + size, BUF_SIZE - size, "%s", name); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 26c83943748a..cc7bdd3ac2ee 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -2019,7 +2019,7 @@ static const struct page_flags_fields pff[] = { static char *format_page_flags(char *buf, char *end, unsigned long flags) { - unsigned long main_flags = flags & (BIT(NR_PAGEFLAGS) - 1); + unsigned long main_flags = flags & PAGEFLAGS_MASK; bool append = false; int i; -- cgit v1.2.3 From bcda5fd34417c89f653cc0912cc0608b36ea032c Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 7 Sep 2021 19:58:33 -0700 Subject: math: make RATIONAL tristate Patch series "math: RATIONAL and RATIONAL_KUNIT_TEST improvements". This series makes the RATIONAL symbol tristate, so it is not forced builtin if all users are modular, and makes the RATIONAL_KUNIT_TEST depend on RATIONAL, to avoid enabling RATIONAL if there are no real users. This patch (of 2): All but one symbols that select RATIONAL are tristate, but RATIONAL itself is bool. Change it to tristate, so the rational fractions support code can be modular if no builtin code relies on it. Link: https://lkml.kernel.org/r/20210706100945.3803694-1-geert@linux-m68k.org Link: https://lkml.kernel.org/r/20210706100945.3803694-2-geert@linux-m68k.org Signed-off-by: Geert Uytterhoeven Reviewed-by: Andy Shevchenko Cc: Trent Piepho Cc: Colin Ian King Cc: Brendan Higgins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/math/Kconfig | 2 +- lib/math/rational.c | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/math/Kconfig b/lib/math/Kconfig index f19bc9734fa7..0634b428d0cb 100644 --- a/lib/math/Kconfig +++ b/lib/math/Kconfig @@ -14,4 +14,4 @@ config PRIME_NUMBERS If unsure, say N. 
config RATIONAL - bool + tristate diff --git a/lib/math/rational.c b/lib/math/rational.c index c0ab51d8fbb9..ec59d426ea63 100644 --- a/lib/math/rational.c +++ b/lib/math/rational.c @@ -13,6 +13,7 @@ #include #include #include +#include /* * calculate best rational approximation for a given fraction @@ -106,3 +107,5 @@ void rational_best_approximation( } EXPORT_SYMBOL(rational_best_approximation); + +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 8ba739ede49dec361ddcb70afe24986b4b8cfe17 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 7 Sep 2021 19:58:36 -0700 Subject: math: RATIONAL_KUNIT_TEST should depend on RATIONAL instead of selecting it RATIONAL_KUNIT_TEST selects RATIONAL, thus enabling an optional feature the user may not want to have enabled. Fix this by making the test depend on RATIONAL instead. Link: https://lkml.kernel.org/r/20210706100945.3803694-3-geert@linux-m68k.org Fixes: b6c75c4afceb8bc0 ("lib/math/rational: add Kunit test cases") Signed-off-by: Geert Uytterhoeven Cc: Andy Shevchenko Cc: Brendan Higgins Cc: Colin Ian King Cc: Trent Piepho Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5ddd575159fb..021bc9cd43da 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2460,8 +2460,7 @@ config SLUB_KUNIT_TEST config RATIONAL_KUNIT_TEST tristate "KUnit test for rational.c" if !KUNIT_ALL_TESTS - depends on KUNIT - select RATIONAL + depends on KUNIT && RATIONAL default KUNIT_ALL_TESTS help This builds the rational math unit test. -- cgit v1.2.3 From 36f33b562936295a0fb365ea0b1b2afcd8974242 Mon Sep 17 00:00:00 2001 From: Daniel Latypov Date: Tue, 7 Sep 2021 19:58:48 -0700 Subject: lib/test: convert test_sort.c to use KUnit This follows up commit ebd09577be6c ("lib/test: convert lib/test_list_sort.c to use KUnit"). Converting this test to KUnit makes the test a bit shorter, standardizes how it reports pass/fail, and adds an easier way to run the test [1]. Like ebd09577be6c, this leaves the file and Kconfig option name the same, but slightly changes their dependencies (needs CONFIG_KUNIT). [1] Can be run via $ ./tools/testing/kunit/kunit.py run --kunitconfig /dev/stdin < Cc: Pravin Shedge Cc: Brendan Higgins Cc: David Gow Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 5 +++-- lib/test_sort.c | 40 +++++++++++++++++++--------------------- 2 files changed, 22 insertions(+), 23 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 021bc9cd43da..84e050048343 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2078,8 +2078,9 @@ config TEST_MIN_HEAP If unsure, say N. config TEST_SORT - tristate "Array-based sort test" - depends on DEBUG_KERNEL || m + tristate "Array-based sort test" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS help This option enables the self-test function of 'sort()' at boot, or at module load time. 
diff --git a/lib/test_sort.c b/lib/test_sort.c index 52edbe10f2e5..be02e3a098cf 100644 --- a/lib/test_sort.c +++ b/lib/test_sort.c @@ -1,4 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only + +#include + #include #include #include @@ -7,18 +10,17 @@ #define TEST_LEN 1000 -static int __init cmpint(const void *a, const void *b) +static int cmpint(const void *a, const void *b) { return *(int *)a - *(int *)b; } -static int __init test_sort_init(void) +static void test_sort(struct kunit *test) { - int *a, i, r = 1, err = -ENOMEM; + int *a, i, r = 1; - a = kmalloc_array(TEST_LEN, sizeof(*a), GFP_KERNEL); - if (!a) - return err; + a = kunit_kmalloc_array(test, TEST_LEN, sizeof(*a), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a); for (i = 0; i < TEST_LEN; i++) { r = (r * 725861) % 6599; @@ -27,24 +29,20 @@ static int __init test_sort_init(void) sort(a, TEST_LEN, sizeof(*a), cmpint, NULL); - err = -EINVAL; for (i = 0; i < TEST_LEN-1; i++) - if (a[i] > a[i+1]) { - pr_err("test has failed\n"); - goto exit; - } - err = 0; - pr_info("test passed\n"); -exit: - kfree(a); - return err; + KUNIT_ASSERT_LE(test, a[i], a[i + 1]); } -static void __exit test_sort_exit(void) -{ -} +static struct kunit_case sort_test_cases[] = { + KUNIT_CASE(test_sort), + {} +}; + +static struct kunit_suite sort_test_suite = { + .name = "lib_sort", + .test_cases = sort_test_cases, +}; -module_init(test_sort_init); -module_exit(test_sort_exit); +kunit_test_suites(&sort_test_suite); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 83a29beb23bcc770a3838fc33970efbaaad26bf7 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 7 Sep 2021 19:58:51 -0700 Subject: lib/dump_stack: correct kernel-doc notation Fix kernel-doc warnings in dump_stack.c: lib/dump_stack.c:97: warning: Function parameter or member 'log_lvl' not described in 'dump_stack_lvl' lib/dump_stack.c:97: warning: expecting prototype for dump_stack(). Prototype was for dump_stack_lvl() instead Link: https://lkml.kernel.org/r/20210809051643.17567-1-rdunlap@infradead.org Signed-off-by: Randy Dunlap Cc: Andrew Morton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/dump_stack.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/dump_stack.c b/lib/dump_stack.c index cd3387bb34e5..6b7f1bf6715d 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -89,7 +89,8 @@ static void __dump_stack(const char *log_lvl) } /** - * dump_stack - dump the current task information and its stack trace + * dump_stack_lvl - dump the current task information and its stack trace + * @log_lvl: log level * * Architectures can override this implementation by implementing its own. 
*/ -- cgit v1.2.3 From 44e5599775541eb5e25d6dfb01d9abbd5ad79823 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 7 Sep 2021 19:58:54 -0700 Subject: lib/iov_iter.c: fix kernel-doc warnings Fix all kernel-doc warnings in lib/iov_iter.c: lib/iov_iter.c:695: warning: Function parameter or member 'i' not described in '_copy_mc_to_iter' lib/iov_iter.c:695: warning: Excess function parameter 'iter' description in '_copy_mc_to_iter' lib/iov_iter.c:695: warning: No description found for return value of '_copy_mc_to_iter' lib/iov_iter.c:758: warning: Function parameter or member 'i' not described in '_copy_from_iter_flushcache' lib/iov_iter.c:758: warning: Excess function parameter 'iter' description in '_copy_from_iter_flushcache' lib/iov_iter.c:758: warning: No description found for return value of '_copy_from_iter_flushcache' Link: https://lkml.kernel.org/r/20210809051053.6531-1-rdunlap@infradead.org Signed-off-by: Randy Dunlap Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/iov_iter.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/iov_iter.c b/lib/iov_iter.c index e23123ae3a13..f2d50d69a6c3 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -672,7 +672,7 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, * _copy_mc_to_iter - copy to iter with source memory error exception handling * @addr: source kernel address * @bytes: total transfer length - * @iter: destination iterator + * @i: destination iterator * * The pmem driver deploys this for the dax operation * (dax_copy_to_iter()) for dax reads (bypass page-cache and the @@ -690,6 +690,8 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. * Compare to copy_to_iter() where only ITER_IOVEC attempts might return * a short copy. + * + * Return: number of bytes copied (may be %0) */ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { @@ -744,7 +746,7 @@ EXPORT_SYMBOL(_copy_from_iter_nocache); * _copy_from_iter_flushcache - write destination through cpu cache * @addr: destination kernel address * @bytes: total transfer length - * @iter: source iterator + * @i: source iterator * * The pmem driver arranges for filesystem-dax to use this facility via * dax_copy_from_iter() for ensuring that writes to persistent memory @@ -753,6 +755,8 @@ EXPORT_SYMBOL(_copy_from_iter_nocache); * all iterator types. The _copy_from_iter_nocache() only attempts to * bypass the cache for the ITER_IOVEC case, and on some archs may use * instructions that strand dirty-data in the cache. + * + * Return: number of bytes copied (may be %0) */ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) { -- cgit v1.2.3 From 6fe26259b4884b657cbc233fb9cdade9d704976e Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Tue, 7 Sep 2021 20:00:47 -0700 Subject: Kconfig.debug: drop selecting non-existing HARDLOCKUP_DETECTOR_ARCH Commit 05a4a9527931 ("kernel/watchdog: split up config options") adds a new config HARDLOCKUP_DETECTOR, which selects the non-existing config HARDLOCKUP_DETECTOR_ARCH. Hence, ./scripts/checkkconfigsymbols.py warns: HARDLOCKUP_DETECTOR_ARCH Referencing files: lib/Kconfig.debug Simply drop selecting the non-existing HARDLOCKUP_DETECTOR_ARCH. 
Link: https://lkml.kernel.org/r/20210806115618.22088-1-lukas.bulwahn@gmail.com Fixes: 05a4a9527931 ("kernel/watchdog: split up config options") Signed-off-by: Lukas Bulwahn Cc: Nicholas Piggin Cc: Masahiro Yamada Cc: Babu Moger Cc: Don Zickus Cc: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 1 - 1 file changed, 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 84e050048343..266adb8835c3 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1062,7 +1062,6 @@ config HARDLOCKUP_DETECTOR depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_ARCH select LOCKUP_DETECTOR select HARDLOCKUP_DETECTOR_PERF if HAVE_HARDLOCKUP_DETECTOR_PERF - select HARDLOCKUP_DETECTOR_ARCH if HAVE_HARDLOCKUP_DETECTOR_ARCH help Say Y here to enable the kernel to act as a watchdog to detect hard lockups. -- cgit v1.2.3 From 5dfe50b05588010f347cb2f436434bf22b7a84ed Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 9 Sep 2021 22:36:38 +0900 Subject: bootconfig: Rename xbc_node_find_child() to xbc_node_find_subkey() Rename xbc_node_find_child() to xbc_node_find_subkey() for clarifying that function returns a key node (no value node). Since there are xbc_node_for_each_child() (loop on all child nodes) and xbc_node_for_each_subkey() (loop on only subkey nodes), this name distinction is necessary to avoid confusing users. Link: https://lkml.kernel.org/r/163119459826.161018.11200274779483115300.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- include/linux/bootconfig.h | 4 ++-- kernel/trace/trace_boot.c | 24 ++++++++++++------------ lib/bootconfig.c | 8 ++++---- 3 files changed, 18 insertions(+), 18 deletions(-) (limited to 'lib') diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h index abe089c27529..537e1b991f11 100644 --- a/include/linux/bootconfig.h +++ b/include/linux/bootconfig.h @@ -110,7 +110,7 @@ static inline __init bool xbc_node_is_leaf(struct xbc_node *node) } /* Tree-based key-value access APIs */ -struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent, +struct xbc_node * __init xbc_node_find_subkey(struct xbc_node *parent, const char *key); const char * __init xbc_node_find_value(struct xbc_node *parent, @@ -148,7 +148,7 @@ xbc_find_value(const char *key, struct xbc_node **vnode) */ static inline struct xbc_node * __init xbc_find_node(const char *key) { - return xbc_node_find_child(NULL, key); + return xbc_node_find_subkey(NULL, key); } /** diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index db6ee372dc6d..8d252f63cd78 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -262,9 +262,9 @@ trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp, append_printf(bufp, end, ":%s(%s)", handler, p); /* Compose 'action' parameter */ - knode = xbc_node_find_child(hnode, "trace"); + knode = xbc_node_find_subkey(hnode, "trace"); if (!knode) - knode = xbc_node_find_child(hnode, "save"); + knode = xbc_node_find_subkey(hnode, "save"); if (knode) { anode = xbc_node_get_child(knode); @@ -283,7 +283,7 @@ trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp, sep = ','; } append_printf(bufp, end, ")"); - } else if (xbc_node_find_child(hnode, "snapshot")) { + } else if (xbc_node_find_subkey(hnode, "snapshot")) { append_printf(bufp, end, ".snapshot()"); } else { pr_err("hist.%s requires an action.\n", @@ -314,7 +314,7 @@ trace_boot_hist_add_handlers(struct xbc_node *hnode, char 
**bufp, break; } - if (xbc_node_find_child(hnode, param)) + if (xbc_node_find_subkey(hnode, param)) ret = trace_boot_hist_add_one_handler(hnode, bufp, end, handler, param); return ret; @@ -374,7 +374,7 @@ trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size) if (p) append_printf(&buf, end, ":name=%s", p); - node = xbc_node_find_child(hnode, "var"); + node = xbc_node_find_subkey(hnode, "var"); if (node) { xbc_node_for_each_key_value(node, knode, p) { /* Expression must not include spaces. */ @@ -393,13 +393,13 @@ trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size) append_printf(&buf, end, ":clear"); /* Histogram handler and actions */ - node = xbc_node_find_child(hnode, "onmax"); + node = xbc_node_find_subkey(hnode, "onmax"); if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0) return -EINVAL; - node = xbc_node_find_child(hnode, "onchange"); + node = xbc_node_find_subkey(hnode, "onchange"); if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0) return -EINVAL; - node = xbc_node_find_child(hnode, "onmatch"); + node = xbc_node_find_subkey(hnode, "onmatch"); if (node && trace_boot_hist_add_handlers(node, &buf, end, "event") < 0) return -EINVAL; @@ -436,7 +436,7 @@ trace_boot_init_histograms(struct trace_event_file *file, } } - if (xbc_node_find_child(hnode, "keys")) { + if (xbc_node_find_subkey(hnode, "keys")) { if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) { tmp = kstrdup(buf, GFP_KERNEL); if (trigger_process_regex(file, buf) < 0) @@ -495,7 +495,7 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, else if (trigger_process_regex(file, buf) < 0) pr_err("Failed to apply an action: %s\n", p); } - anode = xbc_node_find_child(enode, "hist"); + anode = xbc_node_find_subkey(enode, "hist"); if (anode) trace_boot_init_histograms(file, anode, buf, ARRAY_SIZE(buf)); } else if (xbc_node_find_value(enode, "actions", NULL)) @@ -517,7 +517,7 @@ trace_boot_init_events(struct trace_array *tr, struct xbc_node *node) bool enable, enable_all = false; const char *data; - node = xbc_node_find_child(node, "event"); + node = xbc_node_find_subkey(node, "event"); if (!node) return; /* per-event key starts with "event.GROUP.EVENT" */ @@ -620,7 +620,7 @@ trace_boot_init_instances(struct xbc_node *node) struct trace_array *tr; const char *p; - node = xbc_node_find_child(node, "instance"); + node = xbc_node_find_subkey(node, "instance"); if (!node) return; diff --git a/lib/bootconfig.c b/lib/bootconfig.c index 927017431fb6..f8419cff1147 100644 --- a/lib/bootconfig.c +++ b/lib/bootconfig.c @@ -142,16 +142,16 @@ xbc_node_match_prefix(struct xbc_node *node, const char **prefix) } /** - * xbc_node_find_child() - Find a child node which matches given key + * xbc_node_find_subkey() - Find a subkey node which matches given key * @parent: An XBC node. * @key: A key string. * - * Search a node under @parent which matches @key. The @key can contain + * Search a key node under @parent which matches @key. The @key can contain * several words jointed with '.'. If @parent is NULL, this searches the * node from whole tree. Return NULL if no node is matched. 
*/ struct xbc_node * __init -xbc_node_find_child(struct xbc_node *parent, const char *key) +xbc_node_find_subkey(struct xbc_node *parent, const char *key) { struct xbc_node *node; @@ -191,7 +191,7 @@ const char * __init xbc_node_find_value(struct xbc_node *parent, const char *key, struct xbc_node **vnode) { - struct xbc_node *node = xbc_node_find_child(parent, key); + struct xbc_node *node = xbc_node_find_subkey(parent, key); if (!node || !xbc_node_is_key(node)) return NULL; -- cgit v1.2.3 From c666d447e091be3a742588b49290e7733115769f Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 19 Oct 2021 15:26:17 +0100 Subject: test_printf: Make pft array const Instead of assigning ptf[i].value, leave the values in the on-stack array and then we can make the array const. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Yafang Shao Reviewed-by: Petr Mladek Reviewed-by: Anshuman Khandual Signed-off-by: Petr Mladek Link: https://lore.kernel.org/r/20211019142621.2810043-2-willy@infradead.org --- lib/test_printf.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) (limited to 'lib') diff --git a/lib/test_printf.c b/lib/test_printf.c index 55082432f37e..a52c1c3a55ba 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -586,22 +586,21 @@ struct page_flags_test { int width; int shift; int mask; - unsigned long value; const char *fmt; const char *name; }; -static struct page_flags_test pft[] = { +static const struct page_flags_test pft[] = { {SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK, - 0, "%d", "section"}, + "%d", "section"}, {NODES_WIDTH, NODES_PGSHIFT, NODES_MASK, - 0, "%d", "node"}, + "%d", "node"}, {ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK, - 0, "%d", "zone"}, + "%d", "zone"}, {LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK, - 0, "%#x", "lastcpupid"}, + "%#x", "lastcpupid"}, {KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK, - 0, "%#x", "kasantag"}, + "%#x", "kasantag"}, }; static void __init @@ -627,10 +626,6 @@ page_flags_test(int section, int node, int zone, int last_cpupid, #endif } - /* Set the test value */ - for (i = 0; i < ARRAY_SIZE(pft); i++) - pft[i].value = values[i]; - for (i = 0; i < ARRAY_SIZE(pft); i++) { if (!pft[i].width) continue; @@ -640,11 +635,11 @@ page_flags_test(int section, int node, int zone, int last_cpupid, size = strlen(cmp_buf); } - page_flags |= (pft[i].value & pft[i].mask) << pft[i].shift; + page_flags |= (values[i] & pft[i].mask) << pft[i].shift; snprintf(cmp_buf + size, BUF_SIZE - size, "%s=", pft[i].name); size = strlen(cmp_buf); snprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt, - pft[i].value & pft[i].mask); + values[i] & pft[i].mask); size = strlen(cmp_buf); append = true; } -- cgit v1.2.3 From a25a0854a2264a0c592ba1ea01a165101f8c1a6c Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 19 Oct 2021 15:26:18 +0100 Subject: test_printf: Remove separate page_flags variable Keep flags intact so that we also test what happens when unknown flags are passed to %pGp. 
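To make the intent concrete: %pGp consumes a pointer to the whole flags word, and only the bits covered by PAGEFLAGS_MASK are decoded as flag names, so handing it a word that also carries layout fields or stray unknown bits is legitimate input the test should exercise. A rough sketch of the calling convention (kernel context assumed; the flag combination is made up for illustration):

	#include <linux/page-flags.h>
	#include <linux/printk.h>

	static void pgp_usage_sketch(void)
	{
		/* Extra high bits simply ride along in the same word. */
		unsigned long flags = (1UL << PG_locked) | (1UL << PG_uptodate);

		pr_info("flags: %pGp\n", &flags);	/* pass the address, not the value */
	}
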
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Yafang Shao Reviewed-by: Petr Mladek Signed-off-by: Petr Mladek Link: https://lore.kernel.org/r/20211019142621.2810043-3-willy@infradead.org --- lib/test_printf.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'lib') diff --git a/lib/test_printf.c b/lib/test_printf.c index a52c1c3a55ba..4531063afd45 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -605,17 +605,15 @@ static const struct page_flags_test pft[] = { static void __init page_flags_test(int section, int node, int zone, int last_cpupid, - int kasan_tag, int flags, const char *name, char *cmp_buf) + int kasan_tag, unsigned long flags, const char *name, + char *cmp_buf) { unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag}; - unsigned long page_flags = 0; unsigned long size = 0; bool append = false; int i; - flags &= PAGEFLAGS_MASK; - if (flags) { - page_flags |= flags; + if (flags & PAGEFLAGS_MASK) { snprintf(cmp_buf + size, BUF_SIZE - size, "%s", name); size = strlen(cmp_buf); #if SECTIONS_WIDTH || NODES_WIDTH || ZONES_WIDTH || \ @@ -635,7 +633,7 @@ page_flags_test(int section, int node, int zone, int last_cpupid, size = strlen(cmp_buf); } - page_flags |= (values[i] & pft[i].mask) << pft[i].shift; + flags |= (values[i] & pft[i].mask) << pft[i].shift; snprintf(cmp_buf + size, BUF_SIZE - size, "%s=", pft[i].name); size = strlen(cmp_buf); snprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt, @@ -644,7 +642,7 @@ page_flags_test(int section, int node, int zone, int last_cpupid, append = true; } - test(cmp_buf, "%pGp", &page_flags); + test(cmp_buf, "%pGp", &flags); } static void __init -- cgit v1.2.3 From 5b358b0de963f822226bfee916fb53c80bae4000 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 19 Oct 2021 15:26:19 +0100 Subject: test_printf: Remove custom appending of '|' Instead of having an ifdef to decide whether to print a |, use the 'append' functionality of the main loop to print it. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Yafang Shao Reviewed-by: Petr Mladek Reviewed-by: Anshuman Khandual Signed-off-by: Petr Mladek Link: https://lore.kernel.org/r/20211019142621.2810043-4-willy@infradead.org --- lib/test_printf.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/test_printf.c b/lib/test_printf.c index 4531063afd45..ec584196cb99 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -616,12 +616,7 @@ page_flags_test(int section, int node, int zone, int last_cpupid, if (flags & PAGEFLAGS_MASK) { snprintf(cmp_buf + size, BUF_SIZE - size, "%s", name); size = strlen(cmp_buf); -#if SECTIONS_WIDTH || NODES_WIDTH || ZONES_WIDTH || \ - LAST_CPUPID_WIDTH || KASAN_TAG_WIDTH - /* Other information also included in page flags */ - snprintf(cmp_buf + size, BUF_SIZE - size, "|"); - size = strlen(cmp_buf); -#endif + append = true; } for (i = 0; i < ARRAY_SIZE(pft); i++) { -- cgit v1.2.3 From 507f98603607d43cb76ed39c370c4dc1ed6a94f9 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 19 Oct 2021 15:26:20 +0100 Subject: test_printf: Append strings more efficiently Use scnprintf instead of snprintf + strlen. 
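The two idioms side by side, as a sketch with made-up helper names (not code from the patch): snprintf() returns the length the output would have had if the buffer were large enough, so the caller must re-measure with strlen(), whereas scnprintf() returns the number of characters actually stored (excluding the trailing NUL), so the offset can be accumulated directly.

	#include <linux/kernel.h>
	#include <linux/string.h>

	static size_t append_old(char *buf, size_t len, size_t size, const char *name)
	{
		/* Two passes: format, then re-discover the end of the string. */
		snprintf(buf + len, size - len, "%s=", name);
		return strlen(buf);
	}

	static size_t append_new(char *buf, size_t len, size_t size, const char *name)
	{
		/* One pass: scnprintf() reports what actually fit in the buffer. */
		return len + scnprintf(buf + len, size - len, "%s=", name);
	}
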
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Petr Mladek Reviewed-by: Yafang Shao Signed-off-by: Petr Mladek Link: https://lore.kernel.org/r/20211019142621.2810043-5-willy@infradead.org --- lib/test_printf.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'lib') diff --git a/lib/test_printf.c b/lib/test_printf.c index ec584196cb99..d09993fca463 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -614,8 +614,7 @@ page_flags_test(int section, int node, int zone, int last_cpupid, int i; if (flags & PAGEFLAGS_MASK) { - snprintf(cmp_buf + size, BUF_SIZE - size, "%s", name); - size = strlen(cmp_buf); + size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name); append = true; } @@ -623,17 +622,14 @@ page_flags_test(int section, int node, int zone, int last_cpupid, if (!pft[i].width) continue; - if (append) { - snprintf(cmp_buf + size, BUF_SIZE - size, "|"); - size = strlen(cmp_buf); - } + if (append) + size += scnprintf(cmp_buf + size, BUF_SIZE - size, "|"); flags |= (values[i] & pft[i].mask) << pft[i].shift; - snprintf(cmp_buf + size, BUF_SIZE - size, "%s=", pft[i].name); - size = strlen(cmp_buf); - snprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt, - values[i] & pft[i].mask); - size = strlen(cmp_buf); + size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s=", + pft[i].name); + size += scnprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt, + values[i] & pft[i].mask); append = true; } -- cgit v1.2.3 From 23efd0804c0a869dfb1e78470f80a27251317b7e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 19 Oct 2021 15:26:21 +0100 Subject: vsprintf: Make %pGp print the hex value All existing users of %pGp want the hex value as well as the decoded flag names. This looks awkward (passing the same parameter to printf twice), so move that functionality into the core. If we want, we can make that optional with flag arguments to %pGp in the future. 
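The call-site effect, sketched in the style of the mm/debug.c user changed below (a fragment assuming a struct page *page in scope; the sample output line is illustrative, not taken from a real log):

	/* Before: every caller printed the raw value alongside %pGp. */
	pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);

	/* After: %pGp itself prints the hex value followed by the decoded
	 * names in parentheses, e.g.
	 *   flags: 0x17ffffc0010200(slab|head|node=0|zone=2|lastcpupid=0x1fffff)
	 */
	pr_warn("flags: %pGp\n", &page->flags);
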
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Yafang Shao Reviewed-by: Petr Mladek Signed-off-by: Petr Mladek Link: https://lore.kernel.org/r/20211019142621.2810043-6-willy@infradead.org --- lib/test_printf.c | 9 +++++++-- lib/vsprintf.c | 8 ++++++++ mm/debug.c | 2 +- mm/memory-failure.c | 8 ++++---- mm/page_owner.c | 4 ++-- mm/slub.c | 4 ++-- 6 files changed, 24 insertions(+), 11 deletions(-) (limited to 'lib') diff --git a/lib/test_printf.c b/lib/test_printf.c index d09993fca463..07309c45f327 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -609,10 +609,14 @@ page_flags_test(int section, int node, int zone, int last_cpupid, char *cmp_buf) { unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag}; - unsigned long size = 0; + unsigned long size; bool append = false; int i; + for (i = 0; i < ARRAY_SIZE(values); i++) + flags |= (values[i] & pft[i].mask) << pft[i].shift; + + size = scnprintf(cmp_buf, BUF_SIZE, "%#lx(", flags); if (flags & PAGEFLAGS_MASK) { size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name); append = true; @@ -625,7 +629,6 @@ page_flags_test(int section, int node, int zone, int last_cpupid, if (append) size += scnprintf(cmp_buf + size, BUF_SIZE - size, "|"); - flags |= (values[i] & pft[i].mask) << pft[i].shift; size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s=", pft[i].name); size += scnprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt, @@ -633,6 +636,8 @@ page_flags_test(int section, int node, int zone, int last_cpupid, append = true; } + snprintf(cmp_buf + size, BUF_SIZE - size, ")"); + test(cmp_buf, "%pGp", &flags); } diff --git a/lib/vsprintf.c b/lib/vsprintf.c index d7ad44f2c8f5..214098248610 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -2023,6 +2023,11 @@ char *format_page_flags(char *buf, char *end, unsigned long flags) bool append = false; int i; + buf = number(buf, end, flags, default_flag_spec); + if (buf < end) + *buf = '('; + buf++; + /* Page flags from the main area. */ if (main_flags) { buf = format_flags(buf, end, main_flags, pageflag_names); @@ -2051,6 +2056,9 @@ char *format_page_flags(char *buf, char *end, unsigned long flags) append = true; } + if (buf < end) + *buf = ')'; + buf++; return buf; } diff --git a/mm/debug.c b/mm/debug.c index e73fe0a8ec3d..ca9611784e4b 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -160,7 +160,7 @@ static void __dump_page(struct page *page) out_mapping: BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1); - pr_warn("%sflags: %#lx(%pGp)%s\n", type, head->flags, &head->flags, + pr_warn("%sflags: %pGp%s\n", type, &head->flags, page_cma ? 
" CMA" : ""); print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32, sizeof(unsigned long), page, diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 54879c339024..58ab5161a8ce 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -2109,14 +2109,14 @@ static int __soft_offline_page(struct page *page) if (!list_empty(&pagelist)) putback_movable_pages(&pagelist); - pr_info("soft offline: %#lx: %s migration failed %d, type %lx (%pGp)\n", - pfn, msg_page[huge], ret, page->flags, &page->flags); + pr_info("soft offline: %#lx: %s migration failed %d, type %pGp\n", + pfn, msg_page[huge], ret, &page->flags); if (ret > 0) ret = -EBUSY; } } else { - pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %lx (%pGp)\n", - pfn, msg_page[huge], page_count(page), page->flags, &page->flags); + pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n", + pfn, msg_page[huge], page_count(page), &page->flags); ret = -EBUSY; } return ret; diff --git a/mm/page_owner.c b/mm/page_owner.c index 62402d22539b..4afc713ca525 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -351,12 +351,12 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn, pageblock_mt = get_pageblock_migratetype(page); page_mt = gfp_migratetype(page_owner->gfp_mask); ret += snprintf(kbuf + ret, count - ret, - "PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n", + "PFN %lu type %s Block %lu type %s Flags %pGp\n", pfn, migratetype_names[page_mt], pfn >> pageblock_order, migratetype_names[pageblock_mt], - page->flags, &page->flags); + &page->flags); if (ret >= count) goto err; diff --git a/mm/slub.c b/mm/slub.c index 3d2025f7163b..f7ac28646580 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -763,9 +763,9 @@ void print_tracking(struct kmem_cache *s, void *object) static void print_page_info(struct page *page) { - pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%#lx(%pGp)\n", + pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n", page, page->objects, page->inuse, page->freelist, - page->flags, &page->flags); + &page->flags); } -- cgit v1.2.3