Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                    |    9
-rw-r--r--  lib/Kconfig.debug              |   58
-rw-r--r--  lib/Kconfig.kasan              |    6
-rw-r--r--  lib/Kconfig.kfence             |   82
-rw-r--r--  lib/Kconfig.ubsan              |   17
-rw-r--r--  lib/Makefile                   |    8
-rw-r--r--  lib/bug.c                      |    3
-rw-r--r--  lib/buildid.c                  |  149
-rw-r--r--  lib/cmdline.c                  |   28
-rw-r--r--  lib/cmdline_kunit.c            |   56
-rw-r--r--  lib/crc7.c                     |    2
-rw-r--r--  lib/crypto/blake2s.c           |   48
-rw-r--r--  lib/crypto/chacha20poly1305.c  |    5
-rw-r--r--  lib/genalloc.c                 |    3
-rw-r--r--  lib/iov_iter.c                 |   45
-rw-r--r--  lib/kunit/Kconfig              |    1
-rw-r--r--  lib/kunit/assert.c             |   39
-rw-r--r--  lib/kunit/executor.c           |   93
-rw-r--r--  lib/linear_ranges.c            |    8
-rw-r--r--  lib/locking-selftest.c         |  334
-rw-r--r--  lib/logic_pio.c                |    3
-rw-r--r--  lib/parman.c                   |    1
-rw-r--r--  lib/parser.c                   |   44
-rw-r--r--  lib/percpu-refcount.c          |   12
-rw-r--r--  lib/stackdepot.c               |   37
-rw-r--r--  lib/test_bitmap.c              |    3
-rw-r--r--  lib/test_bpf.c                 |   21
-rw-r--r--  lib/test_fpu.c                 |    6
-rw-r--r--  lib/test_kasan.c               |  535
-rw-r--r--  lib/test_kasan_module.c        |    5
-rw-r--r--  lib/test_printf.c              |   16
-rw-r--r--  lib/test_ubsan.c               |   49
-rw-r--r--  lib/timerqueue.c               |   28
-rw-r--r--  lib/ubsan.c                    |   68
-rw-r--r--  lib/vsprintf.c                 |   36
35 files changed, 1432 insertions, 426 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 46806332a8cc..a38cc61256f1 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -651,6 +651,15 @@ config STACKDEPOT
bool
select STACKTRACE
+config STACK_HASH_ORDER
+ int "stack depot hash size (12 => 4KB, 20 => 1024KB)"
+ range 12 20
+ default 20
+ depends on STACKDEPOT
+ help
+ Select the hash size as a power of 2 for the stackdepot hash table.
+ Choose a lower value to reduce the memory impact.
+
config SBITMAP
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7937265ef879..2779c29d9981 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -256,14 +256,51 @@ config DEBUG_INFO_SPLIT
to know about the .dwo files and include them.
Incompatible with older versions of ccache.
+choice
+ prompt "DWARF version"
+ help
+ Which version of DWARF debug info to emit.
+
+config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
+ bool "Rely on the toolchain's implicit default DWARF version"
+ help
+ The implicit default version of DWARF debug info produced by a
+ toolchain changes over time.
+
+ This can break consumers of the debug info that haven't upgraded to
+ support newer revisions, and prevent testing newer versions, but
+ those should be less common scenarios.
+
+ If unsure, say Y.
+
config DEBUG_INFO_DWARF4
- bool "Generate dwarf4 debuginfo"
- depends on $(cc-option,-gdwarf-4)
+ bool "Generate DWARF Version 4 debuginfo"
help
- Generate dwarf4 debug info. This requires recent versions
- of gcc and gdb. It makes the debug information larger.
- But it significantly improves the success of resolving
- variables in gdb on optimized code.
+ Generate DWARF v4 debug info. This requires gcc 4.5+ and gdb 7.0+.
+
+ If you have consumers of DWARF debug info that are not ready for
+ newer revisions of DWARF, you may wish to choose this or have your
+ config select this.
+
+config DEBUG_INFO_DWARF5
+ bool "Generate DWARF Version 5 debuginfo"
+ depends on GCC_VERSION >= 50000 || CC_IS_CLANG
+ depends on CC_IS_GCC || $(success,$(srctree)/scripts/test_dwarf5_support.sh $(CC) $(CLANG_FLAGS))
+ depends on !DEBUG_INFO_BTF
+ help
+ Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
+ 5.0+ accepts the -gdwarf-5 flag but only had partial support for some
+ draft features until 7.0), and gdb 8.0+.
+
+ Changes to the structure of debug info in Version 5 allow for around
+ 15-18% savings in resulting image and debug info section sizes as
+ compared to DWARF Version 4. DWARF Version 5 standardizes previous
+ extensions such as accelerators for symbol indexing and the format
+ for fission (.dwo/.dwp) files. Users may not want to select this
+ config if they rely on tooling that has not yet been updated to
+ support DWARF Version 5.
+
+endchoice # "DWARF version"
config DEBUG_INFO_BTF
bool "Generate BTF typeinfo"
@@ -901,6 +938,7 @@ config DEBUG_STACKOVERFLOW
If in doubt, say "N".
source "lib/Kconfig.kasan"
+source "lib/Kconfig.kfence"
endmenu # "Memory Debugging"
@@ -1335,6 +1373,7 @@ config LOCKDEP_SMALL
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
+ select DEBUG_IRQFLAGS
help
If you say Y here, the lock dependency engine will do
additional runtime checks to debug itself, at the price
@@ -1423,6 +1462,13 @@ config TRACE_IRQFLAGS_NMI
depends on TRACE_IRQFLAGS
depends on TRACE_IRQFLAGS_NMI_SUPPORT
+config DEBUG_IRQFLAGS
+ bool "Debug IRQ flag manipulation"
+ help
+ Enables checks for potentially unsafe enabling or disabling of
+ interrupts, such as calling raw_local_irq_restore() when interrupts
+ are enabled.
+
config STACKTRACE
bool "Stack backtrace support"
depends on STACKTRACE_SUPPORT
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index f5fa4ba126bf..624ae1df7984 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -190,11 +190,11 @@ config KASAN_KUNIT_TEST
kernel debugging features like KASAN.
For more information on KUnit and unit tests in general, please refer
- to the KUnit documentation in Documentation/dev-tools/kunit
+ to the KUnit documentation in Documentation/dev-tools/kunit.
-config TEST_KASAN_MODULE
+config KASAN_MODULE_TEST
tristate "KUnit-incompatible tests of KASAN bug detection capabilities"
- depends on m && KASAN
+ depends on m && KASAN && !KASAN_HW_TAGS
help
This is a part of the KASAN test suite that is incompatible with
KUnit. Currently includes tests that do bad copy_from/to_user
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
new file mode 100644
index 000000000000..78f50ccb3b45
--- /dev/null
+++ b/lib/Kconfig.kfence
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config HAVE_ARCH_KFENCE
+ bool
+
+menuconfig KFENCE
+ bool "KFENCE: low-overhead sampling-based memory safety error detector"
+ depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
+ select STACKTRACE
+ help
+ KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
+ access, use-after-free, and invalid-free errors. KFENCE is designed
+ to have negligible cost to permit enabling it in production
+ environments.
+
+ See <file:Documentation/dev-tools/kfence.rst> for more details.
+
+ Note that KFENCE is not a substitute for explicit testing with tools
+ such as KASAN. KFENCE can detect a subset of bugs that KASAN can
+ detect, albeit at very different performance profiles. If you can
+ afford to use KASAN, continue using KASAN, for example in test
+ environments. If your kernel targets production use, and cannot
+ enable KASAN due to its cost, consider using KFENCE.
+
+if KFENCE
+
+config KFENCE_STATIC_KEYS
+ bool "Use static keys to set up allocations"
+ default y
+ depends on JUMP_LABEL # To ensure performance, require jump labels
+ help
+ Use static keys (static branches) to set up KFENCE allocations. Using
+ static keys is normally recommended, because it avoids a dynamic
+ branch in the allocator's fast path. However, with very low sample
+ intervals, or on systems that do not support jump labels, a dynamic
+ branch may still be an acceptable performance trade-off.
+
+config KFENCE_SAMPLE_INTERVAL
+ int "Default sample interval in milliseconds"
+ default 100
+ help
+ The KFENCE sample interval determines the frequency with which heap
+ allocations will be guarded by KFENCE. May be overridden via boot
+ parameter "kfence.sample_interval".
+
+ Set this to 0 to disable KFENCE by default, in which case only
+ setting "kfence.sample_interval" to a non-zero value enables KFENCE.
+
+config KFENCE_NUM_OBJECTS
+ int "Number of guarded objects available"
+ range 1 65535
+ default 255
+ help
+ The number of guarded objects available. For each KFENCE object, 2
+ pages are required, with one containing the object and two adjacent
+ ones used as guard pages.
+
+config KFENCE_STRESS_TEST_FAULTS
+ int "Stress testing of fault handling and error reporting" if EXPERT
+ default 0
+ help
+ The inverse probability with which to randomly protect KFENCE object
+ pages, resulting in spurious use-after-frees. The main purpose of
+ this option is to stress test KFENCE with concurrent error reports
+ and allocations/frees. A value of 0 disables stress testing logic.
+
+ Only for KFENCE testing; set to 0 if you are not a KFENCE developer.
+
+config KFENCE_KUNIT_TEST
+ tristate "KFENCE integration test suite" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ depends on TRACEPOINTS && KUNIT
+ help
+ Test suite for KFENCE, testing various error detection scenarios with
+ various allocation types, and checking that reports are correctly
+ output to console.
+
+ Say Y here if you want the test to be built into the kernel and run
+ during boot; say M if you want the test to build as a module; say N
+ if you are unsure.
+
+endif # KFENCE
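The KFENCE_STATIC_KEYS help text above describes a familiar pattern: gate the per-allocation sampling check behind a static branch so the allocator fast path costs a patched-in NOP rather than a load and compare. The sketch below is illustrative only; the key name, the helper, and the idea of a deferred timer toggling the key are assumptions standing in for KFENCE's real internals in mm/kfence/.

    /* Illustrative sketch only -- not KFENCE code. */
    #include <linux/jump_label.h>
    #include <linux/printk.h>
    #include <linux/slab.h>

    /* Hypothetical key; a deferred timer would enable it once per sample interval. */
    static DEFINE_STATIC_KEY_FALSE(sample_next_alloc);

    static void *sampled_alloc(size_t size, gfp_t flags)
    {
            /*
             * While the key is false this check is a NOP on the fast path;
             * only the one sampled allocation per interval takes the
             * out-of-line branch, where a guarded page would be handed out.
             */
            if (static_branch_unlikely(&sample_next_alloc))
                    pr_debug("sampling this allocation\n");

            return kmalloc(size, flags);
    }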
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 3a0b1c930733..e5372a13511d 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -112,23 +112,6 @@ config UBSAN_UNREACHABLE
This option enables -fsanitize=unreachable which checks for control
flow reaching an expected-to-be-unreachable position.
-config UBSAN_SIGNED_OVERFLOW
- bool "Perform checking for signed arithmetic overflow"
- default UBSAN
- depends on $(cc-option,-fsanitize=signed-integer-overflow)
- help
- This option enables -fsanitize=signed-integer-overflow which checks
- for overflow of any arithmetic operations with signed integers.
-
-config UBSAN_UNSIGNED_OVERFLOW
- bool "Perform checking for unsigned arithmetic overflow"
- depends on $(cc-option,-fsanitize=unsigned-integer-overflow)
- depends on !X86_32 # avoid excessive stack usage on x86-32/clang
- help
- This option enables -fsanitize=unsigned-integer-overflow which checks
- for overflow of any arithmetic operations with unsigned integers. This
- currently causes x86 to fail to boot.
-
config UBSAN_OBJECT_SIZE
bool "Perform checking for accesses beyond the end of objects"
default UBSAN
diff --git a/lib/Makefile b/lib/Makefile
index afeff05fa8c5..b5307d3eec1a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -27,16 +27,14 @@ KASAN_SANITIZE_string.o := n
CFLAGS_string.o += -fno-stack-protector
endif
-# Used by KCSAN while enabled, avoid recursion.
-KCSAN_SANITIZE_random32.o := n
-
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
idr.o extable.o sha1.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
- nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o
+ nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o \
+ buildid.o
lib-$(CONFIG_PRINTK) += dump_stack.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -68,7 +66,7 @@ obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_KASAN_KUNIT_TEST) += test_kasan.o
CFLAGS_test_kasan.o += -fno-builtin
CFLAGS_test_kasan.o += $(call cc-disable-warning, vla)
-obj-$(CONFIG_TEST_KASAN_MODULE) += test_kasan_module.o
+obj-$(CONFIG_KASAN_MODULE_TEST) += test_kasan_module.o
CFLAGS_test_kasan_module.o += -fno-builtin
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
diff --git a/lib/bug.c b/lib/bug.c
index 7103440c0ee1..8f9d537bfb2a 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -91,8 +91,6 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
char *secstrings;
unsigned int i;
- lockdep_assert_held(&module_mutex);
-
mod->bug_table = NULL;
mod->num_bugs = 0;
@@ -118,7 +116,6 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
void module_bug_cleanup(struct module *mod)
{
- lockdep_assert_held(&module_mutex);
list_del_rcu(&mod->bug_list);
}
diff --git a/lib/buildid.c b/lib/buildid.c
new file mode 100644
index 000000000000..6156997c3895
--- /dev/null
+++ b/lib/buildid.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/buildid.h>
+#include <linux/elf.h>
+#include <linux/pagemap.h>
+
+#define BUILD_ID 3
+/*
+ * Parse build id from the note segment. This logic can be shared between
+ * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
+ * identical.
+ */
+static inline int parse_build_id(void *page_addr,
+ unsigned char *build_id,
+ __u32 *size,
+ void *note_start,
+ Elf32_Word note_size)
+{
+ Elf32_Word note_offs = 0, new_offs;
+
+ /* check for overflow */
+ if (note_start < page_addr || note_start + note_size < note_start)
+ return -EINVAL;
+
+ /* only supports note that fits in the first page */
+ if (note_start + note_size > page_addr + PAGE_SIZE)
+ return -EINVAL;
+
+ while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
+ Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
+
+ if (nhdr->n_type == BUILD_ID &&
+ nhdr->n_namesz == sizeof("GNU") &&
+ nhdr->n_descsz > 0 &&
+ nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
+ memcpy(build_id,
+ note_start + note_offs +
+ ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
+ nhdr->n_descsz);
+ memset(build_id + nhdr->n_descsz, 0,
+ BUILD_ID_SIZE_MAX - nhdr->n_descsz);
+ if (size)
+ *size = nhdr->n_descsz;
+ return 0;
+ }
+ new_offs = note_offs + sizeof(Elf32_Nhdr) +
+ ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
+ if (new_offs <= note_offs) /* overflow */
+ break;
+ note_offs = new_offs;
+ }
+ return -EINVAL;
+}
+
+/* Parse build ID from 32-bit ELF */
+static int get_build_id_32(void *page_addr, unsigned char *build_id,
+ __u32 *size)
+{
+ Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
+ Elf32_Phdr *phdr;
+ int i;
+
+ /* only supports phdr that fits in one page */
+ if (ehdr->e_phnum >
+ (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
+
+ for (i = 0; i < ehdr->e_phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !parse_build_id(page_addr, build_id, size,
+ page_addr + phdr[i].p_offset,
+ phdr[i].p_filesz))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/* Parse build ID from 64-bit ELF */
+static int get_build_id_64(void *page_addr, unsigned char *build_id,
+ __u32 *size)
+{
+ Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
+ Elf64_Phdr *phdr;
+ int i;
+
+ /* only supports phdr that fits in one page */
+ if (ehdr->e_phnum >
+ (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
+
+ for (i = 0; i < ehdr->e_phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !parse_build_id(page_addr, build_id, size,
+ page_addr + phdr[i].p_offset,
+ phdr[i].p_filesz))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Parse build ID of ELF file mapped to vma
+ * @vma: vma object
+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
+ * @size: returns actual build id size in case of success
+ *
+ * Returns 0 on success, otherwise error (< 0).
+ */
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ __u32 *size)
+{
+ Elf32_Ehdr *ehdr;
+ struct page *page;
+ void *page_addr;
+ int ret;
+
+ /* only works for page backed storage */
+ if (!vma->vm_file)
+ return -EINVAL;
+
+ page = find_get_page(vma->vm_file->f_mapping, 0);
+ if (!page)
+ return -EFAULT; /* page not mapped */
+
+ ret = -EINVAL;
+ page_addr = kmap_atomic(page);
+ ehdr = (Elf32_Ehdr *)page_addr;
+
+ /* compare magic x7f "ELF" */
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
+ goto out;
+
+ /* only support executable file and shared object file */
+ if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
+ goto out;
+
+ if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
+ ret = get_build_id_32(page_addr, build_id, size);
+ else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
+ ret = get_build_id_64(page_addr, build_id, size);
+out:
+ kunmap_atomic(page_addr);
+ put_page(page);
+ return ret;
+}
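For context on how the new helper above is meant to be consumed: build_id_parse() only examines the first page of the file backing the vma, so a caller simply passes the vma plus a BUILD_ID_SIZE_MAX buffer. The sketch below is a hypothetical caller (the function and its hex dump are illustrative; the existing in-kernel user of this logic is the BPF stack map code).

    /* Illustrative caller of build_id_parse(); not part of this patch. */
    #include <linux/buildid.h>
    #include <linux/mm.h>
    #include <linux/printk.h>

    static void report_build_id(struct mm_struct *mm, unsigned long addr)
    {
            unsigned char id[BUILD_ID_SIZE_MAX];
            struct vm_area_struct *vma;
            __u32 id_size;

            /* The caller is assumed to hold mmap_lock for read. */
            vma = find_vma(mm, addr);
            if (!vma || build_id_parse(vma, id, &id_size))
                    return; /* not file backed, or no usable build ID note */

            print_hex_dump_bytes("build id: ", DUMP_PREFIX_NONE, id, id_size);
    }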
diff --git a/lib/cmdline.c b/lib/cmdline.c
index b390dd03363b..5d474c626e24 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL(get_option);
* get_options - Parse a string into a list of integers
* @str: String to be parsed
* @nints: size of integer array
- * @ints: integer array
+ * @ints: integer array (must have room for at least one element)
*
* This function parses a string containing a comma-separated
* list of integers, a hyphen-separated range of _positive_ integers,
@@ -91,6 +91,14 @@ EXPORT_SYMBOL(get_option);
* full, or when no more numbers can be retrieved from the
* string.
*
+ * When @nints is 0, the function just validates the given @str and
+ * returns the number of parseable integers as described below.
+ *
+ * Returns:
+ *
+ * The first element of @ints is filled with the number of collected
+ * integers in the range; the rest of the array holds the integers
+ * parsed from @str.
+ *
* Return value is the character in the string which caused
* the parse to end (typically a null terminator, if @str is
* completely parseable).
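To make the return convention documented above concrete: element 0 of @ints receives the count, the parsed values start at element 1, and passing @nints == 0 switches to validation-only mode. A hypothetical caller (not part of the patch):

    /* Illustrative use of get_options(); not part of this patch. */
    #include <linux/kernel.h>

    static void get_options_demo(void)
    {
            int ints[8]; /* room for the count plus up to 7 values */
            char *rest;

            rest = get_options("1-3,8", ARRAY_SIZE(ints), ints);
            /* Now ints[0] == 4 and ints[1..4] == {1, 2, 3, 8}; *rest == '\0'. */

            get_options("1-3,8", 0, ints);
            /* Validation mode: only ints[0] is meaningful, again set to 4. */
            (void)rest;
    }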
@@ -98,15 +106,20 @@ EXPORT_SYMBOL(get_option);
char *get_options(const char *str, int nints, int *ints)
{
+ bool validate = (nints == 0);
int res, i = 1;
- while (i < nints) {
- res = get_option((char **)&str, ints + i);
+ while (i < nints || validate) {
+ int *pint = validate ? ints : ints + i;
+
+ res = get_option((char **)&str, pint);
if (res == 0)
break;
if (res == 3) {
+ int n = validate ? 0 : nints - i;
int range_nums;
- range_nums = get_range((char **)&str, ints + i, nints - i);
+
+ range_nums = get_range((char **)&str, pint, n);
if (range_nums < 0)
break;
/*
@@ -215,7 +228,6 @@ char *next_arg(char *args, char **param, char **val)
{
unsigned int i, equals = 0;
int in_quote = 0, quoted = 0;
- char *next;
if (*args == '"') {
args++;
@@ -253,10 +265,10 @@ char *next_arg(char *args, char **param, char **val)
if (args[i]) {
args[i] = '\0';
- next = args + i + 1;
+ args += i + 1;
} else
- next = args + i;
+ args += i;
/* Chew up trailing spaces. */
- return skip_spaces(next);
+ return skip_spaces(args);
}
diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
index 550e7a47fd24..018bfc8113c4 100644
--- a/lib/cmdline_kunit.c
+++ b/lib/cmdline_kunit.c
@@ -18,6 +18,26 @@ static const int cmdline_test_values[] = {
1, 3, 2, 1, 1, 1, 3, 1,
};
+static_assert(ARRAY_SIZE(cmdline_test_strings) == ARRAY_SIZE(cmdline_test_values));
+
+static const char *cmdline_test_range_strings[] = {
+ "-7" , "--7" , "-1-2" , "7--9",
+ "7-" , "-7--9", "7-9," , "9-7" ,
+ "5-a", "a-5" , "5-8" , ",8-5",
+ "+,1", "-,4" , "-3,0-1,6", "4,-" ,
+ " +2", " -9" , "0-1,-3,6", "- 9" ,
+};
+
+static const int cmdline_test_range_values[][16] = {
+ { 1, -7, }, { 0, -0, }, { 4, -1, 0, +1, 2, }, { 0, 7, },
+ { 0, +7, }, { 0, -7, }, { 3, +7, 8, +9, 0, }, { 0, 9, },
+ { 0, +5, }, { 0, -0, }, { 4, +5, 6, +7, 8, }, { 0, 0, },
+ { 0, +0, }, { 0, -0, }, { 4, -3, 0, +1, 6, }, { 1, 4, },
+ { 0, +0, }, { 0, -0, }, { 4, +0, 1, -3, 6, }, { 0, 0, },
+};
+
+static_assert(ARRAY_SIZE(cmdline_test_range_strings) == ARRAY_SIZE(cmdline_test_range_values));
+
static void cmdline_do_one_test(struct kunit *test, const char *in, int rc, int offset)
{
const char *fmt = "Pattern: %s";
@@ -84,10 +104,46 @@ static void cmdline_test_tail_int(struct kunit *test)
} while (++i < ARRAY_SIZE(cmdline_test_strings));
}
+static void cmdline_do_one_range_test(struct kunit *test, const char *in,
+ unsigned int n, const int *e)
+{
+ unsigned int i;
+ int r[16];
+ int *p;
+
+ memset(r, 0, sizeof(r));
+ get_options(in, ARRAY_SIZE(r), r);
+ KUNIT_EXPECT_EQ_MSG(test, r[0], e[0], "in test %u (parsed) expected %d numbers, got %d",
+ n, e[0], r[0]);
+ for (i = 1; i < ARRAY_SIZE(r); i++)
+ KUNIT_EXPECT_EQ_MSG(test, r[i], e[i], "in test %u at %u", n, i);
+
+ memset(r, 0, sizeof(r));
+ get_options(in, 0, r);
+ KUNIT_EXPECT_EQ_MSG(test, r[0], e[0], "in test %u (validated) expected %d numbers, got %d",
+ n, e[0], r[0]);
+
+ p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
+ KUNIT_EXPECT_PTR_EQ_MSG(test, p, (int *)0, "in test %u at %u out of bound", n, p - r);
+}
+
+static void cmdline_test_range(struct kunit *test)
+{
+ unsigned int i = 0;
+
+ do {
+ const char *str = cmdline_test_range_strings[i];
+ const int *e = cmdline_test_range_values[i];
+
+ cmdline_do_one_range_test(test, str, i, e);
+ } while (++i < ARRAY_SIZE(cmdline_test_range_strings));
+}
+
static struct kunit_case cmdline_test_cases[] = {
KUNIT_CASE(cmdline_test_noint),
KUNIT_CASE(cmdline_test_lead_int),
KUNIT_CASE(cmdline_test_tail_int),
+ KUNIT_CASE(cmdline_test_range),
{}
};
diff --git a/lib/crc7.c b/lib/crc7.c
index 6a848d73e804..3848e313b722 100644
--- a/lib/crc7.c
+++ b/lib/crc7.c
@@ -51,7 +51,7 @@ const u8 crc7_be_syndrome_table[256] = {
EXPORT_SYMBOL(crc7_be_syndrome_table);
/**
- * crc7 - update the CRC7 for the data buffer
+ * crc7_be - update the CRC7 for the data buffer
* @crc: previous CRC7 value
* @buffer: data pointer
* @len: number of bytes in the buffer
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index 6a4b6b78d630..c64ac8bfb6a9 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -15,55 +15,23 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bug.h>
-#include <asm/unaligned.h>
+
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
+# define blake2s_compress blake2s_compress_arch
+#else
+# define blake2s_compress blake2s_compress_generic
+#endif
void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
{
- const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
-
- if (unlikely(!inlen))
- return;
- if (inlen > fill) {
- memcpy(state->buf + state->buflen, in, fill);
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S))
- blake2s_compress_arch(state, state->buf, 1,
- BLAKE2S_BLOCK_SIZE);
- else
- blake2s_compress_generic(state, state->buf, 1,
- BLAKE2S_BLOCK_SIZE);
- state->buflen = 0;
- in += fill;
- inlen -= fill;
- }
- if (inlen > BLAKE2S_BLOCK_SIZE) {
- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
- /* Hash one less (full) block than strictly possible */
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S))
- blake2s_compress_arch(state, in, nblocks - 1,
- BLAKE2S_BLOCK_SIZE);
- else
- blake2s_compress_generic(state, in, nblocks - 1,
- BLAKE2S_BLOCK_SIZE);
- in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
- inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
- }
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
+ __blake2s_update(state, in, inlen, blake2s_compress);
}
EXPORT_SYMBOL(blake2s_update);
void blake2s_final(struct blake2s_state *state, u8 *out)
{
WARN_ON(IS_ENABLED(DEBUG) && !out);
- blake2s_set_lastblock(state);
- memset(state->buf + state->buflen, 0,
- BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S))
- blake2s_compress_arch(state, state->buf, 1, state->buflen);
- else
- blake2s_compress_generic(state, state->buf, 1, state->buflen);
- cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
- memcpy(out, state->h, state->outlen);
+ __blake2s_final(state, out, blake2s_compress);
memzero_explicit(state, sizeof(*state));
}
EXPORT_SYMBOL(blake2s_final);
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
index 5850f3b87359..c2fcdb98cc02 100644
--- a/lib/crypto/chacha20poly1305.c
+++ b/lib/crypto/chacha20poly1305.c
@@ -362,7 +362,12 @@ static int __init mod_init(void)
return 0;
}
+static void __exit mod_exit(void)
+{
+}
+
module_init(mod_init);
+module_exit(mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/genalloc.c b/lib/genalloc.c
index dab97bb69df6..5dcf9cdcbc46 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -81,7 +81,8 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
* users set the same bit, one user will return remain bits, otherwise
* return 0.
*/
-static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
+static unsigned long
+bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
unsigned long *p = map + BIT_WORD(start);
const unsigned long size = start + nr;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 9889e9903cdf..f66c62aa7154 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -72,8 +72,6 @@
__start.bi_bvec_done = skip; \
__start.bi_idx = 0; \
for_each_bvec(__v, i->bvec, __bi, __start) { \
- if (!__v.bv_len) \
- continue; \
(void)(STEP); \
} \
}
@@ -578,14 +576,15 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
}
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
- __wsum *csum, struct iov_iter *i)
+ struct csum_state *csstate,
+ struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
unsigned int p_mask = pipe->ring_size - 1;
+ __wsum sum = csstate->csum;
+ size_t off = csstate->off;
unsigned int i_head;
size_t n, r;
- size_t off = 0;
- __wsum sum = *csum;
if (!sanity(i))
return 0;
@@ -607,7 +606,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
i_head++;
} while (n);
i->count -= bytes;
- *csum = sum;
+ csstate->csum = sum;
+ csstate->off = off;
return bytes;
}
@@ -1053,6 +1053,21 @@ static void pipe_advance(struct iov_iter *i, size_t size)
pipe_truncate(i);
}
+static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
+{
+ struct bvec_iter bi;
+
+ bi.bi_size = i->count;
+ bi.bi_bvec_done = i->iov_offset;
+ bi.bi_idx = 0;
+ bvec_iter_advance(i->bvec, &bi, size);
+
+ i->bvec += bi.bi_idx;
+ i->nr_segs -= bi.bi_idx;
+ i->count = bi.bi_size;
+ i->iov_offset = bi.bi_bvec_done;
+}
+
void iov_iter_advance(struct iov_iter *i, size_t size)
{
if (unlikely(iov_iter_is_pipe(i))) {
@@ -1063,6 +1078,10 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
i->count -= size;
return;
}
+ if (iov_iter_is_bvec(i)) {
+ iov_iter_bvec_advance(i, size);
+ return;
+ }
iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
@@ -1508,18 +1527,19 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
struct iov_iter *i)
{
+ struct csum_state *csstate = _csstate;
const char *from = addr;
- __wsum *csum = csump;
__wsum sum, next;
- size_t off = 0;
+ size_t off;
if (unlikely(iov_iter_is_pipe(i)))
- return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
+ return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
- sum = *csum;
+ sum = csstate->csum;
+ off = csstate->off;
if (unlikely(iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
@@ -1547,7 +1567,8 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
off += v.iov_len;
})
)
- *csum = sum;
+ csstate->csum = sum;
+ csstate->off = off;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 00909e6a2443..0b5dfb001bac 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -4,6 +4,7 @@
menuconfig KUNIT
tristate "KUnit - Enable support for unit tests"
+ select GLOB if KUNIT=y
help
Enables support for kernel unit tests (KUnit), a lightweight unit
testing and mocking framework for the Linux kernel. These tests are
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index 33acdaa28a7d..e0ec7d6fed6f 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -85,6 +85,29 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
}
EXPORT_SYMBOL_GPL(kunit_ptr_not_err_assert_format);
+/* Checks if `text` is a literal representing `value`, e.g. "5" and 5 */
+static bool is_literal(struct kunit *test, const char *text, long long value,
+ gfp_t gfp)
+{
+ char *buffer;
+ int len;
+ bool ret;
+
+ len = snprintf(NULL, 0, "%lld", value);
+ if (strlen(text) != len)
+ return false;
+
+ buffer = kunit_kmalloc(test, len+1, gfp);
+ if (!buffer)
+ return false;
+
+ snprintf(buffer, len+1, "%lld", value);
+ ret = strncmp(buffer, text, len) == 0;
+
+ kunit_kfree(test, buffer);
+ return ret;
+}
+
void kunit_binary_assert_format(const struct kunit_assert *assert,
struct string_stream *stream)
{
@@ -97,12 +120,16 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
binary_assert->left_text,
binary_assert->operation,
binary_assert->right_text);
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld\n",
- binary_assert->left_text,
- binary_assert->left_value);
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld",
- binary_assert->right_text,
- binary_assert->right_value);
+ if (!is_literal(stream->test, binary_assert->left_text,
+ binary_assert->left_value, stream->gfp))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ if (!is_literal(stream->test, binary_assert->right_text,
+ binary_assert->right_value, stream->gfp))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld",
+ binary_assert->right_text,
+ binary_assert->right_value);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_assert_format);
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index a95742a4ece7..15832ed44668 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
+#include <linux/glob.h>
+#include <linux/moduleparam.h>
/*
* These symbols point to the .kunit_test_suites section and are defined in
@@ -11,14 +13,81 @@ extern struct kunit_suite * const * const __kunit_suites_end[];
#if IS_BUILTIN(CONFIG_KUNIT)
-static void kunit_print_tap_header(void)
+static char *filter_glob;
+module_param(filter_glob, charp, 0);
+MODULE_PARM_DESC(filter_glob,
+ "Filter which KUnit test suites run at boot-time, e.g. list*");
+
+static struct kunit_suite * const *
+kunit_filter_subsuite(struct kunit_suite * const * const subsuite)
+{
+ int i, n = 0;
+ struct kunit_suite **filtered;
+
+ n = 0;
+ for (i = 0; subsuite[i] != NULL; ++i) {
+ if (glob_match(filter_glob, subsuite[i]->name))
+ ++n;
+ }
+
+ if (n == 0)
+ return NULL;
+
+ filtered = kmalloc_array(n + 1, sizeof(*filtered), GFP_KERNEL);
+ if (!filtered)
+ return NULL;
+
+ n = 0;
+ for (i = 0; subsuite[i] != NULL; ++i) {
+ if (glob_match(filter_glob, subsuite[i]->name))
+ filtered[n++] = subsuite[i];
+ }
+ filtered[n] = NULL;
+
+ return filtered;
+}
+
+struct suite_set {
+ struct kunit_suite * const * const *start;
+ struct kunit_suite * const * const *end;
+};
+
+static struct suite_set kunit_filter_suites(void)
+{
+ int i;
+ struct kunit_suite * const **copy, * const *filtered_subsuite;
+ struct suite_set filtered;
+
+ const size_t max = __kunit_suites_end - __kunit_suites_start;
+
+ if (!filter_glob) {
+ filtered.start = __kunit_suites_start;
+ filtered.end = __kunit_suites_end;
+ return filtered;
+ }
+
+ copy = kmalloc_array(max, sizeof(*filtered.start), GFP_KERNEL);
+ filtered.start = copy;
+ if (!copy) { /* won't be able to run anything, return an empty set */
+ filtered.end = copy;
+ return filtered;
+ }
+
+ for (i = 0; i < max; ++i) {
+ filtered_subsuite = kunit_filter_subsuite(__kunit_suites_start[i]);
+ if (filtered_subsuite)
+ *copy++ = filtered_subsuite;
+ }
+ filtered.end = copy;
+ return filtered;
+}
+
+static void kunit_print_tap_header(struct suite_set *suite_set)
{
struct kunit_suite * const * const *suites, * const *subsuite;
int num_of_suites = 0;
- for (suites = __kunit_suites_start;
- suites < __kunit_suites_end;
- suites++)
+ for (suites = suite_set->start; suites < suite_set->end; suites++)
for (subsuite = *suites; *subsuite != NULL; subsuite++)
num_of_suites++;
@@ -30,12 +99,18 @@ int kunit_run_all_tests(void)
{
struct kunit_suite * const * const *suites;
- kunit_print_tap_header();
+ struct suite_set suite_set = kunit_filter_suites();
+
+ kunit_print_tap_header(&suite_set);
+
+ for (suites = suite_set.start; suites < suite_set.end; suites++)
+ __kunit_test_suites_init(*suites);
- for (suites = __kunit_suites_start;
- suites < __kunit_suites_end;
- suites++)
- __kunit_test_suites_init(*suites);
+ if (filter_glob) { /* a copy was made of each array */
+ for (suites = suite_set.start; suites < suite_set.end; suites++)
+ kfree(*suites);
+ kfree(suite_set.start);
+ }
return 0;
}
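The filtering above is a thin wrapper around glob_match() from lib/glob.c, which is why lib/kunit/Kconfig now selects GLOB for built-in KUnit. A standalone illustration of the matching that kunit.filter_glob relies on (the suite names here are only examples):

    /* Illustrative only: the glob semantics used by kunit.filter_glob. */
    #include <linux/bug.h>
    #include <linux/glob.h>

    static void filter_glob_demo(void)
    {
            /* "list*" keeps any suite whose name starts with "list"... */
            WARN_ON(!glob_match("list*", "list-kunit-test"));
            /* ...and drops everything else. */
            WARN_ON(glob_match("list*", "kunit-try-catch-test"));
    }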
diff --git a/lib/linear_ranges.c b/lib/linear_ranges.c
index 9495ef3572b7..ced5c15d3f04 100644
--- a/lib/linear_ranges.c
+++ b/lib/linear_ranges.c
@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(linear_range_get_value_array);
* @selector: address where found selector value is updated
* @found: flag to indicate that given value was in the range
*
- * Return selector which which range value is closest match for given
+ * Return selector for which range value is closest match for given
* input value. Value is matching if it is equal or smaller than given
* value. If given value is in the range, then @found is set true.
*
@@ -168,11 +168,11 @@ EXPORT_SYMBOL_GPL(linear_range_get_selector_low);
* @selector: address where found selector value is updated
* @found: flag to indicate that given value was in the range
*
- * Scan array of ranges for selector which which range value matches given
+ * Scan array of ranges for selector for which range value matches given
* input value. Value is matching if it is equal or smaller than given
* value. If given value is found to be in a range scanning is stopped and
* @found is set true. If a range with values smaller than given value is found
- * but the range max is being smaller than given value, then the ranges
+ * but the range max is smaller than given value, then the range's
* biggest selector is updated to @selector but scanning ranges is continued
* and @found is set to false.
*
@@ -209,7 +209,7 @@ EXPORT_SYMBOL_GPL(linear_range_get_selector_low_array);
* @selector: address where found selector value is updated
* @found: flag to indicate that given value was in the range
*
- * Return selector which which range value is closest match for given
+ * Return selector for which range value is closest match for given
* input value. Value is matching if it is equal or higher than given
* value. If given value is in the range, then @found is set true.
*
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 9959ea23529e..2d85abac1744 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -24,6 +24,7 @@
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/rtmutex.h>
+#include <linux/local_lock.h>
/*
* Change this to 1 if you want to see the failure printouts:
@@ -51,6 +52,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
#define LOCKTYPE_RWSEM 0x8
#define LOCKTYPE_WW 0x10
#define LOCKTYPE_RTMUTEX 0x20
+#define LOCKTYPE_LL 0x40
static struct ww_acquire_ctx t, t2;
static struct ww_mutex o, o2, o3;
@@ -64,6 +66,9 @@ static DEFINE_SPINLOCK(lock_B);
static DEFINE_SPINLOCK(lock_C);
static DEFINE_SPINLOCK(lock_D);
+static DEFINE_RAW_SPINLOCK(raw_lock_A);
+static DEFINE_RAW_SPINLOCK(raw_lock_B);
+
static DEFINE_RWLOCK(rwlock_A);
static DEFINE_RWLOCK(rwlock_B);
static DEFINE_RWLOCK(rwlock_C);
@@ -133,6 +138,8 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
#endif
+static local_lock_t local_A = INIT_LOCAL_LOCK(local_A);
+
/*
* non-inlined runtime initializers, to let separate locks share
* the same lock-class:
@@ -1306,19 +1313,23 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
+# define I_RAW_SPINLOCK(x) lockdep_reset_lock(&raw_lock_##x.dep_map)
# define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map)
# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
# define I_WW(x) lockdep_reset_lock(&x.dep_map)
+# define I_LOCAL_LOCK(x) lockdep_reset_lock(&local_##x.dep_map)
#ifdef CONFIG_RT_MUTEXES
# define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map)
#endif
#else
# define I_SPINLOCK(x)
+# define I_RAW_SPINLOCK(x)
# define I_RWLOCK(x)
# define I_MUTEX(x)
# define I_RWSEM(x)
# define I_WW(x)
+# define I_LOCAL_LOCK(x)
#endif
#ifndef I_RTMUTEX
@@ -1358,9 +1369,16 @@ static void reset_locks(void)
I1(A); I1(B); I1(C); I1(D);
I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
+ I_RAW_SPINLOCK(A); I_RAW_SPINLOCK(B);
+ I_LOCAL_LOCK(A);
+
lockdep_reset();
+
I2(A); I2(B); I2(C); I2(D);
init_shared_classes();
+ raw_spin_lock_init(&raw_lock_A);
+ raw_spin_lock_init(&raw_lock_B);
+ local_lock_init(&local_A);
ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
@@ -1382,6 +1400,8 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
WARN_ON(irqs_disabled());
+ debug_locks_silent = !(debug_locks_verbose & lockclass_mask);
+
testcase_fn();
/*
* Filter out expected failures:
@@ -1402,7 +1422,7 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
}
testcase_total++;
- if (debug_locks_verbose)
+ if (debug_locks_verbose & lockclass_mask)
pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
lockclass_mask, debug_locks, expected);
/*
@@ -2419,6 +2439,311 @@ static void fs_reclaim_tests(void)
pr_cont("\n");
}
+#define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))
+
+static void hardirq_exit(int *_)
+{
+ HARDIRQ_EXIT();
+}
+
+#define HARDIRQ_CONTEXT(name, ...) \
+ int hardirq_guard_##name __guard(hardirq_exit); \
+ HARDIRQ_ENTER();
+
+#define NOTTHREADED_HARDIRQ_CONTEXT(name, ...) \
+ int notthreaded_hardirq_guard_##name __guard(hardirq_exit); \
+ local_irq_disable(); \
+ __irq_enter(); \
+ WARN_ON(!in_irq());
+
+static void softirq_exit(int *_)
+{
+ SOFTIRQ_EXIT();
+}
+
+#define SOFTIRQ_CONTEXT(name, ...) \
+ int softirq_guard_##name __guard(softirq_exit); \
+ SOFTIRQ_ENTER();
+
+static void rcu_exit(int *_)
+{
+ rcu_read_unlock();
+}
+
+#define RCU_CONTEXT(name, ...) \
+ int rcu_guard_##name __guard(rcu_exit); \
+ rcu_read_lock();
+
+static void rcu_bh_exit(int *_)
+{
+ rcu_read_unlock_bh();
+}
+
+#define RCU_BH_CONTEXT(name, ...) \
+ int rcu_bh_guard_##name __guard(rcu_bh_exit); \
+ rcu_read_lock_bh();
+
+static void rcu_sched_exit(int *_)
+{
+ rcu_read_unlock_sched();
+}
+
+#define RCU_SCHED_CONTEXT(name, ...) \
+ int rcu_sched_guard_##name __guard(rcu_sched_exit); \
+ rcu_read_lock_sched();
+
+static void rcu_callback_exit(int *_)
+{
+ rcu_lock_release(&rcu_callback_map);
+}
+
+#define RCU_CALLBACK_CONTEXT(name, ...) \
+ int rcu_callback_guard_##name __guard(rcu_callback_exit); \
+ rcu_lock_acquire(&rcu_callback_map);
+
+
+static void raw_spinlock_exit(raw_spinlock_t **lock)
+{
+ raw_spin_unlock(*lock);
+}
+
+#define RAW_SPINLOCK_CONTEXT(name, lock) \
+ raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = &(lock); \
+ raw_spin_lock(&(lock));
+
+static void spinlock_exit(spinlock_t **lock)
+{
+ spin_unlock(*lock);
+}
+
+#define SPINLOCK_CONTEXT(name, lock) \
+ spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock); \
+ spin_lock(&(lock));
+
+static void mutex_exit(struct mutex **lock)
+{
+ mutex_unlock(*lock);
+}
+
+#define MUTEX_CONTEXT(name, lock) \
+ struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock); \
+ mutex_lock(&(lock));
+
+#define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock) \
+ \
+static void __maybe_unused inner##_in_##outer(void) \
+{ \
+ outer##_CONTEXT(_, outer_lock); \
+ { \
+ inner##_CONTEXT(_, inner_lock); \
+ } \
+}
+
+/*
+ * wait contexts (considering PREEMPT_RT)
+ *
+ * o: inner is allowed in outer
+ * x: inner is disallowed in outer
+ *
+ * \ inner | RCU | RAW_SPIN | SPIN | MUTEX
+ * outer \ | | | |
+ * ---------------+-------+----------+------+-------
+ * HARDIRQ | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * NOTTHREADED_IRQ| o | o | x | x
+ * ---------------+-------+----------+------+-------
+ * SOFTIRQ | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU_BH | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU_CALLBACK | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU_SCHED | o | o | x | x
+ * ---------------+-------+----------+------+-------
+ * RAW_SPIN | o | o | x | x
+ * ---------------+-------+----------+------+-------
+ * SPIN | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * MUTEX | o | o | o | o
+ * ---------------+-------+----------+------+-------
+ */
+
+#define GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(HARDIRQ, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU_CALLBACK, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(MUTEX, mutex_A, inner, inner_lock)
+
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RAW_SPINLOCK, raw_lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(SPINLOCK, lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
+
+/* the outer context allows all kinds of preemption */
+#define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer) \
+ dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(MUTEX_in_##outer, SUCCESS, LOCKTYPE_MUTEX); \
+
+/*
+ * the outer context only allows the preemption introduced by spinlock_t (which
+ * is a sleepable lock for PREEMPT_RT)
+ */
+#define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer) \
+ dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
+
+/* the outer doesn't allow any kind of preemption */
+#define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer) \
+ dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(SPINLOCK_in_##outer, FAILURE, LOCKTYPE_SPIN); \
+ dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
+
+static void wait_context_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | wait context tests |\n");
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | rcu | raw | spin |mutex |\n");
+ printk(" --------------------------------------------------------------------------\n");
+ print_testname("in hardirq context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(HARDIRQ);
+ pr_cont("\n");
+
+ print_testname("in hardirq context (not threaded)");
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(NOTTHREADED_HARDIRQ);
+ pr_cont("\n");
+
+ print_testname("in softirq context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SOFTIRQ);
+ pr_cont("\n");
+
+ print_testname("in RCU context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU);
+ pr_cont("\n");
+
+ print_testname("in RCU-bh context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
+ pr_cont("\n");
+
+ print_testname("in RCU callback context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_CALLBACK);
+ pr_cont("\n");
+
+ print_testname("in RCU-sched context");
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
+ pr_cont("\n");
+
+ print_testname("in RAW_SPINLOCK context");
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK);
+ pr_cont("\n");
+
+ print_testname("in SPINLOCK context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SPINLOCK);
+ pr_cont("\n");
+
+ print_testname("in MUTEX context");
+ DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(MUTEX);
+ pr_cont("\n");
+}
+
+static void local_lock_2(void)
+{
+ local_lock_acquire(&local_A); /* IRQ-ON */
+ local_lock_release(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT()
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */
+ local_lock_release(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+}
+
+static void local_lock_3A(void)
+{
+ local_lock_acquire(&local_A); /* IRQ-ON */
+ spin_lock(&lock_B); /* IRQ-ON */
+ spin_unlock(&lock_B);
+ local_lock_release(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT()
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+ local_lock_release(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+}
+
+static void local_lock_3B(void)
+{
+ local_lock_acquire(&local_A); /* IRQ-ON */
+ spin_lock(&lock_B); /* IRQ-ON */
+ spin_unlock(&lock_B);
+ local_lock_release(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT()
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+ local_lock_release(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ spin_lock(&lock_B); /* IN-IRQ <-> IRQ-ON cycle, true */
+ spin_unlock(&lock_B);
+ spin_unlock(&lock_A);
+ HARDIRQ_DISABLE();
+
+}
+
+static void local_lock_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | local_lock tests |\n");
+ printk(" ---------------------\n");
+
+ print_testname("local_lock inversion 2");
+ dotest(local_lock_2, SUCCESS, LOCKTYPE_LL);
+ pr_cont("\n");
+
+ print_testname("local_lock inversion 3A");
+ dotest(local_lock_3A, SUCCESS, LOCKTYPE_LL);
+ pr_cont("\n");
+
+ print_testname("local_lock inversion 3B");
+ dotest(local_lock_3B, FAILURE, LOCKTYPE_LL);
+ pr_cont("\n");
+}
+
void locking_selftest(void)
{
/*
@@ -2446,7 +2771,6 @@ void locking_selftest(void)
printk(" --------------------------------------------------------------------------\n");
init_shared_classes();
- debug_locks_silent = !debug_locks_verbose;
lockdep_set_selftest_task(current);
DO_TESTCASE_6R("A-A deadlock", AA);
@@ -2542,6 +2866,12 @@ void locking_selftest(void)
fs_reclaim_tests();
+ /* Wait context test cases that are specific for RAW_LOCK_NESTING */
+ if (IS_ENABLED(CONFIG_PROVE_RAW_LOCK_NESTING))
+ wait_context_tests();
+
+ local_lock_tests();
+
if (unexpected_testcase_failures) {
printk("-----------------------------------------------------------------\n");
debug_locks = 0;
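A note on the __guard() macro that the new *_CONTEXT() helpers above are built on: it is simply the compiler's cleanup attribute, which runs the named function on the annotated variable when it leaves scope, pairing every context "enter" with an automatic "exit". A minimal standalone illustration follows (the lock and function names are hypothetical, not the selftest's own):

    /* Illustrative sketch of the __attribute__((cleanup)) pattern. */
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_unlock(spinlock_t **lock)
    {
            spin_unlock(*lock);
    }

    static void demo_critical_section(void)
    {
            /* The cleanup handler runs when the variable goes out of scope. */
            spinlock_t *guard __attribute__((__cleanup__(demo_unlock))) = &demo_lock;

            spin_lock(&demo_lock);
            /* ... work under the lock ... */
    }       /* demo_unlock(&guard) runs here, dropping demo_lock */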
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index f32fe481b492..07b4b9a1f54b 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -28,6 +28,8 @@ static DEFINE_MUTEX(io_range_mutex);
* @new_range: pointer to the IO range to be registered.
*
* Returns 0 on success, the error code in case of failure.
+ * If the range already exists, -EEXIST will be returned, which should be
+ * considered a success.
*
* Register a new IO range node in the IO range list.
*/
@@ -51,6 +53,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
list_for_each_entry(range, &io_range_list, list) {
if (range->fwnode == new_range->fwnode) {
/* range already there */
+ ret = -EEXIST;
goto end_register;
}
if (range->flags == LOGIC_PIO_CPU_MMIO &&
diff --git a/lib/parman.c b/lib/parman.c
index c6e42a8db824..a11f2f667639 100644
--- a/lib/parman.c
+++ b/lib/parman.c
@@ -85,7 +85,6 @@ static int parman_shrink(struct parman *parman)
}
static bool parman_prio_used(struct parman_prio *prio)
-
{
return !list_empty(&prio->item_list);
}
diff --git a/lib/parser.c b/lib/parser.c
index f5b3e5d7a7f9..7a5769db389f 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -11,7 +11,7 @@
#include <linux/string.h>
/**
- * match_one: - Determines if a string matches a simple pattern
+ * match_one - Determines if a string matches a simple pattern
* @s: the string to examine for presence of the pattern
* @p: the string containing the pattern
* @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
@@ -89,7 +89,7 @@ static int match_one(char *s, const char *p, substring_t args[])
}
/**
- * match_token: - Find a token (and optional args) in a string
+ * match_token - Find a token (and optional args) in a string
* @s: the string to examine for token/argument pairs
* @table: match_table_t describing the set of allowed option tokens and the
* arguments that may be associated with them. Must be terminated with a
@@ -114,7 +114,7 @@ int match_token(char *s, const match_table_t table, substring_t args[])
EXPORT_SYMBOL(match_token);
/**
- * match_number: scan a number in the given base from a substring_t
+ * match_number - scan a number in the given base from a substring_t
* @s: substring to be scanned
* @result: resulting integer on success
* @base: base to use when converting string
@@ -147,7 +147,7 @@ static int match_number(substring_t *s, int *result, int base)
}
/**
- * match_u64int: scan a number in the given base from a substring_t
+ * match_u64int - scan a number in the given base from a substring_t
* @s: substring to be scanned
* @result: resulting u64 on success
* @base: base to use when converting string
@@ -174,7 +174,7 @@ static int match_u64int(substring_t *s, u64 *result, int base)
}
/**
- * match_int: - scan a decimal representation of an integer from a substring_t
+ * match_int - scan a decimal representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
@@ -188,8 +188,30 @@ int match_int(substring_t *s, int *result)
}
EXPORT_SYMBOL(match_int);
+/**
+ * match_uint - scan a decimal representation of an integer from a substring_t
+ * @s: substring_t to be scanned
+ * @result: resulting integer on success
+ *
+ * Description: Attempts to parse the &substring_t @s as a decimal integer. On
+ * success, sets @result to the integer represented by the string and returns 0.
+ * Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ */
+int match_uint(substring_t *s, unsigned int *result)
+{
+ int err = -ENOMEM;
+ char *buf = match_strdup(s);
+
+ if (buf) {
+ err = kstrtouint(buf, 10, result);
+ kfree(buf);
+ }
+ return err;
+}
+EXPORT_SYMBOL(match_uint);
+
/**
- * match_u64: - scan a decimal representation of a u64 from
+ * match_u64 - scan a decimal representation of a u64 from
* a substring_t
* @s: substring_t to be scanned
* @result: resulting unsigned long long on success
@@ -206,7 +228,7 @@ int match_u64(substring_t *s, u64 *result)
EXPORT_SYMBOL(match_u64);
/**
- * match_octal: - scan an octal representation of an integer from a substring_t
+ * match_octal - scan an octal representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
@@ -221,7 +243,7 @@ int match_octal(substring_t *s, int *result)
EXPORT_SYMBOL(match_octal);
/**
- * match_hex: - scan a hex representation of an integer from a substring_t
+ * match_hex - scan a hex representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
@@ -236,7 +258,7 @@ int match_hex(substring_t *s, int *result)
EXPORT_SYMBOL(match_hex);
/**
- * match_wildcard: - parse if a string matches given wildcard pattern
+ * match_wildcard - parse if a string matches given wildcard pattern
* @pattern: wildcard pattern
* @str: the string to be parsed
*
@@ -287,7 +309,7 @@ bool match_wildcard(const char *pattern, const char *str)
EXPORT_SYMBOL(match_wildcard);
/**
- * match_strlcpy: - Copy the characters from a substring_t to a sized buffer
+ * match_strlcpy - Copy the characters from a substring_t to a sized buffer
* @dest: where to copy to
* @src: &substring_t to copy
* @size: size of destination buffer
@@ -310,7 +332,7 @@ size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
EXPORT_SYMBOL(match_strlcpy);
/**
- * match_strdup: - allocate a new string with the contents of a substring_t
+ * match_strdup - allocate a new string with the contents of a substring_t
* @s: &substring_t to copy
*
* Description: Allocates and returns a string filled with the contents of
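One more note on match_uint() added above: it follows the same calling convention as match_int(), taking a substring_t that match_token() filled in, but parses with kstrtouint(). A hedged usage sketch (the token table and option name are hypothetical):

    /* Illustrative use of match_token() + match_uint(); not part of this patch. */
    #include <linux/errno.h>
    #include <linux/parser.h>

    enum { Opt_retries, Opt_err };

    static const match_table_t demo_tokens = {
            { Opt_retries,  "retries=%u" },
            { Opt_err,      NULL }
    };

    /* Parse a single "retries=<n>" style option into @retries. */
    static int parse_retries(char *opt, unsigned int *retries)
    {
            substring_t args[MAX_OPT_ARGS];

            if (match_token(opt, demo_tokens, args) != Opt_retries)
                    return -EINVAL;

            /* match_uint() uses kstrtouint(), so negative or overflowing values are rejected. */
            return match_uint(&args[0], retries);
    }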
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index e59eda07305e..a1071cdefb5a 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <linux/percpu-refcount.h>
/*
@@ -168,6 +169,7 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
struct percpu_ref_data, rcu);
struct percpu_ref *ref = data->ref;
unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ static atomic_t underflows;
unsigned long count = 0;
int cpu;
@@ -191,9 +193,13 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
*/
atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);
- WARN_ONCE(atomic_long_read(&data->count) <= 0,
- "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
- data->release, atomic_long_read(&data->count));
+ if (WARN_ONCE(atomic_long_read(&data->count) <= 0,
+ "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
+ data->release, atomic_long_read(&data->count)) &&
+ atomic_inc_return(&underflows) < 4) {
+ pr_err("%s(): percpu_ref underflow", __func__);
+ mem_dump_obj(data);
+ }
/* @ref is viewed as dead on all CPUs, send out switch confirmation */
percpu_ref_call_confirm_rcu(rcu);
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 890dcc2e984e..49f67a0c6e5d 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -31,6 +31,7 @@
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/memblock.h>
#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
@@ -141,14 +142,38 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
return stack;
}
-#define STACK_HASH_ORDER 20
-#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
+#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c
-static struct stack_record *stack_table[STACK_HASH_SIZE] = {
- [0 ... STACK_HASH_SIZE - 1] = NULL
-};
+static bool stack_depot_disable;
+static struct stack_record **stack_table;
+
+static int __init is_stack_depot_disabled(char *str)
+{
+ int ret;
+
+ ret = kstrtobool(str, &stack_depot_disable);
+ if (!ret && stack_depot_disable) {
+ pr_info("Stack Depot is disabled\n");
+ stack_table = NULL;
+ }
+ return 0;
+}
+early_param("stack_depot_disable", is_stack_depot_disabled);
+
+int __init stack_depot_init(void)
+{
+ if (!stack_depot_disable) {
+ size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
+ int i;
+
+ stack_table = memblock_alloc(size, size);
+ for (i = 0; i < STACK_HASH_SIZE; i++)
+ stack_table[i] = NULL;
+ }
+ return 0;
+}
/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
@@ -242,7 +267,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned long flags;
u32 hash;
- if (unlikely(nr_entries == 0))
+ if (unlikely(nr_entries == 0) || stack_depot_disable)
goto fast_exit;
hash = hash_stack(entries, nr_entries);
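For a sense of scale of the new CONFIG_STACK_HASH_ORDER knob: stack_depot_init() above allocates STACK_HASH_SIZE pointers, i.e. (1 << order) * sizeof(struct stack_record *) bytes. Assuming 64-bit pointers, the arithmetic works out as below (a standalone illustration, not code from the patch):

    /* Illustrative arithmetic only; assumes 8-byte pointers. */
    #include <stdio.h>

    int main(void)
    {
            for (int order = 12; order <= 20; order += 4) {
                    unsigned long buckets = 1UL << order;

                    printf("order %2d: %7lu buckets -> %4lu KiB of pointers\n",
                           order, buckets, buckets * 8 / 1024);
            }
            return 0;
    }
    /*
     * order 12:    4096 buckets ->   32 KiB of pointers
     * order 16:   65536 buckets ->  512 KiB of pointers
     * order 20: 1048576 buckets -> 8192 KiB of pointers
     */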
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 4425a1dd4ef1..0ea0e8258f14 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -16,8 +16,7 @@
#include "../tools/testing/selftests/kselftest_module.h"
-static unsigned total_tests __initdata;
-static unsigned failed_tests __initdata;
+KSTM_MODULE_GLOBALS();
static char pbl_buffer[PAGE_SIZE] __initdata;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index ca7d635bccd9..4dc4dcbecd12 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -345,7 +345,7 @@ static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
static int bpf_fill_maxinsns11(struct bpf_test *self)
{
- /* Hits 70 passes on x86_64, so cannot get JITed there. */
+ /* Hits 70 passes on x86_64 and triggers NOPs padding. */
return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
}
@@ -4295,13 +4295,13 @@ static struct bpf_test tests[] = {
{ { 0, 0xffffffff } },
.stack_depth = 40,
},
- /* BPF_STX | BPF_XADD | BPF_W/DW */
+ /* BPF_STX | BPF_ATOMIC | BPF_W/DW */
{
"STX_XADD_W: Test: 0x12 + 0x10 = 0x22",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
BPF_ST_MEM(BPF_W, R10, -40, 0x10),
- BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
BPF_LDX_MEM(BPF_W, R0, R10, -40),
BPF_EXIT_INSN(),
},
@@ -4316,7 +4316,7 @@ static struct bpf_test tests[] = {
BPF_ALU64_REG(BPF_MOV, R1, R10),
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
BPF_ST_MEM(BPF_W, R10, -40, 0x10),
- BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
BPF_ALU64_REG(BPF_MOV, R0, R10),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
@@ -4331,7 +4331,7 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
BPF_ST_MEM(BPF_W, R10, -40, 0x10),
- BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -4352,7 +4352,7 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
- BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
BPF_LDX_MEM(BPF_DW, R0, R10, -40),
BPF_EXIT_INSN(),
},
@@ -4367,7 +4367,7 @@ static struct bpf_test tests[] = {
BPF_ALU64_REG(BPF_MOV, R1, R10),
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
- BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
BPF_ALU64_REG(BPF_MOV, R0, R10),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
@@ -4382,7 +4382,7 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
- BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -5318,15 +5318,10 @@ static struct bpf_test tests[] = {
{
"BPF_MAXINSNS: Jump, gap, jump, ...",
{ },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
-#else
CLASSIC | FLAG_NO_DATA,
-#endif
{ },
{ { 0, 0xababcbac } },
.fill_helper = bpf_fill_maxinsns11,
- .expected_errcode = -ENOTSUPP,
},
{
"BPF_MAXINSNS: jump over MSH",
diff --git a/lib/test_fpu.c b/lib/test_fpu.c
index c33764aa3eb8..e82db19fed84 100644
--- a/lib/test_fpu.c
+++ b/lib/test_fpu.c
@@ -63,7 +63,7 @@ static int test_fpu_get(void *data, u64 *val)
return status;
}
-DEFINE_SIMPLE_ATTRIBUTE(test_fpu_fops, test_fpu_get, NULL, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(test_fpu_fops, test_fpu_get, NULL, "%lld\n");
static struct dentry *selftest_dir;
static int __init test_fpu_init(void)
@@ -72,8 +72,8 @@ static int __init test_fpu_init(void)
if (!selftest_dir)
return -ENOMEM;
- debugfs_create_file("test_fpu", 0444, selftest_dir, NULL,
- &test_fpu_fops);
+ debugfs_create_file_unsafe("test_fpu", 0444, selftest_dir, NULL,
+ &test_fpu_fops);
return 0;
}
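DEFINE_DEBUGFS_ATTRIBUTE() paired with debugfs_create_file_unsafe() is the debugfs-native form of the DEFINE_SIMPLE_ATTRIBUTE()/debugfs_create_file() combination: the attribute no longer needs the extra file-lifetime proxy debugfs would otherwise wrap around it. A minimal sketch of the same pattern with a hypothetical read/write attribute (names and values are illustrative):

#include <linux/debugfs.h>

static u64 demo_value;

static int demo_get(void *data, u64 *val)
{
	*val = demo_value;
	return 0;
}

static int demo_set(void *data, u64 val)
{
	demo_value = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");

static void __init demo_debugfs_init(struct dentry *parent)
{
	debugfs_create_file_unsafe("demo_value", 0644, parent, NULL,
				   &demo_fops);
}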
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 2947274cc2d3..e5647d147b35 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -13,6 +13,7 @@
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
+#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@@ -28,10 +29,9 @@
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
/*
- * We assign some test results to these globals to make sure the tests
- * are not eliminated as dead code.
+ * Some tests use these global variables to store return values from function
+ * calls that could otherwise be eliminated by the compiler as dead code.
*/
-
void *kasan_ptr_result;
int kasan_int_result;
@@ -39,40 +39,81 @@ static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;
+/*
+ * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
+ * first detected bug and panic the kernel if panic_on_warn is enabled. For
+ * hardware tag-based KASAN, also allow tag checking to be re-enabled for each
+ * test; see the comment for KUNIT_EXPECT_KASAN_FAIL().
+ */
static int kasan_test_init(struct kunit *test)
{
- /*
- * Temporarily enable multi-shot mode and set panic_on_warn=0.
- * Otherwise, we'd only get a report for the first case.
- */
- multishot = kasan_save_enable_multi_shot();
+ if (!kasan_enabled()) {
+ kunit_err(test, "can't run KASAN tests with KASAN disabled");
+ return -1;
+ }
+ multishot = kasan_save_enable_multi_shot();
+ kasan_set_tagging_report_once(false);
return 0;
}
static void kasan_test_exit(struct kunit *test)
{
+ kasan_set_tagging_report_once(true);
kasan_restore_multi_shot(multishot);
}
/**
- * KUNIT_EXPECT_KASAN_FAIL() - Causes a test failure when the expression does
- * not cause a KASAN error. This uses a KUnit resource named "kasan_data." Do
- * Do not use this name for a KUnit resource outside here.
+ * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
+ * KASAN report; causes a test failure otherwise. This relies on a KUnit
+ * resource named "kasan_data". Do not use this name for KUnit resources
+ * outside of KASAN tests.
+ *
+ * For hardware tag-based KASAN, when a tag fault happens, tag checking is
+ * normally auto-disabled. When this happens, this test handler reenables
+ * tag checking. As tag checking can only be enabled or disabled per CPU, this
+ * handler disables migration (preemption).
*
+ * Since the compiler doesn't see that the expression can change the fail_data
+ * fields, it can reorder or optimize away the accesses to those fields.
+ * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
+ * expression to prevent that.
*/
-#define KUNIT_EXPECT_KASAN_FAIL(test, condition) do { \
- fail_data.report_expected = true; \
- fail_data.report_found = false; \
- kunit_add_named_resource(test, \
- NULL, \
- NULL, \
- &resource, \
- "kasan_data", &fail_data); \
- condition; \
- KUNIT_EXPECT_EQ(test, \
- fail_data.report_expected, \
- fail_data.report_found); \
+#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
+ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \
+ migrate_disable(); \
+ WRITE_ONCE(fail_data.report_expected, true); \
+ WRITE_ONCE(fail_data.report_found, false); \
+ kunit_add_named_resource(test, \
+ NULL, \
+ NULL, \
+ &resource, \
+ "kasan_data", &fail_data); \
+ barrier(); \
+ expression; \
+ barrier(); \
+ KUNIT_EXPECT_EQ(test, \
+ READ_ONCE(fail_data.report_expected), \
+ READ_ONCE(fail_data.report_found)); \
+ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) { \
+ if (READ_ONCE(fail_data.report_found)) \
+ kasan_enable_tagging(); \
+ migrate_enable(); \
+ } \
+} while (0)
+
+#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
+ if (!IS_ENABLED(config)) { \
+ kunit_info((test), "skipping, " #config " required"); \
+ return; \
+ } \
+} while (0)
+
+#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
+ if (IS_ENABLED(config)) { \
+ kunit_info((test), "skipping, " #config " enabled"); \
+ return; \
+ } \
} while (0)
static void kmalloc_oob_right(struct kunit *test)
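The reworked KUNIT_EXPECT_KASAN_FAIL() brackets the faulting expression with compiler barriers, uses READ_ONCE()/WRITE_ONCE() on fail_data, and re-enables tag checking for hardware tag-based KASAN after each expected report. A sketch of the test shape it supports, mirroring the cases later in this file (the test name is illustrative):

static void demo_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 64;

	/* Skip unless the mode this test targets is compiled in. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* The volatile access keeps the bad load from being optimized away. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = *(volatile char *)(ptr + size));

	kfree(ptr);
}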
@@ -111,23 +152,24 @@ static void kmalloc_node_oob_right(struct kunit *test)
kfree(ptr);
}
+/*
+ * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
+ * fit into a slab cache and therefore is allocated via the page allocator
+ * fallback. Since this kind of fallback is only implemented for SLUB, these
+ * tests are limited to that allocator.
+ */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- if (!IS_ENABLED(CONFIG_SLUB)) {
- kunit_info(test, "CONFIG_SLUB is not enabled.");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
- /* Allocate a chunk that does not fit into a SLUB cache to trigger
- * the page allocator fallback.
- */
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
+
kfree(ptr);
}
@@ -136,15 +178,12 @@ static void kmalloc_pagealloc_uaf(struct kunit *test)
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- if (!IS_ENABLED(CONFIG_SLUB)) {
- kunit_info(test, "CONFIG_SLUB is not enabled.");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
kfree(ptr);
+
KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}
@@ -153,10 +192,7 @@ static void kmalloc_pagealloc_invalid_free(struct kunit *test)
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- if (!IS_ENABLED(CONFIG_SLUB)) {
- kunit_info(test, "CONFIG_SLUB is not enabled.");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -164,11 +200,49 @@ static void kmalloc_pagealloc_invalid_free(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}
+static void pagealloc_oob_right(struct kunit *test)
+{
+ char *ptr;
+ struct page *pages;
+ size_t order = 4;
+ size_t size = (1UL << (PAGE_SHIFT + order));
+
+ /*
+ * With generic KASAN, page allocations have no redzones, so
+ * out-of-bounds detection is not guaranteed.
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
+ */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ pages = alloc_pages(GFP_KERNEL, order);
+ ptr = page_address(pages);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
+ free_pages((unsigned long)ptr, order);
+}
+
+static void pagealloc_uaf(struct kunit *test)
+{
+ char *ptr;
+ struct page *pages;
+ size_t order = 4;
+
+ pages = alloc_pages(GFP_KERNEL, order);
+ ptr = page_address(pages);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ free_pages((unsigned long)ptr, order);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
+}
+
static void kmalloc_large_oob_right(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
- /* Allocate a chunk that is large enough, but still fits into a slab
+
+ /*
+ * Allocate a chunk that is large enough, but still fits into a slab
* and does not trigger the page allocator fallback in SLUB.
*/
ptr = kmalloc(size, GFP_KERNEL);
@@ -178,11 +252,14 @@ static void kmalloc_large_oob_right(struct kunit *test)
kfree(ptr);
}
-static void kmalloc_oob_krealloc_more(struct kunit *test)
+static void krealloc_more_oob_helper(struct kunit *test,
+ size_t size1, size_t size2)
{
char *ptr1, *ptr2;
- size_t size1 = 17;
- size_t size2 = 19;
+ size_t middle;
+
+ KUNIT_ASSERT_LT(test, size1, size2);
+ middle = size1 + (size2 - size1) / 2;
ptr1 = kmalloc(size1, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -190,15 +267,31 @@ static void kmalloc_oob_krealloc_more(struct kunit *test)
ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
+ /* All offsets up to size2 must be accessible. */
+ ptr2[size1 - 1] = 'x';
+ ptr2[size1] = 'x';
+ ptr2[middle] = 'x';
+ ptr2[size2 - 1] = 'x';
+
+ /* Generic mode is precise, so unaligned size2 must be inaccessible. */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
+
+ /* For all modes, the first aligned offset after size2 must be inaccessible. */
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
+
kfree(ptr2);
}
-static void kmalloc_oob_krealloc_less(struct kunit *test)
+static void krealloc_less_oob_helper(struct kunit *test,
+ size_t size1, size_t size2)
{
char *ptr1, *ptr2;
- size_t size1 = 17;
- size_t size2 = 15;
+ size_t middle;
+
+ KUNIT_ASSERT_LT(test, size2, size1);
+ middle = size2 + (size1 - size2) / 2;
ptr1 = kmalloc(size1, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -206,10 +299,79 @@ static void kmalloc_oob_krealloc_less(struct kunit *test)
ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
+ /* Must be accessible for all modes. */
+ ptr2[size2 - 1] = 'x';
+
+ /* Generic mode is precise, so unaligned size2 must be inaccessible. */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
+
+ /* For all modes, the first aligned offset after size2 must be inaccessible. */
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
+
+ /*
+ * For all modes, size2, middle, and size1 should land in separate
+ * granules, and thus the latter two offsets should be inaccessible.
+ */
+ KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
+ round_down(middle, KASAN_GRANULE_SIZE));
+ KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
+ round_down(size1, KASAN_GRANULE_SIZE));
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');
+
kfree(ptr2);
}
+static void krealloc_more_oob(struct kunit *test)
+{
+ krealloc_more_oob_helper(test, 201, 235);
+}
+
+static void krealloc_less_oob(struct kunit *test)
+{
+ krealloc_less_oob_helper(test, 235, 201);
+}
+
+static void krealloc_pagealloc_more_oob(struct kunit *test)
+{
+ /* page_alloc fallback is only implemented for SLUB. */
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
+
+ krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
+ KMALLOC_MAX_CACHE_SIZE + 235);
+}
+
+static void krealloc_pagealloc_less_oob(struct kunit *test)
+{
+ /* page_alloc fallback is only implemented for SLUB. */
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
+
+ krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
+ KMALLOC_MAX_CACHE_SIZE + 201);
+}
+
+/*
+ * Check that krealloc() detects a use-after-free, returns NULL,
+ * and doesn't unpoison the freed object.
+ */
+static void krealloc_uaf(struct kunit *test)
+{
+ char *ptr1, *ptr2;
+ int size1 = 201;
+ int size2 = 235;
+
+ ptr1 = kmalloc(size1, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+ kfree(ptr1);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
+ KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
+ KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
+}
+
static void kmalloc_oob_16(struct kunit *test)
{
struct {
@@ -217,10 +379,7 @@ static void kmalloc_oob_16(struct kunit *test)
} *ptr1, *ptr2;
/* This test is specifically crafted for the generic mode. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -355,7 +514,9 @@ static void kmalloc_uaf2(struct kunit *test)
{
char *ptr1, *ptr2;
size_t size = 43;
+ int counter = 0;
+again:
ptr1 = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -364,6 +525,15 @@ static void kmalloc_uaf2(struct kunit *test)
ptr2 = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+ /*
+ * For tag-based KASAN, the ptr1 and ptr2 tags might happen to be the same.
+ * Allow up to 16 attempts at generating different tags.
+ */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
+ kfree(ptr2);
+ goto again;
+ }
+
KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
@@ -402,10 +572,11 @@ static void kmem_cache_oob(struct kunit *test)
{
char *p;
size_t size = 200;
- struct kmem_cache *cache = kmem_cache_create("test_cache",
- size, 0,
- 0, NULL);
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
p = kmem_cache_alloc(cache, GFP_KERNEL);
if (!p) {
kunit_err(test, "Allocation failed: %s\n", __func__);
@@ -414,11 +585,12 @@ static void kmem_cache_oob(struct kunit *test)
}
KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
+
kmem_cache_free(cache, p);
kmem_cache_destroy(cache);
}
-static void memcg_accounted_kmem_cache(struct kunit *test)
+static void kmem_cache_accounted(struct kunit *test)
{
int i;
char *p;
@@ -445,6 +617,31 @@ free_cache:
kmem_cache_destroy(cache);
}
+static void kmem_cache_bulk(struct kunit *test)
+{
+ struct kmem_cache *cache;
+ size_t size = 200;
+ char *p[10];
+ bool ret;
+ int i;
+
+ cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+ ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
+ if (!ret) {
+ kunit_err(test, "Allocation failed: %s\n", __func__);
+ kmem_cache_destroy(cache);
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(p); i++)
+ p[i][0] = p[i][size - 1] = 42;
+
+ kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
+ kmem_cache_destroy(cache);
+}
+
static char global_array[10];
static void kasan_global_oob(struct kunit *test)
@@ -453,14 +650,12 @@ static void kasan_global_oob(struct kunit *test)
char *p = &global_array[ARRAY_SIZE(global_array) + i];
/* Only generic mode instruments globals. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
+/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
char *ptr;
@@ -469,11 +664,32 @@ static void ksize_unpoisons_memory(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
real_size = ksize(ptr);
- /* This access doesn't trigger an error. */
+
+ /* This access shouldn't trigger a KASAN report. */
ptr[size] = 'x';
- /* This one does. */
+
+ /* This one must. */
KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');
+
+ kfree(ptr);
+}
+
+/*
+ * Check that a use-after-free is detected by ksize() and via normal accesses
+ * after it.
+ */
+static void ksize_uaf(struct kunit *test)
+{
+ char *ptr;
+ int size = 128 - KASAN_GRANULE_SIZE;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
}
static void kasan_stack_oob(struct kunit *test)
@@ -482,10 +698,7 @@ static void kasan_stack_oob(struct kunit *test)
volatile int i = OOB_TAG_OFF;
char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
- if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
- kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
@@ -497,15 +710,8 @@ static void kasan_alloca_oob_left(struct kunit *test)
char *p = alloca_array - 1;
/* Only generic mode instruments dynamic allocas. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required");
- return;
- }
-
- if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
- kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
@@ -517,15 +723,8 @@ static void kasan_alloca_oob_right(struct kunit *test)
char *p = alloca_array + i;
/* Only generic mode instruments dynamic allocas. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required");
- return;
- }
-
- if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
- kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
@@ -568,7 +767,7 @@ static void kmem_cache_invalid_free(struct kunit *test)
return;
}
- /* Trigger invalid free, the object doesn't get freed */
+ /* Trigger invalid free, the object doesn't get freed. */
KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
/*
@@ -585,12 +784,11 @@ static void kasan_memchr(struct kunit *test)
char *ptr;
size_t size = 24;
- /* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
- kunit_info(test,
- "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
- return;
- }
+ /*
+ * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
+ */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
if (OOB_TAG_OFF)
size = round_up(size, OOB_TAG_OFF);
@@ -610,12 +808,11 @@ static void kasan_memcmp(struct kunit *test)
size_t size = 24;
int arr[9];
- /* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
- kunit_info(test,
- "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
- return;
- }
+ /*
+ * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
+ */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
if (OOB_TAG_OFF)
size = round_up(size, OOB_TAG_OFF);
@@ -634,12 +831,11 @@ static void kasan_strings(struct kunit *test)
char *ptr;
size_t size = 24;
- /* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
- kunit_info(test,
- "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
- return;
- }
+ /*
+ * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
+ */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -700,13 +896,10 @@ static void kasan_bitops_generic(struct kunit *test)
long *bits;
/* This test is specifically crafted for the generic mode. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
/*
- * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
+ * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
* this way we do not actually corrupt other memory.
*/
bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
@@ -731,19 +924,16 @@ static void kasan_bitops_tags(struct kunit *test)
{
long *bits;
- /* This test is specifically crafted for the tag-based mode. */
- if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n");
- return;
- }
+ /* This test is specifically crafted for tag-based modes. */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
- /* Allocation size will be rounded to up granule size, which is 16. */
- bits = kzalloc(sizeof(*bits), GFP_KERNEL);
+ /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
+ bits = kzalloc(48, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
- /* Do the accesses past the 16 allocated bytes. */
- kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
- kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);
+ /* Do the accesses past the 48 allocated bytes, but within the redzone. */
+ kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
+ kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
kfree(bits);
}
@@ -764,10 +954,7 @@ static void vmalloc_oob(struct kunit *test)
{
void *area;
- if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
- kunit_info(test, "CONFIG_KASAN_VMALLOC is not enabled.");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
/*
* We have to be careful not to hit the guard page.
@@ -780,6 +967,94 @@ static void vmalloc_oob(struct kunit *test)
vfree(area);
}
+/*
+ * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
+ * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
+ * modes.
+ */
+static void match_all_not_assigned(struct kunit *test)
+{
+ char *ptr;
+ struct page *pages;
+ int i, size, order;
+
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ for (i = 0; i < 256; i++) {
+ size = (get_random_int() % 1024) + 1;
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+ KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+ kfree(ptr);
+ }
+
+ for (i = 0; i < 256; i++) {
+ order = (get_random_int() % 4) + 1;
+ pages = alloc_pages(GFP_KERNEL, order);
+ ptr = page_address(pages);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+ KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+ free_pages((unsigned long)ptr, order);
+ }
+}
+
+/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
+static void match_all_ptr_tag(struct kunit *test)
+{
+ char *ptr;
+ u8 tag;
+
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ ptr = kmalloc(128, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ /* Backup the assigned tag. */
+ tag = get_tag(ptr);
+ KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);
+
+ /* Reset the tag to 0xff. */
+ ptr = set_tag(ptr, KASAN_TAG_KERNEL);
+
+ /* This access shouldn't trigger a KASAN report. */
+ *ptr = 0;
+
+ /* Recover the pointer tag and free. */
+ ptr = set_tag(ptr, tag);
+ kfree(ptr);
+}
+
+/* Check that there are no match-all memory tags for tag-based modes. */
+static void match_all_mem_tag(struct kunit *test)
+{
+ char *ptr;
+ int tag;
+
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ ptr = kmalloc(128, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+
+ /* For each possible tag value not matching the pointer tag. */
+ for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
+ if (tag == get_tag(ptr))
+ continue;
+
+ /* Mark the first memory granule with the chosen memory tag. */
+ kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag);
+
+ /* This access must cause a KASAN report. */
+ KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
+ }
+
+ /* Recover the memory tag and free. */
+ kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr));
+ kfree(ptr);
+}
+
static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_right),
KUNIT_CASE(kmalloc_oob_left),
@@ -787,9 +1062,14 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_pagealloc_oob_right),
KUNIT_CASE(kmalloc_pagealloc_uaf),
KUNIT_CASE(kmalloc_pagealloc_invalid_free),
+ KUNIT_CASE(pagealloc_oob_right),
+ KUNIT_CASE(pagealloc_uaf),
KUNIT_CASE(kmalloc_large_oob_right),
- KUNIT_CASE(kmalloc_oob_krealloc_more),
- KUNIT_CASE(kmalloc_oob_krealloc_less),
+ KUNIT_CASE(krealloc_more_oob),
+ KUNIT_CASE(krealloc_less_oob),
+ KUNIT_CASE(krealloc_pagealloc_more_oob),
+ KUNIT_CASE(krealloc_pagealloc_less_oob),
+ KUNIT_CASE(krealloc_uaf),
KUNIT_CASE(kmalloc_oob_16),
KUNIT_CASE(kmalloc_uaf_16),
KUNIT_CASE(kmalloc_oob_in_memset),
@@ -804,12 +1084,14 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kfree_via_page),
KUNIT_CASE(kfree_via_phys),
KUNIT_CASE(kmem_cache_oob),
- KUNIT_CASE(memcg_accounted_kmem_cache),
+ KUNIT_CASE(kmem_cache_accounted),
+ KUNIT_CASE(kmem_cache_bulk),
KUNIT_CASE(kasan_global_oob),
KUNIT_CASE(kasan_stack_oob),
KUNIT_CASE(kasan_alloca_oob_left),
KUNIT_CASE(kasan_alloca_oob_right),
KUNIT_CASE(ksize_unpoisons_memory),
+ KUNIT_CASE(ksize_uaf),
KUNIT_CASE(kmem_cache_double_free),
KUNIT_CASE(kmem_cache_invalid_free),
KUNIT_CASE(kasan_memchr),
@@ -819,6 +1101,9 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kasan_bitops_tags),
KUNIT_CASE(kmalloc_double_kzfree),
KUNIT_CASE(vmalloc_oob),
+ KUNIT_CASE(match_all_not_assigned),
+ KUNIT_CASE(match_all_ptr_tag),
+ KUNIT_CASE(match_all_mem_tag),
{}
};
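New cases only run once they appear in kasan_kunit_test_cases[] above; that array is then referenced from the file's kunit_suite, which also wires in the kasan_test_init()/kasan_test_exit() hooks shown earlier in this diff. A sketch of that binding (the real suite definition sits below this hunk and is unchanged; treat the field values here as assumptions):

static struct kunit_suite kasan_kunit_test_suite_sketch = {
	.name = "kasan",
	.init = kasan_test_init,
	.exit = kasan_test_exit,
	.test_cases = kasan_kunit_test_cases,
};

kunit_test_suite(kasan_kunit_test_suite_sketch);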
diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c
index 3b4cc77992d2..eee017ff8980 100644
--- a/lib/test_kasan_module.c
+++ b/lib/test_kasan_module.c
@@ -123,8 +123,9 @@ static noinline void __init kasan_workqueue_uaf(void)
static int __init test_kasan_module_init(void)
{
/*
- * Temporarily enable multi-shot mode. Otherwise, we'd only get a
- * report for the first case.
+ * Temporarily enable multi-shot mode. Otherwise, KASAN would only
+ * report the first detected bug and panic the kernel if panic_on_warn
+ * is enabled.
*/
bool multishot = kasan_save_enable_multi_shot();
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 7ac87f18a10f..95a2f82427c7 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -30,11 +30,13 @@
#define PAD_SIZE 16
#define FILL_CHAR '$'
-static unsigned total_tests __initdata;
-static unsigned failed_tests __initdata;
+KSTM_MODULE_GLOBALS();
+
static char *test_buffer __initdata;
static char *alloced_buffer __initdata;
+extern bool no_hash_pointers;
+
static int __printf(4, 0) __init
do_test(int bufsize, const char *expect, int elen,
const char *fmt, va_list ap)
@@ -301,6 +303,12 @@ plain(void)
{
int err;
+ if (no_hash_pointers) {
+ pr_warn("skipping plain 'p' tests");
+ skipped_tests += 2;
+ return;
+ }
+
err = plain_hash();
if (err) {
pr_warn("plain 'p' does not appear to be hashed\n");
@@ -644,9 +652,7 @@ static void __init fwnode_pointer(void)
test(second_name, "%pfwP", software_node_fwnode(&softnodes[1]));
test(third_name, "%pfwP", software_node_fwnode(&softnodes[2]));
- software_node_unregister(&softnodes[2]);
- software_node_unregister(&softnodes[1]);
- software_node_unregister(&softnodes[0]);
+ software_node_unregister_nodes(softnodes);
}
static void __init
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 5e5d9355ef49..7e7bbd0f3fd2 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -11,51 +11,6 @@ typedef void(*test_ubsan_fp)(void);
#config, IS_ENABLED(config) ? "y" : "n"); \
} while (0)
-static void test_ubsan_add_overflow(void)
-{
- volatile int val = INT_MAX;
- volatile unsigned int uval = UINT_MAX;
-
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_OVERFLOW);
- val += 2;
-
- UBSAN_TEST(CONFIG_UBSAN_UNSIGNED_OVERFLOW);
- uval += 2;
-}
-
-static void test_ubsan_sub_overflow(void)
-{
- volatile int val = INT_MIN;
- volatile unsigned int uval = 0;
- volatile int val2 = 2;
-
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_OVERFLOW);
- val -= val2;
-
- UBSAN_TEST(CONFIG_UBSAN_UNSIGNED_OVERFLOW);
- uval -= val2;
-}
-
-static void test_ubsan_mul_overflow(void)
-{
- volatile int val = INT_MAX / 2;
- volatile unsigned int uval = UINT_MAX / 2;
-
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_OVERFLOW);
- val *= 3;
-
- UBSAN_TEST(CONFIG_UBSAN_UNSIGNED_OVERFLOW);
- uval *= 3;
-}
-
-static void test_ubsan_negate_overflow(void)
-{
- volatile int val = INT_MIN;
-
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_OVERFLOW);
- val = -val;
-}
-
static void test_ubsan_divrem_overflow(void)
{
volatile int val = 16;
@@ -155,10 +110,6 @@ static void test_ubsan_object_size_mismatch(void)
}
static const test_ubsan_fp test_ubsan_array[] = {
- test_ubsan_add_overflow,
- test_ubsan_sub_overflow,
- test_ubsan_mul_overflow,
- test_ubsan_negate_overflow,
test_ubsan_shift_out_of_bounds,
test_ubsan_out_of_bounds,
test_ubsan_load_invalid_value,
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index c52710964593..cdb9c7658478 100644
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -14,6 +14,14 @@
#include <linux/rbtree.h>
#include <linux/export.h>
+#define __node_2_tq(_n) \
+ rb_entry((_n), struct timerqueue_node, node)
+
+static inline bool __timerqueue_less(struct rb_node *a, const struct rb_node *b)
+{
+ return __node_2_tq(a)->expires < __node_2_tq(b)->expires;
+}
+
/**
* timerqueue_add - Adds timer to timerqueue.
*
@@ -26,28 +34,10 @@
*/
bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
{
- struct rb_node **p = &head->rb_root.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct timerqueue_node *ptr;
- bool leftmost = true;
-
/* Make sure we don't add nodes that are already added */
WARN_ON_ONCE(!RB_EMPTY_NODE(&node->node));
- while (*p) {
- parent = *p;
- ptr = rb_entry(parent, struct timerqueue_node, node);
- if (node->expires < ptr->expires) {
- p = &(*p)->rb_left;
- } else {
- p = &(*p)->rb_right;
- leftmost = false;
- }
- }
- rb_link_node(&node->node, parent, p);
- rb_insert_color_cached(&node->node, &head->rb_root, leftmost);
-
- return leftmost;
+ return rb_add_cached(&node->node, &head->rb_root, __timerqueue_less);
}
EXPORT_SYMBOL_GPL(timerqueue_add);
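timerqueue_add() now delegates the ordered insert to rb_add_cached(), passing a less() callback that compares expiry times; the open-coded walk, link, and leftmost bookkeeping move into the generic helper. The same pattern applies to any cached rbtree keyed on a comparable field. A minimal sketch with a hypothetical deadline-ordered structure (names are illustrative):

#include <linux/rbtree.h>

struct demo_item {
	struct rb_node node;
	u64 deadline;
};

#define __node_2_demo(n) rb_entry((n), struct demo_item, node)

static inline bool demo_less(struct rb_node *a, const struct rb_node *b)
{
	return __node_2_demo(a)->deadline < __node_2_demo(b)->deadline;
}

static void demo_enqueue(struct rb_root_cached *root, struct demo_item *item)
{
	rb_add_cached(&item->node, root, demo_less);
}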
diff --git a/lib/ubsan.c b/lib/ubsan.c
index bec38c64d6a6..26229973049d 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -163,74 +163,6 @@ static void ubsan_epilogue(void)
}
}
-static void handle_overflow(struct overflow_data *data, void *lhs,
- void *rhs, char op)
-{
-
- struct type_descriptor *type = data->type;
- char lhs_val_str[VALUE_LENGTH];
- char rhs_val_str[VALUE_LENGTH];
-
- if (suppress_report(&data->location))
- return;
-
- ubsan_prologue(&data->location, type_is_signed(type) ?
- "signed-integer-overflow" :
- "unsigned-integer-overflow");
-
- val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
- val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
- pr_err("%s %c %s cannot be represented in type %s\n",
- lhs_val_str,
- op,
- rhs_val_str,
- type->type_name);
-
- ubsan_epilogue();
-}
-
-void __ubsan_handle_add_overflow(void *data,
- void *lhs, void *rhs)
-{
-
- handle_overflow(data, lhs, rhs, '+');
-}
-EXPORT_SYMBOL(__ubsan_handle_add_overflow);
-
-void __ubsan_handle_sub_overflow(void *data,
- void *lhs, void *rhs)
-{
- handle_overflow(data, lhs, rhs, '-');
-}
-EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
-
-void __ubsan_handle_mul_overflow(void *data,
- void *lhs, void *rhs)
-{
- handle_overflow(data, lhs, rhs, '*');
-}
-EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
-
-void __ubsan_handle_negate_overflow(void *_data, void *old_val)
-{
- struct overflow_data *data = _data;
- char old_val_str[VALUE_LENGTH];
-
- if (suppress_report(&data->location))
- return;
-
- ubsan_prologue(&data->location, "negation-overflow");
-
- val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
-
- pr_err("negation of %s cannot be represented in type %s:\n",
- old_val_str, data->type->type_name);
-
- ubsan_epilogue();
-}
-EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
-
-
void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
{
struct overflow_data *data = _data;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3b53c73580c5..41ddc353ebb8 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2090,6 +2090,32 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
return widen_string(buf, buf - buf_start, end, spec);
}
+/* Disable pointer hashing if requested */
+bool no_hash_pointers __ro_after_init;
+EXPORT_SYMBOL_GPL(no_hash_pointers);
+
+static int __init no_hash_pointers_enable(char *str)
+{
+ no_hash_pointers = true;
+
+ pr_warn("**********************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** This system shows unhashed kernel memory addresses **\n");
+ pr_warn("** via the console, logs, and other interfaces. This **\n");
+ pr_warn("** might reduce the security of your system. **\n");
+ pr_warn("** **\n");
+ pr_warn("** If you see this message and you are not debugging **\n");
+ pr_warn("** the kernel, report this immediately to your system **\n");
+ pr_warn("** administrator! **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("**********************************************************\n");
+
+ return 0;
+}
+early_param("no_hash_pointers", no_hash_pointers_enable);
+
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
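From a caller's perspective the new flag only changes what plain "%p" prints: hashed identifiers by default, raw addresses once "no_hash_pointers" is given on the command line (after the loud warning above); "%px" stays raw either way. A short sketch (illustrative only; the extern matches the declaration added to test_printf.c earlier in this diff):

extern bool no_hash_pointers;

static void demo_show_pointer(const void *obj)
{
	if (no_hash_pointers)
		pr_warn("pointer hashing disabled on this boot\n");

	pr_info("obj at %p\n", obj);	/* hashed unless the flag is set */
	pr_info("obj at %px\n", obj);	/* always the raw address */
}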
@@ -2297,8 +2323,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
}
}
- /* default is to _not_ leak addresses, hash before printing */
- return ptr_to_id(buf, end, ptr, spec);
+ /*
+ * default is to _not_ leak addresses, so hash before printing,
+ * unless no_hash_pointers is specified on the command line.
+ */
+ if (unlikely(no_hash_pointers))
+ return pointer_string(buf, end, ptr, spec);
+ else
+ return ptr_to_id(buf, end, ptr, spec);
}
/*