Diffstat (limited to 'lib')
47 files changed, 390 insertions, 372 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 9bbf8a4b2108..48e021846472 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -24,6 +24,7 @@ config LINEAR_RANGES config PACKING bool "Generic bitfield packing and unpacking" + select BITREVERSE default n help This option provides the packing() helper function, which permits @@ -529,8 +530,8 @@ config CPUMASK_OFFSTACK stack overflow. config FORCE_NR_CPUS - bool "NR_CPUS is set to an actual number of CPUs" - depends on SMP + bool "Set number of CPUs at compile time" + depends on SMP && EXPERT && !COMPILE_TEST help Say Yes if you have NR_CPUS set to an actual number of possible CPUs in your system, not to a default value. This forces the core @@ -672,6 +673,9 @@ config ARCH_HAS_PMEM_API config MEMREGION bool +config ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION + bool + config ARCH_HAS_MEMREMAP_COMPAT_ALIGN bool diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a0dc28fdc567..1b2bdc02abf4 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1717,6 +1717,16 @@ config LATENCYTOP Enable this option if you want to use the LatencyTOP tool to find out which userspace is blocking on what kernel operations. +config DEBUG_CGROUP_REF + bool "Disable inlining of cgroup css reference count functions" + depends on DEBUG_KERNEL + depends on CGROUPS + depends on KPROBES + default n + help + Force cgroup css reference count functions to not be inlined so + that they can be kprobed for debugging. + source "kernel/trace/Kconfig" config PROVIDE_OHCI1394_DMA_INIT @@ -1875,8 +1885,14 @@ config NETDEV_NOTIFIER_ERROR_INJECT If unsure, say N. config FUNCTION_ERROR_INJECTION - def_bool y + bool "Fault-injections of functions" depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES + help + Add fault injections into various functions that are annotated with + ALLOW_ERROR_INJECTION() in the kernel. BPF may also modify the return + value of theses functions. This is useful to test error paths of code. + + If unsure, say N config FAULT_INJECTION bool "Fault-injection framework" @@ -2808,6 +2824,22 @@ config RUST_OVERFLOW_CHECKS If unsure, say Y. +config RUST_BUILD_ASSERT_ALLOW + bool "Allow unoptimized build-time assertions" + depends on RUST + help + Controls how are `build_error!` and `build_assert!` handled during build. + + If calls to them exist in the binary, it may indicate a violated invariant + or that the optimizer failed to verify the invariant during compilation. + + This should not happen, thus by default the build is aborted. However, + as an escape hatch, you can choose Y here to ignore them during build + and let the check be carried at runtime (with `panic!` being called if + the check fails). + + If unsure, say N. + endmenu # "Rust" source "Documentation/Kconfig" diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index ba5b27962c34..be6ee6020290 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -37,7 +37,7 @@ menuconfig KASAN (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \ CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \ HAVE_ARCH_KASAN_HW_TAGS - depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) + depends on (SLUB && SYSFS && !SLUB_TINY) || (SLAB && !DEBUG_SLAB) select STACKDEPOT_ALWAYS_INIT help Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 47a693c45864..375575a5a0e3 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -125,7 +125,7 @@ config KCSAN_SKIP_WATCH default 4000 help The number of per-CPU memory operations to skip, before another - watchpoint is set up, i.e. 
one in KCSAN_WATCH_SKIP per-CPU + watchpoint is set up, i.e. one in KCSAN_SKIP_WATCH per-CPU memory operations are used to set up a watchpoint. A smaller value results in more aggressive race detection, whereas a larger value improves system performance at the cost of missing some races. @@ -135,8 +135,8 @@ config KCSAN_SKIP_WATCH_RANDOMIZE default y help If instruction skip count should be randomized, where the maximum is - KCSAN_WATCH_SKIP. If false, the chosen value is always - KCSAN_WATCH_SKIP. + KCSAN_SKIP_WATCH. If false, the chosen value is always + KCSAN_SKIP_WATCH. config KCSAN_INTERRUPT_WATCHER bool "Interruptible watchers" if !KCSAN_STRICT diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 337d797a7141..df86e649d8be 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -437,6 +437,7 @@ static int object_cpu_offline(unsigned int cpu) struct debug_percpu_free *percpu_pool; struct hlist_node *tmp; struct debug_obj *obj; + unsigned long flags; /* Remote access is safe as the CPU is dead already */ percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu); @@ -444,6 +445,12 @@ static int object_cpu_offline(unsigned int cpu) hlist_del(&obj->node); kmem_cache_free(obj_cache, obj); } + + raw_spin_lock_irqsave(&pool_lock, flags); + obj_pool_used -= percpu_pool->obj_free; + debug_objects_freed += percpu_pool->obj_free; + raw_spin_unlock_irqrestore(&pool_lock, flags); + percpu_pool->obj_free = 0; return 0; @@ -500,9 +507,9 @@ static void debug_print_object(struct debug_obj *obj, char *msg) descr->debug_hint(obj->object) : NULL; limit++; WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) " - "object type: %s hint: %pS\n", + "object: %p object type: %s hint: %pS\n", msg, obj_states[obj->state], obj->astate, - descr->name, hint); + obj->object, descr->name, hint); } debug_objects_warnings++; } @@ -1318,6 +1325,8 @@ static int __init debug_objects_replace_static_objects(void) hlist_add_head(&obj->node, &objects); } + debug_objects_allocated += i; + /* * debug_objects_mem_init() is now called early that only one CPU is up * and interrupts have been disabled, so it is safe to replace the @@ -1386,6 +1395,7 @@ void __init debug_objects_mem_init(void) debug_objects_enabled = 0; kmem_cache_destroy(obj_cache); pr_warn("out of memory.\n"); + return; } else debug_objects_selftest(); diff --git a/lib/fault-inject.c b/lib/fault-inject.c index adb2f9355ee6..1421818c9ef7 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -136,7 +136,7 @@ bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags) return false; } - if (attr->probability <= prandom_u32_max(100)) + if (attr->probability <= get_random_u32_below(100)) return false; if (!fail_stacktrace(attr)) diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c index 7c3c011abd29..d3fb09e6eff1 100644 --- a/lib/find_bit_benchmark.c +++ b/lib/find_bit_benchmark.c @@ -174,8 +174,8 @@ static int __init find_bit_test(void) bitmap_zero(bitmap2, BITMAP_LEN); while (nbits--) { - __set_bit(prandom_u32_max(BITMAP_LEN), bitmap); - __set_bit(prandom_u32_max(BITMAP_LEN), bitmap2); + __set_bit(get_random_u32_below(BITMAP_LEN), bitmap); + __set_bit(get_random_u32_below(BITMAP_LEN), bitmap2); } test_find_next_bit(bitmap, BITMAP_LEN); diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c index 5f4b07b56cd9..973866438608 100644 --- a/lib/fonts/fonts.c +++ b/lib/fonts/fonts.c @@ -135,8 +135,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w, if (res > 20) c += 20 - res; - if ((font_w & (1 << (f->width - 1))) && - (font_h 
& (1 << (f->height - 1)))) + if ((font_w & (1U << (f->width - 1))) && + (font_h & (1U << (f->height - 1)))) c += 1000; if (c > cc) { diff --git a/lib/iov_iter.c b/lib/iov_iter.c index c3ca28ca68a6..f9a3ff37ecd1 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -520,6 +520,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { + if (WARN_ON_ONCE(i->data_source)) + return 0; if (unlikely(iov_iter_is_pipe(i))) return copy_pipe_to_iter(addr, bytes, i); if (user_backed_iter(i)) @@ -606,6 +608,8 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, */ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { + if (WARN_ON_ONCE(i->data_source)) + return 0; if (unlikely(iov_iter_is_pipe(i))) return copy_mc_pipe_to_iter(addr, bytes, i); if (user_backed_iter(i)) @@ -622,10 +626,9 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter); size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(iov_iter_is_pipe(i))) { - WARN_ON(1); + if (WARN_ON_ONCE(!i->data_source)) return 0; - } + if (user_backed_iter(i)) might_fault(); iterate_and_advance(i, bytes, base, len, off, @@ -639,10 +642,9 @@ EXPORT_SYMBOL(_copy_from_iter); size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(iov_iter_is_pipe(i))) { - WARN_ON(1); + if (WARN_ON_ONCE(!i->data_source)) return 0; - } + iterate_and_advance(i, bytes, base, len, off, __copy_from_user_inatomic_nocache(addr + off, base, len), memcpy(addr + off, base, len) @@ -671,10 +673,9 @@ EXPORT_SYMBOL(_copy_from_iter_nocache); */ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(iov_iter_is_pipe(i))) { - WARN_ON(1); + if (WARN_ON_ONCE(!i->data_source)) return 0; - } + iterate_and_advance(i, bytes, base, len, off, __copy_from_user_flushcache(addr + off, base, len), memcpy_flushcache(addr + off, base, len) @@ -703,17 +704,18 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) head = compound_head(page); v += (page - head) << PAGE_SHIFT; - if (likely(n <= v && v <= (page_size(head)))) - return true; - WARN_ON(1); - return false; + if (WARN_ON(n > v || v > page_size(head))) + return false; + return true; } size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { size_t res = 0; - if (unlikely(!page_copy_sane(page, offset, bytes))) + if (!page_copy_sane(page, offset, bytes)) + return 0; + if (WARN_ON_ONCE(i->data_source)) return 0; if (unlikely(iov_iter_is_pipe(i))) return copy_page_to_iter_pipe(page, offset, bytes, i); @@ -808,13 +810,12 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt struct iov_iter *i) { char *kaddr = kmap_atomic(page), *p = kaddr + offset; - if (unlikely(!page_copy_sane(page, offset, bytes))) { + if (!page_copy_sane(page, offset, bytes)) { kunmap_atomic(kaddr); return 0; } - if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { + if (WARN_ON_ONCE(!i->data_source)) { kunmap_atomic(kaddr); - WARN_ON(1); return 0; } iterate_and_advance(i, bytes, base, len, off, @@ -1430,7 +1431,8 @@ static struct page *first_bvec_segment(const struct iov_iter *i, static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, - unsigned int maxpages, size_t *start) + unsigned int maxpages, size_t *start, + unsigned int gup_flags) { unsigned int n; @@ -1442,7 +1444,6 @@ static ssize_t 
__iov_iter_get_pages_alloc(struct iov_iter *i, maxsize = MAX_RW_COUNT; if (likely(user_backed_iter(i))) { - unsigned int gup_flags = 0; unsigned long addr; int res; @@ -1492,33 +1493,49 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, return -EFAULT; } -ssize_t iov_iter_get_pages2(struct iov_iter *i, +ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, - size_t *start) + size_t *start, unsigned gup_flags) { if (!maxpages) return 0; BUG_ON(!pages); - return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start); + return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, + start, gup_flags); +} +EXPORT_SYMBOL_GPL(iov_iter_get_pages); + +ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages, + size_t maxsize, unsigned maxpages, size_t *start) +{ + return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0); } EXPORT_SYMBOL(iov_iter_get_pages2); -ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, +ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, - size_t *start) + size_t *start, unsigned gup_flags) { ssize_t len; *pages = NULL; - len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start); + len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start, + gup_flags); if (len <= 0) { kvfree(*pages); *pages = NULL; } return len; } +EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc); + +ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, + struct page ***pages, size_t maxsize, size_t *start) +{ + return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0); +} EXPORT_SYMBOL(iov_iter_get_pages_alloc2); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, @@ -1526,10 +1543,9 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, { __wsum sum, next; sum = *csum; - if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { - WARN_ON(1); + if (WARN_ON_ONCE(!i->data_source)) return 0; - } + iterate_and_advance(i, bytes, base, len, off, ({ next = csum_and_copy_from_user(base, addr + off, len); sum = csum_block_add(sum, next, off); @@ -1549,9 +1565,15 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, struct csum_state *csstate = _csstate; __wsum sum, next; - if (unlikely(iov_iter_is_discard(i))) { - WARN_ON(1); /* for now */ + if (WARN_ON_ONCE(i->data_source)) return 0; + if (unlikely(iov_iter_is_discard(i))) { + // can't use csum_memcpy() for that one - data is not copied + csstate->csum = csum_block_add(csstate->csum, + csum_partial(addr, bytes, 0), + csstate->off); + csstate->off += bytes; + return bytes; } sum = csum_shift(csstate->csum, csstate->off); diff --git a/lib/is_signed_type_kunit.c b/lib/is_signed_type_kunit.c index 207207522925..0a7f6ae62839 100644 --- a/lib/is_signed_type_kunit.c +++ b/lib/is_signed_type_kunit.c @@ -21,11 +21,7 @@ static void is_signed_type_test(struct kunit *test) KUNIT_EXPECT_EQ(test, is_signed_type(bool), false); KUNIT_EXPECT_EQ(test, is_signed_type(signed char), true); KUNIT_EXPECT_EQ(test, is_signed_type(unsigned char), false); -#ifdef __CHAR_UNSIGNED__ KUNIT_EXPECT_EQ(test, is_signed_type(char), false); -#else - KUNIT_EXPECT_EQ(test, is_signed_type(char), true); -#endif KUNIT_EXPECT_EQ(test, is_signed_type(int), true); KUNIT_EXPECT_EQ(test, is_signed_type(unsigned int), false); KUNIT_EXPECT_EQ(test, is_signed_type(long), true); diff --git a/lib/kobject.c b/lib/kobject.c index a0b2dbfcfa23..af1f5f2954d4 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ 
-694,7 +694,7 @@ static void kobject_release(struct kref *kref) { struct kobject *kobj = container_of(kref, struct kobject, kref); #ifdef CONFIG_DEBUG_KOBJECT_RELEASE - unsigned long delay = HZ + HZ * prandom_u32_max(4); + unsigned long delay = HZ + HZ * get_random_u32_below(4); pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n", kobject_name(kobj), kobj, __func__, kobj->parent, delay); INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup); diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c index d00d6d181ee8..f5b50babe38d 100644 --- a/lib/kunit/assert.c +++ b/lib/kunit/assert.c @@ -127,13 +127,15 @@ void kunit_binary_assert_format(const struct kunit_assert *assert, binary_assert->text->right_text); if (!is_literal(stream->test, binary_assert->text->left_text, binary_assert->left_value, stream->gfp)) - string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld\n", + string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)\n", binary_assert->text->left_text, + binary_assert->left_value, binary_assert->left_value); if (!is_literal(stream->test, binary_assert->text->right_text, binary_assert->right_value, stream->gfp)) - string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld", + string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)", binary_assert->text->right_text, + binary_assert->right_value, binary_assert->right_value); kunit_assert_print_msg(message, stream); } @@ -204,3 +206,59 @@ void kunit_binary_str_assert_format(const struct kunit_assert *assert, kunit_assert_print_msg(message, stream); } EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format); + +/* Adds a hexdump of a buffer to a string_stream comparing it with + * a second buffer. The different bytes are marked with <>. + */ +static void kunit_assert_hexdump(struct string_stream *stream, + const void *buf, + const void *compared_buf, + const size_t len) +{ + size_t i; + const u8 *buf1 = buf; + const u8 *buf2 = compared_buf; + + string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT); + + for (i = 0; i < len; ++i) { + if (!(i % 16) && i) + string_stream_add(stream, "\n" KUNIT_SUBSUBTEST_INDENT); + + if (buf1[i] != buf2[i]) + string_stream_add(stream, "<%02x>", buf1[i]); + else + string_stream_add(stream, " %02x ", buf1[i]); + } +} + +void kunit_mem_assert_format(const struct kunit_assert *assert, + const struct va_format *message, + struct string_stream *stream) +{ + struct kunit_mem_assert *mem_assert; + + mem_assert = container_of(assert, struct kunit_mem_assert, + assert); + + string_stream_add(stream, + KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n", + mem_assert->text->left_text, + mem_assert->text->operation, + mem_assert->text->right_text); + + string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n", + mem_assert->text->left_text); + kunit_assert_hexdump(stream, mem_assert->left_value, + mem_assert->right_value, mem_assert->size); + + string_stream_add(stream, "\n"); + + string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n", + mem_assert->text->right_text); + kunit_assert_hexdump(stream, mem_assert->right_value, + mem_assert->left_value, mem_assert->size); + + kunit_assert_print_msg(message, stream); +} +EXPORT_SYMBOL_GPL(kunit_mem_assert_format); diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c index 1048ef1b8d6e..de0ee2e03ed6 100644 --- a/lib/kunit/debugfs.c +++ b/lib/kunit/debugfs.c @@ -63,7 +63,7 @@ static int debugfs_print_results(struct seq_file *seq, void *v) kunit_suite_for_each_test_case(suite, test_case) debugfs_print_result(seq, suite, test_case); - 
seq_printf(seq, "%s %d - %s\n", + seq_printf(seq, "%s %d %s\n", kunit_status_to_ok_not_ok(success), 1, suite->name); return 0; } diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c index 9bbc422c284b..74982b83707c 100644 --- a/lib/kunit/executor.c +++ b/lib/kunit/executor.c @@ -166,7 +166,7 @@ static void kunit_exec_run_tests(struct suite_set *suite_set) { size_t num_suites = suite_set->end - suite_set->start; - pr_info("TAP version 14\n"); + pr_info("KTAP version 1\n"); pr_info("1..%zu\n", num_suites); __kunit_test_suites_init(suite_set->start, num_suites); @@ -177,8 +177,8 @@ static void kunit_exec_list_tests(struct suite_set *suite_set) struct kunit_suite * const *suites; struct kunit_case *test_case; - /* Hack: print a tap header so kunit.py can find the start of KUnit output. */ - pr_info("TAP version 14\n"); + /* Hack: print a ktap header so kunit.py can find the start of KUnit output. */ + pr_info("KTAP version 1\n"); for (suites = suite_set->start; suites < suite_set->end; suites++) kunit_suite_for_each_test_case((*suites), test_case) { diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c index f8fe582c9e36..66cc4e2365ec 100644 --- a/lib/kunit/kunit-example-test.c +++ b/lib/kunit/kunit-example-test.c @@ -86,6 +86,9 @@ static void example_mark_skipped_test(struct kunit *test) */ static void example_all_expect_macros_test(struct kunit *test) { + const u32 array1[] = { 0x0F, 0xFF }; + const u32 array2[] = { 0x1F, 0xFF }; + /* Boolean assertions */ KUNIT_EXPECT_TRUE(test, true); KUNIT_EXPECT_FALSE(test, false); @@ -109,6 +112,10 @@ static void example_all_expect_macros_test(struct kunit *test) KUNIT_EXPECT_STREQ(test, "hi", "hi"); KUNIT_EXPECT_STRNEQ(test, "hi", "bye"); + /* Memory block assertions */ + KUNIT_EXPECT_MEMEQ(test, array1, array1, sizeof(array1)); + KUNIT_EXPECT_MEMNEQ(test, array1, array2, sizeof(array1)); + /* * There are also ASSERT variants of all of the above that abort test * execution if they fail. Useful for memory allocations, etc. diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c index a608746020a9..f5f51166d8c2 100644 --- a/lib/kunit/string-stream.c +++ b/lib/kunit/string-stream.c @@ -131,11 +131,6 @@ bool string_stream_is_empty(struct string_stream *stream) return list_empty(&stream->fragments); } -struct string_stream_alloc_context { - struct kunit *test; - gfp_t gfp; -}; - struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp) { struct string_stream *stream; diff --git a/lib/kunit/test.c b/lib/kunit/test.c index 2a6992fe7c3e..c9ebf975e56b 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -20,6 +20,8 @@ #include "string-stream.h" #include "try-catch-impl.h" +DEFINE_STATIC_KEY_FALSE(kunit_running); + #if IS_BUILTIN(CONFIG_KUNIT) /* * Fail the current test and print an error message to the log. @@ -149,6 +151,7 @@ EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases); static void kunit_print_suite_start(struct kunit_suite *suite) { + kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "KTAP version 1\n"); kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s", suite->name); kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd", @@ -175,13 +178,13 @@ static void kunit_print_ok_not_ok(void *test_or_suite, * representation. */ if (suite) - pr_info("%s %zd - %s%s%s\n", + pr_info("%s %zd %s%s%s\n", kunit_status_to_ok_not_ok(status), test_number, description, directive_header, (status == KUNIT_SKIPPED) ? 
directive : ""); else kunit_log(KERN_INFO, test, - KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s", + KUNIT_SUBTEST_INDENT "%s %zd %s%s%s", kunit_status_to_ok_not_ok(status), test_number, description, directive_header, (status == KUNIT_SKIPPED) ? directive : ""); @@ -543,6 +546,8 @@ int kunit_run_tests(struct kunit_suite *suite) param_desc[0] = '\0'; test.param_value = test_case->generate_params(NULL, param_desc); kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT + "KTAP version 1\n"); + kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT "# Subtest: %s", test_case->name); while (test.param_value) { @@ -555,7 +560,7 @@ int kunit_run_tests(struct kunit_suite *suite) kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT - "%s %d - %s", + "%s %d %s", kunit_status_to_ok_not_ok(test.status), test.param_index + 1, param_desc); @@ -612,10 +617,14 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_ return 0; } + static_branch_inc(&kunit_running); + for (i = 0; i < num_suites; i++) { kunit_init_suite(suites[i]); kunit_run_tests(suites[i]); } + + static_branch_dec(&kunit_running); return 0; } EXPORT_SYMBOL_GPL(__kunit_test_suites_init); diff --git a/lib/llist.c b/lib/llist.c index 7d78b736e8af..6e668fa5a2c6 100644 --- a/lib/llist.c +++ b/lib/llist.c @@ -26,10 +26,10 @@ bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, struct llist_head *head) { - struct llist_node *first; + struct llist_node *first = READ_ONCE(head->first); do { - new_last->next = first = READ_ONCE(head->first); + new_last->next = first; } while (!try_cmpxchg(&head->first, &first, new_first)); return !first; diff --git a/lib/lru_cache.c b/lib/lru_cache.c index dc35464216d3..b3d9187611de 100644 --- a/lib/lru_cache.c +++ b/lib/lru_cache.c @@ -60,17 +60,6 @@ int lc_try_lock(struct lru_cache *lc) } while (unlikely (val == LC_PARANOIA)); /* Spin until no-one is inside a PARANOIA_ENTRY()/RETURN() section. */ return 0 == val; -#if 0 - /* Alternative approach, spin in case someone enters or leaves a - * PARANOIA_ENTRY()/RETURN() section. */ - unsigned long old, new, val; - do { - old = lc->flags & LC_PARANOIA; - new = old | LC_LOCKED; - val = cmpxchg(&lc->flags, old, new); - } while (unlikely (val == (old ^ LC_PARANOIA))); - return old == val; -#endif } /** @@ -364,7 +353,7 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsig struct lc_element *e; PARANOIA_ENTRY(); - if (lc->flags & LC_STARVING) { + if (test_bit(__LC_STARVING, &lc->flags)) { ++lc->starving; RETURN(NULL); } @@ -417,7 +406,7 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsig * the LRU element, we have to wait ... */ if (!lc_unused_element_available(lc)) { - __set_bit(__LC_STARVING, &lc->flags); + set_bit(__LC_STARVING, &lc->flags); RETURN(NULL); } @@ -586,48 +575,6 @@ struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i) } /** - * lc_index_of - * @lc: the lru cache to operate on - * @e: the element to query for its index position in lc->element - */ -unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e) -{ - PARANOIA_LC_ELEMENT(lc, e); - return e->lc_index; -} - -/** - * lc_set - associate index with label - * @lc: the lru cache to operate on - * @enr: the label to set - * @index: the element index to associate label with. - * - * Used to initialize the active set to some previously recorded state. 
- */ -void lc_set(struct lru_cache *lc, unsigned int enr, int index) -{ - struct lc_element *e; - struct list_head *lh; - - if (index < 0 || index >= lc->nr_elements) - return; - - e = lc_element_by_index(lc, index); - BUG_ON(e->lc_number != e->lc_new_number); - BUG_ON(e->refcnt != 0); - - e->lc_number = e->lc_new_number = enr; - hlist_del_init(&e->colision); - if (enr == LC_FREE) - lh = &lc->free; - else { - hlist_add_head(&e->colision, lc_hash_slot(lc, enr)); - lh = &lc->lru; - } - list_move(&e->list, lh); -} - -/** * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form. * @lc: the lru cache to operate on * @seq: the &struct seq_file pointer to seq_printf into @@ -661,7 +608,6 @@ void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext EXPORT_SYMBOL(lc_create); EXPORT_SYMBOL(lc_reset); EXPORT_SYMBOL(lc_destroy); -EXPORT_SYMBOL(lc_set); EXPORT_SYMBOL(lc_del); EXPORT_SYMBOL(lc_try_get); EXPORT_SYMBOL(lc_find); @@ -669,7 +615,6 @@ EXPORT_SYMBOL(lc_get); EXPORT_SYMBOL(lc_put); EXPORT_SYMBOL(lc_committed); EXPORT_SYMBOL(lc_element_by_index); -EXPORT_SYMBOL(lc_index_of); EXPORT_SYMBOL(lc_seq_printf_stats); EXPORT_SYMBOL(lc_seq_dump_details); EXPORT_SYMBOL(lc_try_lock); diff --git a/lib/math/div64.c b/lib/math/div64.c index 46866394fc84..55a81782e271 100644 --- a/lib/math/div64.c +++ b/lib/math/div64.c @@ -63,12 +63,6 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) EXPORT_SYMBOL(__div64_32); #endif -/** - * div_s64_rem - signed 64bit divide with 64bit divisor and remainder - * @dividend: 64bit dividend - * @divisor: 64bit divisor - * @remainder: 64bit remainder - */ #ifndef div_s64_rem s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) { @@ -89,7 +83,7 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) EXPORT_SYMBOL(div_s64_rem); #endif -/** +/* * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder * @dividend: 64bit dividend * @divisor: 64bit divisor @@ -129,7 +123,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) EXPORT_SYMBOL(div64_u64_rem); #endif -/** +/* * div64_u64 - unsigned 64bit divide with 64bit divisor * @dividend: 64bit dividend * @divisor: 64bit divisor @@ -163,11 +157,6 @@ u64 div64_u64(u64 dividend, u64 divisor) EXPORT_SYMBOL(div64_u64); #endif -/** - * div64_s64 - signed 64bit divide with 64bit divisor - * @dividend: 64bit dividend - * @divisor: 64bit divisor - */ #ifndef div64_s64 s64 div64_s64(s64 dividend, s64 divisor) { diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c index 2b5cc70ac53f..7513e6d5dc90 100644 --- a/lib/memcpy_kunit.c +++ b/lib/memcpy_kunit.c @@ -105,6 +105,8 @@ static void memcpy_test(struct kunit *test) #undef TEST_OP } +static unsigned char larger_array [2048]; + static void memmove_test(struct kunit *test) { #define TEST_OP "memmove" @@ -179,6 +181,26 @@ static void memmove_test(struct kunit *test) ptr = &overlap.data[2]; memmove(ptr, overlap.data, 5); compare("overlapping write", overlap, overlap_expected); + + /* Verify larger overlapping moves. */ + larger_array[256] = 0xAAu; + /* + * Test a backwards overlapping memmove first. 256 and 1024 are + * important for i386 to use rep movsl. + */ + memmove(larger_array, larger_array + 256, 1024); + KUNIT_ASSERT_EQ(test, larger_array[0], 0xAAu); + KUNIT_ASSERT_EQ(test, larger_array[256], 0x00); + KUNIT_ASSERT_NULL(test, + memchr(larger_array + 1, 0xaa, ARRAY_SIZE(larger_array) - 1)); + /* Test a forwards overlapping memmove. 
*/ + larger_array[0] = 0xBBu; + memmove(larger_array + 256, larger_array, 1024); + KUNIT_ASSERT_EQ(test, larger_array[0], 0xBBu); + KUNIT_ASSERT_EQ(test, larger_array[256], 0xBBu); + KUNIT_ASSERT_NULL(test, memchr(larger_array + 1, 0xBBu, 256 - 1)); + KUNIT_ASSERT_NULL(test, + memchr(larger_array + 257, 0xBBu, ARRAY_SIZE(larger_array) - 257)); #undef TEST_OP } diff --git a/lib/net_utils.c b/lib/net_utils.c index af525353395d..c17201df3d08 100644 --- a/lib/net_utils.c +++ b/lib/net_utils.c @@ -6,10 +6,11 @@ bool mac_pton(const char *s, u8 *mac) { + size_t maxlen = 3 * ETH_ALEN - 1; int i; /* XX:XX:XX:XX:XX:XX */ - if (strlen(s) < 3 * ETH_ALEN - 1) + if (strnlen(s, maxlen) < maxlen) return false; /* Don't dirty result unless string is valid MAC. */ diff --git a/lib/nlattr.c b/lib/nlattr.c index b67a53e29b8f..9055e8b4d144 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -646,7 +646,7 @@ EXPORT_SYMBOL(__nla_validate); /** * nla_policy_len - Determine the max. length of a policy - * @policy: policy to use + * @p: policy to use * @n: number of policies * * Determines the max. length of the policy. It is currently used diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c index 21016b32d313..2b24ea6c9497 100644 --- a/lib/notifier-error-inject.c +++ b/lib/notifier-error-inject.c @@ -15,7 +15,7 @@ static int debugfs_errno_get(void *data, u64 *val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set, +DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set, "%lld\n"); static struct dentry *debugfs_create_errno(const char *name, umode_t mode, diff --git a/lib/oid_registry.c b/lib/oid_registry.c index e592d48b1974..fe6705cfd780 100644 --- a/lib/oid_registry.c +++ b/lib/oid_registry.c @@ -146,7 +146,6 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize) bufsize -= count; while (v < end) { - num = 0; n = *v++; if (!(n & 0x80)) { num = n; diff --git a/lib/packing.c b/lib/packing.c index 9a72f4bbf0e2..a96169237ae6 100644 --- a/lib/packing.c +++ b/lib/packing.c @@ -7,6 +7,7 @@ #include <linux/bitops.h> #include <linux/errno.h> #include <linux/types.h> +#include <linux/bitrev.h> static int get_le_offset(int offset) { @@ -29,19 +30,6 @@ static int get_reverse_lsw32_offset(int offset, size_t len) return word_index * 4 + offset; } -static u64 bit_reverse(u64 val, unsigned int width) -{ - u64 new_val = 0; - unsigned int bit; - unsigned int i; - - for (i = 0; i < width; i++) { - bit = (val & (1 << i)) != 0; - new_val |= (bit << (width - i - 1)); - } - return new_val; -} - static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit, int *box_end_bit, u8 *box_mask) { @@ -49,7 +37,7 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit, int new_box_start_bit, new_box_end_bit; *to_write >>= *box_end_bit; - *to_write = bit_reverse(*to_write, box_bit_width); + *to_write = bitrev8(*to_write) >> (8 - box_bit_width); *to_write <<= *box_end_bit; new_box_end_bit = box_bit_width - *box_start_bit - 1; diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index e5c5315da274..668f6aa6a75d 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c @@ -230,7 +230,8 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref, percpu_ref_noop_confirm_switch; percpu_ref_get(ref); /* put after confirmation */ - call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu); + call_rcu_hurry(&ref->data->rcu, + percpu_ref_switch_to_atomic_rcu); } static void 
__percpu_ref_switch_to_percpu(struct percpu_ref *ref) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 3c78e1e8b2ad..049ba132f7ef 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1029,7 +1029,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, { struct radix_tree_node *node, *parent; unsigned long maxindex; - int offset; + int offset = 0; radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index 39b74221f4a7..a22a05c9af8a 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c @@ -18,12 +18,10 @@ #else #include <linux/module.h> #include <linux/gfp.h> -#if !RAID6_USE_EMPTY_ZERO_PAGE /* In .bss so it's zeroed */ const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); EXPORT_SYMBOL(raid6_empty_zero_page); #endif -#endif struct raid6_calls raid6_call; EXPORT_SYMBOL_GPL(raid6_call); diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc index 9e597e1f91a4..b25dfc9c7759 100644 --- a/lib/raid6/s390vx.uc +++ b/lib/raid6/s390vx.uc @@ -13,8 +13,7 @@ #include <linux/raid/pq.h> #include <asm/fpu/api.h> - -asm(".include \"asm/vx-insn.h\"\n"); +#include <asm/vx-insn.h> #define NSIZE 16 diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c index 848e7eb5da92..75cb1adac884 100644 --- a/lib/reed_solomon/test_rslib.c +++ b/lib/reed_solomon/test_rslib.c @@ -183,7 +183,7 @@ static int get_rcw_we(struct rs_control *rs, struct wspace *ws, do { /* Must not choose the same location twice */ - errloc = prandom_u32_max(len); + errloc = get_random_u32_below(len); } while (errlocs[errloc] != 0); errlocs[errloc] = 1; @@ -194,12 +194,12 @@ static int get_rcw_we(struct rs_control *rs, struct wspace *ws, for (i = 0; i < eras; i++) { do { /* Must not choose the same location twice */ - errloc = prandom_u32_max(len); + errloc = get_random_u32_below(len); } while (errlocs[errloc] != 0); derrlocs[i] = errloc; - if (ewsc && prandom_u32_max(2)) { + if (ewsc && get_random_u32_below(2)) { /* Erasure with the symbol intact */ errlocs[errloc] = 2; } else { diff --git a/lib/rhashtable.c b/lib/rhashtable.c index e12bbfb240b8..6ae2ba8e06a2 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -231,6 +231,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, struct rhash_head *head, *next, *entry; struct rhash_head __rcu **pprev = NULL; unsigned int new_hash; + unsigned long flags; if (new_tbl->nest) goto out; @@ -253,13 +254,14 @@ static int rhashtable_rehash_one(struct rhashtable *ht, new_hash = head_hashfn(ht, new_tbl, entry); - rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING); + flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], + SINGLE_DEPTH_NESTING); head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash); RCU_INIT_POINTER(entry->next, head); - rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry); + rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags); if (pprev) rcu_assign_pointer(*pprev, next); @@ -276,18 +278,19 @@ static int rhashtable_rehash_chain(struct rhashtable *ht, { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash); + unsigned long flags; int err; if (!bkt) return 0; - rht_lock(old_tbl, bkt); + flags = rht_lock(old_tbl, bkt); while (!(err = rhashtable_rehash_one(ht, bkt, old_hash))) ; if (err == -ENOENT) err = 0; - rht_unlock(old_tbl, bkt); + rht_unlock(old_tbl, bkt, flags); return err; } @@ -590,6 +593,7 @@ static void 
*rhashtable_try_insert(struct rhashtable *ht, const void *key, struct bucket_table *new_tbl; struct bucket_table *tbl; struct rhash_lock_head __rcu **bkt; + unsigned long flags; unsigned int hash; void *data; @@ -607,7 +611,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); data = ERR_PTR(-EAGAIN); } else { - rht_lock(tbl, bkt); + flags = rht_lock(tbl, bkt); data = rhashtable_lookup_one(ht, bkt, tbl, hash, key, obj); new_tbl = rhashtable_insert_one(ht, bkt, tbl, @@ -615,7 +619,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, if (PTR_ERR(new_tbl) != -EEXIST) data = ERR_CAST(new_tbl); - rht_unlock(tbl, bkt); + rht_unlock(tbl, bkt, flags); } } while (!IS_ERR_OR_NULL(new_tbl)); diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 7280ae8ca88c..1fcede228fa2 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -21,7 +21,7 @@ static int init_alloc_hint(struct sbitmap *sb, gfp_t flags) int i; for_each_possible_cpu(i) - *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32_max(depth); + *per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth); } return 0; } @@ -33,7 +33,7 @@ static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb, hint = this_cpu_read(*sb->alloc_hint); if (unlikely(hint >= depth)) { - hint = depth ? prandom_u32_max(depth) : 0; + hint = depth ? get_random_u32_below(depth) : 0; this_cpu_write(*sb->alloc_hint, hint); } @@ -434,6 +434,8 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); atomic_set(&sbq->wake_index, 0); atomic_set(&sbq->ws_active, 0); + atomic_set(&sbq->completion_cnt, 0); + atomic_set(&sbq->wakeup_cnt, 0); sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); if (!sbq->ws) { @@ -441,40 +443,21 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, return -ENOMEM; } - for (i = 0; i < SBQ_WAIT_QUEUES; i++) { + for (i = 0; i < SBQ_WAIT_QUEUES; i++) init_waitqueue_head(&sbq->ws[i].wait); - atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch); - } return 0; } EXPORT_SYMBOL_GPL(sbitmap_queue_init_node); -static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, - unsigned int wake_batch) -{ - int i; - - if (sbq->wake_batch != wake_batch) { - WRITE_ONCE(sbq->wake_batch, wake_batch); - /* - * Pairs with the memory barrier in sbitmap_queue_wake_up() - * to ensure that the batch size is updated before the wait - * counts. 
- */ - smp_mb(); - for (i = 0; i < SBQ_WAIT_QUEUES; i++) - atomic_set(&sbq->ws[i].wait_cnt, 1); - } -} - static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, unsigned int depth) { unsigned int wake_batch; wake_batch = sbq_calc_wake_batch(sbq, depth); - __sbitmap_queue_update_wake_batch(sbq, wake_batch); + if (sbq->wake_batch != wake_batch) + WRITE_ONCE(sbq->wake_batch, wake_batch); } void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq, @@ -488,7 +471,8 @@ void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq, wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES, min_batch, SBQ_WAKE_BATCH); - __sbitmap_queue_update_wake_batch(sbq, wake_batch); + + WRITE_ONCE(sbq->wake_batch, wake_batch); } EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch); @@ -576,106 +560,56 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, } EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth); -static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) +static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr) { int i, wake_index; if (!atomic_read(&sbq->ws_active)) - return NULL; + return; wake_index = atomic_read(&sbq->wake_index); for (i = 0; i < SBQ_WAIT_QUEUES; i++) { struct sbq_wait_state *ws = &sbq->ws[wake_index]; - if (waitqueue_active(&ws->wait) && atomic_read(&ws->wait_cnt)) { - if (wake_index != atomic_read(&sbq->wake_index)) - atomic_set(&sbq->wake_index, wake_index); - return ws; - } - + /* + * Advance the index before checking the current queue. + * It improves fairness, by ensuring the queue doesn't + * need to be fully emptied before trying to wake up + * from the next one. + */ wake_index = sbq_index_inc(wake_index); + + /* + * It is sufficient to wake up at least one waiter to + * guarantee forward progress. + */ + if (waitqueue_active(&ws->wait) && + wake_up_nr(&ws->wait, nr)) + break; } - return NULL; + if (wake_index != atomic_read(&sbq->wake_index)) + atomic_set(&sbq->wake_index, wake_index); } -static bool __sbq_wake_up(struct sbitmap_queue *sbq, int *nr) +void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr) { - struct sbq_wait_state *ws; - unsigned int wake_batch; - int wait_cnt, cur, sub; - bool ret; + unsigned int wake_batch = READ_ONCE(sbq->wake_batch); + unsigned int wakeups; - if (*nr <= 0) - return false; + if (!atomic_read(&sbq->ws_active)) + return; - ws = sbq_wake_ptr(sbq); - if (!ws) - return false; + atomic_add(nr, &sbq->completion_cnt); + wakeups = atomic_read(&sbq->wakeup_cnt); - cur = atomic_read(&ws->wait_cnt); do { - /* - * For concurrent callers of this, callers should call this - * function again to wakeup a new batch on a different 'ws'. - */ - if (cur == 0) - return true; - sub = min(*nr, cur); - wait_cnt = cur - sub; - } while (!atomic_try_cmpxchg(&ws->wait_cnt, &cur, wait_cnt)); - - /* - * If we decremented queue without waiters, retry to avoid lost - * wakeups. - */ - if (wait_cnt > 0) - return !waitqueue_active(&ws->wait); - - *nr -= sub; - - /* - * When wait_cnt == 0, we have to be particularly careful as we are - * responsible to reset wait_cnt regardless whether we've actually - * woken up anybody. But in case we didn't wakeup anybody, we still - * need to retry. - */ - ret = !waitqueue_active(&ws->wait); - wake_batch = READ_ONCE(sbq->wake_batch); - - /* - * Wake up first in case that concurrent callers decrease wait_cnt - * while waitqueue is empty. 
- */ - wake_up_nr(&ws->wait, wake_batch); + if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch) + return; + } while (!atomic_try_cmpxchg(&sbq->wakeup_cnt, + &wakeups, wakeups + wake_batch)); - /* - * Pairs with the memory barrier in sbitmap_queue_resize() to - * ensure that we see the batch size update before the wait - * count is reset. - * - * Also pairs with the implicit barrier between decrementing wait_cnt - * and checking for waitqueue_active() to make sure waitqueue_active() - * sees result of the wakeup if atomic_dec_return() has seen the result - * of atomic_set(). - */ - smp_mb__before_atomic(); - - /* - * Increase wake_index before updating wait_cnt, otherwise concurrent - * callers can see valid wait_cnt in old waitqueue, which can cause - * invalid wakeup on the old waitqueue. - */ - sbq_index_atomic_inc(&sbq->wake_index); - atomic_set(&ws->wait_cnt, wake_batch); - - return ret || *nr; -} - -void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr) -{ - while (__sbq_wake_up(sbq, &nr)) - ; + __sbitmap_queue_wake_up(sbq, wake_batch); } EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up); @@ -792,9 +726,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) seq_puts(m, "ws={\n"); for (i = 0; i < SBQ_WAIT_QUEUES; i++) { struct sbq_wait_state *ws = &sbq->ws[i]; - - seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n", - atomic_read(&ws->wait_cnt), + seq_printf(m, "\t{.wait=%s},\n", waitqueue_active(&ws->wait) ? "active" : "inactive"); } seq_puts(m, "}\n"); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index c8c3d675845c..a0ad2a7959b5 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -410,6 +410,15 @@ static struct scatterlist *get_next_sg(struct sg_append_table *table, return new_sg; } +static bool pages_are_mergeable(struct page *a, struct page *b) +{ + if (page_to_pfn(a) != page_to_pfn(b) + 1) + return false; + if (!zone_device_pages_have_same_pgmap(a, b)) + return false; + return true; +} + /** * sg_alloc_append_table_from_pages - Allocate and initialize an append sg * table from an array of pages @@ -447,6 +456,7 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append, unsigned int chunks, cur_page, seg_len, i, prv_len = 0; unsigned int added_nents = 0; struct scatterlist *s = sgt_append->prv; + struct page *last_pg; /* * The algorithm below requires max_segment to be aligned to PAGE_SIZE @@ -460,21 +470,17 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append, return -EOPNOTSUPP; if (sgt_append->prv) { - unsigned long paddr = - (page_to_pfn(sg_page(sgt_append->prv)) * PAGE_SIZE + - sgt_append->prv->offset + sgt_append->prv->length) / - PAGE_SIZE; - if (WARN_ON(offset)) return -EINVAL; /* Merge contiguous pages into the last SG */ prv_len = sgt_append->prv->length; - while (n_pages && page_to_pfn(pages[0]) == paddr) { + last_pg = sg_page(sgt_append->prv); + while (n_pages && pages_are_mergeable(last_pg, pages[0])) { if (sgt_append->prv->length + PAGE_SIZE > max_segment) break; sgt_append->prv->length += PAGE_SIZE; - paddr++; + last_pg = pages[0]; pages++; n_pages--; } @@ -488,7 +494,7 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append, for (i = 1; i < n_pages; i++) { seg_len += PAGE_SIZE; if (seg_len >= max_segment || - page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) { + !pages_are_mergeable(pages[i], pages[i - 1])) { chunks++; seg_len = 0; } @@ -504,8 +510,7 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append, for (j = cur_page + 1; j < n_pages; j++) { 
seg_len += PAGE_SIZE; if (seg_len >= max_segment || - page_to_pfn(pages[j]) != - page_to_pfn(pages[j - 1]) + 1) + !pages_are_mergeable(pages[j], pages[j - 1])) break; } diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c index 7a0564d7cb7a..d4a3730b08fa 100644 --- a/lib/slub_kunit.c +++ b/lib/slub_kunit.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <kunit/test.h> +#include <kunit/test-bug.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/module.h> @@ -9,10 +10,25 @@ static struct kunit_resource resource; static int slab_errors; +/* + * Wrapper function for kmem_cache_create(), which reduces 2 parameters: + * 'align' and 'ctor', and sets SLAB_SKIP_KFENCE flag to avoid getting an + * object from kfence pool, where the operation could be caught by both + * our test and kfence sanity check. + */ +static struct kmem_cache *test_kmem_cache_create(const char *name, + unsigned int size, slab_flags_t flags) +{ + struct kmem_cache *s = kmem_cache_create(name, size, 0, + (flags | SLAB_NO_USER_FLAGS), NULL); + s->flags |= SLAB_SKIP_KFENCE; + return s; +} + static void test_clobber_zone(struct kunit *test) { - struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0, - SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL); + struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64, + SLAB_RED_ZONE); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); kasan_disable_current(); @@ -29,8 +45,8 @@ static void test_clobber_zone(struct kunit *test) #ifndef CONFIG_KASAN static void test_next_pointer(struct kunit *test) { - struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0, - SLAB_POISON|SLAB_NO_USER_FLAGS, NULL); + struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free", + 64, SLAB_POISON); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); unsigned long tmp; unsigned long *ptr_addr; @@ -74,8 +90,8 @@ static void test_next_pointer(struct kunit *test) static void test_first_word(struct kunit *test) { - struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0, - SLAB_POISON|SLAB_NO_USER_FLAGS, NULL); + struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free", + 64, SLAB_POISON); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); kmem_cache_free(s, p); @@ -89,8 +105,8 @@ static void test_first_word(struct kunit *test) static void test_clobber_50th_byte(struct kunit *test) { - struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0, - SLAB_POISON|SLAB_NO_USER_FLAGS, NULL); + struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free", + 64, SLAB_POISON); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); kmem_cache_free(s, p); @@ -105,8 +121,8 @@ static void test_clobber_50th_byte(struct kunit *test) static void test_clobber_redzone_free(struct kunit *test) { - struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0, - SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL); + struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64, + SLAB_RED_ZONE); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); kasan_disable_current(); @@ -120,6 +136,27 @@ static void test_clobber_redzone_free(struct kunit *test) kmem_cache_destroy(s); } +static void test_kmalloc_redzone_access(struct kunit *test) +{ + struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32, + SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE); + u8 *p = kmalloc_trace(s, GFP_KERNEL, 18); + + kasan_disable_current(); + + /* Suppress the -Warray-bounds warning */ + OPTIMIZER_HIDE_VAR(p); + p[18] = 0xab; + p[19] = 0xab; + + validate_slab_cache(s); + 
KUNIT_EXPECT_EQ(test, 2, slab_errors); + + kasan_enable_current(); + kmem_cache_free(s, p); + kmem_cache_destroy(s); +} + static int test_init(struct kunit *test) { slab_errors = 0; @@ -139,6 +176,7 @@ static struct kunit_case test_cases[] = { #endif KUNIT_CASE(test_clobber_redzone_free), + KUNIT_CASE(test_kmalloc_redzone_access), {} }; diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c index 86fadd3ba08c..41d3447bc3b4 100644 --- a/lib/test-string_helpers.c +++ b/lib/test-string_helpers.c @@ -587,7 +587,7 @@ static int __init test_string_helpers_init(void) for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++) test_string_unescape("unescape", i, false); test_string_unescape("unescape inplace", - prandom_u32_max(UNESCAPE_ANY + 1), true); + get_random_u32_below(UNESCAPE_ANY + 1), true); /* Without dictionary */ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++) diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 5820704165a6..ade9ac672adb 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -14346,7 +14346,6 @@ static struct sk_buff *populate_skb(char *buf, int size) skb->hash = SKB_HASH; skb->queue_mapping = SKB_QUEUE_MAP; skb->vlan_tci = SKB_VLAN_TCI; - skb->vlan_present = SKB_VLAN_PRESENT; skb->vlan_proto = htons(ETH_P_IP); dev_net_set(&dev, &init_net); skb->dev = &dev; diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c index e0381b3ec410..1fb56cf5e5ce 100644 --- a/lib/test_fprobe.c +++ b/lib/test_fprobe.c @@ -144,10 +144,7 @@ static unsigned long get_ftrace_location(void *func) static int fprobe_test_init(struct kunit *test) { - do { - rand1 = get_random_u32(); - } while (rand1 <= div_factor); - + rand1 = get_random_u32_above(div_factor); target = fprobe_selftest_target; target2 = fprobe_selftest_target2; target_ip = get_ftrace_location(target); diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c index 0927f44cd478..b916801f23a8 100644 --- a/lib/test_hexdump.c +++ b/lib/test_hexdump.c @@ -149,7 +149,7 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize, static void __init test_hexdump_set(int rowsize, bool ascii) { size_t d = min_t(size_t, sizeof(data_b), rowsize); - size_t len = prandom_u32_max(d) + 1; + size_t len = get_random_u32_inclusive(1, d); test_hexdump(len, rowsize, 4, ascii); test_hexdump(len, rowsize, 2, ascii); @@ -208,11 +208,11 @@ static void __init test_hexdump_overflow(size_t buflen, size_t len, static void __init test_hexdump_overflow_set(size_t buflen, bool ascii) { unsigned int i = 0; - int rs = (prandom_u32_max(2) + 1) * 16; + int rs = get_random_u32_inclusive(1, 2) * 16; do { int gs = 1 << i; - size_t len = prandom_u32_max(rs) + gs; + size_t len = get_random_u32_below(rs) + gs; test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii); } while (i++ < 3); @@ -223,11 +223,11 @@ static int __init test_hexdump_init(void) unsigned int i; int rowsize; - rowsize = (prandom_u32_max(2) + 1) * 16; + rowsize = get_random_u32_inclusive(1, 2) * 16; for (i = 0; i < 16; i++) test_hexdump_set(rowsize, false); - rowsize = (prandom_u32_max(2) + 1) * 16; + rowsize = get_random_u32_inclusive(1, 2) * 16; for (i = 0; i < 16; i++) test_hexdump_set(rowsize, true); diff --git a/lib/test_kprobes.c b/lib/test_kprobes.c index eeb1d728d974..1c95e5719802 100644 --- a/lib/test_kprobes.c +++ b/lib/test_kprobes.c @@ -339,10 +339,7 @@ static int kprobes_test_init(struct kunit *test) stacktrace_target = kprobe_stacktrace_target; internal_target = kprobe_stacktrace_internal_target; stacktrace_driver = kprobe_stacktrace_driver; - - do { - rand1 = get_random_u32(); - } while 
(rand1 <= div_factor); + rand1 = get_random_u32_above(div_factor); return 0; } diff --git a/lib/test_linear_ranges.c b/lib/test_linear_ranges.c index 676e0b8abcdd..c18f9c0f1f25 100644 --- a/lib/test_linear_ranges.c +++ b/lib/test_linear_ranges.c @@ -107,17 +107,8 @@ static const unsigned int range2_vals[] = { RANGE2_MIN, RANGE2_MIN + #define SMALLEST_VAL RANGE1_MIN static struct linear_range testr[] = { - { - .min = RANGE1_MIN, - .min_sel = RANGE1_MIN_SEL, - .max_sel = RANGE1_MAX_SEL, - .step = RANGE1_STEP, - }, { - .min = RANGE2_MIN, - .min_sel = RANGE2_MIN_SEL, - .max_sel = RANGE2_MAX_SEL, - .step = RANGE2_STEP - }, + LINEAR_RANGE(RANGE1_MIN, RANGE1_MIN_SEL, RANGE1_MAX_SEL, RANGE1_STEP), + LINEAR_RANGE(RANGE2_MIN, RANGE2_MIN_SEL, RANGE2_MAX_SEL, RANGE2_STEP), }; static void range_test_get_value(struct kunit *test) diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c index 19ff229b9c3a..cc5f335f29b5 100644 --- a/lib/test_list_sort.c +++ b/lib/test_list_sort.c @@ -71,7 +71,7 @@ static void list_sort_test(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el); /* force some equivalencies */ - el->value = prandom_u32_max(TEST_LIST_LEN / 3); + el->value = get_random_u32_below(TEST_LIST_LEN / 3); el->serial = i; el->poison1 = TEST_POISON1; el->poison2 = TEST_POISON2; diff --git a/lib/test_printf.c b/lib/test_printf.c index 4bd15a593fbd..d34dc636b81c 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -126,7 +126,7 @@ __test(const char *expect, int elen, const char *fmt, ...) * be able to print it as expected. */ failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap); - rand = 1 + prandom_u32_max(elen+1); + rand = get_random_u32_inclusive(1, elen + 1); /* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */ failed_tests += do_test(rand, expect, elen, fmt, ap); failed_tests += do_test(0, expect, elen, fmt, ap); @@ -179,18 +179,6 @@ test_number(void) * behaviour. */ test("00|0|0|0|0", "%.2d|%.1d|%.0d|%.*d|%1.0d", 0, 0, 0, 0, 0, 0); -#ifndef __CHAR_UNSIGNED__ - { - /* - * Passing a 'char' to a %02x specifier doesn't do - * what was presumably the intention when char is - * signed and the value is negative. One must either & - * with 0xff or cast to u8. 
- */ - char val = -16; - test("0xfffffff0|0xf0|0xf0", "%#02x|%#02x|%#02x", val, val & 0xff, (u8)val); - } -#endif } static void __init @@ -704,31 +692,29 @@ flags(void) static void __init fwnode_pointer(void) { - const struct software_node softnodes[] = { - { .name = "first", }, - { .name = "second", .parent = &softnodes[0], }, - { .name = "third", .parent = &softnodes[1], }, - { NULL /* Guardian */ } - }; - const char * const full_name = "first/second/third"; + const struct software_node first = { .name = "first" }; + const struct software_node second = { .name = "second", .parent = &first }; + const struct software_node third = { .name = "third", .parent = &second }; + const struct software_node *group[] = { &first, &second, &third, NULL }; const char * const full_name_second = "first/second"; + const char * const full_name_third = "first/second/third"; const char * const second_name = "second"; const char * const third_name = "third"; int rval; - rval = software_node_register_nodes(softnodes); + rval = software_node_register_node_group(group); if (rval) { pr_warn("cannot register softnodes; rval %d\n", rval); return; } - test(full_name_second, "%pfw", software_node_fwnode(&softnodes[1])); - test(full_name, "%pfw", software_node_fwnode(&softnodes[2])); - test(full_name, "%pfwf", software_node_fwnode(&softnodes[2])); - test(second_name, "%pfwP", software_node_fwnode(&softnodes[1])); - test(third_name, "%pfwP", software_node_fwnode(&softnodes[2])); + test(full_name_second, "%pfw", software_node_fwnode(&second)); + test(full_name_third, "%pfw", software_node_fwnode(&third)); + test(full_name_third, "%pfwf", software_node_fwnode(&third)); + test(second_name, "%pfwP", software_node_fwnode(&second)); + test(third_name, "%pfwP", software_node_fwnode(&third)); - software_node_unregister_nodes(softnodes); + software_node_unregister_node_group(group); } static void __init fourcc_pointer(void) diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index f2ba5787055a..c20f6cb4bf55 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -368,8 +368,8 @@ static int __init test_rhltable(unsigned int entries) pr_info("test %d random rhlist add/delete operations\n", entries); for (j = 0; j < entries; j++) { - u32 i = prandom_u32_max(entries); - u32 prand = prandom_u32_max(4); + u32 i = get_random_u32_below(entries); + u32 prand = get_random_u32_below(4); cond_resched(); @@ -396,7 +396,7 @@ static int __init test_rhltable(unsigned int entries) } if (prand & 2) { - i = prandom_u32_max(entries); + i = get_random_u32_below(entries); if (test_bit(i, obj_in_table)) { err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); WARN(err, "cannot remove element at slot %d", i); @@ -434,7 +434,7 @@ out_free: static int __init test_rhashtable_max(struct test_obj *array, unsigned int entries) { - unsigned int i, insert_retries = 0; + unsigned int i; int err; test_rht_params.max_size = roundup_pow_of_two(entries / 8); @@ -447,9 +447,7 @@ static int __init test_rhashtable_max(struct test_obj *array, obj->value.id = i * 2; err = insert_retry(&ht, obj, test_rht_params); - if (err > 0) - insert_retries += err; - else if (err) + if (err < 0) return err; } diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index cf7780572f5b..f90d2c27675b 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -151,7 +151,7 @@ static int random_size_alloc_test(void) int i; for (i = 0; i < test_loop_count; i++) { - n = prandom_u32_max(100) + 1; + n = get_random_u32_inclusive(1, 100); p = vmalloc(n * 
PAGE_SIZE); if (!p) @@ -291,12 +291,12 @@ pcpu_alloc_test(void) return -1; for (i = 0; i < 35000; i++) { - size = prandom_u32_max(PAGE_SIZE / 4) + 1; + size = get_random_u32_inclusive(1, PAGE_SIZE / 4); /* * Maximum PAGE_SIZE */ - align = 1 << (prandom_u32_max(11) + 1); + align = 1 << get_random_u32_inclusive(1, 11); pcpu[i] = __alloc_percpu(size, align); if (!pcpu[i]) @@ -391,7 +391,7 @@ static void shuffle_array(int *arr, int n) for (i = n - 1; i > 0; i--) { /* Cut the range. */ - j = prandom_u32_max(i); + j = get_random_u32_below(i); /* Swap indexes. */ swap(arr[i], arr[j]); diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile index c415a685d61b..e814061d6aa0 100644 --- a/lib/vdso/Makefile +++ b/lib/vdso/Makefile @@ -17,6 +17,6 @@ $(error ARCH_REL_TYPE_ABS is not set) endif quiet_cmd_vdso_check = VDSOCHK $@ - cmd_vdso_check = if $(OBJDUMP) -R $@ | egrep -h "$(ARCH_REL_TYPE_ABS)"; \ + cmd_vdso_check = if $(OBJDUMP) -R $@ | grep -E -h "$(ARCH_REL_TYPE_ABS)"; \ then (echo >&2 "$@: dynamic relocations are not supported"; \ rm -f $@; /bin/false); fi diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 24f37bab8bc1..be71a03c936a 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -41,6 +41,7 @@ #include <linux/siphash.h> #include <linux/compiler.h> #include <linux/property.h> +#include <linux/notifier.h> #ifdef CONFIG_BLOCK #include <linux/blkdev.h> #endif @@ -752,26 +753,21 @@ early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable); static bool filled_random_ptr_key __read_mostly; static siphash_key_t ptr_key __read_mostly; -static void fill_ptr_key_workfn(struct work_struct *work); -static DECLARE_DELAYED_WORK(fill_ptr_key_work, fill_ptr_key_workfn); -static void fill_ptr_key_workfn(struct work_struct *work) +static int fill_ptr_key(struct notifier_block *nb, unsigned long action, void *data) { - if (!rng_is_initialized()) { - queue_delayed_work(system_unbound_wq, &fill_ptr_key_work, HZ * 2); - return; - } - get_random_bytes(&ptr_key, sizeof(ptr_key)); /* Pairs with smp_rmb() before reading ptr_key. */ smp_wmb(); WRITE_ONCE(filled_random_ptr_key, true); + return NOTIFY_DONE; } static int __init vsprintf_init_hashval(void) { - fill_ptr_key_workfn(NULL); + static struct notifier_block fill_ptr_key_nb = { .notifier_call = fill_ptr_key }; + execute_with_initialized_rng(&fill_ptr_key_nb); return 0; } subsys_initcall(vsprintf_init_hashval) @@ -866,7 +862,7 @@ char *restricted_pointer(char *buf, char *end, const void *ptr, * kptr_restrict==1 cannot be used in IRQ context * because its test for CAP_SYSLOG would be meaningless. */ - if (in_irq() || in_serving_softirq() || in_nmi()) { + if (in_hardirq() || in_serving_softirq() || in_nmi()) { if (spec.field_width == -1) spec.field_width = 2 * sizeof(ptr); return error_string(buf, end, "pK-error", spec); |