Diffstat (limited to 'lib')
-rw-r--r-- lib/842/842_compress.c | 2
-rw-r--r-- lib/Kconfig.debug | 35
-rw-r--r-- lib/alloc_tag.c | 6
-rw-r--r-- lib/bug.c | 22
-rw-r--r-- lib/crypto/Kconfig | 45
-rw-r--r-- lib/crypto/chacha20poly1305.c | 7
-rw-r--r-- lib/idr.c | 67
-rw-r--r-- lib/interval_tree.c | 12
-rw-r--r-- lib/interval_tree_test.c | 237
-rw-r--r-- lib/lzo/Makefile | 2
-rw-r--r-- lib/lzo/lzo1x_compress.c | 102
-rw-r--r-- lib/lzo/lzo1x_compress_safe.c | 18
-rw-r--r-- lib/maple_tree.c | 10
-rw-r--r-- lib/min_heap.c | 4
-rw-r--r-- lib/plist.c | 12
-rw-r--r-- lib/raid6/s390vx.uc | 1
-rw-r--r-- lib/rbtree_test.c | 30
-rw-r--r-- lib/scatterlist.c | 12
-rw-r--r-- lib/stackdepot.c | 10
-rw-r--r-- lib/test_hmm.c | 72
-rw-r--r-- lib/test_ida.c | 70
-rw-r--r-- lib/test_xarray.c | 52
-rw-r--r-- lib/tests/Makefile | 4
-rw-r--r-- lib/tests/longest_symbol_kunit.c | 82
-rwxr-xr-x lib/tests/module/gen_test_kallsyms.sh | 2
-rw-r--r-- lib/vsprintf.c | 12
-rw-r--r-- lib/xarray.c | 157
-rw-r--r-- lib/zlib_deflate/deflate.c | 6
28 files changed, 914 insertions, 177 deletions
diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c index c02baa4168e1..055356508d97 100644 --- a/lib/842/842_compress.c +++ b/lib/842/842_compress.c @@ -532,6 +532,8 @@ int sw842_compress(const u8 *in, unsigned int ilen, } if (repeat_count) { ret = add_repeat_template(p, repeat_count); + if (ret) + return ret; repeat_count = 0; if (next == last) /* reached max repeat bits */ goto repeat; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8fb1d685f696..df9587aa5c5e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1280,6 +1280,17 @@ config BOOTPARAM_HUNG_TASK_PANIC Say N if unsure. +config DETECT_HUNG_TASK_BLOCKER + bool "Dump Hung Tasks Blocker" + depends on DETECT_HUNG_TASK + depends on !PREEMPT_RT + default y + help + Say Y here to show the stacktrace of the blocker task that holds + the mutex the "hung tasks" are waiting on. + This adds a bit of overhead, but it shows suspicious tasks and + the call trace when a hang comes from waiting on a mutex. + config WQ_WATCHDOG bool "Detect Workqueue Stalls" depends on DEBUG_KERNEL @@ -2502,13 +2513,20 @@ config TEST_IDA tristate "Perform selftest on IDA functions" config TEST_MISC_MINOR - tristate "Basic misc minor Kunit test" if !KUNIT_ALL_TESTS + tristate "miscdevice KUnit test" if !KUNIT_ALL_TESTS depends on KUNIT default KUNIT_ALL_TESTS help - Kunit test for the misc minor. - It tests misc minor functions for dynamic and misc dynamic minor. - This include misc_xxx functions + KUnit test for the miscdevice API, especially its behavior with + respect to static and dynamic minor numbers. + + KUnit tests run during boot and output the results to the debug log + in TAP format (https://testanything.org/). Only useful for kernel devs + running the KUnit test harness, and not intended for inclusion into a + production build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. If unsure, say N. @@ -2866,6 +2884,15 @@ config FORTIFY_KUNIT_TEST by the str*() and mem*() family of functions. For testing runtime traps of FORTIFY_SOURCE, see LKDTM's "FORTIFY_*" tests. +config LONGEST_SYM_KUNIT_TEST + tristate "Test the longest symbol possible" if !KUNIT_ALL_TESTS + depends on KUNIT && KPROBES + default KUNIT_ALL_TESTS + help + Tests the longest symbol name possible. + + If unsure, say N.
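As an aside on the DETECT_HUNG_TASK_BLOCKER entry above, the scenario it targets is easy to sketch. A minimal, hypothetical example (kernel C; the thread functions and names are illustrative, not part of this patch): one kthread holds a mutex across a long sleep, a second blocks on it uninterruptibly. After hung_task_timeout_secs, khungtaskd reports the waiter as hung and, with this option enabled, also dumps the stacktrace of the holder, i.e. the blocker.

#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/delay.h>

static DEFINE_MUTEX(demo_lock);

static int holder_fn(void *unused)
{
	mutex_lock(&demo_lock);
	msleep(10 * 60 * 1000);	/* hold the mutex far past hung_task_timeout_secs */
	mutex_unlock(&demo_lock);
	return 0;
}

static int waiter_fn(void *unused)
{
	mutex_lock(&demo_lock);	/* blocks in TASK_UNINTERRUPTIBLE; flagged as hung */
	mutex_unlock(&demo_lock);
	return 0;
}

/* kthread_run(holder_fn, NULL, "demo-holder");
 * kthread_run(waiter_fn, NULL, "demo-waiter"); */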
+ config HW_BREAKPOINT_KUNIT_TEST bool "Test hw_breakpoint constraints accounting" if !KUNIT_ALL_TESTS depends on HAVE_HW_BREAKPOINT diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 19b45617bdcf..1d893e313614 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -174,7 +174,7 @@ void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) if (!mem_alloc_profiling_enabled()) return; - tag = pgalloc_tag_get(&folio->page); + tag = __pgalloc_tag_get(&folio->page); if (!tag) return; @@ -200,10 +200,10 @@ void pgalloc_tag_swap(struct folio *new, struct folio *old) if (!mem_alloc_profiling_enabled()) return; - tag_old = pgalloc_tag_get(&old->page); + tag_old = __pgalloc_tag_get(&old->page); if (!tag_old) return; - tag_new = pgalloc_tag_get(&new->page); + tag_new = __pgalloc_tag_get(&new->page); if (!tag_new) return; diff --git a/lib/bug.c b/lib/bug.c index e0ff21989990..b1f07459c2ee 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -66,23 +66,19 @@ static LIST_HEAD(module_bug_list); static struct bug_entry *module_find_bug(unsigned long bugaddr) { + struct bug_entry *bug; struct module *mod; - struct bug_entry *bug = NULL; - rcu_read_lock_sched(); + guard(rcu)(); list_for_each_entry_rcu(mod, &module_bug_list, bug_list) { unsigned i; bug = mod->bug_table; for (i = 0; i < mod->num_bugs; ++i, ++bug) if (bugaddr == bug_addr(bug)) - goto out; + return bug; } - bug = NULL; -out: - rcu_read_unlock_sched(); - - return bug; + return NULL; } void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, @@ -235,11 +231,11 @@ void generic_bug_clear_once(void) #ifdef CONFIG_MODULES struct module *mod; - rcu_read_lock_sched(); - list_for_each_entry_rcu(mod, &module_bug_list, bug_list) - clear_once_table(mod->bug_table, - mod->bug_table + mod->num_bugs); - rcu_read_unlock_sched(); + scoped_guard(rcu) { + list_for_each_entry_rcu(mod, &module_bug_list, bug_list) + clear_once_table(mod->bug_table, + mod->bug_table + mod->num_bugs); + } #endif clear_once_table(__start___bug_table, __stop___bug_table); diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index b01253cac70a..798972b29b68 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -42,7 +42,7 @@ config CRYPTO_LIB_BLAKE2S_GENERIC of CRYPTO_LIB_BLAKE2S. config CRYPTO_ARCH_HAVE_LIB_CHACHA - tristate + bool help Declares whether the architecture provides an arch-specific accelerated implementation of the ChaCha library interface, @@ -58,17 +58,21 @@ config CRYPTO_LIB_CHACHA_GENERIC implementation is enabled, this implementation serves the users of CRYPTO_LIB_CHACHA. -config CRYPTO_LIB_CHACHA - tristate "ChaCha library interface" - depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA +config CRYPTO_LIB_CHACHA_INTERNAL + tristate select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n + +config CRYPTO_LIB_CHACHA + tristate + select CRYPTO + select CRYPTO_LIB_CHACHA_INTERNAL help Enable the ChaCha library interface. This interface may be fulfilled by either the generic implementation or an arch-specific one, if one is available and enabled. 
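A usage note on CRYPTO_LIB_CHACHA above: the "library interface" is the direct-call API in <crypto/chacha.h>, which dispatches to the arch backend when CRYPTO_ARCH_HAVE_LIB_CHACHA is set. A minimal sketch, assuming the signatures in that header at the time of this series (u32-array state, 32-byte key passed as eight little-endian words, 16-byte counter||nonce IV); the wrapper name is illustrative:

#include <crypto/chacha.h>

static void chacha20_xor(u8 *dst, const u8 *src, unsigned int len,
			 const u32 key[CHACHA_KEY_SIZE / sizeof(u32)],
			 const u8 iv[CHACHA_IV_SIZE])
{
	u32 state[CHACHA_STATE_WORDS];

	chacha_init(state, key, iv);		/* expand key/IV into the block state */
	chacha20_crypt(state, dst, src, len);	/* XOR keystream; arch backend if enabled */
}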
config CRYPTO_ARCH_HAVE_LIB_CURVE25519 - tristate + bool help Declares whether the architecture provides an arch-specific accelerated implementation of the Curve25519 library interface, @@ -76,6 +80,7 @@ config CRYPTO_ARCH_HAVE_LIB_CURVE25519 config CRYPTO_LIB_CURVE25519_GENERIC tristate + select CRYPTO_LIB_UTILS help This symbol can be depended upon by arch implementations of the Curve25519 library interface that require the generic code as a @@ -83,11 +88,14 @@ config CRYPTO_LIB_CURVE25519_GENERIC implementation is enabled, this implementation serves the users of CRYPTO_LIB_CURVE25519. -config CRYPTO_LIB_CURVE25519 - tristate "Curve25519 scalar multiplication library" - depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 +config CRYPTO_LIB_CURVE25519_INTERNAL + tristate select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n - select CRYPTO_LIB_UTILS + +config CRYPTO_LIB_CURVE25519 + tristate + select CRYPTO + select CRYPTO_LIB_CURVE25519_INTERNAL help Enable the Curve25519 library interface. This interface may be fulfilled by either the generic implementation or an arch-specific @@ -104,7 +112,7 @@ config CRYPTO_LIB_POLY1305_RSIZE default 1 config CRYPTO_ARCH_HAVE_LIB_POLY1305 - tristate + bool help Declares whether the architecture provides an arch-specific accelerated implementation of the Poly1305 library interface, @@ -119,23 +127,24 @@ config CRYPTO_LIB_POLY1305_GENERIC implementation is enabled, this implementation serves the users of CRYPTO_LIB_POLY1305. -config CRYPTO_LIB_POLY1305 - tristate "Poly1305 library interface" - depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 +config CRYPTO_LIB_POLY1305_INTERNAL + tristate select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n + +config CRYPTO_LIB_POLY1305 + tristate + select CRYPTO + select CRYPTO_LIB_POLY1305_INTERNAL help Enable the Poly1305 library interface. This interface may be fulfilled by either the generic implementation or an arch-specific one, if one is available and enabled. 
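Likewise for CRYPTO_LIB_POLY1305 above: the library interface is the init/update/final API in <crypto/poly1305.h>, the same calls lib/crypto/chacha20poly1305.c makes. A minimal sketch; the wrapper name is illustrative:

#include <crypto/poly1305.h>

static void poly1305_mac(u8 mac[POLY1305_DIGEST_SIZE],
			 const u8 key[POLY1305_KEY_SIZE],
			 const u8 *msg, unsigned int len)
{
	struct poly1305_desc_ctx ctx;

	poly1305_init(&ctx, key);	/* one-time key, per RFC 8439 */
	poly1305_update(&ctx, msg, len);
	poly1305_final(&ctx, mac);	/* emits the 16-byte tag */
}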
config CRYPTO_LIB_CHACHA20POLY1305 - tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)" - depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA - depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 - depends on CRYPTO + tristate select CRYPTO_LIB_CHACHA select CRYPTO_LIB_POLY1305 - select CRYPTO_ALGAPI + select CRYPTO_LIB_UTILS config CRYPTO_LIB_SHA1 tristate diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c index a839c0ac60b2..9cfa886f1f89 100644 --- a/lib/crypto/chacha20poly1305.c +++ b/lib/crypto/chacha20poly1305.c @@ -7,11 +7,10 @@ * Information: https://tools.ietf.org/html/rfc8439 */ -#include <crypto/algapi.h> #include <crypto/chacha20poly1305.h> #include <crypto/chacha.h> #include <crypto/poly1305.h> -#include <crypto/scatterwalk.h> +#include <crypto/utils.h> #include <linux/unaligned.h> #include <linux/kernel.h> @@ -318,8 +317,8 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src, if (unlikely(sl > -POLY1305_DIGEST_SIZE)) { poly1305_final(&poly1305_state, b.mac[1]); - scatterwalk_map_and_copy(b.mac[encrypt], src, src_len, - sizeof(b.mac[1]), encrypt); + sg_copy_buffer(src, sg_nents(src), b.mac[encrypt], + sizeof(b.mac[1]), src_len, !encrypt); ret = encrypt || !crypto_memneq(b.mac[0], b.mac[1], POLY1305_DIGEST_SIZE); } diff --git a/lib/idr.c b/lib/idr.c index da36054c3ca0..e2adc457abb4 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -477,6 +477,73 @@ nospc: EXPORT_SYMBOL(ida_alloc_range); /** + * ida_find_first_range - Get the lowest used ID. + * @ida: IDA handle. + * @min: Lowest ID to get. + * @max: Highest ID to get. + * + * Get the lowest used ID between @min and @max, inclusive. The returned + * ID will not exceed %INT_MAX, even if @max is larger. + * + * Context: Any context. Takes and releases the xa_lock. + * Return: The lowest used ID, or errno if no used ID is found. + */ +int ida_find_first_range(struct ida *ida, unsigned int min, unsigned int max) +{ + unsigned long index = min / IDA_BITMAP_BITS; + unsigned int offset = min % IDA_BITMAP_BITS; + unsigned long *addr, size, bit; + unsigned long tmp = 0; + unsigned long flags; + void *entry; + int ret; + + if ((int)min < 0) + return -EINVAL; + if ((int)max < 0) + max = INT_MAX; + + xa_lock_irqsave(&ida->xa, flags); + + entry = xa_find(&ida->xa, &index, max / IDA_BITMAP_BITS, XA_PRESENT); + if (!entry) { + ret = -ENOENT; + goto err_unlock; + } + + if (index > min / IDA_BITMAP_BITS) + offset = 0; + if (index * IDA_BITMAP_BITS + offset > max) { + ret = -ENOENT; + goto err_unlock; + } + + if (xa_is_value(entry)) { + tmp = xa_to_value(entry); + addr = &tmp; + size = BITS_PER_XA_VALUE; + } else { + addr = ((struct ida_bitmap *)entry)->bitmap; + size = IDA_BITMAP_BITS; + } + + bit = find_next_bit(addr, size, offset); + + xa_unlock_irqrestore(&ida->xa, flags); + + if (bit == size || + index * IDA_BITMAP_BITS + bit > max) + return -ENOENT; + + return index * IDA_BITMAP_BITS + bit; + +err_unlock: + xa_unlock_irqrestore(&ida->xa, flags); + return ret; +} +EXPORT_SYMBOL(ida_find_first_range); + +/** * ida_free() - Release an allocated ID. * @ida: IDA handle. * @id: Previously allocated ID. diff --git a/lib/interval_tree.c b/lib/interval_tree.c index 3412737ff365..324766e9bf63 100644 --- a/lib/interval_tree.c +++ b/lib/interval_tree.c @@ -20,9 +20,15 @@ EXPORT_SYMBOL_GPL(interval_tree_iter_next); /* * Roll nodes[1] into nodes[0] by advancing nodes[1] to the end of a contiguous * span of nodes. 
This makes nodes[0]->last the end of that contiguous used span - * indexes that started at the original nodes[1]->start. nodes[1] is now the - * first node starting the next used span. A hole span is between nodes[0]->last - * and nodes[1]->start. nodes[1] must be !NULL. + * of indexes that started at the original nodes[1]->start. + * + * If there is an interior hole, nodes[1] is now the first node starting the + * next used span. A hole span is between nodes[0]->last and nodes[1]->start. + * + * If there is a trailing hole, nodes[1] is now NULL. A hole span is between + * nodes[0]->last and last_index. + * + * If the contiguous used range spans to last_index, nodes[1] is set to NULL. */ static void interval_tree_span_iter_next_gap(struct interval_tree_span_iter *state) diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c index 837064b83a6c..5fd62656f42e 100644 --- a/lib/interval_tree_test.c +++ b/lib/interval_tree_test.c @@ -5,6 +5,8 @@ #include <linux/prandom.h> #include <linux/slab.h> #include <asm/timex.h> +#include <linux/bitmap.h> +#include <linux/maple_tree.h> #define __param(type, name, init, msg) \ static type name = init; \ @@ -19,6 +21,7 @@ __param(int, search_loops, 1000, "Number of iterations searching the tree"); __param(bool, search_all, false, "Searches will iterate all nodes in the tree"); __param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint"); +__param(ullong, seed, 3141592653589793238ULL, "Random seed"); static struct rb_root_cached root = RB_ROOT_CACHED; static struct interval_tree_node *nodes = NULL; @@ -59,26 +62,13 @@ static void init(void) queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint; } -static int interval_tree_test_init(void) +static int basic_check(void) { int i, j; - unsigned long results; cycles_t time1, time2, time; - nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node), - GFP_KERNEL); - if (!nodes) - return -ENOMEM; - - queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL); - if (!queries) { - kfree(nodes); - return -ENOMEM; - } - printk(KERN_ALERT "interval tree insert/remove"); - prandom_seed_state(&rnd, 3141592653589793238ULL); init(); time1 = get_cycles(); @@ -96,8 +86,19 @@ static int interval_tree_test_init(void) time = div_u64(time, perf_loops); printk(" -> %llu cycles\n", (unsigned long long)time); + return 0; +} + +static int search_check(void) +{ + int i, j; + unsigned long results; + cycles_t time1, time2, time; + printk(KERN_ALERT "interval tree search"); + init(); + for (j = 0; j < nnodes; j++) interval_tree_insert(nodes + j, &root); @@ -120,6 +121,214 @@ static int interval_tree_test_init(void) printk(" -> %llu cycles (%lu results)\n", (unsigned long long)time, results); + for (j = 0; j < nnodes; j++) + interval_tree_remove(nodes + j, &root); + + return 0; +} + +static int intersection_range_check(void) +{ + int i, j, k; + unsigned long start, last; + struct interval_tree_node *node; + unsigned long *intxn1; + unsigned long *intxn2; + + printk(KERN_ALERT "interval tree iteration\n"); + + intxn1 = bitmap_alloc(nnodes, GFP_KERNEL); + if (!intxn1) { + WARN_ONCE(1, "Failed to allocate intxn1\n"); + return -ENOMEM; + } + + intxn2 = bitmap_alloc(nnodes, GFP_KERNEL); + if (!intxn2) { + WARN_ONCE(1, "Failed to allocate intxn2\n"); + bitmap_free(intxn1); + return -ENOMEM; + } + + for (i = 0; i < search_loops; i++) { + /* Initialize interval tree for each round */ + init(); + for (j = 0; j < nnodes; j++) + interval_tree_insert(nodes + j, &root); + + /* Let's try nsearches different ranges */ + 
for (k = 0; k < nsearches; k++) { + /* Try whole range once */ + if (!k) { + start = 0UL; + last = ULONG_MAX; + } else { + last = (prandom_u32_state(&rnd) >> 4) % max_endpoint; + start = (prandom_u32_state(&rnd) >> 4) % last; + } + + /* Walk nodes to mark intersection nodes */ + bitmap_zero(intxn1, nnodes); + for (j = 0; j < nnodes; j++) { + node = nodes + j; + + if (start <= node->last && last >= node->start) + bitmap_set(intxn1, j, 1); + } + + /* Iterate tree to clear intersection nodes */ + bitmap_zero(intxn2, nnodes); + for (node = interval_tree_iter_first(&root, start, last); node; + node = interval_tree_iter_next(node, start, last)) + bitmap_set(intxn2, node - nodes, 1); + + WARN_ON_ONCE(!bitmap_equal(intxn1, intxn2, nnodes)); + } + + for (j = 0; j < nnodes; j++) + interval_tree_remove(nodes + j, &root); + } + + bitmap_free(intxn1); + bitmap_free(intxn2); + return 0; +} + +#ifdef CONFIG_INTERVAL_TREE_SPAN_ITER +/* + * Helper function to get span of current position from maple tree point of + * view. + */ +static void mas_cur_span(struct ma_state *mas, struct interval_tree_span_iter *state) +{ + unsigned long cur_start; + unsigned long cur_last; + int is_hole; + + if (mas->status == ma_overflow) + return; + + /* walk to current position */ + state->is_hole = mas_walk(mas) ? 0 : 1; + + cur_start = mas->index < state->first_index ? + state->first_index : mas->index; + + /* whether we have followers */ + do { + + cur_last = mas->last > state->last_index ? + state->last_index : mas->last; + + is_hole = mas_next_range(mas, state->last_index) ? 0 : 1; + + } while (mas->status != ma_overflow && is_hole == state->is_hole); + + if (state->is_hole) { + state->start_hole = cur_start; + state->last_hole = cur_last; + } else { + state->start_used = cur_start; + state->last_used = cur_last; + } + + /* advance position for next round */ + if (mas->status != ma_overflow) + mas_set(mas, cur_last + 1); +} + +static int span_iteration_check(void) +{ + int i, j, k; + unsigned long start, last; + struct interval_tree_span_iter span, mas_span; + + DEFINE_MTREE(tree); + + MA_STATE(mas, &tree, 0, 0); + + printk(KERN_ALERT "interval tree span iteration\n"); + + for (i = 0; i < search_loops; i++) { + /* Initialize interval tree for each round */ + init(); + for (j = 0; j < nnodes; j++) + interval_tree_insert(nodes + j, &root); + + /* Put all the range into maple tree */ + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + mt_set_in_rcu(&tree); + + for (j = 0; j < nnodes; j++) + WARN_ON_ONCE(mtree_store_range(&tree, nodes[j].start, + nodes[j].last, nodes + j, GFP_KERNEL)); + + /* Let's try nsearches different ranges */ + for (k = 0; k < nsearches; k++) { + /* Try whole range once */ + if (!k) { + start = 0UL; + last = ULONG_MAX; + } else { + last = (prandom_u32_state(&rnd) >> 4) % max_endpoint; + start = (prandom_u32_state(&rnd) >> 4) % last; + } + + mas_span.first_index = start; + mas_span.last_index = last; + mas_span.is_hole = -1; + mas_set(&mas, start); + + interval_tree_for_each_span(&span, &root, start, last) { + mas_cur_span(&mas, &mas_span); + + WARN_ON_ONCE(span.is_hole != mas_span.is_hole); + + if (span.is_hole) { + WARN_ON_ONCE(span.start_hole != mas_span.start_hole); + WARN_ON_ONCE(span.last_hole != mas_span.last_hole); + } else { + WARN_ON_ONCE(span.start_used != mas_span.start_used); + WARN_ON_ONCE(span.last_used != mas_span.last_used); + } + } + + } + + WARN_ON_ONCE(mas.status != ma_overflow); + + /* Cleanup maple tree for each round */ + mtree_destroy(&tree); + /* Cleanup interval tree for each round 
*/ + for (j = 0; j < nnodes; j++) + interval_tree_remove(nodes + j, &root); + } + return 0; +} +#else +static inline int span_iteration_check(void) {return 0; } +#endif + +static int interval_tree_test_init(void) +{ + nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node), + GFP_KERNEL); + if (!nodes) + return -ENOMEM; + + queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL); + if (!queries) { + kfree(nodes); + return -ENOMEM; + } + + prandom_seed_state(&rnd, seed); + + basic_check(); + search_check(); + intersection_range_check(); + span_iteration_check(); + kfree(queries); kfree(nodes); diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile index 2f58fafbbddd..fc7b2b7ef4b2 100644 --- a/lib/lzo/Makefile +++ b/lib/lzo/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -lzo_compress-objs := lzo1x_compress.o +lzo_compress-objs := lzo1x_compress.o lzo1x_compress_safe.o lzo_decompress-objs := lzo1x_decompress_safe.o obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index 47d6d43ea957..7b10ca86a893 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c @@ -18,11 +18,22 @@ #include <linux/lzo.h> #include "lzodefs.h" -static noinline size_t -lzo1x_1_do_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - size_t ti, void *wrkmem, signed char *state_offset, - const unsigned char bitstream_version) +#undef LZO_UNSAFE + +#ifndef LZO_SAFE +#define LZO_UNSAFE 1 +#define LZO_SAFE(name) name +#define HAVE_OP(x) 1 +#endif + +#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun + +static noinline int +LZO_SAFE(lzo1x_1_do_compress)(const unsigned char *in, size_t in_len, + unsigned char **out, unsigned char *op_end, + size_t *tp, void *wrkmem, + signed char *state_offset, + const unsigned char bitstream_version) { const unsigned char *ip; unsigned char *op; @@ -30,8 +41,9 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, const unsigned char * const ip_end = in + in_len - 20; const unsigned char *ii; lzo_dict_t * const dict = (lzo_dict_t *) wrkmem; + size_t ti = *tp; - op = out; + op = *out; ip = in; ii = ip; ip += ti < 4 ? 
4 - ti : 0; @@ -116,25 +128,32 @@ next: if (t != 0) { if (t <= 3) { op[*state_offset] |= t; + NEED_OP(4); COPY4(op, ii); op += t; } else if (t <= 16) { + NEED_OP(17); *op++ = (t - 3); COPY8(op, ii); COPY8(op + 8, ii + 8); op += t; } else { if (t <= 18) { + NEED_OP(1); *op++ = (t - 3); } else { size_t tt = t - 18; + NEED_OP(1); *op++ = 0; while (unlikely(tt > 255)) { tt -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = tt; } + NEED_OP(t); do { COPY8(op, ii); COPY8(op + 8, ii + 8); @@ -151,6 +170,7 @@ next: if (unlikely(run_length)) { ip += run_length; run_length -= MIN_ZERO_RUN_LENGTH; + NEED_OP(4); put_unaligned_le32((run_length << 21) | 0xfffc18 | (run_length & 0x7), op); op += 4; @@ -243,10 +263,12 @@ m_len_done: ip += m_len; if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { m_off -= 1; + NEED_OP(2); *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); *op++ = (m_off >> 3); } else if (m_off <= M3_MAX_OFFSET) { m_off -= 1; + NEED_OP(1); if (m_len <= M3_MAX_LEN) *op++ = (M3_MARKER | (m_len - 2)); else { @@ -254,14 +276,18 @@ m_len_done: *op++ = M3_MARKER | 0; while (unlikely(m_len > 255)) { m_len -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = (m_len); } + NEED_OP(2); *op++ = (m_off << 2); *op++ = (m_off >> 6); } else { m_off -= 0x4000; + NEED_OP(1); if (m_len <= M4_MAX_LEN) *op++ = (M4_MARKER | ((m_off >> 11) & 8) | (m_len - 2)); @@ -282,11 +308,14 @@ m_len_done: m_len -= M4_MAX_LEN; *op++ = (M4_MARKER | ((m_off >> 11) & 8)); while (unlikely(m_len > 255)) { + NEED_OP(1); m_len -= 255; *op++ = 0; } + NEED_OP(1); *op++ = (m_len); } + NEED_OP(2); *op++ = (m_off << 2); *op++ = (m_off >> 6); } @@ -295,14 +324,20 @@ finished_writing_instruction: ii = ip; goto next; } - *out_len = op - out; - return in_end - (ii - ti); + *out = op; + *tp = in_end - (ii - ti); + return LZO_E_OK; + +output_overrun: + return LZO_E_OUTPUT_OVERRUN; } -static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem, const unsigned char bitstream_version) +static int LZO_SAFE(lzogeneric1x_1_compress)( + const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem, const unsigned char bitstream_version) { + unsigned char * const op_end = out + *out_len; const unsigned char *ip = in; unsigned char *op = out; unsigned char *data_start; @@ -326,14 +361,18 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, while (l > 20) { size_t ll = min_t(size_t, l, m4_max_offset + 1); uintptr_t ll_end = (uintptr_t) ip + ll; + int err; + if ((ll_end + ((t + ll) >> 5)) <= ll_end) break; BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); - t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem, - &state_offset, bitstream_version); + err = LZO_SAFE(lzo1x_1_do_compress)( + ip, ll, &op, op_end, &t, wrkmem, + &state_offset, bitstream_version); + if (err != LZO_E_OK) + return err; ip += ll; - op += *out_len; l -= ll; } t += l; @@ -342,20 +381,26 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, const unsigned char *ii = in + in_len - t; if (op == data_start && t <= 238) { + NEED_OP(1); *op++ = (17 + t); } else if (t <= 3) { op[state_offset] |= t; } else if (t <= 18) { + NEED_OP(1); *op++ = (t - 3); } else { size_t tt = t - 18; + NEED_OP(1); *op++ = 0; while (tt > 255) { tt -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = tt; } + NEED_OP(t); if (t >= 16) do { COPY8(op, ii); COPY8(op + 8, ii + 8); @@ -368,31 
+413,38 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, } while (--t > 0); } + NEED_OP(3); *op++ = M4_MARKER | 1; *op++ = 0; *op++ = 0; *out_len = op - out; return LZO_E_OK; + +output_overrun: + return LZO_E_OUTPUT_OVERRUN; } -int lzo1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem) +int LZO_SAFE(lzo1x_1_compress)(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) { - return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0); + return LZO_SAFE(lzogeneric1x_1_compress)( + in, in_len, out, out_len, wrkmem, 0); } -int lzorle1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem) +int LZO_SAFE(lzorle1x_1_compress)(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) { - return lzogeneric1x_1_compress(in, in_len, out, out_len, - wrkmem, LZO_VERSION); + return LZO_SAFE(lzogeneric1x_1_compress)( + in, in_len, out, out_len, wrkmem, LZO_VERSION); } -EXPORT_SYMBOL_GPL(lzo1x_1_compress); -EXPORT_SYMBOL_GPL(lzorle1x_1_compress); +EXPORT_SYMBOL_GPL(LZO_SAFE(lzo1x_1_compress)); +EXPORT_SYMBOL_GPL(LZO_SAFE(lzorle1x_1_compress)); +#ifndef LZO_UNSAFE MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X-1 Compressor"); +#endif diff --git a/lib/lzo/lzo1x_compress_safe.c b/lib/lzo/lzo1x_compress_safe.c new file mode 100644 index 000000000000..371c9f849492 --- /dev/null +++ b/lib/lzo/lzo1x_compress_safe.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * LZO1X Compressor from LZO + * + * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com> + * + * The full LZO package can be found at: + * http://www.oberhumer.com/opensource/lzo/ + * + * Changed for Linux kernel use by: + * Nitin Gupta <nitingupta910@gmail.com> + * Richard Purdie <rpurdie@openedhand.com> + */ + +#define LZO_SAFE(name) name##_safe +#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) + +#include "lzo1x_compress.c" diff --git a/lib/maple_tree.c b/lib/maple_tree.c index f7153ade1be5..d0bea23fa4bc 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -584,13 +584,10 @@ static __always_inline bool ma_dead_node(const struct maple_node *node) */ static __always_inline bool mte_dead_node(const struct maple_enode *enode) { - struct maple_node *parent, *node; + struct maple_node *node; node = mte_to_node(enode); - /* Do not reorder reads from the node prior to the parent check */ - smp_rmb(); - parent = mte_parent(enode); - return (parent == node); + return ma_dead_node(node); } /* @@ -1245,7 +1242,6 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) if (mas->mas_flags & MA_STATE_PREALLOC) { if (allocated) return; - BUG_ON(!allocated); WARN_ON(!allocated); } @@ -1353,7 +1349,7 @@ static void mas_node_count(struct ma_state *mas, int count) * mas_start() - Sets up maple state for operations. * @mas: The maple state. * - * If mas->status == mas_start, then set the min, max and depth to + * If mas->status == ma_start, then set the min, max and depth to * defaults. 
* * Return: diff --git a/lib/min_heap.c b/lib/min_heap.c index 4485372ff3b1..96f01a4c5fb6 100644 --- a/lib/min_heap.c +++ b/lib/min_heap.c @@ -2,7 +2,7 @@ #include <linux/export.h> #include <linux/min_heap.h> -void __min_heap_init(min_heap_char *heap, void *data, int size) +void __min_heap_init(min_heap_char *heap, void *data, size_t size) { __min_heap_init_inline(heap, data, size); } @@ -20,7 +20,7 @@ bool __min_heap_full(min_heap_char *heap) } EXPORT_SYMBOL(__min_heap_full); -void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size, +void __min_heap_sift_down(min_heap_char *heap, size_t pos, size_t elem_size, const struct min_heap_callbacks *func, void *args) { __min_heap_sift_down_inline(heap, pos, elem_size, func, args); } diff --git a/lib/plist.c b/lib/plist.c index c6bce1226874..330febb4bd7d 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -171,12 +171,24 @@ void plist_requeue(struct plist_node *node, struct plist_head *head) plist_del(node, head); + /* + * After plist_del(), iter is the node's replacement. If the node + * was on prio_list, take a shortcut to find node_next instead of + * looping. + */ + if (!list_empty(&iter->prio_list)) { + iter = list_entry(iter->prio_list.next, struct plist_node, + prio_list); + node_next = &iter->node_list; + goto queue; + } + plist_for_each_continue(iter, head) { if (node->prio != iter->prio) { node_next = &iter->node_list; break; } } +queue: list_add_tail(&node->node_list, node_next); plist_check_head(head); diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc index 863e2d320938..8aa53eb2f395 100644 --- a/lib/raid6/s390vx.uc +++ b/lib/raid6/s390vx.uc @@ -11,6 +11,7 @@ * This file is postprocessed using unroll.awk. */ +#include <linux/cpufeature.h> #include <linux/raid/pq.h> #include <asm/fpu.h> diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index 8655a76d29a1..690cede46ac2 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c @@ -14,6 +14,7 @@ __param(int, nnodes, 100, "Number of nodes in the rb-tree"); __param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree"); __param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree"); +__param(ullong, seed, 3141592653589793238ULL, "Random seed"); struct test_node { u32 key; @@ -239,19 +240,14 @@ static void check_augmented(int nr_nodes) } } -static int __init rbtree_test_init(void) +static int basic_check(void) { int i, j; cycles_t time1, time2, time; struct rb_node *node; - nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL); - if (!nodes) - return -ENOMEM; - printk(KERN_ALERT "rbtree testing"); - prandom_seed_state(&rnd, 3141592653589793238ULL); init(); time1 = get_cycles(); @@ -343,6 +339,14 @@ static int __init rbtree_test_init(void) check(0); } + return 0; +} + +static int augmented_check(void) +{ + int i, j; + cycles_t time1, time2, time; + printk(KERN_ALERT "augmented rbtree testing"); init(); @@ -390,6 +394,20 @@ static int __init rbtree_test_init(void) check_augmented(0); } + return 0; +} + +static int __init rbtree_test_init(void) +{ + nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL); + if (!nodes) + return -ENOMEM; + + prandom_seed_state(&rnd, seed); + + basic_check(); + augmented_check(); + kfree(nodes); return -EAGAIN; /* Fail will directly unload the module */ diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 5bb6b8aff232..b58d5ef1a34b 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -879,7 +879,7 @@ EXPORT_SYMBOL(sg_miter_skip); * @miter->addr and @miter->length point to the current mapping.
* * Context: - * May sleep if !SG_MITER_ATOMIC. + * May sleep if !SG_MITER_ATOMIC && !SG_MITER_LOCAL. * * Returns: * true if @miter contains the next mapping. false if end of sg @@ -901,6 +901,8 @@ bool sg_miter_next(struct sg_mapping_iter *miter) if (miter->__flags & SG_MITER_ATOMIC) miter->addr = kmap_atomic(miter->page) + miter->__offset; + else if (miter->__flags & SG_MITER_LOCAL) + miter->addr = kmap_local_page(miter->page) + miter->__offset; else miter->addr = kmap(miter->page) + miter->__offset; @@ -936,7 +938,9 @@ void sg_miter_stop(struct sg_mapping_iter *miter) if (miter->__flags & SG_MITER_ATOMIC) { WARN_ON_ONCE(!pagefault_disabled()); kunmap_atomic(miter->addr); - } else + } else if (miter->__flags & SG_MITER_LOCAL) + kunmap_local(miter->addr); + else kunmap(miter->page); miter->page = NULL; @@ -965,7 +969,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, { unsigned int offset = 0; struct sg_mapping_iter miter; - unsigned int sg_flags = SG_MITER_ATOMIC; + unsigned int sg_flags = SG_MITER_LOCAL; if (to_buffer) sg_flags |= SG_MITER_FROM_SG; @@ -1080,7 +1084,7 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, { unsigned int offset = 0; struct sg_mapping_iter miter; - unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; + unsigned int sg_flags = SG_MITER_LOCAL | SG_MITER_TO_SG; sg_miter_start(&miter, sgl, nents, sg_flags); diff --git a/lib/stackdepot.c b/lib/stackdepot.c index 245d5b416699..73d7b50924ef 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -591,7 +591,8 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, depot_stack_handle_t handle = 0; struct page *page = NULL; void *prealloc = NULL; - bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC; + bool allow_spin = gfpflags_allow_spinning(alloc_flags); + bool can_alloc = (depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC) && allow_spin; unsigned long flags; u32 hash; @@ -630,7 +631,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, prealloc = page_address(page); } - if (in_nmi()) { + if (in_nmi() || !allow_spin) { /* We can never allocate in NMI context. */ WARN_ON_ONCE(can_alloc); /* Best effort; bail if we fail to take the lock. */ @@ -671,7 +672,10 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, exit: if (prealloc) { /* Stack depot didn't use this memory, free it. 
*/ - free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER); + if (!allow_spin) + free_pages_nolock(virt_to_page(prealloc), DEPOT_POOL_ORDER); + else + free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER); } if (found) handle = found->handle.handle; diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 056f2e411d7b..5b144bc5c4ec 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -195,7 +195,8 @@ static int dmirror_fops_release(struct inode *inode, struct file *filp) static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page) { - return container_of(page->pgmap, struct dmirror_chunk, pagemap); + return container_of(page_pgmap(page), struct dmirror_chunk, + pagemap); } static struct dmirror_device *dmirror_page_to_device(struct page *page) @@ -706,34 +707,23 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start, return 0; } -static int dmirror_atomic_map(unsigned long start, unsigned long end, - struct page **pages, struct dmirror *dmirror) +static int dmirror_atomic_map(unsigned long addr, struct page *page, + struct dmirror *dmirror) { - unsigned long pfn, mapped = 0; - int i; + void *entry; /* Map the migrated pages into the device's page tables. */ mutex_lock(&dmirror->mutex); - for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) { - void *entry; - - if (!pages[i]) - continue; - - entry = pages[i]; - entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC); - entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC); - if (xa_is_err(entry)) { - mutex_unlock(&dmirror->mutex); - return xa_err(entry); - } - - mapped++; + entry = xa_tag_pointer(page, DPT_XA_TAG_ATOMIC); + entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC); + if (xa_is_err(entry)) { + mutex_unlock(&dmirror->mutex); + return xa_err(entry); } mutex_unlock(&dmirror->mutex); - return mapped; + return 0; } static int dmirror_migrate_finalize_and_map(struct migrate_vma *args, @@ -780,10 +770,8 @@ static int dmirror_exclusive(struct dmirror *dmirror, unsigned long start, end, addr; unsigned long size = cmd->npages << PAGE_SHIFT; struct mm_struct *mm = dmirror->notifier.mm; - struct page *pages[64]; struct dmirror_bounce bounce; - unsigned long next; - int ret; + int ret = 0; start = cmd->addr; end = start + size; @@ -795,36 +783,26 @@ static int dmirror_exclusive(struct dmirror *dmirror, return -EINVAL; mmap_read_lock(mm); - for (addr = start; addr < end; addr = next) { - unsigned long mapped = 0; - int i; - - next = min(end, addr + (ARRAY_SIZE(pages) << PAGE_SHIFT)); + for (addr = start; !ret && addr < end; addr += PAGE_SIZE) { + struct folio *folio; + struct page *page; - ret = make_device_exclusive_range(mm, addr, next, pages, NULL); - /* - * Do dmirror_atomic_map() iff all pages are marked for - * exclusive access to avoid accessing uninitialized - * fields of pages. - */ - if (ret == (next - addr) >> PAGE_SHIFT) - mapped = dmirror_atomic_map(addr, next, pages, dmirror); - for (i = 0; i < ret; i++) { - if (pages[i]) { - unlock_page(pages[i]); - put_page(pages[i]); - } + page = make_device_exclusive(mm, addr, NULL, &folio); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + break; } - if (addr + (mapped << PAGE_SHIFT) < next) { - mmap_read_unlock(mm); - mmput(mm); - return -EBUSY; - } + ret = dmirror_atomic_map(addr, page, dmirror); + folio_unlock(folio); + folio_put(folio); } mmap_read_unlock(mm); mmput(mm); + if (ret) + return ret; + /* Return the migrated data for verification. 
*/ ret = dmirror_bounce_init(&bounce, start, size); if (ret) diff --git a/lib/test_ida.c b/lib/test_ida.c index c80155a1956d..63078f8dc13f 100644 --- a/lib/test_ida.c +++ b/lib/test_ida.c @@ -189,6 +189,75 @@ static void ida_check_bad_free(struct ida *ida) IDA_BUG_ON(ida, !ida_is_empty(ida)); } +/* + * Check ida_find_first_range() and variants. + */ +static void ida_check_find_first(struct ida *ida) +{ + /* IDA is empty; none of the IDs below should exist */ + IDA_BUG_ON(ida, ida_exists(ida, 0)); + IDA_BUG_ON(ida, ida_exists(ida, 3)); + IDA_BUG_ON(ida, ida_exists(ida, 63)); + IDA_BUG_ON(ida, ida_exists(ida, 1023)); + IDA_BUG_ON(ida, ida_exists(ida, (1 << 20) - 1)); + + /* IDA contains a single value entry */ + IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3); + IDA_BUG_ON(ida, ida_exists(ida, 0)); + IDA_BUG_ON(ida, !ida_exists(ida, 3)); + IDA_BUG_ON(ida, ida_exists(ida, 63)); + IDA_BUG_ON(ida, ida_exists(ida, 1023)); + IDA_BUG_ON(ida, ida_exists(ida, (1 << 20) - 1)); + + IDA_BUG_ON(ida, ida_alloc_min(ida, 63, GFP_KERNEL) != 63); + IDA_BUG_ON(ida, ida_exists(ida, 0)); + IDA_BUG_ON(ida, !ida_exists(ida, 3)); + IDA_BUG_ON(ida, !ida_exists(ida, 63)); + IDA_BUG_ON(ida, ida_exists(ida, 1023)); + IDA_BUG_ON(ida, ida_exists(ida, (1 << 20) - 1)); + + /* IDA contains a single bitmap */ + IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023); + IDA_BUG_ON(ida, ida_exists(ida, 0)); + IDA_BUG_ON(ida, !ida_exists(ida, 3)); + IDA_BUG_ON(ida, !ida_exists(ida, 63)); + IDA_BUG_ON(ida, !ida_exists(ida, 1023)); + IDA_BUG_ON(ida, ida_exists(ida, (1 << 20) - 1)); + + /* IDA contains a tree */ + IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1); + IDA_BUG_ON(ida, ida_exists(ida, 0)); + IDA_BUG_ON(ida, !ida_exists(ida, 3)); + IDA_BUG_ON(ida, !ida_exists(ida, 63)); + IDA_BUG_ON(ida, !ida_exists(ida, 1023)); + IDA_BUG_ON(ida, !ida_exists(ida, (1 << 20) - 1)); + + /* Now try to find first */ + IDA_BUG_ON(ida, ida_find_first(ida) != 3); + IDA_BUG_ON(ida, ida_find_first_range(ida, -1, 2) != -EINVAL); + IDA_BUG_ON(ida, ida_find_first_range(ida, 0, 2) != -ENOENT); // no used ID + IDA_BUG_ON(ida, ida_find_first_range(ida, 0, 3) != 3); + IDA_BUG_ON(ida, ida_find_first_range(ida, 1, 3) != 3); + IDA_BUG_ON(ida, ida_find_first_range(ida, 3, 3) != 3); + IDA_BUG_ON(ida, ida_find_first_range(ida, 2, 4) != 3); + IDA_BUG_ON(ida, ida_find_first_range(ida, 4, 3) != -ENOENT); // min > max, fail + IDA_BUG_ON(ida, ida_find_first_range(ida, 4, 60) != -ENOENT); // no used ID + IDA_BUG_ON(ida, ida_find_first_range(ida, 4, 64) != 63); + IDA_BUG_ON(ida, ida_find_first_range(ida, 63, 63) != 63); + IDA_BUG_ON(ida, ida_find_first_range(ida, 64, 1026) != 1023); + IDA_BUG_ON(ida, ida_find_first_range(ida, 1023, 1023) != 1023); + IDA_BUG_ON(ida, ida_find_first_range(ida, 1023, (1 << 20) - 1) != 1023); + IDA_BUG_ON(ida, ida_find_first_range(ida, 1024, (1 << 20) - 1) != (1 << 20) - 1); + IDA_BUG_ON(ida, ida_find_first_range(ida, (1 << 20), INT_MAX) != -ENOENT); + + ida_free(ida, 3); + ida_free(ida, 63); + ida_free(ida, 1023); + ida_free(ida, (1 << 20) - 1); + + IDA_BUG_ON(ida, !ida_is_empty(ida)); +} + static DEFINE_IDA(ida); static int ida_checks(void) @@ -202,6 +271,7 @@ static int ida_checks(void) ida_check_max(&ida); ida_check_conv(&ida); ida_check_bad_free(&ida); + ida_check_find_first(&ida); printk("IDA: %u of %u tests passed\n", tests_passed, tests_run); return (tests_run != tests_passed) ?
0 : -EINVAL; diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 0e865bab4a10..080a39d22e73 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1858,6 +1858,54 @@ static void check_split_1(struct xarray *xa, unsigned long index, xa_destroy(xa); } +static void check_split_2(struct xarray *xa, unsigned long index, + unsigned int order, unsigned int new_order) +{ + XA_STATE_ORDER(xas, xa, index, new_order); + unsigned int i, found; + void *entry; + + xa_store_order(xa, index, order, xa, GFP_KERNEL); + xa_set_mark(xa, index, XA_MARK_1); + + /* allocate a node for xas_try_split() */ + xas_set_err(&xas, -ENOMEM); + XA_BUG_ON(xa, !xas_nomem(&xas, GFP_KERNEL)); + + xas_lock(&xas); + xas_try_split(&xas, xa, order); + if (((new_order / XA_CHUNK_SHIFT) < (order / XA_CHUNK_SHIFT)) && + new_order < order - 1) { + XA_BUG_ON(xa, !xas_error(&xas) || xas_error(&xas) != -EINVAL); + xas_unlock(&xas); + goto out; + } + for (i = 0; i < (1 << order); i += (1 << new_order)) + __xa_store(xa, index + i, xa_mk_index(index + i), 0); + xas_unlock(&xas); + + for (i = 0; i < (1 << order); i++) { + unsigned int val = index + (i & ~((1 << new_order) - 1)); + XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val)); + } + + xa_set_mark(xa, index, XA_MARK_0); + XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); + + xas_set_order(&xas, index, 0); + found = 0; + rcu_read_lock(); + xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) { + found++; + XA_BUG_ON(xa, xa_is_internal(entry)); + } + rcu_read_unlock(); + XA_BUG_ON(xa, found != 1 << (order - new_order)); +out: + xas_destroy(&xas); + xa_destroy(xa); +} + static noinline void check_split(struct xarray *xa) { unsigned int order, new_order; @@ -1869,6 +1917,10 @@ static noinline void check_split(struct xarray *xa) check_split_1(xa, 0, order, new_order); check_split_1(xa, 1UL << order, order, new_order); check_split_1(xa, 3UL << order, order, new_order); + + check_split_2(xa, 0, order, new_order); + check_split_2(xa, 1UL << order, order, new_order); + check_split_2(xa, 3UL << order, order, new_order); } } } diff --git a/lib/tests/Makefile b/lib/tests/Makefile index a434c7cb733a..5a4794c1826e 100644 --- a/lib/tests/Makefile +++ b/lib/tests/Makefile @@ -27,6 +27,10 @@ obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_KFIFO_KUNIT_TEST) += kfifo_kunit.o obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o + +CFLAGS_longest_symbol_kunit.o += $(call cc-disable-warning, missing-prototypes) +obj-$(CONFIG_LONGEST_SYM_KUNIT_TEST) += longest_symbol_kunit.o + obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare) obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o diff --git a/lib/tests/longest_symbol_kunit.c b/lib/tests/longest_symbol_kunit.c new file mode 100644 index 000000000000..e3c28ff1807f --- /dev/null +++ b/lib/tests/longest_symbol_kunit.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test the longest symbol length. 
Execute with: + * ./tools/testing/kunit/kunit.py run longest-symbol + * --arch=x86_64 --kconfig_add CONFIG_KPROBES=y --kconfig_add CONFIG_MODULES=y + * --kconfig_add CONFIG_RETPOLINE=n --kconfig_add CONFIG_CFI_CLANG=n + * --kconfig_add CONFIG_MITIGATION_RETPOLINE=n + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <kunit/test.h> +#include <linux/stringify.h> +#include <linux/kprobes.h> +#include <linux/kallsyms.h> + +#define DI(name) s##name##name +#define DDI(name) DI(n##name##name) +#define DDDI(name) DDI(n##name##name) +#define DDDDI(name) DDDI(n##name##name) +#define DDDDDI(name) DDDDI(n##name##name) + +/*Generate a symbol whose name length is 511 */ +#define LONGEST_SYM_NAME DDDDDI(g1h2i3j4k5l6m7n) + +#define RETURN_LONGEST_SYM 0xAAAAA + +noinline int LONGEST_SYM_NAME(void); +noinline int LONGEST_SYM_NAME(void) +{ + return RETURN_LONGEST_SYM; +} + +_Static_assert(sizeof(__stringify(LONGEST_SYM_NAME)) == KSYM_NAME_LEN, +"Incorrect symbol length found. Expected KSYM_NAME_LEN: " +__stringify(KSYM_NAME_LEN) ", but found: " +__stringify(sizeof(LONGEST_SYM_NAME))); + +static void test_longest_symbol(struct kunit *test) +{ + KUNIT_EXPECT_EQ(test, RETURN_LONGEST_SYM, LONGEST_SYM_NAME()); +}; + +static void test_longest_symbol_kallsyms(struct kunit *test) +{ + unsigned long (*kallsyms_lookup_name)(const char *name); + static int (*longest_sym)(void); + + struct kprobe kp = { + .symbol_name = "kallsyms_lookup_name", + }; + + if (register_kprobe(&kp) < 0) { + pr_info("%s: kprobe not registered", __func__); + KUNIT_FAIL(test, "test_longest_symbol kallsyms: kprobe not registered\n"); + return; + } + + kunit_warn(test, "test_longest_symbol kallsyms: kprobe registered\n"); + kallsyms_lookup_name = (unsigned long (*)(const char *name))kp.addr; + unregister_kprobe(&kp); + + longest_sym = + (void *) kallsyms_lookup_name(__stringify(LONGEST_SYM_NAME)); + KUNIT_EXPECT_EQ(test, RETURN_LONGEST_SYM, longest_sym()); +}; + +static struct kunit_case longest_symbol_test_cases[] = { + KUNIT_CASE(test_longest_symbol), + KUNIT_CASE(test_longest_symbol_kallsyms), + {} +}; + +static struct kunit_suite longest_symbol_test_suite = { + .name = "longest-symbol", + .test_cases = longest_symbol_test_cases, +}; +kunit_test_suite(longest_symbol_test_suite); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Test the longest symbol length"); +MODULE_AUTHOR("Sergio González Collado"); diff --git a/lib/tests/module/gen_test_kallsyms.sh b/lib/tests/module/gen_test_kallsyms.sh index 561dcac0f359..31fe4ed63de8 100755 --- a/lib/tests/module/gen_test_kallsyms.sh +++ b/lib/tests/module/gen_test_kallsyms.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash TARGET=$(basename $1) DIR=lib/tests/module diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 734bd70c8b9b..01699852f30c 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -1699,8 +1699,12 @@ char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec, return buf; } +#pragma GCC diagnostic push +#ifndef __clang__ +#pragma GCC diagnostic ignored "-Wsuggest-attribute=format" +#endif static char *va_format(char *buf, char *end, struct va_format *va_fmt, - struct printf_spec spec, const char *fmt) + struct printf_spec spec) { va_list va; @@ -1713,6 +1717,7 @@ static char *va_format(char *buf, char *end, struct va_format *va_fmt, return buf; } +#pragma GCC diagnostic pop static noinline_for_stack char *uuid_string(char *buf, char *end, const u8 *addr, @@ -2291,9 +2296,6 @@ int __init no_hash_pointers_enable(char *str) } early_param("no_hash_pointers", 
no_hash_pointers_enable); -/* Used for Rust formatting ('%pA'). */ -char *rust_fmt_argument(char *buf, char *end, void *ptr); - /* * Show a '%p' thing. A kernel extension is that the '%p' is followed * by an extra set of alphanumeric characters that are extended format @@ -2469,7 +2471,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, case 'U': return uuid_string(buf, end, ptr, spec, fmt); case 'V': - return va_format(buf, end, ptr, spec, fmt); + return va_format(buf, end, ptr, spec); case 'K': return restricted_pointer(buf, end, ptr, spec); case 'N': diff --git a/lib/xarray.c b/lib/xarray.c index 116e9286c64e..9644b18af18d 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -278,6 +278,7 @@ void xas_destroy(struct xa_state *xas) xas->xa_alloc = node = next; } } +EXPORT_SYMBOL_GPL(xas_destroy); /** * xas_nomem() - Allocate memory if needed. @@ -1007,6 +1008,26 @@ static void node_set_marks(struct xa_node *node, unsigned int offset, } } +static void __xas_init_node_for_split(struct xa_state *xas, + struct xa_node *node, void *entry) +{ + unsigned int i; + void *sibling = NULL; + unsigned int mask = xas->xa_sibs; + + if (!node) + return; + node->array = xas->xa; + for (i = 0; i < XA_CHUNK_SIZE; i++) { + if ((i & mask) == 0) { + RCU_INIT_POINTER(node->slots[i], entry); + sibling = xa_mk_sibling(i); + } else { + RCU_INIT_POINTER(node->slots[i], sibling); + } + } +} + /** * xas_split_alloc() - Allocate memory for splitting an entry. * @xas: XArray operation state. @@ -1025,7 +1046,6 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order, gfp_t gfp) { unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; - unsigned int mask = xas->xa_sibs; /* XXX: no support for splitting really large entries yet */ if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order)) @@ -1034,22 +1054,13 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order, return; do { - unsigned int i; - void *sibling = NULL; struct xa_node *node; node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp); if (!node) goto nomem; - node->array = xas->xa; - for (i = 0; i < XA_CHUNK_SIZE; i++) { - if ((i & mask) == 0) { - RCU_INIT_POINTER(node->slots[i], entry); - sibling = xa_mk_sibling(i); - } else { - RCU_INIT_POINTER(node->slots[i], sibling); - } - } + + __xas_init_node_for_split(xas, node, entry); RCU_INIT_POINTER(node->parent, xas->xa_alloc); xas->xa_alloc = node; } while (sibs-- > 0); @@ -1122,6 +1133,128 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) xas_update(xas, node); } EXPORT_SYMBOL_GPL(xas_split); + +/** + * xas_try_split_min_order() - Minimal split order xas_try_split() can accept + * @order: Current entry order. + * + * xas_try_split() can split a multi-index entry to smaller than @order - 1 if + * no new xa_node is needed. This function provides the minimal order + * xas_try_split() supports. + * + * Return: the minimal order xas_try_split() supports + * + * Context: Any context. + * + */ +unsigned int xas_try_split_min_order(unsigned int order) +{ + if (order % XA_CHUNK_SHIFT == 0) + return order == 0 ? 0 : order - 1; + + return order - (order % XA_CHUNK_SHIFT); +} +EXPORT_SYMBOL_GPL(xas_try_split_min_order); + +/** + * xas_try_split() - Try to split a multi-index entry. + * @xas: XArray operation state. + * @entry: New entry to store in the array. + * @order: Current entry order. + * + * The size of the new entries is set in @xas. The value in @entry is + * copied to all the replacement entries. 
If and only if one new xa_node is + * needed, the function will use GFP_NOWAIT to get one if xas->xa_alloc is + * NULL. If more than one new xa_node is needed, the function returns -EINVAL. + * + * NOTE: use xas_try_split_min_order() to get the next split order instead of + * @order - 1 if you want to minimize xas_try_split() calls. + * + * Context: Any context. The caller should hold the xa_lock. + */ +void xas_try_split(struct xa_state *xas, void *entry, unsigned int order) +{ + unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; + unsigned int offset, marks; + struct xa_node *node; + void *curr = xas_load(xas); + int values = 0; + gfp_t gfp = GFP_NOWAIT; + + node = xas->xa_node; + if (xas_top(node)) + return; + + if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) + gfp |= __GFP_ACCOUNT; + + marks = node_get_marks(node, xas->xa_offset); + + offset = xas->xa_offset + sibs; + + if (xas->xa_shift < node->shift) { + struct xa_node *child = xas->xa_alloc; + unsigned int expected_sibs = + (1 << ((order - 1) % XA_CHUNK_SHIFT)) - 1; + + /* + * No support for splitting sibling entries + * (horizontally) or cascade split (vertically), which + * requires two or more new xa_nodes, + * because if one xa_node allocation fails, + * it is hard to free the prior allocations. + */ + if (sibs || xas->xa_sibs != expected_sibs) { + xas_destroy(xas); + xas_set_err(xas, -EINVAL); + return; + } + + if (!child) { + child = kmem_cache_alloc_lru(radix_tree_node_cachep, + xas->xa_lru, gfp); + if (!child) { + xas_destroy(xas); + xas_set_err(xas, -ENOMEM); + return; + } + RCU_INIT_POINTER(child->parent, xas->xa_alloc); + } + __xas_init_node_for_split(xas, child, entry); + + xas->xa_alloc = rcu_dereference_raw(child->parent); + child->shift = node->shift - XA_CHUNK_SHIFT; + child->offset = offset; + child->count = XA_CHUNK_SIZE; + child->nr_values = xa_is_value(entry) ? + XA_CHUNK_SIZE : 0; + RCU_INIT_POINTER(child->parent, node); + node_set_marks(node, offset, child, xas->xa_sibs, + marks); + rcu_assign_pointer(node->slots[offset], + xa_mk_node(child)); + if (xa_is_value(curr)) + values--; + xas_update(xas, child); + + } else { + do { + unsigned int canon = offset - xas->xa_sibs; + + node_set_marks(node, canon, NULL, 0, marks); + rcu_assign_pointer(node->slots[canon], entry); + while (offset > canon) + rcu_assign_pointer(node->slots[offset--], + xa_mk_sibling(canon)); + values += (xa_is_value(entry) - xa_is_value(curr)) * + (xas->xa_sibs + 1); + } while (offset-- > xas->xa_offset); + } + + node->nr_values += values; + xas_update(xas, node); +} +EXPORT_SYMBOL_GPL(xas_try_split); #endif /** diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c index 3a1d8d34182e..8fb2a3e17c0e 100644 --- a/lib/zlib_deflate/deflate.c +++ b/lib/zlib_deflate/deflate.c @@ -151,9 +151,6 @@ static const config configuration_table[10] = { * meaning. */ -#define EQUAL 0 -/* result of memcmp for equal strings */ - /* =========================================================================== * Update a hash value with the given input byte * IN assertion: all calls to UPDATE_HASH are made with consecutive @@ -713,8 +710,7 @@ static void check_match( ) { /* check that the match is indeed a match */ - if (memcmp((char *)s->window + match, - (char *)s->window + start, length) != EQUAL) { + if (memcmp((char *)s->window + match, (char *)s->window + start, length)) { fprintf(stderr, " start %u, match %u, length %d\n", start, match, length); do {
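To make the xas_try_split() contract from lib/xarray.c above concrete: here is a minimal sketch of a caller that splits an order-N entry stepwise down to order 0, taking each step's order from xas_try_split_min_order() as the NOTE recommends. It assumes the entry was stored with xa_store_order() and follows the locking the kernel-doc requires; the function name is illustrative.

static int split_down_to_base(struct xarray *xa, unsigned long index,
			      unsigned int order, void *entry)
{
	while (order) {
		unsigned int new_order = xas_try_split_min_order(order);
		XA_STATE_ORDER(xas, xa, index, new_order);

		xas_lock(&xas);
		xas_try_split(&xas, entry, order);	/* at most one new node per step */
		if (xas_error(&xas)) {
			xas_unlock(&xas);
			return xas_error(&xas);	/* -ENOMEM, or -EINVAL if unsupported */
		}
		xas_unlock(&xas);
		order = new_order;
	}
	return 0;
}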