Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig.debug                | 15
-rw-r--r-- | lib/closure.c                    |  5
-rw-r--r-- | lib/errname.c                    |  6
-rw-r--r-- | lib/fw_table.c                   |  2
-rw-r--r-- | lib/group_cpus.c                 | 22
-rw-r--r-- | lib/idr.c                        |  2
-rw-r--r-- | lib/iov_iter.c                   |  2
-rw-r--r-- | lib/kunit/kunit-test.c           |  2
-rw-r--r-- | lib/kunit/test.c                 | 42
-rw-r--r-- | lib/maple_tree.c                 | 11
-rw-r--r-- | lib/objpool.c                    | 17
-rw-r--r-- | lib/test_ida.c                   | 40
-rw-r--r-- | lib/vsprintf.c                   | 11
-rw-r--r-- | lib/zstd/common/fse_decompress.c |  2
14 files changed, 140 insertions, 39 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index cc7d53d9dc01..4405f81248fb 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1739,21 +1739,6 @@ config DEBUG_MAPLE_TREE
 
 endmenu
 
-config DEBUG_CREDENTIALS
-	bool "Debug credential management"
-	depends on DEBUG_KERNEL
-	help
-	  Enable this to turn on some debug checking for credential
-	  management. The additional code keeps track of the number of
-	  pointers from task_structs to any given cred struct, and checks to
-	  see that this number never exceeds the usage count of the cred
-	  struct.
-
-	  Furthermore, if SELinux is enabled, this also checks that the
-	  security pointer in the cred struct is never seen to be invalid.
-
-	  If unsure, say N.
-
 source "kernel/rcu/Kconfig.debug"
 
 config DEBUG_WQ_FORCE_RR_CPU
diff --git a/lib/closure.c b/lib/closure.c
index f86c9eeafb35..c16540552d61 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -36,7 +36,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 		closure_debug_destroy(cl);
 
 		if (destructor)
-			destructor(cl);
+			destructor(&cl->work);
 
 		if (parent)
 			closure_put(parent);
@@ -108,8 +108,9 @@ struct closure_syncer {
 	int			done;
 };
 
-static void closure_sync_fn(struct closure *cl)
+static CLOSURE_CALLBACK(closure_sync_fn)
 {
+	struct closure *cl = container_of(ws, struct closure, work);
 	struct closure_syncer *s = cl->s;
 	struct task_struct *p;
 
diff --git a/lib/errname.c b/lib/errname.c
index dd1b998552cd..4f9112b38f3a 100644
--- a/lib/errname.c
+++ b/lib/errname.c
@@ -111,9 +111,6 @@ static const char *names_0[] = {
 	E(ENOSPC),
 	E(ENOSR),
 	E(ENOSTR),
-#ifdef ENOSYM
-	E(ENOSYM),
-#endif
 	E(ENOSYS),
 	E(ENOTBLK),
 	E(ENOTCONN),
@@ -144,9 +141,6 @@ static const char *names_0[] = {
 #endif
 	E(EREMOTE),
 	E(EREMOTEIO),
-#ifdef EREMOTERELEASE
-	E(EREMOTERELEASE),
-#endif
 	E(ERESTART),
 	E(ERFKILL),
 	E(EROFS),
diff --git a/lib/fw_table.c b/lib/fw_table.c
index b51f30a28e47..294df54e33b6 100644
--- a/lib/fw_table.c
+++ b/lib/fw_table.c
@@ -7,7 +7,7 @@
  * Copyright (C) 2023 Intel Corp.
  */
 #include <linux/errno.h>
-#include <linux/fw_table.h>
+#include <linux/acpi.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index aa3f6815bb12..ee272c4cefcc 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -366,13 +366,25 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
 	if (!masks)
 		goto fail_node_to_cpumask;
 
-	/* Stabilize the cpumasks */
-	cpus_read_lock();
 	build_node_to_cpumask(node_to_cpumask);
 
+	/*
+	 * Make a local cache of 'cpu_present_mask', so the two stages
+	 * spread can observe consistent 'cpu_present_mask' without holding
+	 * cpu hotplug lock, then we can reduce deadlock risk with cpu
+	 * hotplug code.
+	 *
+	 * Here CPU hotplug may happen when reading `cpu_present_mask`, and
+	 * we can live with the case because it only affects that hotplug
+	 * CPU is handled in the 1st or 2nd stage, and either way is correct
+	 * from API user viewpoint since 2-stage spread is sort of
+	 * optimization.
+	 */
+	cpumask_copy(npresmsk, data_race(cpu_present_mask));
+
 	/* grouping present CPUs first */
 	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
-				  cpu_present_mask, nmsk, masks);
+				  npresmsk, nmsk, masks);
 	if (ret < 0)
 		goto fail_build_affinity;
 	nr_present = ret;
@@ -387,15 +399,13 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
 		curgrp = 0;
 	else
 		curgrp = nr_present;
-	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+	cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk);
 	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
 				  npresmsk, nmsk, masks);
 	if (ret >= 0)
 		nr_others = ret;
 
  fail_build_affinity:
-	cpus_read_unlock();
-
 	if (ret >= 0)
 		WARN_ON(nr_present + nr_others < numgrps);
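
The group_cpus.c change above swaps the hotplug lock for a snapshot: both spread stages read one cached copy of cpu_present_mask, and the second stage derives the "possible but not present" set by masking that same copy out of cpu_possible_mask. Below is a minimal userspace sketch of why the snapshot matters, using a 64-bit word as a stand-in for struct cpumask; all names and values are illustrative, not kernel code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t possible = 0xff;	/* CPUs 0-7 may ever exist */
	uint64_t live_present = 0x0f;	/* CPUs 0-3 online right now */

	/* mirrors cpumask_copy(npresmsk, data_race(cpu_present_mask)) */
	uint64_t snapshot = live_present;

	live_present |= 0x10;		/* CPU 4 hot-plugs between the stages */

	/*
	 * Stage 1 spreads 'snapshot'; stage 2 spreads its complement,
	 * mirroring cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk).
	 */
	uint64_t others = possible & ~snapshot;

	/* every possible CPU lands in exactly one stage, hotplug or not */
	printf("no overlap: %d, full cover: %d\n",
	       (snapshot & others) == 0, (snapshot | others) == possible);
	return 0;
}

With the snapshot, a CPU that becomes present between the stages is simply handled as "other"; re-reading the live mask for the andnot could instead drop it from both sets.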
diff --git a/lib/idr.c b/lib/idr.c
index 13f2758c2377..da36054c3ca0 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -508,7 +508,7 @@ void ida_free(struct ida *ida, unsigned int id)
 			goto delete;
 		xas_store(&xas, xa_mk_value(v));
 	} else {
-		if (!test_bit(bit, bitmap->bitmap))
+		if (!bitmap || !test_bit(bit, bitmap->bitmap))
 			goto err;
 		__clear_bit(bit, bitmap->bitmap);
 		xas_set_mark(&xas, XA_FREE_MARK);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 009875bc95aa..e0aa6b440ca5 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -409,7 +409,7 @@ size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t byte
 		void *kaddr = kmap_local_page(page);
 		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
 
-		n = iterate_and_advance(i, bytes, kaddr,
+		n = iterate_and_advance(i, n, kaddr + offset,
 					copy_to_user_iter_nofault,
 					memcpy_to_iter);
 		kunmap_local(kaddr);
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 99d2a3a528e1..de2113a58fa0 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -562,7 +562,7 @@ static void kunit_log_test(struct kunit *test)
 	KUNIT_EXPECT_TRUE(test, test->log->append_newlines);
 
 	full_log = string_stream_get_string(test->log);
-	kunit_add_action(test, (kunit_action_t *)kfree, full_log);
+	kunit_add_action(test, kfree_wrapper, full_log);
 	KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
 				     strstr(full_log, "put this in log."));
 	KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index f2eb71f1a66c..7aceb07a1af9 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -338,6 +338,36 @@ void kunit_init_test(struct kunit *test, const char *name, struct string_stream
 }
 EXPORT_SYMBOL_GPL(kunit_init_test);
 
+/* Only warn when a test takes more than twice the threshold */
+#define KUNIT_SPEED_WARNING_MULTIPLIER	2
+
+/* Slow tests are defined as taking more than 1s */
+#define KUNIT_SPEED_SLOW_THRESHOLD_S	1
+
+#define KUNIT_SPEED_SLOW_WARNING_THRESHOLD_S	\
+	(KUNIT_SPEED_WARNING_MULTIPLIER * KUNIT_SPEED_SLOW_THRESHOLD_S)
+
+#define s_to_timespec64(s) ns_to_timespec64((s) * NSEC_PER_SEC)
+
+static void kunit_run_case_check_speed(struct kunit *test,
+				       struct kunit_case *test_case,
+				       struct timespec64 duration)
+{
+	struct timespec64 slow_thr =
+		s_to_timespec64(KUNIT_SPEED_SLOW_WARNING_THRESHOLD_S);
+	enum kunit_speed speed = test_case->attr.speed;
+
+	if (timespec64_compare(&duration, &slow_thr) < 0)
+		return;
+
+	if (speed == KUNIT_SPEED_VERY_SLOW || speed == KUNIT_SPEED_SLOW)
+		return;
+
+	kunit_warn(test,
+		   "Test should be marked slow (runtime: %lld.%09lds)",
+		   duration.tv_sec, duration.tv_nsec);
+}
+
 /*
  * Initializes and runs test case. Does not clean up or do post validations.
  */
@@ -345,6 +375,8 @@ static void kunit_run_case_internal(struct kunit *test,
 				    struct kunit_suite *suite,
 				    struct kunit_case *test_case)
 {
+	struct timespec64 start, end;
+
 	if (suite->init) {
 		int ret;
 
@@ -356,7 +388,13 @@ static void kunit_run_case_internal(struct kunit *test,
 		}
 	}
 
+	ktime_get_ts64(&start);
+
 	test_case->run_case(test);
+
+	ktime_get_ts64(&end);
+
+	kunit_run_case_check_speed(test, test_case, timespec64_sub(end, start));
 }
 
 static void kunit_case_internal_cleanup(struct kunit *test)
@@ -670,6 +708,8 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
 		return 0;
 	}
 
+	kunit_suite_counter = 1;
+
 	static_branch_inc(&kunit_running);
 
 	for (i = 0; i < num_suites; i++) {
@@ -696,8 +736,6 @@ void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites)
 
 	for (i = 0; i < num_suites; i++)
 		kunit_exit_suite(suites[i]);
-
-	kunit_suite_counter = 1;
 }
 EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
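
The kunit/test.c hunks bracket run_case() with ktime_get_ts64() and warn when a test that is not marked slow runs past twice the 1 s threshold. Here is a self-contained sketch of the same bookkeeping using POSIX clock_gettime(); the names (run_case, check_speed, SPEED_SLOW) are made up for illustration and are not the KUnit API.

#include <stdio.h>
#include <time.h>

enum speed { SPEED_NORMAL, SPEED_SLOW, SPEED_VERY_SLOW };

#define SLOW_THRESHOLD_S	1	/* "slow" means more than 1s */
#define WARN_MULTIPLIER		2	/* only warn past twice that */

static void check_speed(const char *name, enum speed attr, struct timespec d)
{
	if (d.tv_sec < (time_t)(WARN_MULTIPLIER * SLOW_THRESHOLD_S))
		return;
	if (attr == SPEED_SLOW || attr == SPEED_VERY_SLOW)
		return;
	fprintf(stderr, "%s: should be marked slow (runtime: %lld.%09lds)\n",
		name, (long long)d.tv_sec, d.tv_nsec);
}

static void run_case(void (*fn)(void), const char *name, enum speed attr)
{
	struct timespec start, end, d;

	clock_gettime(CLOCK_MONOTONIC, &start);
	fn();
	clock_gettime(CLOCK_MONOTONIC, &end);

	d.tv_sec = end.tv_sec - start.tv_sec;
	d.tv_nsec = end.tv_nsec - start.tv_nsec;
	if (d.tv_nsec < 0) {	/* borrow a second, like timespec64_sub() */
		d.tv_sec--;
		d.tv_nsec += 1000000000L;
	}
	check_speed(name, attr, d);
}

static void sleepy_test(void)
{
	struct timespec t = { .tv_sec = 2, .tv_nsec = 1000000 };
	nanosleep(&t, NULL);
}

int main(void)
{
	run_case(sleepy_test, "sleepy_test", SPEED_NORMAL);	/* warns */
	return 0;
}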
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index bb24d84a4922..684689457d77 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5501,6 +5501,17 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
 
 	mas_wr_end_piv(&wr_mas);
 	node_size = mas_wr_new_end(&wr_mas);
+
+	/* Slot store, does not require additional nodes */
+	if (node_size == wr_mas.node_end) {
+		/* reuse node */
+		if (!mt_in_rcu(mas->tree))
+			return 0;
+		/* shifting boundary */
+		if (wr_mas.offset_end - mas->offset == 1)
+			return 0;
+	}
+
 	if (node_size >= mt_slots[wr_mas.type]) {
 		/* Split, worst case for now. */
 		request = 1 + mas_mt_height(mas) * 2;
diff --git a/lib/objpool.c b/lib/objpool.c
index ce0087f64400..cfdc02420884 100644
--- a/lib/objpool.c
+++ b/lib/objpool.c
@@ -201,6 +201,23 @@ static inline void *objpool_try_get_slot(struct objpool_head *pool, int cpu)
 	while (head != READ_ONCE(slot->last)) {
 		void *obj;
 
+		/*
+		 * data visibility of 'last' and 'head' could be out of
+		 * order since memory updating of 'last' and 'head' are
+		 * performed in push() and pop() independently
+		 *
+		 * before any retrieving attempts, pop() must guarantee
+		 * 'last' is behind 'head', that is to say, there must
+		 * be available objects in slot, which could be ensured
+		 * by condition 'last != head && last - head <= nr_objs'
+		 * that is equivalent to 'last - head - 1 < nr_objs' as
+		 * 'last' and 'head' are both unsigned int32
+		 */
+		if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) {
+			head = READ_ONCE(slot->head);
+			continue;
+		}
+
 		/* obj must be retrieved before moving forward head */
 		obj = READ_ONCE(slot->entries[head & slot->mask]);
 
diff --git a/lib/test_ida.c b/lib/test_ida.c
index b06880625961..55105baa19da 100644
--- a/lib/test_ida.c
+++ b/lib/test_ida.c
@@ -150,6 +150,45 @@ static void ida_check_conv(struct ida *ida)
 	IDA_BUG_ON(ida, !ida_is_empty(ida));
 }
 
+/*
+ * Check various situations where we attempt to free an ID we don't own.
+ */
+static void ida_check_bad_free(struct ida *ida)
+{
+	unsigned long i;
+
+	printk("vvv Ignore \"not allocated\" warnings\n");
+	/* IDA is empty; all of these will fail */
+	ida_free(ida, 0);
+	for (i = 0; i < 31; i++)
+		ida_free(ida, 1 << i);
+
+	/* IDA contains a single value entry */
+	IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3);
+	ida_free(ida, 0);
+	for (i = 0; i < 31; i++)
+		ida_free(ida, 1 << i);
+
+	/* IDA contains a single bitmap */
+	IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023);
+	ida_free(ida, 0);
+	for (i = 0; i < 31; i++)
+		ida_free(ida, 1 << i);
+
+	/* IDA contains a tree */
+	IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1);
+	ida_free(ida, 0);
+	for (i = 0; i < 31; i++)
+		ida_free(ida, 1 << i);
+	printk("^^^ \"not allocated\" warnings over\n");
+
+	ida_free(ida, 3);
+	ida_free(ida, 1023);
+	ida_free(ida, (1 << 20) - 1);
+
+	IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
 static DEFINE_IDA(ida);
 
 static int ida_checks(void)
@@ -162,6 +201,7 @@ static int ida_checks(void)
 	ida_check_leaf(&ida, 1024 * 64);
 	ida_check_max(&ida);
 	ida_check_conv(&ida);
+	ida_check_bad_free(&ida);
 
 	printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
 	return (tests_run != tests_passed) ? 0 : -EINVAL;
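
The objpool.c comment above leans on unsigned wraparound: with 32-bit indices, the two-part check 'last != head && last - head <= nr_objs' collapses into the single comparison 'last - head - 1 < nr_objs', because last == head makes last - head - 1 wrap to UINT32_MAX. A small brute-force userspace check of that equivalence, wraparound cases included:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t nr_objs = 8;
	const uint32_t heads[] = { 0, 1, 7, UINT32_MAX - 3, UINT32_MAX };
	int mismatch = 0;

	for (unsigned int h = 0; h < sizeof(heads) / sizeof(heads[0]); h++) {
		for (uint32_t d = 0; d <= nr_objs + 2; d++) {
			uint32_t head = heads[h];
			uint32_t last = head + d;	/* may wrap: fine */
			int full = last != head && last - head <= nr_objs;
			int compact = last - head - 1 < nr_objs;

			if (full != compact)
				mismatch = 1;
		}
	}
	printf("%s\n", mismatch ? "NOT equivalent" : "equivalent");
	return 0;
}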
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3e3733a7084f..552738f14275 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2111,15 +2111,20 @@ char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
 
 	/* Loop starting from the root node to the current node. */
 	for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
-		struct fwnode_handle *__fwnode =
-			fwnode_get_nth_parent(fwnode, depth);
+		/*
+		 * Only get a reference for other nodes (i.e. parent nodes).
+		 * fwnode refcount may be 0 here.
+		 */
+		struct fwnode_handle *__fwnode = depth ?
+			fwnode_get_nth_parent(fwnode, depth) : fwnode;
 
 		buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
 			     default_str_spec);
 		buf = string(buf, end, fwnode_get_name(__fwnode),
 			     default_str_spec);
 
-		fwnode_handle_put(__fwnode);
+		if (depth)
+			fwnode_handle_put(__fwnode);
 	}
 
 	return buf;
diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
index a0d06095be83..8dcb8ca39767 100644
--- a/lib/zstd/common/fse_decompress.c
+++ b/lib/zstd/common/fse_decompress.c
@@ -312,7 +312,7 @@ size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size
 
 typedef struct {
 	short ncount[FSE_MAX_SYMBOL_VALUE + 1];
-	FSE_DTable dtable[1]; /* Dynamically sized */
+	FSE_DTable dtable[];  /* Dynamically sized */
 } FSE_DecompressWksp;
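
The fse_decompress.c hunk replaces a one-element trailing array with a C99 flexible array member. With dtable[1], the element is counted inside sizeof(FSE_DecompressWksp), so workspace math of the form 'sizeof(struct) + n * sizeof(elem)' over-reserves by one entry, and indexing past element 0 trips array-bounds checkers; a flexible array member sizes only the header. A standalone illustration with made-up struct names:

#include <stdio.h>
#include <stdlib.h>

struct old_wksp {
	short ncount[4];
	unsigned dtable[1];	/* counted in sizeof */
};

struct new_wksp {
	short ncount[4];
	unsigned dtable[];	/* flexible array member: not counted */
};

int main(void)
{
	size_t n = 16;

	printf("sizeof old=%zu new=%zu\n",
	       sizeof(struct old_wksp), sizeof(struct new_wksp));

	/* correct allocation for exactly n table entries */
	struct new_wksp *w = malloc(sizeof(*w) + n * sizeof(w->dtable[0]));
	if (!w)
		return 1;
	w->dtable[n - 1] = 0;	/* last element is valid to touch */
	free(w);
	return 0;
}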