Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug             |   7
-rw-r--r--   lib/Kconfig.kcsan             |  26
-rw-r--r--   lib/cpumask.c                 |  16
-rw-r--r--   lib/crc-t10dif.c              |  75
-rw-r--r--   lib/crypto/chacha20poly1305.c |   2
-rw-r--r--   lib/crypto/sha256.c           |  10
-rw-r--r--   lib/debugobjects.c            |  13
-rw-r--r--   lib/math/div64.c              |  41
-rw-r--r--   lib/mpi/Makefile              |   1
-rw-r--r--   lib/mpi/mpi-sub-ui.c          |  78
-rw-r--r--   lib/random32.c                |   2
-rw-r--r--   lib/rhashtable.c              |  35
-rw-r--r--   lib/sbitmap.c                 |   3
-rw-r--r--   lib/test_vmalloc.c            | 103
14 files changed, 331 insertions, 81 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9ad9210d70a1..1da566baecc9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1117,6 +1117,7 @@ config PROVE_LOCKING
 	select DEBUG_RWSEMS
 	select DEBUG_WW_MUTEX_SLOWPATH
 	select DEBUG_LOCK_ALLOC
+	select PREEMPT_COUNT if !ARCH_NO_PREEMPT
 	select TRACE_IRQFLAGS
 	default n
 	help
@@ -1325,11 +1326,17 @@ config WW_MUTEX_SELFTEST
 endmenu # lock debugging
 
 config TRACE_IRQFLAGS
+	depends on TRACE_IRQFLAGS_SUPPORT
 	bool
 	help
 	  Enables hooks to interrupt enabling and disabling for
 	  either tracing or lock debugging.
 
+config TRACE_IRQFLAGS_NMI
+	def_bool y
+	depends on TRACE_IRQFLAGS
+	depends on TRACE_IRQFLAGS_NMI_SUPPORT
+
 config STACKTRACE
 	bool "Stack backtrace support"
 	depends on STACKTRACE_SUPPORT
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 5ee88e5119c2..3d282d51849b 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -4,7 +4,8 @@ config HAVE_ARCH_KCSAN
 	bool
 
 config HAVE_KCSAN_COMPILER
-	def_bool CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1)
+	def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1)) || \
+		 (CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-distinguish-volatile=1))
 	help
 	  For the list of compilers that support KCSAN, please see
 	  <file:Documentation/dev-tools/kcsan.rst>.
@@ -59,7 +60,28 @@ config KCSAN_SELFTEST
 	bool "Perform short selftests on boot"
 	default y
 	help
-	  Run KCSAN selftests on boot. On test failure, causes the kernel to panic.
+	  Run KCSAN selftests on boot. On test failure, causes the kernel to
+	  panic. Recommended to be enabled, ensuring critical functionality
+	  works as intended.
+
+config KCSAN_TEST
+	tristate "KCSAN test for integrated runtime behaviour"
+	depends on TRACEPOINTS && KUNIT
+	select TORTURE_TEST
+	help
+	  KCSAN test focusing on behaviour of the integrated runtime. Tests
+	  various race scenarios, and verifies the reports generated to
+	  console. Makes use of KUnit for test organization, and the Torture
+	  framework for test thread control.
+
+	  Each test case may run at least up to KCSAN_REPORT_ONCE_IN_MS
+	  milliseconds. Test run duration may be optimized by building the
+	  kernel and KCSAN test with KCSAN_REPORT_ONCE_IN_MS set to a lower
+	  than default value.
+
+	  Say Y here if you want the test to be built into the kernel and run
+	  during boot; say M if you want the test to build as a module; say N
+	  if you are unsure.
 
 config KCSAN_EARLY_ENABLE
 	bool "Early enable during boot"
diff --git a/lib/cpumask.c b/lib/cpumask.c
index fb22fb266f93..85da6ab4fbb5 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -6,6 +6,7 @@
 #include <linux/export.h>
 #include <linux/memblock.h>
 #include <linux/numa.h>
+#include <linux/sched/isolation.h>
 
 /**
  * cpumask_next - get the next cpu in a cpumask
@@ -205,22 +206,27 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
  */
 unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	int cpu;
+	int cpu, hk_flags;
+	const struct cpumask *mask;
 
+	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
+	mask = housekeeping_cpumask(hk_flags);
 	/* Wrap: we always want a cpu. */
-	i %= num_online_cpus();
+	i %= cpumask_weight(mask);
 
 	if (node == NUMA_NO_NODE) {
-		for_each_cpu(cpu, cpu_online_mask)
+		for_each_cpu(cpu, mask) {
 			if (i-- == 0)
 				return cpu;
+		}
 	} else {
 		/* NUMA first. */
-		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+		for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
 			if (i-- == 0)
 				return cpu;
+		}
 
-		for_each_cpu(cpu, cpu_online_mask) {
+		for_each_cpu(cpu, mask) {
 			/* Skip NUMA nodes, done above. */
 			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
 				continue;
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 8cc01a603416..1ed2ed487097 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -17,64 +17,69 @@
 #include <linux/notifier.h>
 
 static struct crypto_shash __rcu *crct10dif_tfm;
-static struct static_key crct10dif_fallback __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback);
 static DEFINE_MUTEX(crc_t10dif_mutex);
+static struct work_struct crct10dif_rehash_work;
 
-static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
+static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 	struct crypto_alg *alg = data;
-	struct crypto_shash *new, *old;
 
 	if (val != CRYPTO_MSG_ALG_LOADED ||
-	    static_key_false(&crct10dif_fallback) ||
-	    strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
-		return 0;
+	    strcmp(alg->cra_name, CRC_T10DIF_STRING))
+		return NOTIFY_DONE;
+
+	schedule_work(&crct10dif_rehash_work);
+	return NOTIFY_OK;
+}
+
+static void crc_t10dif_rehash(struct work_struct *work)
+{
+	struct crypto_shash *new, *old;
 
 	mutex_lock(&crc_t10dif_mutex);
 	old = rcu_dereference_protected(crct10dif_tfm,
 					lockdep_is_held(&crc_t10dif_mutex));
-	if (!old) {
-		mutex_unlock(&crc_t10dif_mutex);
-		return 0;
-	}
-	new = crypto_alloc_shash("crct10dif", 0, 0);
+	new = crypto_alloc_shash(CRC_T10DIF_STRING, 0, 0);
 	if (IS_ERR(new)) {
 		mutex_unlock(&crc_t10dif_mutex);
-		return 0;
+		return;
 	}
 	rcu_assign_pointer(crct10dif_tfm, new);
 	mutex_unlock(&crc_t10dif_mutex);
 
-	synchronize_rcu();
-	crypto_free_shash(old);
-	return 0;
+	if (old) {
+		synchronize_rcu();
+		crypto_free_shash(old);
+	} else {
+		static_branch_disable(&crct10dif_fallback);
+	}
 }
 
 static struct notifier_block crc_t10dif_nb = {
-	.notifier_call = crc_t10dif_rehash,
+	.notifier_call = crc_t10dif_notify,
 };
 
 __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
 {
 	struct {
 		struct shash_desc shash;
-		char ctx[2];
+		__u16 crc;
 	} desc;
 	int err;
 
-	if (static_key_false(&crct10dif_fallback))
+	if (static_branch_unlikely(&crct10dif_fallback))
 		return crc_t10dif_generic(crc, buffer, len);
 
 	rcu_read_lock();
 	desc.shash.tfm = rcu_dereference(crct10dif_tfm);
-	*(__u16 *)desc.ctx = crc;
-
+	desc.crc = crc;
 	err = crypto_shash_update(&desc.shash, buffer, len);
 	rcu_read_unlock();
 
 	BUG_ON(err);
 
-	return *(__u16 *)desc.ctx;
+	return desc.crc;
 }
 EXPORT_SYMBOL(crc_t10dif_update);
 
@@ -86,19 +91,17 @@ EXPORT_SYMBOL(crc_t10dif);
 
 static int __init crc_t10dif_mod_init(void)
 {
+	INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash);
 	crypto_register_notifier(&crc_t10dif_nb);
-	crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
-	if (IS_ERR(crct10dif_tfm)) {
-		static_key_slow_inc(&crct10dif_fallback);
-		crct10dif_tfm = NULL;
-	}
+	crc_t10dif_rehash(&crct10dif_rehash_work);
 	return 0;
 }
 
 static void __exit crc_t10dif_mod_fini(void)
 {
 	crypto_unregister_notifier(&crc_t10dif_nb);
-	crypto_free_shash(crct10dif_tfm);
+	cancel_work_sync(&crct10dif_rehash_work);
+	crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1));
 }
 
 module_init(crc_t10dif_mod_init);
@@ -106,15 +109,23 @@
 module_exit(crc_t10dif_mod_fini);
 
 static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
 {
-	if (static_key_false(&crct10dif_fallback))
+	struct crypto_shash *tfm;
+	int len;
+
+	if (static_branch_unlikely(&crct10dif_fallback))
 		return sprintf(buffer, "fallback\n");
-	return sprintf(buffer, "%s\n",
-		crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
+
+	rcu_read_lock();
+	tfm = rcu_dereference(crct10dif_tfm);
+	len = snprintf(buffer, PAGE_SIZE, "%s\n",
+		       crypto_shash_driver_name(tfm));
+	rcu_read_unlock();
+
+	return len;
 }
 
-module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
+module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0444);
 
-MODULE_DESCRIPTION("T10 DIF CRC calculation");
+MODULE_DESCRIPTION("T10 DIF CRC calculation (library API)");
 MODULE_LICENSE("GPL");
 MODULE_SOFTDEP("pre: crct10dif");
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
index ad0699ce702f..431e04280332 100644
--- a/lib/crypto/chacha20poly1305.c
+++ b/lib/crypto/chacha20poly1305.c
@@ -21,8 +21,6 @@
 
 #define CHACHA_KEY_WORDS	(CHACHA_KEY_SIZE / sizeof(u32))
 
-bool __init chacha20poly1305_selftest(void);
-
 static void chacha_load_key(u32 *k, const u8 *in)
 {
 	k[0] = get_unaligned_le32(in);
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 2e621697c5c3..2321f6cb322f 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -280,4 +280,14 @@ void sha224_final(struct sha256_state *sctx, u8 *out)
 }
 EXPORT_SYMBOL(sha224_final);
 
+void sha256(const u8 *data, unsigned int len, u8 *out)
+{
+	struct sha256_state sctx;
+
+	sha256_init(&sctx);
+	sha256_update(&sctx, data, len);
+	sha256_final(&sctx, out);
+}
+EXPORT_SYMBOL(sha256);
+
 MODULE_LICENSE("GPL");
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 48054dbf1b51..fe4557955d97 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -1022,18 +1022,7 @@ static int debug_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
 	return 0;
 }
-
-static int debug_stats_open(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, debug_stats_show, NULL);
-}
-
-static const struct file_operations debug_stats_fops = {
-	.open		= debug_stats_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(debug_stats);
 
 static int __init debug_objects_init_debugfs(void)
 {
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 368ca7fd0d82..3952a07130d8 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -190,3 +190,44 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return __iter_div_u64_rem(dividend, divisor, remainder);
 }
 EXPORT_SYMBOL(iter_div_u64_rem);
+
+#ifndef mul_u64_u64_div_u64
+u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
+{
+	u64 res = 0, div, rem;
+	int shift;
+
+	/* can a * b overflow ? */
+	if (ilog2(a) + ilog2(b) > 62) {
+		/*
+		 * (b * a) / c is equal to
+		 *
+		 *      (b / c) * a +
+		 *      (b % c) * a / c
+		 *
+		 * if nothing overflows. Can the 1st multiplication
+		 * overflow? Yes, but we do not care: this can only
+		 * happen if the end result can't fit in u64 anyway.
+		 *
+		 * So the code below does
+		 *
+		 *      res = (b / c) * a;
+		 *      b = b % c;
+		 */
+		div = div64_u64_rem(b, c, &rem);
+		res = div * a;
+		b = rem;
+
+		shift = ilog2(a) + ilog2(b) - 62;
+		if (shift > 0) {
+			/* drop precision */
+			b >>= shift;
+			c >>= shift;
+			if (!c)
+				return res;
+		}
+	}
+
+	return res + div64_u64(a * b, c);
+}
+#endif
diff --git a/lib/mpi/Makefile b/lib/mpi/Makefile
index d5874a7f5ff9..43b8fce14079 100644
--- a/lib/mpi/Makefile
+++ b/lib/mpi/Makefile
@@ -16,6 +16,7 @@ mpi-y = \
 	mpicoder.o			\
 	mpi-bit.o			\
 	mpi-cmp.o			\
+	mpi-sub-ui.o			\
 	mpih-cmp.o			\
 	mpih-div.o			\
 	mpih-mul.o			\
diff --git a/lib/mpi/mpi-sub-ui.c b/lib/mpi/mpi-sub-ui.c
new file mode 100644
index 000000000000..b41b082b5f3e
--- /dev/null
+++ b/lib/mpi/mpi-sub-ui.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpi-sub-ui.c - Subtract an unsigned integer from an MPI.
+ *
+ * Copyright 1991, 1993, 1994, 1996, 1999-2002, 2004, 2012, 2013, 2015
+ * Free Software Foundation, Inc.
+ *
+ * This file was based on the GNU MP Library source file:
+ * https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors_ui.h
+ *
+ * The GNU MP Library is free software; you can redistribute it and/or modify
+ * it under the terms of either:
+ *
+ *   * the GNU Lesser General Public License as published by the Free
+ *     Software Foundation; either version 3 of the License, or (at your
+ *     option) any later version.
+ *
+ * or
+ *
+ *   * the GNU General Public License as published by the Free Software
+ *     Foundation; either version 2 of the License, or (at your option) any
+ *     later version.
+ *
+ * or both in parallel, as here.
+ *
+ * The GNU MP Library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received copies of the GNU General Public License and the
+ * GNU Lesser General Public License along with the GNU MP Library.  If not,
+ * see https://www.gnu.org/licenses/.
+ */
+
+#include "mpi-internal.h"
+
+int mpi_sub_ui(MPI w, MPI u, unsigned long vval)
+{
+	if (u->nlimbs == 0) {
+		if (mpi_resize(w, 1) < 0)
+			return -ENOMEM;
+		w->d[0] = vval;
+		w->nlimbs = (vval != 0);
+		w->sign = (vval != 0);
+		return 0;
+	}
+
+	/* If not space for W (and possible carry), increase space. */
+	if (mpi_resize(w, u->nlimbs + 1))
+		return -ENOMEM;
+
+	if (u->sign) {
+		mpi_limb_t cy;
+
+		cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
+		w->d[u->nlimbs] = cy;
+		w->nlimbs = u->nlimbs + cy;
+		w->sign = 1;
+	} else {
+		/* The signs are different.  Need exact comparison to determine
+		 * which operand to subtract from which.
+		 */
+		if (u->nlimbs == 1 && u->d[0] < vval) {
+			w->d[0] = vval - u->d[0];
+			w->nlimbs = 1;
+			w->sign = 1;
+		} else {
+			mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
+			/* Size can decrease with at most one limb. */
+			w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0));
+			w->sign = 0;
+		}
+	}
+
+	mpi_normalize(w);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mpi_sub_ui);
diff --git a/lib/random32.c b/lib/random32.c
index 763b920a6206..3d749abb9e80 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void)
 }
 #endif
 
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
+DEFINE_PER_CPU(struct rnd_state, net_rand_state);
 
 /**
  * prandom_u32_state - seeded pseudo-random number generator.
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9f6890aedd1a..c949c1e3b87c 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -31,7 +31,7 @@
 
 union nested_table {
 	union nested_table __rcu *table;
-	struct rhash_lock_head *bucket;
+	struct rhash_lock_head __rcu *bucket;
 };
 
 static u32 head_hashfn(struct rhashtable *ht,
@@ -222,7 +222,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht,
-				 struct rhash_lock_head **bkt,
+				 struct rhash_lock_head __rcu **bkt,
 				 unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -275,7 +275,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 				    unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-	struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
+	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
 	int err;
 
 	if (!bkt)
@@ -485,7 +485,7 @@ fail:
 }
 
 static void *rhashtable_lookup_one(struct rhashtable *ht,
-				   struct rhash_lock_head **bkt,
+				   struct rhash_lock_head __rcu **bkt,
 				   struct bucket_table *tbl, unsigned int hash,
 				   const void *key, struct rhash_head *obj)
 {
@@ -535,12 +535,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 	return ERR_PTR(-ENOENT);
 }
 
-static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
-						  struct rhash_lock_head **bkt,
-						  struct bucket_table *tbl,
-						  unsigned int hash,
-						  struct rhash_head *obj,
-						  void *data)
+static struct bucket_table *rhashtable_insert_one(
+	struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
+	struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
+	void *data)
 {
 	struct bucket_table *new_tbl;
 	struct rhash_head *head;
@@ -591,7 +589,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 {
 	struct bucket_table *new_tbl;
 	struct bucket_table *tbl;
-	struct rhash_lock_head **bkt;
+	struct rhash_lock_head __rcu **bkt;
 	unsigned int hash;
 	void *data;
 
@@ -1173,8 +1171,8 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
-					     unsigned int hash)
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+	const struct bucket_table *tbl, unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1202,10 +1200,10 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
 
-struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
-					   unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested(
+	const struct bucket_table *tbl, unsigned int hash)
 {
-	static struct rhash_lock_head *rhnull;
+	static struct rhash_lock_head __rcu *rhnull;
 
 	if (!rhnull)
 		INIT_RHT_NULLS_HEAD(rhnull);
@@ -1213,9 +1211,8 @@ struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
-struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
-						  struct bucket_table *tbl,
-						  unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index af88d1346dd7..267aa7709416 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -292,8 +292,11 @@ void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
 
 	for (i = 0; i < sb->map_nr; i++) {
 		unsigned long word = READ_ONCE(sb->map[i].word);
+		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
 		unsigned int word_bits = READ_ONCE(sb->map[i].depth);
 
+		word &= ~cleared;
+
 		while (word_bits > 0) {
 			unsigned int bits = min(8 - byte_bits, word_bits);
 
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index ddc9685702b1..5cf2fe9aab9e 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -15,6 +15,8 @@
 #include <linux/delay.h>
 #include <linux/rwsem.h>
 #include <linux/mm.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
 
 #define __param(type, name, init, msg)		\
 	static type name = init;		\
@@ -35,14 +37,18 @@ __param(int, test_loop_count, 1000000,
 
 __param(int, run_test_mask, INT_MAX,
 	"Set tests specified in the mask.\n\n"
-		"\t\tid: 1,   name: fix_size_alloc_test\n"
-		"\t\tid: 2,   name: full_fit_alloc_test\n"
-		"\t\tid: 4,   name: long_busy_list_alloc_test\n"
-		"\t\tid: 8,   name: random_size_alloc_test\n"
-		"\t\tid: 16,  name: fix_align_alloc_test\n"
-		"\t\tid: 32,  name: random_size_align_alloc_test\n"
-		"\t\tid: 64,  name: align_shift_alloc_test\n"
-		"\t\tid: 128, name: pcpu_alloc_test\n"
+		"\t\tid: 1,    name: fix_size_alloc_test\n"
+		"\t\tid: 2,    name: full_fit_alloc_test\n"
+		"\t\tid: 4,    name: long_busy_list_alloc_test\n"
+		"\t\tid: 8,    name: random_size_alloc_test\n"
+		"\t\tid: 16,   name: fix_align_alloc_test\n"
+		"\t\tid: 32,   name: random_size_align_alloc_test\n"
+		"\t\tid: 64,   name: align_shift_alloc_test\n"
+		"\t\tid: 128,  name: pcpu_alloc_test\n"
+		"\t\tid: 256,  name: kvfree_rcu_1_arg_vmalloc_test\n"
+		"\t\tid: 512,  name: kvfree_rcu_2_arg_vmalloc_test\n"
+		"\t\tid: 1024, name: kvfree_rcu_1_arg_slab_test\n"
+		"\t\tid: 2048, name: kvfree_rcu_2_arg_slab_test\n"
 		/* Add a new test case description here. */
 );
 
@@ -316,6 +322,83 @@ pcpu_alloc_test(void)
 	return rv;
 }
 
+struct test_kvfree_rcu {
+	struct rcu_head rcu;
+	unsigned char array[20];
+};
+
+static int
+kvfree_rcu_1_arg_vmalloc_test(void)
+{
+	struct test_kvfree_rcu *p;
+	int i;
+
+	for (i = 0; i < test_loop_count; i++) {
+		p = vmalloc(1 * PAGE_SIZE);
+		if (!p)
+			return -1;
+
+		p->array[0] = 'a';
+		kvfree_rcu(p);
+	}
+
+	return 0;
+}
+
+static int
+kvfree_rcu_2_arg_vmalloc_test(void)
+{
+	struct test_kvfree_rcu *p;
+	int i;
+
+	for (i = 0; i < test_loop_count; i++) {
+		p = vmalloc(1 * PAGE_SIZE);
+		if (!p)
+			return -1;
+
+		p->array[0] = 'a';
+		kvfree_rcu(p, rcu);
+	}
+
+	return 0;
+}
+
+static int
+kvfree_rcu_1_arg_slab_test(void)
+{
+	struct test_kvfree_rcu *p;
+	int i;
+
+	for (i = 0; i < test_loop_count; i++) {
+		p = kmalloc(sizeof(*p), GFP_KERNEL);
+		if (!p)
+			return -1;
+
+		p->array[0] = 'a';
+		kvfree_rcu(p);
+	}
+
+	return 0;
+}
+
+static int
+kvfree_rcu_2_arg_slab_test(void)
+{
+	struct test_kvfree_rcu *p;
+	int i;
+
+	for (i = 0; i < test_loop_count; i++) {
+		p = kmalloc(sizeof(*p), GFP_KERNEL);
+		if (!p)
+			return -1;
+
+		p->array[0] = 'a';
+		kvfree_rcu(p, rcu);
+	}
+
+	return 0;
+}
+
 struct test_case_desc {
 	const char *test_name;
 	int (*test_func)(void);
@@ -330,6 +413,10 @@ static struct test_case_desc test_case_array[] = {
 	{ "random_size_align_alloc_test", random_size_align_alloc_test },
 	{ "align_shift_alloc_test", align_shift_alloc_test },
 	{ "pcpu_alloc_test", pcpu_alloc_test },
+	{ "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test },
+	{ "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test },
+	{ "kvfree_rcu_1_arg_slab_test", kvfree_rcu_1_arg_slab_test },
+	{ "kvfree_rcu_2_arg_slab_test", kvfree_rcu_2_arg_slab_test },
 	/* Add a new test case here. */
 };
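
For reference, a minimal usage sketch of the one-shot sha256() helper exported by the lib/crypto/sha256.c hunk above. This is not part of the patch: the module wrapper, the <crypto/sha.h> include for sha256()/SHA256_DIGEST_SIZE, and the sample input are illustrative assumptions only.

/*
 * Hypothetical demo module (not from this diff): hash a fixed buffer with
 * the new one-shot sha256() helper instead of the explicit
 * sha256_init()/sha256_update()/sha256_final() sequence.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/sha.h>		/* assumed location of sha256() and SHA256_DIGEST_SIZE */

static int __init sha256_usage_demo_init(void)
{
	static const u8 msg[] = "abc";
	u8 digest[SHA256_DIGEST_SIZE];

	/* One call replaces the init/update/final sequence. */
	sha256(msg, sizeof(msg) - 1, digest);

	print_hex_dump(KERN_INFO, "sha256: ", DUMP_PREFIX_NONE, 16, 1,
		       digest, sizeof(digest), false);
	return 0;
}

static void __exit sha256_usage_demo_exit(void)
{
}

module_init(sha256_usage_demo_init);
module_exit(sha256_usage_demo_exit);
MODULE_LICENSE("GPL");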