author    | Linus Torvalds <torvalds@linux-foundation.org> | 2023-06-29 16:34:12 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-06-29 16:34:12 -0700
commit    | 632f54b4d60bfe0701f43d0bc387928de6e3dcfb
tree      | fac09ccb563bdd3e71133c3f571db6d747fe94fa /mm/slab_common.c
parent    | bf1fa6f15553df04f2bdd06190ccd5f388ab0777
parent    | 7bc162d5cc4de5c33c5570dba2719a01506a9fd0
Merge tag 'slab-for-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:
- SLAB deprecation:
  Following the discussion at LSF/MM 2023 [1], which raised no
  objections, the SLAB allocator is deprecated: the config option is
  renamed to CONFIG_SLAB_DEPRECATED (so that its users notice) and its
  help text is updated. SLUB should be used instead. Existing defconfigs
  that enable CONFIG_SLAB are also updated.
- SLAB_NO_MERGE kmem_cache flag (Jesper Dangaard Brouer):
  There are (very limited) cases where kmem_cache merging is
  undesirable, and the existing ways to prevent it are hacky. Introduce
  a new flag to do that cleanly and convert the existing hacky users.
  Btrfs plans to use it for debug kernel builds (such use is always
  fine); networking plans to use it for performance reasons (which
  should be very rare). See the usage sketch after the Link below.
- Replace the usage of weak PRNGs (David Keisar Schmidt):
  In addition to using stronger RNGs for the security-related features,
  the code becomes a bit cleaner (see the before/after sketch after the
  shortlog below).
- Misc code cleanups (SeongJae Park, Xiongwei Song, Zhen Lei, and
  zhaoxinchao)
Link: https://lwn.net/Articles/932201/ [1]
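As an editorial illustration of the new flag, here is a minimal sketch of how a cache user might opt out of merging, assuming a hypothetical cache name, object type, and init function (none of these come from the btrfs or networking patches):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_debug_object {
        u64 id;
        void *payload;
};

static struct kmem_cache *my_debug_cache;

static int __init my_debug_cache_init(void)
{
        /*
         * SLAB_NO_MERGE prevents this cache from being merged with other
         * compatible caches, so its statistics and any debugging state
         * stay attributable to this user alone.
         */
        my_debug_cache = kmem_cache_create("my_debug_object",
                                           sizeof(struct my_debug_object),
                                           0, SLAB_NO_MERGE, NULL);
        return my_debug_cache ? 0 : -ENOMEM;
}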
* tag 'slab-for-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
mm/slab_common: use SLAB_NO_MERGE instead of negative refcount
mm/slab: break up RCU readers on SLAB_TYPESAFE_BY_RCU example code
mm/slab: add a missing semicolon on SLAB_TYPESAFE_BY_RCU example code
mm/slab_common: reduce an if statement in create_cache()
mm/slab: introduce kmem_cache flag SLAB_NO_MERGE
mm/slab: rename CONFIG_SLAB to CONFIG_SLAB_DEPRECATED
mm/slab: remove HAVE_HARDENED_USERCOPY_ALLOCATOR
mm/slab_common: Replace invocation of weak PRNG
mm/slab: Replace invocation of weak PRNG
slub: Don't read nr_slabs and total_objects directly
slub: Remove slabs_node() function
slub: Remove CONFIG_SMP defined check
slub: Put objects_show() into CONFIG_SLUB_DEBUG enabled block
slub: Correct the error code when slab_kset is NULL
mm/slab: correct return values in comment for __kmem_cache_create()
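The two "Replace invocation of weak PRNG" commits above follow the same pattern; below is a minimal before/after sketch of that pattern (the helper name shuffle_indices() is invented for illustration; the actual change is to freelist_randomize() in the diff that follows):

#include <linux/minmax.h>   /* swap() */
#include <linux/prandom.h>
#include <linux/random.h>

/*
 * Before: the caller seeds a private prandom state and reduces
 * prandom_u32_state() with a modulo, which uses a weak PRNG and
 * introduces modulo bias.
 */
static void shuffle_indices_old(struct rnd_state *state, unsigned int *list,
                                unsigned int count)
{
        unsigned int i, rand;

        for (i = count - 1; i > 0; i--) {
                rand = prandom_u32_state(state) % (i + 1);
                swap(list[i], list[rand]);
        }
}

/*
 * After: get_random_u32_below() draws from the kernel's CRNG and returns
 * a uniform value in [0, ceil), so no private state, explicit seeding,
 * or modulo is needed.
 */
static void shuffle_indices(unsigned int *list, unsigned int count)
{
        unsigned int i, rand;

        for (i = count - 1; i > 0; i--) {
                rand = get_random_u32_below(i + 1);
                swap(list[i], list[rand]);
        }
}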
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r-- | mm/slab_common.c | 33
1 file changed, 13 insertions, 20 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 43c008165f56..d1555ea2981a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -49,7 +49,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
  */
 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                 SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
-                SLAB_FAILSLAB | kasan_never_merge())
+                SLAB_FAILSLAB | SLAB_NO_MERGE | kasan_never_merge())
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
                          SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
@@ -238,14 +238,12 @@ static struct kmem_cache *create_cache(const char *name,
 
         s->refcount = 1;
         list_add(&s->list, &slab_caches);
-out:
-        if (err)
-                return ERR_PTR(err);
         return s;
 
 out_free_cache:
         kmem_cache_free(kmem_cache, s);
-        goto out;
+out:
+        return ERR_PTR(err);
 }
 
 /**
@@ -892,6 +890,13 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
                 flags |= SLAB_CACHE_DMA;
         }
 
+        /*
+         * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
+         * KMALLOC_NORMAL caches.
+         */
+        if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
+                flags |= SLAB_NO_MERGE;
+
         if (minalign > ARCH_KMALLOC_MINALIGN) {
                 aligned_size = ALIGN(aligned_size, minalign);
                 aligned_idx = __kmalloc_index(aligned_size, false);
@@ -903,13 +908,6 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
                                         aligned_size, flags);
         if (idx != aligned_idx)
                 kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
-
-        /*
-         * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
-         * KMALLOC_NORMAL caches.
-         */
-        if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
-                kmalloc_caches[type][idx]->refcount = -1;
 }
 
 /*
@@ -1162,7 +1160,7 @@ EXPORT_SYMBOL(kmalloc_large_node);
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
 /* Randomize a generic freelist */
-static void freelist_randomize(struct rnd_state *state, unsigned int *list,
+static void freelist_randomize(unsigned int *list,
                                unsigned int count)
 {
         unsigned int rand;
@@ -1173,8 +1171,7 @@ static void freelist_randomize(struct rnd_state *state, unsigned int *list,
 
         /* Fisher-Yates shuffle */
         for (i = count - 1; i > 0; i--) {
-                rand = prandom_u32_state(state);
-                rand %= (i + 1);
+                rand = get_random_u32_below(i + 1);
                 swap(list[i], list[rand]);
         }
 }
@@ -1183,7 +1180,6 @@ static void freelist_randomize(struct rnd_state *state, unsigned int *list,
 int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                                     gfp_t gfp)
 {
-        struct rnd_state state;
 
         if (count < 2 || cachep->random_seq)
                 return 0;
@@ -1192,10 +1188,7 @@ int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
         if (!cachep->random_seq)
                 return -ENOMEM;
 
-        /* Get best entropy at this stage of boot */
-        prandom_seed_state(&state, get_random_long());
-
-        freelist_randomize(&state, cachep->random_seq, count);
+        freelist_randomize(cachep->random_seq, count);
         return 0;
 }