| author | Vlastimil Babka <vbabka@suse.cz> | 2023-10-03 15:27:11 +0200 |
| committer | Vlastimil Babka <vbabka@suse.cz> | 2023-12-06 11:57:21 +0100 |
| commit | b774d3e326d30fc8ef841101c399e44bdac2aa48 (patch) | |
| tree | 4b8af66490308105e4609eba7f242029017a63b5 /mm/slub.c | |
| parent | b52ef56e9b324b172053b03d8c775ef4708fbc23 (diff) | |
mm/slab: move kfree() from slab_common.c to slub.c
This should result in better code. Currently kfree() makes a function
call between compilation units to __kmem_cache_free() which does its own
virt_to_slab(), throwing away the struct slab pointer we already had in
kfree(). Now it can be reused. Additionally kfree() can now inline the
whole SLUB freeing fastpath.
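As a rough sketch of the duplication described above (the slab_common.c side is not part of this diff and is reconstructed here only for illustration, so details may differ):

/* Pre-patch, mm/slab_common.c (illustrative reconstruction): */
void kfree(const void *object)
{
	struct folio *folio;
	struct slab *slab;
	struct kmem_cache *s;

	/* ... NULL and large-kmalloc handling elided ... */
	folio = virt_to_folio(object);
	slab = folio_slab(folio);	/* slab pointer already derived here */
	s = slab->slab_cache;
	/* cross-compilation-unit call; the slab pointer is not passed along */
	__kmem_cache_free(s, (void *)object, _RET_IP_);
}

/* Pre-patch, mm/slub.c (the hunk removed below): */
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller)
{
	/* re-derives the slab that kfree() already had */
	slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller);
}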
Also move over free_large_kmalloc() as the only callsites are now in
slub.c, and make it static.
Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 51
1 file changed, 46 insertions, 5 deletions
diff --git a/mm/slub.c b/mm/slub.c
index cc801f8258fe..2baa9e94d9df 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4197,11 +4197,6 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	return cachep;
 }
 
-void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller)
-{
-	slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller);
-}
-
 /**
  * kmem_cache_free - Deallocate an object
  * @s: The cache the allocation was from.
@@ -4220,6 +4215,52 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
+static void free_large_kmalloc(struct folio *folio, void *object)
+{
+	unsigned int order = folio_order(folio);
+
+	if (WARN_ON_ONCE(order == 0))
+		pr_warn_once("object pointer: 0x%p\n", object);
+
+	kmemleak_free(object);
+	kasan_kfree_large(object);
+	kmsan_kfree_large(object);
+
+	mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
+			      -(PAGE_SIZE << order));
+	__free_pages(folio_page(folio, 0), order);
+}
+
+/**
+ * kfree - free previously allocated memory
+ * @object: pointer returned by kmalloc() or kmem_cache_alloc()
+ *
+ * If @object is NULL, no operation is performed.
+ */
+void kfree(const void *object)
+{
+	struct folio *folio;
+	struct slab *slab;
+	struct kmem_cache *s;
+	void *x = (void *)object;
+
+	trace_kfree(_RET_IP_, object);
+
+	if (unlikely(ZERO_OR_NULL_PTR(object)))
+		return;
+
+	folio = virt_to_folio(object);
+	if (unlikely(!folio_test_slab(folio))) {
+		free_large_kmalloc(folio, (void *)object);
+		return;
+	}
+
+	slab = folio_slab(folio);
+	s = slab->slab_cache;
+	slab_free(s, slab, x, NULL, &x, 1, _RET_IP_);
+}
+EXPORT_SYMBOL(kfree);
+
 struct detached_freelist {
 	struct slab *slab;
 	void *tail;
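For context, a hedged illustration of how the two paths in the new kfree() are exercised; the sizes below are assumptions, and the slab-backed versus page-backed split depends on KMALLOC_MAX_CACHE_SIZE for the running configuration:

void *small = kmalloc(64, GFP_KERNEL);             /* slab-backed kmalloc cache  */
void *large = kmalloc(16 * PAGE_SIZE, GFP_KERNEL); /* page-backed, no slab folio */

kfree(small);  /* folio_test_slab() true  -> inlined slab_free() fastpath  */
kfree(large);  /* folio_test_slab() false -> free_large_kmalloc()          */
kfree(NULL);   /* ZERO_OR_NULL_PTR() -> returns early, as documented above */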