| author | Hyeonggon Yoo <42.hyeyoo@gmail.com> | 2022-08-17 19:18:21 +0900 |
|---|---|---|
| committer | Vlastimil Babka <vbabka@suse.cz> | 2022-09-01 10:38:06 +0200 |
| commit | b14051352465a24b3c9ceaccac4e39b3521bb370 (patch) | |
| tree | f16fbee4c11cf6df63f7acbff10971814bece277 /mm/slub.c | |
| parent | ed4cd17eb26d7f0c6a762608a3f30870929fbcdd (diff) | |
mm/sl[au]b: generalize kmalloc subsystem
Now everything in the kmalloc subsystem can be generalized.
Let's do it!
Generalize __do_kmalloc_node(), __kmalloc_node_track_caller(),
kfree(), __ksize(), __kmalloc(), and __kmalloc_node(), and move them
to slab_common.c.
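For context, the generalized __do_kmalloc_node() that lands in slab_common.c would look roughly like the sketch below, reconstructed from the SLUB copy removed in the diff further down. The allocator-neutral __kmem_cache_alloc_node() hook is an assumption here (its free-side twin, __kmem_cache_free(), is the one named in the kfence note), and __kmalloc_large_node() is the renamed helper described in the next paragraph.

```c
/*
 * Sketch (not the verbatim commit) of the generalized __do_kmalloc_node()
 * as it would live in mm/slab_common.c, reconstructed from the removed
 * SLUB version. __kmem_cache_alloc_node() is assumed to be the
 * allocator-neutral fast path implemented by both SLAB and SLUB.
 */
static __always_inline
void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *s;
	void *ret;

	/* Requests too big for the kmalloc caches go to the page allocator. */
	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
		ret = __kmalloc_large_node(size, flags, node);
		trace_kmalloc_node(caller, ret, NULL,
				   size, PAGE_SIZE << get_order(size),
				   flags, node);
		return ret;
	}

	s = kmalloc_slab(size, flags);

	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;

	/* Allocator-specific allocation, shared tracing and KASAN hooks. */
	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
	ret = kasan_kmalloc(s, ret, size, flags);
	trace_kmalloc_node(caller, ret, s, size, s->size, flags, node);
	return ret;
}
```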
While at it, rename kmalloc_large_node_notrace()
to __kmalloc_large_node() and make it static, as it is now only called
from slab_common.c.
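In declaration terms the rename boils down to the following (a sketch; that the old declaration sat in mm/slab.h is an assumption):

```c
/* Before: visible to both allocators via a shared header (assumed mm/slab.h) */
void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node);

/* After: all callers are in mm/slab_common.c, so it can become static */
static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
```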
[ feng.tang@intel.com: adjust kfence skip list to include
__kmem_cache_free so that kfence kunit tests do not fail ]
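kfence records the allocation and free stack traces of an object and skips over known allocator entry points to find the real call site; since the generalized kfree() now frees through __kmem_cache_free(), that symbol has to join the skip list or the kunit tests report the wrong frame. A sketch of the shape of the fix, assuming it lands in get_stack_skipnr() in mm/kfence/report.c:

```c
/* mm/kfence/report.c, get_stack_skipnr() -- hypothetical rendering */
if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") || /* new */
    str_has_prefix(buf, ARCH_FUNC_PREFIX "__slab_free")) {
	/* In case of tail calls from any of the above into the allocator. */
	fallback = skipnr + 1;
}
```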
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm/slub.c')
| -rw-r--r-- | mm/slub.c | 87 |

1 file changed, 0 insertions, 87 deletions
diff --git a/mm/slub.c b/mm/slub.c
index a11f78c2647c..cd49785d59e1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4388,49 +4388,6 @@ static int __init setup_slub_min_objects(char *str)
 
 __setup("slub_min_objects=", setup_slub_min_objects);
 
-static __always_inline
-void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
-{
-	struct kmem_cache *s;
-	void *ret;
-
-	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
-		ret = kmalloc_large_node_notrace(size, flags, node);
-
-		trace_kmalloc_node(caller, ret, NULL,
-				   size, PAGE_SIZE << get_order(size),
-				   flags, node);
-
-		return ret;
-	}
-
-	s = kmalloc_slab(size, flags);
-
-	if (unlikely(ZERO_OR_NULL_PTR(s)))
-		return s;
-
-	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
-
-	trace_kmalloc_node(caller, ret, s, size, s->size, flags, node);
-
-	ret = kasan_kmalloc(s, ret, size, flags);
-
-	return ret;
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __do_kmalloc_node(size, flags, node, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc_node);
-
-void *__kmalloc(size_t size, gfp_t flags)
-{
-	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc);
-
-
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
  * Rejects incorrectly sized objects and objects that are to be copied
@@ -4481,43 +4438,6 @@ void __check_heap_object(const void *ptr, unsigned long n,
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
 
-size_t __ksize(const void *object)
-{
-	struct folio *folio;
-
-	if (unlikely(object == ZERO_SIZE_PTR))
-		return 0;
-
-	folio = virt_to_folio(object);
-
-	if (unlikely(!folio_test_slab(folio)))
-		return folio_size(folio);
-
-	return slab_ksize(folio_slab(folio)->slab_cache);
-}
-EXPORT_SYMBOL(__ksize);
-
-void kfree(const void *x)
-{
-	struct folio *folio;
-	struct slab *slab;
-	void *object = (void *)x;
-
-	trace_kfree(_RET_IP_, x);
-
-	if (unlikely(ZERO_OR_NULL_PTR(x)))
-		return;
-
-	folio = virt_to_folio(x);
-	if (unlikely(!folio_test_slab(folio))) {
-		free_large_kmalloc(folio, object);
-		return;
-	}
-	slab = folio_slab(folio);
-	slab_free(slab->slab_cache, slab, object, NULL, &object, 1, _RET_IP_);
-}
-EXPORT_SYMBOL(kfree);
-
 #define SHRINK_PROMOTE_MAX 32
 
 /*
@@ -4863,13 +4783,6 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
 	return 0;
 }
 
-void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-				  int node, unsigned long caller)
-{
-	return __do_kmalloc_node(size, gfpflags, node, caller);
-}
-EXPORT_SYMBOL(__kmalloc_node_track_caller);
-
 #ifdef CONFIG_SYSFS
 static int count_inuse(struct slab *slab)
 {
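For comparison with the kfree() removed above, its generalized replacement in slab_common.c would look nearly identical; only the last step changes, going through the allocator-neutral __kmem_cache_free() (the hook mentioned in the kfence note) instead of SLUB's slab_free(). A sketch reconstructed from the removed code:

```c
/* Sketch: generalized kfree() on the mm/slab_common.c side. */
void kfree(const void *object)
{
	struct folio *folio;
	struct slab *slab;
	struct kmem_cache *s;

	trace_kfree(_RET_IP_, object);

	if (unlikely(ZERO_OR_NULL_PTR(object)))
		return;

	folio = virt_to_folio(object);
	/* Large kmalloc allocations are plain pages, not slab objects. */
	if (unlikely(!folio_test_slab(folio))) {
		free_large_kmalloc(folio, (void *)object);
		return;
	}

	slab = folio_slab(folio);
	s = slab->slab_cache;
	/* Allocator-specific free, implemented by both SLAB and SLUB. */
	__kmem_cache_free(s, (void *)object, _RET_IP_);
}
EXPORT_SYMBOL(kfree);
```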