Diffstat (limited to 'include/linux/slab.h')
-rw-r--r-- | include/linux/slab.h | 46 |
1 files changed, 3 insertions, 43 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 45af70315a94..6b3e155b70bf 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -167,7 +167,6 @@ struct mem_cgroup;
 /*
  * struct kmem_cache related prototypes
  */
-void __init kmem_cache_init(void);
 bool slab_is_available(void);
 
 struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
@@ -284,7 +283,7 @@ static inline unsigned int arch_slab_minalign(void)
  * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	5
 #endif
@@ -292,20 +291,7 @@ static inline unsigned int arch_slab_minalign(void)
 
 #ifdef CONFIG_SLUB
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW	3
-#endif
-#endif
-
-#ifdef CONFIG_SLOB
-/*
- * SLOB passes all requests larger than one page to the page allocator.
- * No kmalloc array is necessary since objects of different sizes can
- * be allocated from the same page.
- */
-#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
@@ -366,7 +352,6 @@ enum kmalloc_cache_type {
 	NR_KMALLOC_TYPES
 };
 
-#ifndef CONFIG_SLOB
 extern struct kmem_cache *
 kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
 
@@ -458,7 +443,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 }
 static_assert(PAGE_SHIFT <= 20);
 #define kmalloc_index(s) __kmalloc_index(s, true)
-#endif /* !CONFIG_SLOB */
 
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
 
@@ -487,10 +471,6 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
 
-/*
- * Caller must not use kfree_bulk() on memory not originally allocated
- * by kmalloc(), because the SLOB allocator cannot handle this.
- */
 static __always_inline void kfree_bulk(size_t size, void **p)
 {
 	kmem_cache_free_bulk(NULL, size, p);
@@ -526,7 +506,7 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
  * to be at least to the size.
  *
  * The @flags argument may be one of the GFP flags defined at
- * include/linux/gfp.h and described at
+ * include/linux/gfp_types.h and described at
  * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
  *
  * The recommended usage of the @flags is described at
@@ -567,7 +547,6 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
  *	Try really hard to succeed the allocation but fail
  *	eventually.
 */
-#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && size) {
@@ -583,17 +562,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 	}
 	return __kmalloc(size, flags);
 }
-#else
-static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
-		return kmalloc_large(size, flags);
-	return __kmalloc(size, flags);
-}
-#endif
-
-#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) && size) {
@@ -609,15 +578,6 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
 	}
 	return __kmalloc_node(size, flags, node);
 }
-#else
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
-		return kmalloc_large_node(size, flags, node);
-
-	return __kmalloc_node(size, flags, node);
-}
-#endif
 
 /**
  * kmalloc_array - allocate memory for an array.
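A minimal caller-side sketch, not part of the commit: it only uses interfaces visible in the header above (kmalloc(), kfree(), kfree_bulk(), which wraps kmem_cache_free_bulk(NULL, ...)); the function name demo_bulk_free and the NOBJ/object-size values are hypothetical.

#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/errno.h>

#define NOBJ 8

static int demo_bulk_free(void)
{
	void *objs[NOBJ];
	size_t i;

	/* Allocate a handful of kmalloc() objects individually. */
	for (i = 0; i < NOBJ; i++) {
		objs[i] = kmalloc(64, GFP_KERNEL);
		if (!objs[i])
			goto out_free;
	}

	/*
	 * Free them in one call. kfree_bulk() forwards to
	 * kmem_cache_free_bulk(NULL, size, p), which resolves the backing
	 * kmalloc cache for each pointer.
	 */
	kfree_bulk(NOBJ, objs);
	return 0;

out_free:
	/* Undo the allocations that did succeed. */
	while (i--)
		kfree(objs[i]);
	return -ENOMEM;
}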