author    Alexey Dobriyan <adobriyan@gmail.com>    2018-04-05 16:20:22 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-04-05 21:36:23 -0700
commit    36071a279b4100afe9fbee18727ad78daa307591
tree      223780ab9b84dc29acfeeaf4ba47dcd2ecdf8414    /include/linux/slab.h
parent    c86305743bdf3928ed7fbc685724c04bbd5331aa
slab: make kmalloc_index() return "unsigned int"
kmalloc_index() returns an index into an array of kmalloc kmem caches,
therefore it should be unsigned.

Space savings with SLUB on trimmed down .config:

	add/remove: 0/1 grow/shrink: 6/56 up/down: 85/-557 (-472)
	Function                 old     new   delta
	calculate_sizes          924     983     +59
	on_freelist              589     604     +15
	init_cache_random_seq    122     127      +5
	ext4_mb_init            1206    1210      +4
	slab_pad_check.part      270     271      +1
	cpu_partial_store        112     113      +1
	usersize_show             28      27      -1
		...
	new_slab                1871    1837     -34
	slab_order               204       -    -204

This patch starts a series of converting SLUB (mostly) to "unsigned int".

1) Most integers in the code are in fact unsigned entities: array
   indexes, lengths, buffer sizes, allocation orders. It is therefore
   better to use unsigned variables.

2) Some integers in the code are either "size_t" or "unsigned long" for
   no reason.

   size_t usually comes from people trying to maintain type correctness
   and figuring out that the "sizeof" operator returns size_t, or that
   memset/memcpy takes size_t, so everything passed to it should too.

   However, the number of 4GB+ objects in the kernel is very small.
   Most, if not all, objects dynamically allocated with kmalloc() or
   kmem_cache_create() aren't actually big. Maintaining wide types
   doesn't do anything.

   64-bit ops are bigger than 32-bit on our beloved x86_64, so try not
   to use 64-bit where it isn't necessary (read: everywhere where
   integers are integers, not pointers).

3) In the case of SLAB allocators, there are additional limitations:

   *) page->inuse and page->objects are only 16-/15-bit,
   *) cache size was always 32-bit,
   *) slab orders are small; order 20 is needed to go 64-bit on x86_64
      (PAGE_SIZE << order).

Basically everything is 32-bit except kmalloc(1ULL<<32), which gets
shortcut through the page allocator.

Christoph said:

: That changes with large base page size on power and ARM64 f.e. but then
: we do not want to encourage larger allocations through slab anyways.

Link: http://lkml.kernel.org/r/20180305200730.15812-2-adobriyan@gmail.com
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
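The changelog's premise is the mapping documented in the kmalloc_index()
header comment (first hunk below): index n covers sizes 2^(n-1)+1 .. 2^n,
so even the largest cacheable size yields a tiny index. Below is a minimal
user-space sketch of that mapping; model_kmalloc_index() is a hypothetical
stand-in, not the kernel function, which is an unrolled chain of
compile-time comparisons with extra cases for the 96- and 192-byte caches:

	#include <stdio.h>

	/*
	 * Toy model of the size-to-index mapping: round the request up
	 * to the next power of two, starting from the smallest kmalloc
	 * cache (8 bytes = 2^3).  The 96-/192-byte special cases of the
	 * real kmalloc_index() are skipped here.
	 */
	static unsigned int model_kmalloc_index(size_t size)
	{
		unsigned int idx = 3;	/* 8-byte cache */
		size_t cache = 8;

		if (!size)
			return 0;	/* callers map index 0 to ZERO_SIZE_PTR */
		while (cache < size) {
			cache <<= 1;
			idx++;
		}
		return idx;
	}

	int main(void)
	{
		const size_t sizes[] = { 1, 8, 100, 4096, 1UL << 20 };

		for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			printf("size %zu -> index %u\n",
			       sizes[i], model_kmalloc_index(sizes[i]));
		return 0;
	}

Running it prints, e.g., "size 4096 -> index 12"; the result never needs a
sign or 64 bits, which is exactly what the new prototype encodes.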
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--  include/linux/slab.h  6
1 file changed, 3 insertions(+), 3 deletions(-)
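The instruction-size effect behind point 2) of the changelog can be seen
outside the kernel. In this hedged sketch (the file and function names are
illustrative only, not kernel code), compiling with gcc -O1 -S on x86_64
and comparing the two functions' assembly shows the unsigned long
version's xor/add/cmp operating on 64-bit registers, each carrying a
one-byte REX.W prefix that the unsigned int version avoids:

	/* size_demo.c -- hypothetical demo, not kernel code.
	 * Build:  gcc -O1 -S size_demo.c   (on x86_64)
	 * Compare fold_u64/fold_u32 in size_demo.s: the 64-bit
	 * arithmetic carries REX.W prefixes, one extra byte each. */
	unsigned long fold_u64(unsigned long n)
	{
		unsigned long s = 0;

		for (unsigned long i = 0; i < n; i++)
			s ^= i;		/* 64-bit xor/inc/cmp */
		return s;
	}

	unsigned int fold_u32(unsigned int n)
	{
		unsigned int s = 0;

		for (unsigned int i = 0; i < n; i++)
			s ^= i;		/* same loop, 32-bit ops */
		return s;
	}

Aggregated over a whole allocator, that per-instruction byte is the kind
of saving the bloat-o-meter table above measures.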
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 231abc8976c5..296f33a512eb 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -308,7 +308,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  * 2 = 129 .. 192 bytes
  * n = 2^(n-1)+1 .. 2^n
  */
-static __always_inline int kmalloc_index(size_t size)
+static __always_inline unsigned int kmalloc_index(size_t size)
 {
 	if (!size)
 		return 0;
@@ -504,7 +504,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 		return kmalloc_large(size, flags);
 #ifndef CONFIG_SLOB
 	if (!(flags & GFP_DMA)) {
-		int index = kmalloc_index(size);
+		unsigned int index = kmalloc_index(size);
 
 		if (!index)
 			return ZERO_SIZE_PTR;
@@ -542,7 +542,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #ifndef CONFIG_SLOB
 	if (__builtin_constant_p(size) &&
 		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
-		int i = kmalloc_index(size);
+		unsigned int i = kmalloc_index(size);
 
 		if (!i)
 			return ZERO_SIZE_PTR;