author		Christoph Lameter <clameter@sgi.com>	2007-07-17 04:03:23 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-17 10:23:01 -0700
commit		d07dbea46405b37d59495eb4de9d1056dcfb7c6d (patch)
tree		221376c8c5509a88f8942246180685d5c01baf46 /mm/slub.c
parent		6cb8f91320d3e720351c21741da795fed580b21b (diff)
download	lwn-d07dbea46405b37d59495eb4de9d1056dcfb7c6d.tar.gz
		lwn-d07dbea46405b37d59495eb4de9d1056dcfb7c6d.zip
Slab allocators: support __GFP_ZERO in all allocators
A kernel convention for many allocators is that if __GFP_ZERO is passed to an allocator then the allocated memory should be zeroed.

This is currently not supported by the slab allocators. The inconsistency makes it difficult to implement in derived allocators such as in the uncached allocator and the pool allocators.

In addition, the support for zeroed allocations in the slab allocators does not have a consistent API. There are no zeroing allocator functions for NUMA node placement (kmalloc_node, kmem_cache_alloc_node). The zeroing allocations are only provided for default allocs (kzalloc, kmem_cache_zalloc).

__GFP_ZERO will make zeroing universally available and does not require any additional functions. So add the necessary logic to all slab allocators to support __GFP_ZERO.

The code is added to the hot path. The gfp flags are on the stack and so the cacheline is readily available for checking if we want a zeroed object.

Zeroing while allocating is now a frequent operation and we seem to be gradually approaching a 1-1 parity between zeroing and not zeroing allocs. The current tree has 3476 uses of kmalloc vs 2731 uses of kzalloc.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
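As a hedged illustration of what this change enables for callers (not part of the patch itself): a NUMA-aware allocation site can now get zeroed memory straight from the existing node-aware entry points, where previously only kzalloc() offered zeroing. The struct and helper names below are hypothetical; kmalloc_node(), kmem_cache_alloc_node(), GFP_KERNEL and __GFP_ZERO are existing kernel symbols.

/*
 * Illustrative sketch, not from the patch: with __GFP_ZERO honoured by
 * the slab allocators, a zeroed, node-local allocation needs no
 * kzalloc_node-style wrapper.  'struct example_item',
 * example_alloc_on_node() and example_cache_alloc_zeroed() are
 * hypothetical names for this sketch.
 */
#include <linux/slab.h>
#include <linux/gfp.h>

struct example_item {
	int id;
	void *data;
};

static struct example_item *example_alloc_on_node(int node)
{
	/* Zeroed allocation with NUMA node placement in a single call. */
	return kmalloc_node(sizeof(struct example_item),
			    GFP_KERNEL | __GFP_ZERO, node);
}

/* The same flag works for kmem_cache-backed allocations. */
static void *example_cache_alloc_zeroed(struct kmem_cache *cache, int node)
{
	return kmem_cache_alloc_node(cache, GFP_KERNEL | __GFP_ZERO, node);
}

Because the zeroing check in slab_alloc() only reads gfpflags, which the commit message notes is already on the stack and cache-hot, callers that do not pass __GFP_ZERO pay only an unlikely(), predicted-not-taken branch.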
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	24
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 548d78df81e1..479eb5c01917 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1077,7 +1077,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
	void *last;
	void *p;

-	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));

	if (flags & __GFP_WAIT)
		local_irq_enable();
@@ -1540,7 +1540,7 @@ debug:
 * Otherwise we can simply pick the next object from the lockless free list.
 */
static void __always_inline *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, void *addr, int length)
{
	struct page *page;
	void **object;
@@ -1558,19 +1558,25 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
		page->lockless_freelist = object[page->offset];
	}
	local_irq_restore(flags);
+
+	if (unlikely((gfpflags & __GFP_ZERO) && object))
+		memset(object, 0, length);
+
	return object;
}

void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, -1,
+			__builtin_return_address(0), s->objsize);
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, node,
+			__builtin_return_address(0), s->objsize);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
@@ -2318,7 +2324,7 @@ void *__kmalloc(size_t size, gfp_t flags)
	if (ZERO_OR_NULL_PTR(s))
		return s;
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	return slab_alloc(s, flags, -1, __builtin_return_address(0), size);
}
EXPORT_SYMBOL(__kmalloc);
@@ -2330,7 +2336,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
	if (ZERO_OR_NULL_PTR(s))
		return s;
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	return slab_alloc(s, flags, node, __builtin_return_address(0), size);
}
EXPORT_SYMBOL(__kmalloc_node);
#endif
@@ -2643,7 +2649,7 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
{
	void *x;

-	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
+	x = slab_alloc(s, flags, -1, __builtin_return_address(0), 0);
	if (x)
		memset(x, 0, s->objsize);
	return x;
@@ -2693,7 +2699,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
	if (ZERO_OR_NULL_PTR(s))
		return s;
-	return slab_alloc(s, gfpflags, -1, caller);
+	return slab_alloc(s, gfpflags, -1, caller, size);
}
void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
@@ -2704,7 +2710,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
	if (ZERO_OR_NULL_PTR(s))
		return s;
-	return slab_alloc(s, gfpflags, node, caller);
+	return slab_alloc(s, gfpflags, node, caller, size);
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)