author     Christoph Lameter <clameter@sgi.com>                      2007-07-17 04:03:22 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>     2007-07-17 10:23:01 -0700
commit     6cb8f91320d3e720351c21741da795fed580b21b (patch)
tree       c9f73c8b82cd0f6c534939b8b9f36e8615b0ab2d /mm
parent     ef2ad80c7d255ed0449eda947c2d700635b7e0f5 (diff)
Slab allocators: consistent ZERO_SIZE_PTR support and NULL result semantics
Define ZERO_OR_NULL_PTR macro to be able to remove the checks from the allocators. Move ZERO_SIZE_PTR related stuff into slab.h.

Make ZERO_SIZE_PTR work for all slab allocators and get rid of the WARN_ON_ONCE(size == 0) that is still remaining in SLAB.

Make slub return NULL like the other allocators if a too large memory segment is requested via __kmalloc.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
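The patch works because ZERO_SIZE_PTR is a small non-NULL constant, so a single unsigned comparison covers both the NULL and the zero-size cases. The slab.h side of the change is outside this mm-only diff; per the commit message, the definitions it adds look roughly like this sketch:

	/* Sketch of the include/linux/slab.h definitions this patch
	 * introduces (not shown in the mm/ diff below). ZERO_SIZE_PTR
	 * is a distinct non-NULL address that faults on dereference. */
	#define ZERO_SIZE_PTR ((void *)16)

	/* True for NULL and for ZERO_SIZE_PTR: one check handles both
	 * the failed-allocation and the zero-size-allocation cases. */
	#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
					(unsigned long)ZERO_SIZE_PTR)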
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  13
-rw-r--r--  mm/slob.c  11
-rw-r--r--  mm/slub.c  29
-rw-r--r--  mm/util.c   2
4 files changed, 32 insertions(+), 23 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 4bd8a53091b7..d2cd304fd8af 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -775,6 +775,9 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
*/
BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
+ if (!size)
+ return ZERO_SIZE_PTR;
+
while (size > csizep->cs_size)
csizep++;
@@ -2351,7 +2354,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* this should not happen at all.
* But leave a BUG_ON for some lucky dude.
*/
- BUG_ON(!cachep->slabp_cache);
+ BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
}
cachep->ctor = ctor;
cachep->name = name;
@@ -3653,8 +3656,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
struct kmem_cache *cachep;
cachep = kmem_find_general_cachep(size, flags);
- if (unlikely(cachep == NULL))
- return NULL;
+ if (unlikely(ZERO_OR_NULL_PTR(cachep)))
+ return cachep;
return kmem_cache_alloc_node(cachep, flags, node);
}
@@ -3760,7 +3763,7 @@ void kfree(const void *objp)
struct kmem_cache *c;
unsigned long flags;
- if (unlikely(!objp))
+ if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
local_irq_save(flags);
kfree_debugcheck(objp);
@@ -4447,7 +4450,7 @@ const struct seq_operations slabstats_op = {
*/
size_t ksize(const void *objp)
{
- if (unlikely(objp == NULL))
+ if (unlikely(ZERO_OR_NULL_PTR(objp)))
return 0;
return obj_size(virt_to_cache(objp));
diff --git a/mm/slob.c b/mm/slob.c
index 154e7bdf3544..41d32c3c0be4 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -347,7 +347,7 @@ static void slob_free(void *block, int size)
slobidx_t units;
unsigned long flags;
- if (!block)
+ if (ZERO_OR_NULL_PTR(block))
return;
BUG_ON(!size);
@@ -424,10 +424,13 @@ out:
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
+ unsigned int *m;
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
if (size < PAGE_SIZE - align) {
- unsigned int *m;
+ if (!size)
+ return ZERO_SIZE_PTR;
+
m = slob_alloc(size + align, gfp, align, node);
if (m)
*m = size;
@@ -450,7 +453,7 @@ void kfree(const void *block)
{
struct slob_page *sp;
- if (!block)
+ if (ZERO_OR_NULL_PTR(block))
return;
sp = (struct slob_page *)virt_to_page(block);
@@ -468,7 +471,7 @@ size_t ksize(const void *block)
{
struct slob_page *sp;
- if (!block)
+ if (ZERO_OR_NULL_PTR(block))
return 0;
sp = (struct slob_page *)virt_to_page(block);
diff --git a/mm/slub.c b/mm/slub.c
index 1b0a95d75dbb..548d78df81e1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2270,10 +2270,11 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
int index = kmalloc_index(size);
if (!index)
- return NULL;
+ return ZERO_SIZE_PTR;
/* Allocation too large? */
- BUG_ON(index < 0);
+ if (index < 0)
+ return NULL;
#ifdef CONFIG_ZONE_DMA
if ((flags & SLUB_DMA)) {
@@ -2314,9 +2315,10 @@ void *__kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *s = get_slab(size, flags);
- if (s)
- return slab_alloc(s, flags, -1, __builtin_return_address(0));
- return ZERO_SIZE_PTR;
+ if (ZERO_OR_NULL_PTR(s))
+ return s;
+
+ return slab_alloc(s, flags, -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kmalloc);
@@ -2325,9 +2327,10 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *s = get_slab(size, flags);
- if (s)
- return slab_alloc(s, flags, node, __builtin_return_address(0));
- return ZERO_SIZE_PTR;
+ if (ZERO_OR_NULL_PTR(s))
+ return s;
+
+ return slab_alloc(s, flags, node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kmalloc_node);
#endif
@@ -2378,7 +2381,7 @@ void kfree(const void *x)
* this comparison would be true for all "negative" pointers
* (which would cover the whole upper half of the address space).
*/
- if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
+ if (ZERO_OR_NULL_PTR(x))
return;
page = virt_to_head_page(x);
@@ -2687,8 +2690,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
{
struct kmem_cache *s = get_slab(size, gfpflags);
- if (!s)
- return ZERO_SIZE_PTR;
+ if (ZERO_OR_NULL_PTR(s))
+ return s;
return slab_alloc(s, gfpflags, -1, caller);
}
@@ -2698,8 +2701,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
{
struct kmem_cache *s = get_slab(size, gfpflags);
- if (!s)
- return ZERO_SIZE_PTR;
+ if (ZERO_OR_NULL_PTR(s))
+ return s;
return slab_alloc(s, gfpflags, node, caller);
}
diff --git a/mm/util.c b/mm/util.c
index 18396ea63ee6..f2f21b775516 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -76,7 +76,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
if (unlikely(!new_size)) {
kfree(p);
- return NULL;
+ return ZERO_SIZE_PTR;
}
ks = ksize(p);
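Taken together, these hunks give every allocator the same contract: kmalloc(0) and krealloc(p, 0, ...) return ZERO_SIZE_PTR, an oversized request returns NULL, and kfree()/ksize() accept either. A minimal caller-side sketch of the resulting semantics (illustrative code, not part of the patch):

	#include <linux/slab.h>

	void zero_size_demo(void)
	{
		/* A zero-size request now yields ZERO_SIZE_PTR on SLAB,
		 * SLOB and SLUB alike: non-NULL, but never dereferenceable. */
		void *p = kmalloc(0, GFP_KERNEL);

		if (ZERO_OR_NULL_PTR(p)) {
			/* Either the allocation failed (NULL) or it was a
			 * zero-size request; in both cases there is nothing
			 * usable behind the pointer. */
		}

		kfree(p);	/* safe: kfree() ignores ZERO_SIZE_PTR and NULL */
	}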