author		Christoph Lameter <clameter@sgi.com>	2007-10-16 01:26:07 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 09:43:01 -0700
commit		ee3c72a14bfecdf783738032ff3c73ef6412f5b3 (patch)
tree		876c2c5d33058be8502504330726bb16b876ba52 /mm
parent		b3fba8da653999c67d7517050f196e92da6f8d3b (diff)
SLUB: Avoid touching page struct when freeing to per cpu slab
Set c->node to -1 if we allocate from a debug slab. The free fast path can
then test c->node instead of SlabDebug(page), which would require touching
the page struct cacheline.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Tested-by: Alexey Dobriyan <adobriyan@sw.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
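
To make the trick concrete, here is a minimal standalone sketch of the
pattern (not the kernel code itself: the names page_info, cpu_slab and
fast_free are invented for illustration). Encoding "this is a debug slab"
as a negative node id lets the free fast path decide using only the
per-cpu structure, which is very likely cache-hot, instead of reading
page->flags:

#include <stdbool.h>

struct page_info {		/* stand-in for struct page */
	unsigned long flags;	/* the SlabDebug bit lives here */
};

struct cpu_slab {		/* stand-in for struct kmem_cache_cpu */
	struct page_info *page;	/* slab page currently in use */
	void **freelist;	/* per-cpu list of free objects */
	int node;		/* NUMA node id, or -1 for a debug slab */
};

/*
 * Free fast path: every field tested lives in the per-cpu structure;
 * page->flags is never read.
 */
static bool fast_free(struct cpu_slab *c, struct page_info *page,
		      void **object)
{
	if (page == c->page && c->node >= 0) {	/* was: !SlabDebug(page) */
		*object = c->freelist;		/* link object into freelist */
		c->freelist = object;
		return true;
	}
	return false;	/* debug slab or foreign page: take the slow path */
}

The cost of the trick shows up in slab_objects() in the diff below: c->node
no longer always holds a valid node id, so the statistics loop has to skip
CPUs whose node is negative.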
Diffstat (limited to 'mm')
 mm/slub.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 5d895d44c327..ea9fd72093d8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1537,6 +1537,7 @@ debug:
 
 	c->page->inuse++;
 	c->page->freelist = object[c->offset];
+	c->node = -1;
 	slab_unlock(c->page);
 	return object;
 }
@@ -1560,8 +1561,7 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->page || !c->freelist ||
-					!node_match(c, node)))
+	if (unlikely(!c->freelist || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
@@ -1670,7 +1670,7 @@ static void __always_inline slab_free(struct kmem_cache *s,
 	local_irq_save(flags);
 	debug_check_no_locks_freed(object, s->objsize);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (likely(page == c->page && !SlabDebug(page))) {
+	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
 	} else
@@ -3250,12 +3250,16 @@ static unsigned long slab_objects(struct kmem_cache *s,
 
 	for_each_possible_cpu(cpu) {
 		struct page *page;
+		int node;
 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
 		if (!c)
 			continue;
 
 		page = c->page;
+		node = c->node;
+		if (node < 0)
+			continue;
 		if (page) {
 			if (flags & SO_CPU) {
 				int x = 0;
@@ -3265,9 +3269,9 @@ static unsigned long slab_objects(struct kmem_cache *s,
 				else
 					x = 1;
 				total += x;
-				nodes[c->node] += x;
+				nodes[node] += x;
 			}
-			per_cpu[c->node]++;
+			per_cpu[node]++;
 		}
 	}
 
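
One more subtlety in the slab_alloc() hunk above: the !c->page test could
be dropped because a debug slab never populates the per-cpu freelist, so
c->freelist is NULL both when there is no per-cpu page and when the slab is
under debugging; the single freelist test routes either case to
__slab_alloc(). A sketch using the same invented cpu_slab structure as
above (node_match here mirrors the kernel's NUMA check, where node == -1
means "any node"):

/* Hypothetical node_match(): a request for "any node" always matches. */
static bool node_match(struct cpu_slab *c, int node)
{
	return node == -1 || c->node == node;
}

/*
 * Alloc fast path sketch: c->freelist is NULL whenever there is no
 * per-cpu page *or* the slab is a debug slab, so one test covers what
 * previously took both !c->page and !c->freelist.
 */
static void *fast_alloc(struct cpu_slab *c, int node)
{
	void **object = c->freelist;

	if (!object || !node_match(c, node))
		return NULL;		/* caller falls back to __slab_alloc() */
	c->freelist = *object;		/* pop the first free object */
	return object;
}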