author    Joonsoo Kim <iamjoonsoo.kim@lge.com>  2014-10-09 15:26:27 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-09 22:25:51 -0400
commit    bf0dea23a9c094ae869a88bb694fbe966671bf6d (patch)
tree      420ca01f321664323b3ad0eeead8f2b4e04cd51e /include
parent    12220dea07f1ac6ac717707104773d771c3f3077 (diff)
mm/slab: use percpu allocator for cpu cache
Because of a chicken-and-egg problem, initialization of SLAB is really complicated: we need to allocate a cpu cache through SLAB to make the kmem_cache work, but before kmem_cache is initialized, allocation through SLAB is impossible.

SLUB, on the other hand, does initialization in a simpler way: it uses the percpu allocator to allocate its cpu cache, so there is no chicken-and-egg problem. This patch therefore tries to use the percpu allocator in SLAB as well. It simplifies the initialization step in SLAB so that we can maintain the SLAB code more easily. In my testing there is no performance difference.

This implementation relies on the percpu allocator. Because the percpu allocator uses vmalloc address space, that address space could be exhausted by this change on a system with many cpus and a *32 bit* kernel. This implementation can cover 1024 cpus in the worst case, per the following calculation.

Worst:  1024 cpus * 4 bytes for pointer * 300 kmem_caches *
        120 objects per cpu_cache = 140 MB
Normal: 1024 cpus * 4 bytes for pointer * 150 kmem_caches (slab merge) *
        80 objects per cpu_cache = 46 MB

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Jeremiah Mahler <jmmahler@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
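The allocation side of this change lives in mm/slab.c, which this include/-only view omits. A minimal sketch of the approach, assuming simplified details (the helper name init_arraycache and the entries/batchcount parameters are illustrative, not necessarily the patch's exact signatures):

	/*
	 * Sketch: allocate one array_cache per cpu from the percpu
	 * allocator instead of sizing an array[] tail inside kmem_cache.
	 */
	static struct array_cache __percpu *
	alloc_kmem_cache_cpus(struct kmem_cache *cachep,
			      int entries, int batchcount)
	{
		int cpu;
		size_t size;
		struct array_cache __percpu *cpu_cache;

		/*
		 * Each per-cpu slot holds the array_cache header plus
		 * 'entries' object pointers.
		 */
		size = sizeof(void *) * entries + sizeof(struct array_cache);
		cpu_cache = __alloc_percpu(size, sizeof(void *));
		if (!cpu_cache)
			return NULL;

		/* init_arraycache() is assumed here for illustration. */
		for_each_possible_cpu(cpu)
			init_arraycache(per_cpu_ptr(cpu_cache, cpu),
					entries, batchcount);

		return cpu_cache;
	}

Because the percpu allocator hands back one chunk covering all possible cpus, there is no longer any need to size a trailing array to nr_cpu_ids at boot, which is exactly the bootstrap complication the commit message describes.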
Diffstat (limited to 'include')
-rw-r--r--  include/linux/slab_def.h  20
1 file changed, 3 insertions, 17 deletions
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8235dfbb3b05..b869d1662ba3 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -8,6 +8,8 @@
*/
struct kmem_cache {
+ struct array_cache __percpu *cpu_cache;
+
/* 1) Cache tunables. Protected by slab_mutex */
unsigned int batchcount;
unsigned int limit;
@@ -71,23 +73,7 @@ struct kmem_cache {
struct memcg_cache_params *memcg_params;
#endif
-/* 6) per-cpu/per-node data, touched during every alloc/free */
- /*
- * We put array[] at the end of kmem_cache, because we want to size
- * this array to nr_cpu_ids slots instead of NR_CPUS
- * (see kmem_cache_init())
- * We still use [NR_CPUS] and not [1] or [0] because cache_cache
- * is statically defined, so we reserve the max number of cpus.
- *
- * We also need to guarantee that the list is able to accomodate a
- * pointer for each node since "nodelists" uses the remainder of
- * available pointers.
- */
- struct kmem_cache_node **node;
- struct array_cache *array[NR_CPUS + MAX_NUMNODES];
- /*
- * Do not add fields after array[]
- */
+ struct kmem_cache_node *node[MAX_NUMNODES];
};
#endif /* _LINUX_SLAB_DEF_H */
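With cpu_cache moved to a percpu allocation, the fast-path lookup of the current cpu's cache reduces to a this_cpu_ptr() dereference. A hedged sketch of the accessor (the mm/slab.c side of the patch defines something along these lines; the exact body is an assumption here):

	/* Sketch: fetch this cpu's array_cache via the percpu pointer. */
	static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
	{
		return this_cpu_ptr(cachep->cpu_cache);
	}

This also retires the old "do not add fields after array[]" constraint: kmem_cache no longer ends in a variable-sized tail, so node[] can be a plain fixed-size member, as the diff above shows.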