Diffstat (limited to 'mm/slub.c'):
 mm/slub.c | 16 ----------------
 1 file changed, 16 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 42ce17304275..7e1fe663795a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2104,24 +2104,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
-#ifdef CONFIG_SMP
-	/*
-	 * Will use reserve that does not require slab operation during
-	 * early boot.
-	 */
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
-#else
-	/*
-	 * Special hack for UP mode. allocpercpu() falls back to kmalloc
-	 * operations. So we cannot use that before the slab allocator is up
-	 * Simply get the smallest possible compound page. The page will be
-	 * released via kfree() when the cpu caches are resized later.
-	 */
-	if (slab_state < UP)
-		s->cpu_slab = (__percpu void *)kmalloc_large(PAGE_SIZE << 1, GFP_NOWAIT);
-	else
-#endif
 		s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
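
For reference, below is a sketch of how alloc_kmem_cache_cpus() reads once the hunk above is applied. The BUILD_BUG_ON() and the alloc_percpu() call are taken straight from the context lines of the diff; the trailing return statement and closing brace are assumptions about the function body that falls outside the hunk shown.

/*
 * Sketch only: reconstructed from the post-patch side of the hunk above.
 * The return statement is assumed surrounding context, not part of the patch.
 */
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
	/*
	 * The per-cpu allocator keeps an early reserve of
	 * PERCPU_DYNAMIC_EARLY_SIZE bytes that needs no slab support, so
	 * alloc_percpu() can be used here even during early boot. The
	 * build fails if SLUB_PAGE_SHIFT kmem_cache_cpu structures (one
	 * per statically created kmalloc cache) would not fit in that
	 * reserve.
	 */
	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));

	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);

	return s->cpu_slab != NULL;	/* assumed surrounding context */
}

As the deleted comment explains, the UP-only branch existed because alloc_percpu() used to fall back to kmalloc() on !SMP builds and therefore could not run before the slab allocator itself was initialized; the patch drops that workaround and relies on the single alloc_percpu() path in both configurations.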