-rw-r--r--  include/linux/slub_def.h  19
-rw-r--r--  mm/slub.c                 24
2 files changed, 21 insertions, 22 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 17ebe0f89bf3..a78fb4ac2015 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -131,11 +131,21 @@ struct kmem_cache {
#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA (__force gfp_t)0
+#define KMALLOC_CACHES SLUB_PAGE_SHIFT
+#endif
+
/*
* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
*/
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
/*
* Sorry that the following has to be that ugly but some versions of GCC
@@ -203,13 +213,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
return &kmalloc_caches[index];
}
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
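The slub_def.h half of the patch moves the SLUB_DMA definition up and, when CONFIG_ZONE_DMA is set, enlarges the static kmalloc_caches[] array past SLUB_PAGE_SHIFT so that DMA caches can later be carved out of the same storage. A worked example of the sizing, as a standalone C sketch (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12; real values are architecture-dependent):

#include <stdio.h>

#define PAGE_SHIFT 12                       /* assumption: 4 KiB pages */
#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)    /* 14 */
#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)

int main(void)
{
	printf("regular kmalloc caches: %d\n", SLUB_PAGE_SHIFT);      /* 14 */
	printf("total static slots:     %d\n", KMALLOC_CACHES);       /* 22 */
	printf("slots reserved for DMA: %d\n",
	       KMALLOC_CACHES - SLUB_PAGE_SHIFT);                     /* 8 */
	return 0;
}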
diff --git a/mm/slub.c b/mm/slub.c
index d6c9ecf629d5..cdb7f0214af0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2092,7 +2092,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{
int cpu;
- if (s < kmalloc_caches + SLUB_PAGE_SHIFT && s >= kmalloc_caches)
+ if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
/*
* Boot time creation of the kmalloc array. Use static per cpu data
* since the per cpu allocator is not available yet.
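The bounds check above works because kmalloc_caches is a static C array: a cache created at boot time can be recognized by a plain address-range comparison, with no extra flag needed. A minimal standalone sketch of the idiom (not kernel code; the struct layout and array size are placeholders):

#include <stddef.h>

#define KMALLOC_CACHES 22                   /* illustrative value */

struct kmem_cache { size_t size; };

static struct kmem_cache kmalloc_caches[KMALLOC_CACHES];

/* True iff s points into the static kmalloc_caches[] array. */
static int is_boot_cache(struct kmem_cache *s)
{
	return s >= kmalloc_caches && s < kmalloc_caches + KMALLOC_CACHES;
}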
@@ -2539,7 +2539,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
* Kmalloc subsystem
*******************************************************************/
-struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);
static int __init setup_slub_min_order(char *str)
@@ -2629,6 +2629,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
char *text;
size_t realsize;
unsigned long slabflags;
+ int i;
s = kmalloc_caches_dma[index];
if (s)
@@ -2649,18 +2650,13 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
(unsigned int)realsize);
- if (flags & __GFP_WAIT)
- s = kmalloc(kmem_size, flags & ~SLUB_DMA);
- else {
- int i;
+ s = NULL;
+ for (i = 0; i < KMALLOC_CACHES; i++)
+ if (!kmalloc_caches[i].size)
+ break;
- s = NULL;
- for (i = 0; i < SLUB_PAGE_SHIFT; i++)
- if (kmalloc_caches[i].size) {
- s = kmalloc_caches + i;
- break;
- }
- }
+ BUG_ON(i >= KMALLOC_CACHES);
+ s = kmalloc_caches + i;
/*
* Must defer sysfs creation to a workqueue because we don't know
@@ -2674,7 +2670,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
if (!s || !text || !kmem_cache_open(s, flags, text,
realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
- kfree(s);
+ s->size = 0;
kfree(text);
goto unlock_out;
}
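Taken together, the slub.c hunks replace the kmalloc()-based allocation of a DMA cache with a reservation scheme over the enlarged static array: a slot whose size field is zero is free, the first free slot is claimed, and the failure path hands the slot back by zeroing size instead of calling kfree(). A standalone sketch of that pattern (assumed names and sizes; userspace assert() standing in for the kernel's BUG_ON()):

#include <assert.h>
#include <stddef.h>

#define KMALLOC_CACHES 22                   /* illustrative value */

struct kmem_cache { size_t size; };         /* size == 0 => slot unused */

static struct kmem_cache kmalloc_caches[KMALLOC_CACHES];

static struct kmem_cache *claim_static_slot(size_t objsize)
{
	int i;

	for (i = 0; i < KMALLOC_CACHES; i++)
		if (!kmalloc_caches[i].size)
			break;

	assert(i < KMALLOC_CACHES);         /* the patch uses BUG_ON() here */
	kmalloc_caches[i].size = objsize;   /* mark the slot as taken */
	return kmalloc_caches + i;
}

static void release_static_slot(struct kmem_cache *s)
{
	s->size = 0;                        /* mirrors "s->size = 0" on failure */
}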