author: Andrey Ryabinin <a.ryabinin@samsung.com>  2015-04-15 16:15:05 -0700
committer: Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 16:35:20 -0700
commit: 923936157b158f36bd6a3d86496dce82b1a957de (patch)
tree: 4a78fc384c220efad52d250b49795b535fbc2db0 /mm/mempool.c
parent: bda6d33042a486c8f7b15bf15a80fd07d4eab204 (diff)
mm/mempool.c: kasan: poison mempool elements
Mempools keep allocated objects in reserve for situations when an ordinary allocation may not be possible to satisfy. These objects shouldn't be accessed before they leave the pool. This patch poisons elements when they get into the pool and unpoisons them when they leave it, which lets KASan detect use-after-free of mempool elements.

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Tested-by: David Rientjes <rientjes@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Chernenkov <drcheren@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
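As a quick illustration of what this buys (not part of the patch; the cache name, element size, and pool size below are made up), a minimal sketch of the kind of bug KASan can now flag on a slab-backed mempool:

#include <linux/mempool.h>
#include <linux/slab.h>

/* Illustrative only: names and sizes are invented for this sketch;
 * error handling is omitted for brevity. */
static struct kmem_cache *demo_cache;
static mempool_t *demo_pool;

static void demo_use_after_free(void)
{
	void *obj;

	demo_cache = kmem_cache_create("demo_cache", 128, 0, 0, NULL);
	demo_pool = mempool_create_slab_pool(4, demo_cache);

	obj = mempool_alloc(demo_pool, GFP_KERNEL);
	mempool_free(obj, demo_pool);

	/*
	 * If the freed element was parked in the pool's reserve, this patch
	 * leaves it poisoned, so the stray write below is reported by KASan
	 * as a use-after-free; otherwise the backing slab's own KASan hooks
	 * catch it on the ordinary free path.
	 */
	((char *)obj)[0] = 0;
}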
Diffstat (limited to 'mm/mempool.c')
-rw-r--r--  mm/mempool.c  23
1 file changed, 23 insertions, 0 deletions
diff --git a/mm/mempool.c b/mm/mempool.c
index 2884d5bad77e..2cc08de8b1db 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
+#include <linux/kasan.h>
 #include <linux/kmemleak.h>
 #include <linux/export.h>
 #include <linux/mempool.h>
@@ -101,10 +102,31 @@ static inline void poison_element(mempool_t *pool, void *element)
 }
 #endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
 
+static void kasan_poison_element(mempool_t *pool, void *element)
+{
+	if (pool->alloc == mempool_alloc_slab)
+		kasan_slab_free(pool->pool_data, element);
+	if (pool->alloc == mempool_kmalloc)
+		kasan_kfree(element);
+	if (pool->alloc == mempool_alloc_pages)
+		kasan_free_pages(element, (unsigned long)pool->pool_data);
+}
+
+static void kasan_unpoison_element(mempool_t *pool, void *element)
+{
+	if (pool->alloc == mempool_alloc_slab)
+		kasan_slab_alloc(pool->pool_data, element);
+	if (pool->alloc == mempool_kmalloc)
+		kasan_krealloc(element, (size_t)pool->pool_data);
+	if (pool->alloc == mempool_alloc_pages)
+		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+}
+
 static void add_element(mempool_t *pool, void *element)
 {
 	BUG_ON(pool->curr_nr >= pool->min_nr);
 	poison_element(pool, element);
+	kasan_poison_element(pool, element);
 	pool->elements[pool->curr_nr++] = element;
 }
 
@@ -114,6 +136,7 @@ static void *remove_element(mempool_t *pool)
 	void *element = pool->elements[--pool->curr_nr];
 
 	BUG_ON(pool->curr_nr < 0);
 	check_element(pool, element);
+	kasan_unpoison_element(pool, element);
 	return element;
 }
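The dispatch in kasan_poison_element()/kasan_unpoison_element() works because pool->pool_data carries allocator-specific state: a kmem_cache pointer for mempool_alloc_slab, the element size for mempool_kmalloc (hence the (size_t)pool->pool_data cast), and the page order for mempool_alloc_pages. A hedged sketch of the kmalloc case, with a made-up pool name and sizes:

#include <linux/errno.h>
#include <linux/mempool.h>

/* Illustrative: for a kmalloc-backed pool, pool_data holds the element
 * size, which kasan_unpoison_element() casts back via (size_t)pool->pool_data. */
static mempool_t *buf_pool;

static int demo_kmalloc_pool_init(void)
{
	/* 8 reserved elements of 256 bytes each; the 256 ends up in pool_data. */
	buf_pool = mempool_create_kmalloc_pool(8, 256);
	return buf_pool ? 0 : -ENOMEM;
}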