Diffstat (limited to 'mm/kasan')
-rw-r--r--	mm/kasan/kasan_test.c	26
1 file changed, 2 insertions(+), 24 deletions(-)
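For context: every guard this patch deletes is an instance of kasan_test.c's KASAN_TEST_NEEDS_CONFIG_ON() helper, which skips a KUnit test case when a required config option is disabled. A sketch of its shape (lightly paraphrased from the kernel source; treat the exact message text as an approximation):

/* Skip the current test if the given config option is not enabled. */
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

With SLAB removed from the tree, SLUB is the only slab allocator, so IS_ENABLED(CONFIG_SLUB) is always true and the guard never skips anything; the patch below drops it as dead code.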
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 9b1024a6e580..691c15fc7cdb 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -215,7 +215,7 @@ static void kmalloc_node_oob_right(struct kunit *test)
/*
* Check that KASAN detects an out-of-bounds access for a big object allocated
- * via kmalloc(). But not as big as to trigger the page_alloc fallback for SLUB.
+ * via kmalloc(). But not as big as to trigger the page_alloc fallback.
*/
static void kmalloc_big_oob_right(struct kunit *test)
{
@@ -233,8 +233,7 @@ static void kmalloc_big_oob_right(struct kunit *test)
/*
* The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
* that does not fit into the largest slab cache and therefore is allocated via
- * the page_alloc fallback for SLUB. SLAB has no such fallback, and thus these
- * tests are not supported for it.
+ * the page_alloc fallback.
*/
static void kmalloc_large_oob_right(struct kunit *test)
@@ -242,8 +241,6 @@ static void kmalloc_large_oob_right(struct kunit *test)
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -258,8 +255,6 @@ static void kmalloc_large_uaf(struct kunit *test)
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
@@ -272,8 +267,6 @@ static void kmalloc_large_invalid_free(struct kunit *test)
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -407,18 +400,12 @@ static void krealloc_less_oob(struct kunit *test)
static void krealloc_large_more_oob(struct kunit *test)
{
- /* page_alloc fallback is only implemented for SLUB. */
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
KMALLOC_MAX_CACHE_SIZE + 235);
}
static void krealloc_large_less_oob(struct kunit *test)
{
- /* page_alloc fallback is only implemented for SLUB. */
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
KMALLOC_MAX_CACHE_SIZE + 201);
}
@@ -1156,9 +1143,6 @@ static void mempool_kmalloc_large_uaf(struct kunit *test)
size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
void *extra_elem;
- /* page_alloc fallback is only implemented for SLUB. */
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
extra_elem = mempool_prepare_kmalloc(test, &pool, size);
mempool_uaf_helper(test, &pool, false);
@@ -1227,9 +1211,6 @@ static void mempool_kmalloc_large_double_free(struct kunit *test)
size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
char *extra_elem;
- /* page_alloc fallback is only implemented for SLUB. */
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
extra_elem = mempool_prepare_kmalloc(test, &pool, size);
mempool_double_free_helper(test, &pool);
@@ -1284,9 +1265,6 @@ static void mempool_kmalloc_large_invalid_free(struct kunit *test)
size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
char *extra_elem;
- /* page_alloc fallback is only implemented for SLUB. */
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
extra_elem = mempool_prepare_kmalloc(test, &pool, size);
mempool_kmalloc_invalid_free_helper(test, &pool);
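For reference, this is the shape the kmalloc_large_* tests take after the cleanup, as a minimal sketch (the function name is hypothetical and mirrors the in-tree kmalloc_large_uaf; it assumes, per the comment in the patch, that any size above KMALLOC_MAX_CACHE_SIZE is served by the page_alloc fallback):

/* Hypothetical mirror of kmalloc_large_uaf() after the guard removal. */
static void kmalloc_large_uaf_sketch(struct kunit *test)
{
	char *ptr;
	/* Just past the largest slab cache: served by the page allocator. */
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	/* KASAN is expected to report a use-after-free on the freed pages. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

The only difference from the pre-patch version is the absent KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB) line: the page_alloc fallback is now unconditional, so the tests run on every build.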