From 40f3bf0cb04c91d33531b1b95788ad2f0e4062cf Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Tue, 2 Nov 2021 15:42:04 +0100
Subject: mm: Convert struct page to struct slab in functions used by other
 subsystems

KASAN, KFENCE and memcg interact with SLAB or SLUB internals through
the functions nearest_obj(), obj_to_index() and objs_per_slab(), which
take struct page as a parameter. This patch converts them to take
struct slab instead, including all callers, through a coccinelle
semantic patch:

// Options: --include-headers --no-includes --smpl-spacing include/linux/slab_def.h include/linux/slub_def.h mm/slab.h mm/kasan/*.c mm/kfence/kfence_test.c mm/memcontrol.c mm/slab.c mm/slub.c
// Note: needs coccinelle 1.1.1 to avoid breaking whitespace

@@
@@

-objs_per_slab_page(
+objs_per_slab(
...
)
{ ... }

@@
@@

-objs_per_slab_page(
+objs_per_slab(
...
)

@@
identifier fn =~ "obj_to_index|objs_per_slab";
@@

fn(...,
-	const struct page *page
+	const struct slab *slab
,...)
{
<...
(
-	page_address(page)
+	slab_address(slab)
|
-	page
+	slab
)
...>
}

@@
identifier fn =~ "nearest_obj";
@@

fn(...,
-	struct page *page
+	const struct slab *slab
,...)
{
<...
(
-	page_address(page)
+	slab_address(slab)
|
-	page
+	slab
)
...>
}

@@
identifier fn =~ "nearest_obj|obj_to_index|objs_per_slab";
expression E;
@@

fn(...,
(
-	slab_page(E)
+	E
|
-	virt_to_page(E)
+	virt_to_slab(E)
|
-	virt_to_head_page(E)
+	virt_to_slab(E)
|
-	page
+	page_slab(page)
)
,...)

Signed-off-by: Vlastimil Babka
Reviewed-by: Andrey Konovalov
Reviewed-by: Roman Gushchin
Acked-by: Johannes Weiner
Cc: Julia Lawall
Cc: Luis Chamberlain
Cc: Andrey Ryabinin
Cc: Alexander Potapenko
Cc: Andrey Konovalov
Cc: Dmitry Vyukov
Cc: Marco Elver
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Vladimir Davydov
---
 mm/kfence/kfence_test.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'mm/kfence')

diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 695030c1fff8..f7276711d7b9 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -291,8 +291,8 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
 	 * even for KFENCE objects; these are required so that
 	 * memcg accounting works correctly.
 	 */
-	KUNIT_EXPECT_EQ(test, obj_to_index(s, page, alloc), 0U);
-	KUNIT_EXPECT_EQ(test, objs_per_slab_page(s, page), 1);
+	KUNIT_EXPECT_EQ(test, obj_to_index(s, page_slab(page), alloc), 0U);
+	KUNIT_EXPECT_EQ(test, objs_per_slab(s, page_slab(page)), 1);
 
 	if (policy == ALLOCATE_ANY)
 		return alloc;
--
cgit v1.2.3
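A minimal sketch of the resulting API from a caller's point of view - the
helper below is hypothetical and not part of the series; it assumes a tree
with this commit applied, where struct slab and virt_to_slab() live in the
mm-internal mm/slab.h and obj_to_index()/objs_per_slab() come from the
slab_def.h/slub_def.h headers it pulls in:

	#include <linux/slab.h>
	#include "slab.h"	/* mm-internal: struct slab, virt_to_slab() */

	/* Hypothetical example: which slot does objp occupy in its slab? */
	static unsigned int example_obj_index(struct kmem_cache *s, void *objp)
	{
		/* previously: virt_to_head_page(objp), yielding a struct page */
		struct slab *slab = virt_to_slab(objp);

		/* both helpers now take the struct slab directly */
		return obj_to_index(s, slab, objp);
	}
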
From 8dae0cfed57357c0a627f377386ade1591f4d9ea Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Wed, 3 Nov 2021 18:19:48 +0100
Subject: mm/kfence: Convert kfence_guarded_alloc() to struct slab

The function sets some fields that are being moved from struct page to
struct slab, so it needs to be converted.

Signed-off-by: Vlastimil Babka
Tested-by: Marco Elver
Cc: Alexander Potapenko
Cc: Marco Elver
Cc: Dmitry Vyukov
---
 mm/kfence/core.c        | 12 ++++++------
 mm/kfence/kfence_test.c |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)
(limited to 'mm/kfence')

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 09945784df9e..4eb60cf5ff8b 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -360,7 +360,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 {
 	struct kfence_metadata *meta = NULL;
 	unsigned long flags;
-	struct page *page;
+	struct slab *slab;
 	void *addr;
 
 	/* Try to obtain a free object. */
@@ -424,13 +424,13 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 
 	alloc_covered_add(alloc_stack_hash, 1);
 
-	/* Set required struct page fields. */
-	page = virt_to_page(meta->addr);
-	page->slab_cache = cache;
+	/* Set required slab fields. */
+	slab = virt_to_slab((void *)meta->addr);
+	slab->slab_cache = cache;
 	if (IS_ENABLED(CONFIG_SLUB))
-		page->objects = 1;
+		slab->objects = 1;
 	if (IS_ENABLED(CONFIG_SLAB))
-		page->s_mem = addr;
+		slab->s_mem = addr;
 
 	/* Memory initialization. */
 	for_each_canary(meta, set_canary_byte);
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index f7276711d7b9..a22b1af85577 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -282,7 +282,7 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
 	alloc = kmalloc(size, gfp);
 
 	if (is_kfence_address(alloc)) {
-		struct page *page = virt_to_head_page(alloc);
+		struct slab *slab = virt_to_slab(alloc);
 		struct kmem_cache *s = test_cache ?:
 				kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];
 
@@ -291,8 +291,8 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
 	 * even for KFENCE objects; these are required so that
 	 * memcg accounting works correctly.
 	 */
-	KUNIT_EXPECT_EQ(test, obj_to_index(s, page_slab(page), alloc), 0U);
-	KUNIT_EXPECT_EQ(test, objs_per_slab(s, page_slab(page)), 1);
+	KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
+	KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);
 
 	if (policy == ALLOCATE_ANY)
 		return alloc;
--
cgit v1.2.3
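An editorial aside on the IS_ENABLED() pattern above, with a short sketch;
it assumes the struct slab layout as of this commit, where objects (SLUB)
and s_mem (SLAB) still exist in every configuration. IS_ENABLED(CONFIG_X)
expands to a plain 0/1 constant, so the dead branch is discarded by the
optimizer but must still parse and type-check:

	/* compiles only while both fields exist unconditionally */
	if (IS_ENABLED(CONFIG_SLUB))
		slab->objects = 1;	/* dead under SLAB, but still compiled */
	if (IS_ENABLED(CONFIG_SLAB))
		slab->s_mem = addr;	/* dead under SLUB, but still compiled */

This is why the next commit, which makes these fields config-dependent, has
to switch to preprocessor #if/#elif blocks: once slab->s_mem no longer
exists under CONFIG_SLUB, the IS_ENABLED() form would no longer build.
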
From 401fb12c68c257b9c9116b1475c0ac26b646fcc0 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Thu, 4 Nov 2021 11:30:58 +0100
Subject: mm/sl*b: Differentiate struct slab fields by sl*b implementations

With a struct slab definition separate from struct page, we can go
further and define only the fields that the chosen sl*b implementation
uses. This means everything between the __page_flags and
__page_refcount placeholders now depends on the chosen CONFIG_SL*B.
Some fields exist in all implementations (slab_list) but can be part of
a union in some, so it's simpler to repeat them than to complicate the
definition with even more ifdefs.

The patch doesn't change the physical offsets of the fields, although
that could be done later - for example, it's now clear that tighter
packing in SLOB could be achieved.

This should also prevent accidental use of fields that don't exist in
a given implementation. Before this patch, virt_to_cache() and
cache_from_obj() were visible for SLOB (albeit unused), even though
they rely on the slab_cache field that SLOB doesn't set. With this
patch, using them under SLOB is a compile error, so they are now hidden
behind #ifndef CONFIG_SLOB.

Signed-off-by: Vlastimil Babka
Reviewed-by: Roman Gushchin
Tested-by: Marco Elver # kfence
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Alexander Potapenko
Cc: Marco Elver
Cc: Dmitry Vyukov
---
 mm/kfence/core.c |  9 +++++----
 mm/slab.h        | 48 ++++++++++++++++++++++++++++++++++++++----------
 2 files changed, 43 insertions(+), 14 deletions(-)
(limited to 'mm/kfence')

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 4eb60cf5ff8b..267dfde43b91 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -427,10 +427,11 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	/* Set required slab fields. */
 	slab = virt_to_slab((void *)meta->addr);
 	slab->slab_cache = cache;
-	if (IS_ENABLED(CONFIG_SLUB))
-		slab->objects = 1;
-	if (IS_ENABLED(CONFIG_SLAB))
-		slab->s_mem = addr;
+#if defined(CONFIG_SLUB)
+	slab->objects = 1;
+#elif defined(CONFIG_SLAB)
+	slab->s_mem = addr;
+#endif
 
 	/* Memory initialization. */
 	for_each_canary(meta, set_canary_byte);
diff --git a/mm/slab.h b/mm/slab.h
index b8d7ed42cca8..ad15f624a6b5 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -8,9 +8,24 @@
 /* Reuses the bits in struct page */
 struct slab {
 	unsigned long __page_flags;
+
+#if defined(CONFIG_SLAB)
+
 	union {
 		struct list_head slab_list;
-		struct {	/* Partial pages */
+		struct rcu_head rcu_head;
+	};
+	struct kmem_cache *slab_cache;
+	void *freelist;	/* array of free object indexes */
+	void *s_mem;	/* first object */
+	unsigned int active;
+
+#elif defined(CONFIG_SLUB)
+
+	union {
+		struct list_head slab_list;
+		struct rcu_head rcu_head;
+		struct {
 			struct slab *next;
 #ifdef CONFIG_64BIT
 			int slabs;	/* Nr of slabs left */
@@ -18,25 +33,32 @@ struct slab {
 			short int slabs;
 #endif
 		};
-		struct rcu_head rcu_head;
 	};
-	struct kmem_cache *slab_cache; /* not slob */
+	struct kmem_cache *slab_cache;
 	/* Double-word boundary */
 	void *freelist;		/* first free object */
 	union {
-		void *s_mem;	/* slab: first object */
-		unsigned long counters;		/* SLUB */
-		struct {			/* SLUB */
+		unsigned long counters;
+		struct {
 			unsigned inuse:16;
 			unsigned objects:15;
 			unsigned frozen:1;
 		};
 	};
+	unsigned int __unused;
+
+#elif defined(CONFIG_SLOB)
+
+	struct list_head slab_list;
+	void *__unused_1;
+	void *freelist;		/* first free block */
+	void *__unused_2;
+	int units;
+
+#else
+#error "Unexpected slab allocator configured"
+#endif
 
-	union {
-		unsigned int active;		/* SLAB */
-		int units;			/* SLOB */
-	};
 	atomic_t __page_refcount;
 #ifdef CONFIG_MEMCG
 	unsigned long memcg_data;
@@ -48,10 +70,14 @@ struct slab {
 SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
 SLAB_MATCH(slab_list, slab_list);
+#ifndef CONFIG_SLOB
 SLAB_MATCH(rcu_head, rcu_head);
 SLAB_MATCH(slab_cache, slab_cache);
+#endif
+#ifdef CONFIG_SLAB
 SLAB_MATCH(s_mem, s_mem);
 SLAB_MATCH(active, active);
+#endif
 SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, memcg_data);
@@ -599,6 +625,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s,
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+#ifndef CONFIG_SLOB
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct slab *slab;
@@ -645,6 +672,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 		print_tracking(cachep, x);
 	return cachep;
 }
+#endif /* CONFIG_SLOB */
 
 static inline size_t slab_ksize(const struct kmem_cache *s)
 {
--
cgit v1.2.3
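
Finally, a hedged sketch of the SLAB_MATCH() mechanism that the mm/slab.h
hunks above extend; the macro body shown here is inferred from its usage,
and the authoritative definition lives in mm/slab.h. Because struct slab
overlays the memory of struct page, every field that generic code still
reaches through struct page must keep its offset, and the macro turns any
drift into a build error:

	#include <linux/mm_types.h>	/* struct page */
	#include <linux/build_bug.h>	/* static_assert() */
	#include <linux/stddef.h>	/* offsetof() */

	/* presumed shape: compare field offsets of the two overlaid structs */
	#define SLAB_MATCH(pg, sl) \
		static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))

	SLAB_MATCH(flags, __page_flags);	/* build-time only, zero runtime cost */
	SLAB_MATCH(_refcount, __page_refcount);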