author    Linus Torvalds <torvalds@linux-foundation.org>  2022-01-10 11:58:12 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-01-10 11:58:12 -0800
commit    ca1a46d6f5064c129f7ca6bcfd8f035d69da175c (patch)
tree      fb3da9324e2caa4ae650177ebd5598214e28c2b8 /mm/kfence
parent    d93aebbd76a07a8101d2f7393dc18be3e235f11b (diff)
parent    9d6c59c1c0d62a314a2b46839699b200cccd2d08 (diff)
Merge tag 'slab-for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:

 - Separate struct slab from struct page - an offshoot of the page
   folio work. Struct page fields used by slab allocators are moved
   from struct page to a new struct slab, which uses the same physical
   storage. Like struct folio, it is always a head page. This brings
   better type safety, separation of large kmalloc allocations from
   true slabs, and cleanup of the related objcg code.

 - A SLAB_MERGE_DEFAULT config optimization.

* tag 'slab-for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab: (33 commits)
  mm/slob: Remove unnecessary page_mapcount_reset() function call
  bootmem: Use page->index instead of page->freelist
  zsmalloc: Stop using slab fields in struct page
  mm/slub: Define struct slab fields for CONFIG_SLUB_CPU_PARTIAL only when enabled
  mm/slub: Simplify struct slab slabs field definition
  mm/sl*b: Differentiate struct slab fields by sl*b implementations
  mm/kfence: Convert kfence_guarded_alloc() to struct slab
  mm/kasan: Convert to struct folio and struct slab
  mm/slob: Convert SLOB to use struct slab and struct folio
  mm/memcg: Convert slab objcgs from struct page to struct slab
  mm: Convert struct page to struct slab in functions used by other subsystems
  mm/slab: Finish struct page to struct slab conversion
  mm/slab: Convert most struct page to struct slab by spatch
  mm/slab: Convert kmem_getpages() and kmem_freepages() to struct slab
  mm/slub: Finish struct page to struct slab conversion
  mm/slub: Convert most struct page to struct slab by spatch
  mm/slub: Convert pfmemalloc_match() to take a struct slab
  mm/slub: Convert __free_slab() to use struct slab
  mm/slub: Convert alloc_slab_page() to return a struct slab
  mm/slub: Convert print_page_info() to print_slab_info()
  ...
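The core of the series is that struct slab reuses the physical storage of
the struct page(s) backing a slab, with compile-time assertions keeping the
two layouts in sync. A minimal sketch of the pattern, with a deliberately
reduced field set (the actual 5.17 definition in mm/slab.h differentiates
fields per allocator and asserts several more field pairs):

	#include <linux/mm_types.h>
	#include <linux/build_bug.h>

	/* Sketch only: the real struct slab has per-allocator #ifdef sections. */
	struct slab {
		unsigned long __page_flags;	/* occupies the slot of page->flags */
		struct kmem_cache *slab_cache;	/* owning cache */
		void *freelist;			/* first free object */
	};

	/* Fail the build if a slab field drifts from its struct page slot. */
	#define SLAB_MATCH(pg, sl) \
		static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
	SLAB_MATCH(flags, __page_flags);

Because the storage is shared, converting between the two views costs
nothing at runtime, while the type system now catches code that touches
slab fields on a page that is not a slab.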
Diffstat (limited to 'mm/kfence')
-rw-r--r--  mm/kfence/core.c        | 17
-rw-r--r--  mm/kfence/kfence_test.c |  6
2 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index a19154a8d196..5ad40e3add45 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -360,7 +360,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 {
 	struct kfence_metadata *meta = NULL;
 	unsigned long flags;
-	struct page *page;
+	struct slab *slab;
 	void *addr;
 
 	/* Try to obtain a free object. */
@@ -424,13 +424,14 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 
 	alloc_covered_add(alloc_stack_hash, 1);
 
-	/* Set required struct page fields. */
-	page = virt_to_page(meta->addr);
-	page->slab_cache = cache;
-	if (IS_ENABLED(CONFIG_SLUB))
-		page->objects = 1;
-	if (IS_ENABLED(CONFIG_SLAB))
-		page->s_mem = addr;
+	/* Set required slab fields. */
+	slab = virt_to_slab((void *)meta->addr);
+	slab->slab_cache = cache;
+#if defined(CONFIG_SLUB)
+	slab->objects = 1;
+#elif defined(CONFIG_SLAB)
+	slab->s_mem = addr;
+#endif
 
 	/* Memory initialization. */
 	for_each_canary(meta, set_canary_byte);
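The converted kfence_guarded_alloc() resolves the slab view through
virt_to_slab() instead of virt_to_page(). A sketch of how such a helper can
be built on the folio layer, modeled on the helpers this series introduces
in mm/slab.h (treat the exact body as an approximation):

	static inline struct slab *virt_to_slab(const void *addr)
	{
		struct folio *folio = virt_to_folio(addr);

		/* Only hand out a struct slab for memory a slab allocator owns. */
		if (!folio_test_slab(folio))
			return NULL;

		return folio_slab(folio);	/* same storage, different type */
	}

Returning NULL for non-slab folios is what delivers the type safety the
pull message mentions: a plain virt_to_page() would happily return a page
that has no slab fields at all.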
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 695030c1fff8..a22b1af85577 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -282,7 +282,7 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
 	alloc = kmalloc(size, gfp);
 
 	if (is_kfence_address(alloc)) {
-		struct page *page = virt_to_head_page(alloc);
+		struct slab *slab = virt_to_slab(alloc);
 		struct kmem_cache *s = test_cache ?:
 			kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];
 
@@ -291,8 +291,8 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
 		 * even for KFENCE objects; these are required so that
 		 * memcg accounting works correctly.
 		 */
-		KUNIT_EXPECT_EQ(test, obj_to_index(s, page, alloc), 0U);
-		KUNIT_EXPECT_EQ(test, objs_per_slab_page(s, page), 1);
+		KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
+		KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);
 
 		if (policy == ALLOCATE_ANY)
 			return alloc;
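The test pins down the invariant that a KFENCE allocation looks like a slab
with exactly one object. Under SLUB, objs_per_slab() can simply read the
field that the core.c hunk above sets (a sketch of the SLUB flavor only;
the SLAB flavor derives the count from the cache instead):

	static inline unsigned int objs_per_slab(const struct kmem_cache *cache,
						 const struct slab *slab)
	{
		return slab->objects;	/* kfence_guarded_alloc() set this to 1 */
	}

With slab->objects == 1, obj_to_index() can only ever return 0, which is
exactly what the two KUNIT_EXPECT_EQ() checks assert.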