author		Chengming Zhou <chengming.zhou@linux.dev>	2024-06-27 15:59:59 +0800
committer	Andrew Morton <akpm@linux-foundation.org>	2024-07-12 15:52:13 -0700
commit		d468f1b8cb8e4c28ebb2282af2dd4021b60df7cb (patch)
tree		d77e2a4cbeac41ebdfdf0bfbac9bf1a7c5001205 /mm
parent		538148f9ba9e3136a877881e72ccbe56733daae2 (diff)
mm/zsmalloc: move record_obj() into obj_malloc()
We always call record_obj() after obj_malloc() to make the handle point
to the allocated object, so simplify the code by moving record_obj()
into obj_malloc(). There should be no functional change.
Link: https://lkml.kernel.org/r/20240627075959.611783-2-chengming.zhou@linux.dev
Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
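
To make the pattern concrete, here is a small standalone sketch (toy code, not the kernel implementation: the struct definitions, the object value, and the slot allocation are stand-ins; in zsmalloc proper the handle comes from a handle cache and the object value encodes a page/index location). It shows what the patch changes: obj_malloc() now performs the record_obj() step itself, so call sites no longer need the returned object value.

#include <assert.h>
#include <stdlib.h>

struct zs_pool { int unused; };
struct zspage { int unused; };

/* A handle is the address of a slot that holds the object's encoded
 * location; record_obj() fills that slot in. The unsigned long type
 * mirrors the kernel's use of unsigned long for handles. */
static void record_obj(unsigned long handle, unsigned long obj)
{
	*(unsigned long *)handle = obj;
}

/* After this patch, obj_malloc() records the object itself before
 * returning, instead of leaving that to every caller. */
static unsigned long obj_malloc(struct zs_pool *pool, struct zspage *zspage,
				unsigned long handle)
{
	unsigned long obj = 0xabcd0UL;	/* stand-in for location_to_obj() */

	record_obj(handle, obj);	/* moved here from the call sites */
	return obj;
}

int main(void)
{
	struct zs_pool pool = { 0 };
	struct zspage zspage = { 0 };
	unsigned long *slot = malloc(sizeof(*slot));	/* stand-in handle */
	unsigned long handle = (unsigned long)slot;

	/* Old call sites: obj = obj_malloc(...); record_obj(handle, obj);
	 * New call sites: one call, return value usually ignored. */
	obj_malloc(&pool, &zspage, handle);
	assert(*(unsigned long *)handle == 0xabcd0UL);

	free(slot);
	return 0;
}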
Diffstat (limited to 'mm')
-rw-r--r--	mm/zsmalloc.c	15
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 7fc25fa4e6b3..c2f4e62ffb46 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1306,7 +1306,6 @@ static unsigned long obj_malloc(struct zs_pool *pool,
 	void *vaddr;
 
 	class = pool->size_class[zspage->class];
-	handle |= OBJ_ALLOCATED_TAG;
 	obj = get_freeobj(zspage);
 
 	offset = obj * class->size;
@@ -1322,15 +1321,16 @@ static unsigned long obj_malloc(struct zs_pool *pool,
 	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
 	if (likely(!ZsHugePage(zspage)))
 		/* record handle in the header of allocated chunk */
-		link->handle = handle;
+		link->handle = handle | OBJ_ALLOCATED_TAG;
 	else
 		/* record handle to page->index */
-		zspage->first_page->index = handle;
+		zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
 
 	kunmap_atomic(vaddr);
 	mod_zspage_inuse(zspage, 1);
 
 	obj = location_to_obj(m_page, obj);
+	record_obj(handle, obj);
 
 	return obj;
 }
@@ -1348,7 +1348,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
  */
 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 {
-	unsigned long handle, obj;
+	unsigned long handle;
 	struct size_class *class;
 	int newfg;
 	struct zspage *zspage;
@@ -1371,10 +1371,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	spin_lock(&class->lock);
 	zspage = find_get_zspage(class);
 	if (likely(zspage)) {
-		obj = obj_malloc(pool, zspage, handle);
+		obj_malloc(pool, zspage, handle);
 		/* Now move the zspage to another fullness group, if required */
 		fix_fullness_group(class, zspage);
-		record_obj(handle, obj);
 		class_stat_inc(class, ZS_OBJS_INUSE, 1);
 
 		goto out;
@@ -1389,10 +1388,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	}
 
 	spin_lock(&class->lock);
-	obj = obj_malloc(pool, zspage, handle);
+	obj_malloc(pool, zspage, handle);
 	newfg = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, newfg);
-	record_obj(handle, obj);
 	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
 	class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
 	class_stat_inc(class, ZS_OBJS_INUSE, 1);
@@ -1591,7 +1589,6 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
 		free_obj = obj_malloc(pool, dst_zspage, handle);
 		zs_object_copy(class, free_obj, used_obj);
 		obj_idx++;
-		record_obj(handle, free_obj);
 		obj_free(class->size, used_obj);
 
 		/* Stop if there is no more space */
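
One detail worth noting in the first two hunks: the patch cannot keep the up-front `handle |= OBJ_ALLOCATED_TAG` on the local variable, because obj_malloc() now ends by calling record_obj(handle, obj), which treats the handle as the address of the slot where the object location is stored; setting the tag bit on the variable would corrupt that address. The tag is therefore OR'ed in only at the two places the handle is actually written into the zspage (the chunk header for normal pages, page->index for huge pages), keeping the untagged handle available for record_obj().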