author     Matthew Wilcox (Oracle) <willy@infradead.org>    2024-03-21 14:24:45 +0000
committer  Andrew Morton <akpm@linux-foundation.org>        2024-04-25 20:56:00 -0700
commit     46df8e73a4a3f1445f2a8429111e72ede1f4d291 (patch)
tree       06f3131bd2de34e954040b566884111a93012cf2 /mm
parent     8682a7be36d8c6ebd484753034a716a13f8a1f54 (diff)
mm: free up PG_slab
Reclaim the Slab page flag by using a spare bit in PageType. We are
perennially short of page flags for various purposes, and now that the
original SLAB allocator has been retired, SLUB does not use the
mapcount/page_type field. This lets us remove a number of special cases
for ignoring mapcount on Slab pages.
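
The trick this relies on is that page_type shares storage with the mapcount, and values in a range that can never be a valid mapcount are interpreted as type bits instead. A rough userspace sketch of that test follows; PAGE_TYPE_BASE, PG_slab_type and fake_PageSlab() are illustrative stand-ins, not the kernel's actual constants or helpers, and this is not part of the patch itself.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only -- not the kernel's exact definitions. */
#define PAGE_TYPE_BASE	0xf0000000u	/* sentinel range: never a valid mapcount */
#define PG_slab_type	0x00000800u	/* the "spare bit" claimed for slab pages */

struct fake_page {
	unsigned int page_type;	/* overlays the mapcount word of struct page */
};

/* A type bit is "set" by clearing it inside the all-ones sentinel value. */
static bool fake_PageSlab(const struct fake_page *page)
{
	return (page->page_type & (PAGE_TYPE_BASE | PG_slab_type)) == PAGE_TYPE_BASE;
}

int main(void)
{
	struct fake_page slab_page   = { .page_type = 0xffffffffu & ~PG_slab_type };
	struct fake_page mapped_page = { .page_type = 3 };	/* a plausible mapcount */

	printf("slab:   %d\n", fake_PageSlab(&slab_page));	/* prints 1 */
	printf("mapped: %d\n", fake_PageSlab(&mapped_page));	/* prints 0 */
	return 0;
}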
[willy@infradead.org: update vmcoreinfo]
Link: https://lkml.kernel.org/r/ZgGV-O8WYQ_83kxp@casper.infradead.org
Link: https://lkml.kernel.org/r/20240321142448.1645400-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory-failure.c | 9
-rw-r--r--  mm/slab.h           | 2
2 files changed, 1 insertion(+), 10 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9e62a00b46dd..0a7a8a4ba421 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1251,7 +1251,6 @@ static int me_huge_page(struct page_state *ps, struct page *p)
 #define mlock		(1UL << PG_mlocked)
 #define lru		(1UL << PG_lru)
 #define head		(1UL << PG_head)
-#define slab		(1UL << PG_slab)
 #define reserved	(1UL << PG_reserved)
 
 static struct page_state error_states[] = {
@@ -1261,13 +1260,6 @@ static struct page_state error_states[] = {
 	 * PG_buddy pages only make a small fraction of all free pages.
 	 */
 
-	/*
-	 * Could in theory check if slab page is free or if we can drop
-	 * currently unused objects without touching them. But just
-	 * treat it as standard kernel for now.
-	 */
-	{ slab,		slab,		MF_MSG_SLAB,	me_kernel },
-
 	{ head,		head,		MF_MSG_HUGE,		me_huge_page },
 
 	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
@@ -1294,7 +1286,6 @@ static struct page_state error_states[] = {
 #undef mlock
 #undef lru
 #undef head
-#undef slab
 #undef reserved
 
 static void update_per_node_mf_stats(unsigned long pfn,
diff --git a/mm/slab.h b/mm/slab.h
index 65db525e93af..1343bfa12cee 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -84,8 +84,8 @@ struct slab {
 		};
 		struct rcu_head rcu_head;
 	};
-	unsigned int __unused;
+	unsigned int __page_type;
 
 	atomic_t __page_refcount;
 #ifdef CONFIG_SLAB_OBJ_EXT
 	unsigned long obj_exts;
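
Since struct slab is a typed overlay of struct page, renaming the placeholder __unused to __page_type simply records which struct page word it shadows now that page_type carries the slab bit. Below is a standalone sketch of the kind of compile-time offset check, in the spirit of the SLAB_MATCH() assertions in mm/slab.h, that keeps such overlays in sync; the two structures are simplified stand-ins for illustration, not the real kernel definitions.

#include <assert.h>
#include <stddef.h>

/* Simplified stand-in for struct page (illustration only). */
struct fake_page {
	unsigned long flags;
	unsigned long compound_head;
	unsigned int _mapcount;		/* unioned with page_type in the kernel */
	unsigned int _refcount;
};

/* Simplified stand-in for struct slab (illustration only). */
struct fake_slab {
	unsigned long __page_flags;
	unsigned long slab_cache;
	unsigned int __page_type;	/* was __unused before this patch */
	unsigned int __page_refcount;
};

/* Assert that an overlaid field sits at the same offset in both views. */
#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct fake_page, pg) ==			\
		      offsetof(struct fake_slab, sl),			\
		      #pg " must line up with " #sl)

SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(_mapcount, __page_type);	/* the word this patch repurposes */
SLAB_MATCH(_refcount, __page_refcount);

int main(void)
{
	return 0;	/* nothing to do at runtime; the checks are compile-time */
}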