author		Matthew Wilcox (Oracle) <willy@infradead.org>	2024-08-21 18:39:12 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2024-09-03 21:15:43 -0700
commit		04cb7502a5d70a6ca230e8c24835dc7dccd39fa7 (patch)
tree		4c7473c3017dffe749b319a44d879d768c030338
parent		4ffca5a96678c9fe1a39ec1e9c8fd326b57ea8a7 (diff)
zsmalloc: use all available 24 bits of page_type
Now that we have an extra 8 bits, we don't need to limit ourselves to
supporting a 64KiB page size.  I'm sure both Hexagon users are grateful,
but it does reduce complexity a little.  We can also remove
reset_first_obj_offset() as calling __ClearPageZsmalloc() will now reset
all 32 bits of page_type.

Link: https://lkml.kernel.org/r/20240821173914.2270383-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--	drivers/block/zram/Kconfig	1
-rw-r--r--	mm/Kconfig			10
-rw-r--r--	mm/zsmalloc.c			15
3 files changed, 6 insertions, 20 deletions
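
The change relies on the page_type layout where the type tag (PGTY_zsmalloc) lives in the top byte and the low 24 bits are free for per-type data, here the offset of the first object in a subpage. The following standalone userspace sketch only illustrates that mask arithmetic and the resulting 16 MiB page-size ceiling; it is not kernel code, ignores the kernel's inverted page_type encoding, and EXAMPLE_PGTY_ZSMALLOC is a placeholder value, not the real constant from page-flags.h.

/*
 * Sketch of the 24-bit first-object-offset encoding used by this patch.
 * A bare uint32_t stands in for page->page_type.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FIRST_OBJ_PAGE_TYPE_MASK 0xffffffu   /* low 24 bits hold the offset */
#define EXAMPLE_PGTY_ZSMALLOC    0xf6u       /* placeholder type tag for the top byte */

static uint32_t set_first_obj_offset(uint32_t page_type, uint32_t offset)
{
	assert((offset & ~FIRST_OBJ_PAGE_TYPE_MASK) == 0); /* offset must fit in 24 bits */
	page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;            /* drop any previous offset */
	return page_type | offset;                         /* type tag in bits 24-31 survives */
}

static uint32_t get_first_obj_offset(uint32_t page_type)
{
	return page_type & FIRST_OBJ_PAGE_TYPE_MASK;
}

int main(void)
{
	uint32_t page_type = EXAMPLE_PGTY_ZSMALLOC << 24;  /* type marker in the top byte */

	page_type = set_first_obj_offset(page_type, 0x12345);
	printf("first object offset: 0x%x\n", get_first_obj_offset(page_type));
	printf("type byte preserved: 0x%x\n", page_type >> 24);

	/* 24 bits of offset cover pages up to 16 MiB, so the old
	 * PAGE_SIZE <= 64 KiB restriction (16-bit offsets) can go away. */
	return 0;
}

With only 16 bits, the largest representable in-page offset was 64 KiB - 1, which is why zsmalloc previously depended on PAGE_SIZE_LESS_THAN_256KB ("we want <= 64 KiB"); with 24 bits that Kconfig guard, and HAVE_ZSMALLOC along with it, is no longer needed, as the diff below shows.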
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index eacf1cba7bf4..7b29cce60ab2 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -2,7 +2,6 @@
config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS && MMU
- depends on HAVE_ZSMALLOC
depends on CRYPTO_LZO || CRYPTO_ZSTD || CRYPTO_LZ4 || CRYPTO_LZ4HC || CRYPTO_842
select ZSMALLOC
help
diff --git a/mm/Kconfig b/mm/Kconfig
index 3936fe4d26d9..5946dcdcaeda 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -128,7 +128,7 @@ config ZSWAP_COMPRESSOR_DEFAULT
choice
prompt "Default allocator"
depends on ZSWAP
- default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if HAVE_ZSMALLOC
+ default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if MMU
default ZSWAP_ZPOOL_DEFAULT_ZBUD
help
Selects the default allocator for the compressed cache for
@@ -154,7 +154,6 @@ config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
bool "zsmalloc"
- depends on HAVE_ZSMALLOC
select ZSMALLOC
help
Use the zsmalloc allocator as the default allocator.
@@ -187,15 +186,10 @@ config Z3FOLD
page. It is a ZBUD derivative so the simplicity and determinism are
still there.
-config HAVE_ZSMALLOC
- def_bool y
- depends on MMU
- depends on PAGE_SIZE_LESS_THAN_256KB # we want <= 64 KiB
-
config ZSMALLOC
tristate
prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
- depends on HAVE_ZSMALLOC
+ depends on MMU
help
zsmalloc is a slab-based memory allocator designed to store
pages of various compression levels efficiently. It achieves
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 2d3163e4da96..73a3ec5b21ad 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -20,7 +20,7 @@
* page->index: links together all component pages of a zspage
* For the huge page, this is always 0, so we use this field
* to store handle.
- * page->page_type: PG_zsmalloc, lower 16 bit locate the first object
+ * page->page_type: PGTY_zsmalloc, lower 24 bits locate the first object
* offset in a subpage of a zspage
*
* Usage of struct page flags:
@@ -452,13 +452,7 @@ static inline struct page *get_first_page(struct zspage *zspage)
return first_page;
}
-#define FIRST_OBJ_PAGE_TYPE_MASK 0xffff
-
-static inline void reset_first_obj_offset(struct page *page)
-{
- VM_WARN_ON_ONCE(!PageZsmalloc(page));
- page->page_type |= FIRST_OBJ_PAGE_TYPE_MASK;
-}
+#define FIRST_OBJ_PAGE_TYPE_MASK 0xffffff
static inline unsigned int get_first_obj_offset(struct page *page)
{
@@ -468,8 +462,8 @@ static inline unsigned int get_first_obj_offset(struct page *page)
static inline void set_first_obj_offset(struct page *page, unsigned int offset)
{
- /* With 16 bit available, we can support offsets into 64 KiB pages. */
- BUILD_BUG_ON(PAGE_SIZE > SZ_64K);
+ /* With 24 bits available, we can support offsets into 16 MiB pages. */
+ BUILD_BUG_ON(PAGE_SIZE > SZ_16M);
VM_WARN_ON_ONCE(!PageZsmalloc(page));
VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK);
page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
@@ -808,7 +802,6 @@ static void reset_page(struct page *page)
ClearPagePrivate(page);
set_page_private(page, 0);
page->index = 0;
- reset_first_obj_offset(page);
__ClearPageZsmalloc(page);
}