author    Chengming Zhou <zhouchengming@bytedance.com>    2023-11-02 03:23:24 +0000
committer Vlastimil Babka <vbabka@suse.cz>    2023-11-22 15:36:25 +0100
commit    8a399e2f60037ed07a55278e39b20e43dea4f0c2
tree      582b006faaae0f1a0ecff398ebe86b1b3510a878 /mm/slub.c
parent    43c4c349149c77f27c8e5801755a7b8883a70ebe
slub: Keep track of whether slub is on the per-node partial list
Now we rely on the "frozen" bit to see if we should manipulate the slab->slab_list, which will be changed in the following patch.

Instead we introduce another way to keep track of whether slub is on the per-node partial list: here we reuse the PG_workingset bit.

We have to use the atomic set_bit() and clear_bit() variants and change slab_unlock() to bit_spin_unlock(), because when cmpxchg is not available and PG_locked is used, there may be concurrent operations on the two bits. Thanks to Mark Brown for reporting a hang, and for testing a previous version where the non-atomic operations were used.

Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
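Why the atomic variants matter: bit_spin_lock()/bit_spin_unlock() and set_bit()/clear_bit() all operate on the same page->flags word, so a non-atomic read-modify-write of that word can silently discard a concurrent update to another bit. Below is a user-space sketch of that lost-update window; it is not kernel code, and the LOCKED/WORKINGSET constants and thread functions are illustrative stand-ins for PG_locked and PG_workingset (build with -pthread).

/*
 * Sketch: thread A "unlocks" with a plain load/store pair, modeling the
 * non-atomic RMW of __bit_spin_unlock() when cmpxchg is not available;
 * thread B concurrently does an atomic set on another bit of the same
 * word, modeling slab_set_node_partial(). B's update may be lost.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define LOCKED     (1UL << 0)	/* stand-in for PG_locked */
#define WORKINGSET (1UL << 1)	/* stand-in for PG_workingset */
#define ITERS      1000000

static _Atomic unsigned long flags;

static void *unlocker(void *arg)
{
	for (int i = 0; i < ITERS; i++) {
		/* bit_spin_lock(): only one locker here, so no spin loop */
		atomic_fetch_or(&flags, LOCKED);
		/* non-atomic unlock: plain read... */
		unsigned long v = atomic_load_explicit(&flags,
						       memory_order_relaxed);
		/* ...then plain write; a set of WORKINGSET in between
		 * is overwritten by this store */
		atomic_store_explicit(&flags, v & ~LOCKED,
				      memory_order_release);
	}
	return NULL;
}

static void *setter(void *arg)
{
	for (int i = 0; i < ITERS; i++)
		atomic_fetch_or(&flags, WORKINGSET);	/* atomic set_bit() */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, unlocker, NULL);
	pthread_create(&b, NULL, setter, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* With the racy unlock, the last WORKINGSET set may be lost. */
	printf("WORKINGSET %s\n",
	       (atomic_load(&flags) & WORKINGSET) ? "survived" : "was lost");
	return 0;
}

Replacing the load/store pair in unlocker() with a single atomic_fetch_and(&flags, ~LOCKED), the analogue of the bit_spin_unlock() this patch switches to, closes that window.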
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 24 +++++++++++++++++++++++-
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/mm/slub.c b/mm/slub.c
index 03384cd965c5..6efcbf79fd2d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -522,7 +522,7 @@ static __always_inline void slab_unlock(struct slab *slab)
struct page *page = slab_page(slab);
VM_BUG_ON_PAGE(PageTail(page), page);
- __bit_spin_unlock(PG_locked, &page->flags);
+ bit_spin_unlock(PG_locked, &page->flags);
}
static inline bool
@@ -2117,6 +2117,25 @@ static void discard_slab(struct kmem_cache *s, struct slab *slab)
}
/*
+ * SLUB reuses PG_workingset bit to keep track of whether it's on
+ * the per-node partial list.
+ */
+static inline bool slab_test_node_partial(const struct slab *slab)
+{
+ return folio_test_workingset((struct folio *)slab_folio(slab));
+}
+
+static inline void slab_set_node_partial(struct slab *slab)
+{
+ set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+}
+
+static inline void slab_clear_node_partial(struct slab *slab)
+{
+ clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+}
+
+/*
* Management of partially allocated slabs.
*/
static inline void
@@ -2127,6 +2146,7 @@ __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
list_add_tail(&slab->slab_list, &n->partial);
else
list_add(&slab->slab_list, &n->partial);
+ slab_set_node_partial(slab);
}
static inline void add_partial(struct kmem_cache_node *n,
@@ -2141,6 +2161,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
{
lockdep_assert_held(&n->list_lock);
list_del(&slab->slab_list);
+ slab_clear_node_partial(slab);
n->nr_partial--;
}
@@ -4833,6 +4854,7 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
if (free == slab->objects) {
list_move(&slab->slab_list, &discard);
+ slab_clear_node_partial(slab);
n->nr_partial--;
dec_slabs_node(s, node, slab->objects);
} else if (free <= SHRINK_PROMOTE_MAX)
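Note that this commit only records list membership; nothing reads the bit yet, and the "following patch" the message refers to is what starts consulting it instead of the "frozen" bit. As a hedged illustration only, a later caller could use the new helper as in the hypothetical function below; only slab_test_node_partial(), remove_partial() and n->list_lock come from this patch, and the lockdep_assert_held() in remove_partial() above is what makes the locking assumption explicit.

/*
 * Hypothetical caller, for illustration only: __add_partial() and
 * remove_partial() flip the PG_workingset bit under n->list_lock, so a
 * holder of that lock can test membership before touching
 * slab->slab_list instead of relying on the "frozen" bit.
 */
static bool try_remove_node_partial(struct kmem_cache_node *n,
				    struct slab *slab)
{
	unsigned long irq_flags;
	bool removed = false;

	spin_lock_irqsave(&n->list_lock, irq_flags);
	if (slab_test_node_partial(slab)) {
		remove_partial(n, slab);
		removed = true;
	}
	spin_unlock_irqrestore(&n->list_lock, irq_flags);

	return removed;
}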