author    Vlastimil Babka <vbabka@suse.cz>    2021-05-20 14:00:03 +0200
committer Vlastimil Babka <vbabka@suse.cz>    2021-09-04 01:12:22 +0200
commit    f3ab8b6b9228176920e1c73fa24d2db62268aa48 (patch)
tree      4dda78a388bdab8165e147fbc255e0f1463b9d93 /mm/slub.c
parent    cfdf836e1f93df56ddd9a1d48b2deadf02f441fe (diff)
mm, slub: move irq control into unfreeze_partials()
unfreeze_partials() can be optimized so that it doesn't need irqs disabled for the whole time. As the first step, move irq control into the function and remove it from the put_cpu_partial() caller.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
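The effect of the change, in short: unfreeze_partials() now saves and restores the irq flags itself, so put_cpu_partial() can call it without any wrapping. Below is a minimal, self-contained C sketch of that calling convention; local_irq_save()/local_irq_restore() are stubbed with printf-based macros and the per-cpu partial list is reduced to a counter, so this is an illustration of the shape of the code, not kernel code.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's irq helpers. */
#define local_irq_save(flags)    do { (flags) = 1; printf("irqs disabled\n"); } while (0)
#define local_irq_restore(flags) do { (void)(flags); printf("irqs restored\n"); } while (0)

static int percpu_partial = 3;  /* pretend length of the cpu partial list */

/* Shape of unfreeze_partials() after the patch: the irq save/restore
 * now brackets the work inside the function itself. */
static void unfreeze_partials_sketch(void)
{
	unsigned long flags;

	local_irq_save(flags);
	while (percpu_partial > 0) {
		printf("unfreezing partial slab %d\n", percpu_partial);
		percpu_partial--;
	}
	local_irq_restore(flags);
}

/* Caller shape matching the simplified put_cpu_partial(): no irq handling. */
int main(void)
{
	unfreeze_partials_sketch();
	return 0;
}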
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index cb12a077c61c..1c4bd45d66a1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2350,9 +2350,8 @@ redo:
/*
* Unfreeze all the cpu partial slabs.
*
- * This function must be called with interrupts disabled
- * for the cpu using c (or some other guarantee must be there
- * to guarantee no concurrent accesses).
+ * This function must be called with preemption or migration
+ * disabled with c local to the cpu.
*/
static void unfreeze_partials(struct kmem_cache *s,
struct kmem_cache_cpu *c)
@@ -2360,6 +2359,9 @@ static void unfreeze_partials(struct kmem_cache *s,
#ifdef CONFIG_SLUB_CPU_PARTIAL
struct kmem_cache_node *n = NULL, *n2 = NULL;
struct page *page, *discard_page = NULL;
+ unsigned long flags;
+
+ local_irq_save(flags);
while ((page = slub_percpu_partial(c))) {
struct page new;
@@ -2412,6 +2414,8 @@ static void unfreeze_partials(struct kmem_cache *s,
discard_slab(s, page);
stat(s, FREE_SLAB);
}
+
+ local_irq_restore(flags);
#endif /* CONFIG_SLUB_CPU_PARTIAL */
}
@@ -2439,14 +2443,11 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > slub_cpu_partial(s)) {
- unsigned long flags;
/*
* partial array is full. Move the existing
* set to the per node partial list.
*/
- local_irq_save(flags);
unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
- local_irq_restore(flags);
oldpage = NULL;
pobjects = 0;
pages = 0;