path: root/lib/debugobjects.c
author    Thomas Gleixner <tglx@linutronix.de>    2024-10-07 18:50:12 +0200
committer Thomas Gleixner <tglx@linutronix.de>    2024-10-15 17:30:32 +0200
commit    9ce99c6d7bfbca71f1e5fa34045ea48cb768f54a (patch)
tree      a0259b1a05e891deb8ff84c2a12f022dc2bfc4ff /lib/debugobjects.c
parent    a3b9e191f5fc11fa93176a4074a919d33d64c5fe (diff)
debugobjects: Rework free_object_work()
Convert it to batch processing with intermediate helper functions. This
reduces the final changes for batch processing.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Zhen Lei <thunder.leizhen@huawei.com>
Link: https://lore.kernel.org/all/20241007164914.015906394@linutronix.de
Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--   lib/debugobjects.c   82
1 file changed, 39 insertions(+), 43 deletions(-)
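The first hunk below adjusts the rate limit described in the file comment: the work item runs at roughly 10Hz and previously freed at most ODEBUG_FREE_WORK_MAX = 1024 objects per invocation, i.e. about 10k objects per second. After the rework the loop in free_obj_work() counts batches rather than individual objects, so ODEBUG_FREE_WORK_MAX becomes (1024 / ODEBUG_BATCH_SIZE) to keep the same per-invocation cap. A minimal user-space sketch of that arithmetic, assuming ODEBUG_BATCH_SIZE is 16 (the value debugobjects defines elsewhere; treat it as an assumption here, not part of this patch):

	/* Rate-limit arithmetic behind the ODEBUG_FREE_WORK_MAX change (sketch). */
	#include <stdio.h>

	#define ODEBUG_BATCH_SIZE	16				/* assumed value */
	#define OLD_FREE_WORK_MAX	1024				/* objects per work run */
	#define NEW_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)	/* batches per work run */

	int main(void)
	{
		unsigned int runs_per_sec = 10;	/* ODEBUG_FREE_WORK_DELAY is roughly HZ/10 */
		unsigned int old_per_run = OLD_FREE_WORK_MAX;
		unsigned int new_per_run = NEW_FREE_WORK_MAX * ODEBUG_BATCH_SIZE;

		printf("old: %u objects/run, ~%u objects/sec\n",
		       old_per_run, old_per_run * runs_per_sec);
		printf("new: %u batches/run -> %u objects/run, ~%u objects/sec\n",
		       NEW_FREE_WORK_MAX, new_per_run, new_per_run * runs_per_sec);
		return 0;
	}

Both variants come out at 1024 objects per run and roughly 10k objects per second, matching the limit stated in the comment.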
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 3700ddfcf76a..d5a853851482 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -35,7 +35,7 @@
* frequency of 10Hz and about 1024 objects for each freeing operation.
* So it is freeing at most 10k debug objects per second.
*/
-#define ODEBUG_FREE_WORK_MAX 1024
+#define ODEBUG_FREE_WORK_MAX (1024 / ODEBUG_BATCH_SIZE)
#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10)
struct debug_bucket {
@@ -158,6 +158,21 @@ static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
return true;
}
+static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
+{
+ if (!src->cnt)
+ return false;
+
+ for (int i = 0; src->cnt && i < ODEBUG_BATCH_SIZE; i++) {
+ struct hlist_node *node = src->objects.first;
+
+ WRITE_ONCE(src->cnt, src->cnt - 1);
+ hlist_del(node);
+ hlist_add_head(node, head);
+ }
+ return true;
+}
+
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
struct debug_obj *obj;
@@ -343,55 +358,36 @@ static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
return obj;
}
-/*
- * workqueue function to free objects.
- *
- * To reduce contention on the global pool_lock, the actual freeing of
- * debug objects will be delayed if the pool_lock is busy.
- */
+/* workqueue function to free objects. */
static void free_obj_work(struct work_struct *work)
{
- struct debug_obj *obj;
- unsigned long flags;
- HLIST_HEAD(tofree);
+ bool free = true;
WRITE_ONCE(obj_freeing, false);
- if (!raw_spin_trylock_irqsave(&pool_lock, flags))
- return;
- if (pool_global.cnt >= pool_global.max_cnt)
- goto free_objs;
-
- /*
- * The objs on the pool list might be allocated before the work is
- * run, so recheck if pool list it full or not, if not fill pool
- * list from the global free list. As it is likely that a workload
- * may be gearing up to use more and more objects, don't free any
- * of them until the next round.
- */
- while (pool_to_free.cnt && pool_global.cnt < pool_global.max_cnt) {
- obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
- hlist_del(&obj->node);
- hlist_add_head(&obj->node, &pool_global.objects);
- WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
- WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
- }
- raw_spin_unlock_irqrestore(&pool_lock, flags);
- return;
+ if (!pool_count(&pool_to_free))
+ return;
-free_objs:
- /*
- * Pool list is already full and there are still objs on the free
- * list. Move remaining free objs to a temporary list to free the
- * memory outside the pool_lock held region.
- */
- if (pool_to_free.cnt) {
- hlist_move_list(&pool_to_free.objects, &tofree);
- WRITE_ONCE(pool_to_free.cnt, 0);
+ for (unsigned int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
+ HLIST_HEAD(tofree);
+
+ /* Acquire and drop the lock for each batch */
+ scoped_guard(raw_spinlock_irqsave, &pool_lock) {
+ if (!pool_to_free.cnt)
+ return;
+
+ /* Refill the global pool if possible */
+ if (pool_move_batch(&pool_global, &pool_to_free)) {
+ /* Don't free as there seems to be demand */
+ free = false;
+ } else if (free) {
+ pool_pop_batch(&tofree, &pool_to_free);
+ } else {
+ return;
+ }
+ }
+ free_object_list(&tofree);
}
- raw_spin_unlock_irqrestore(&pool_lock, flags);
-
- free_object_list(&tofree);
}
static void __free_object(struct debug_obj *obj)
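The rewritten free_obj_work() acquires and drops pool_lock once per batch and stops freeing for the rest of the run as soon as a batch could be recycled into the global pool, on the reasoning that demand for objects appears to be rising. The following is a simplified user-space model of that control flow only: pools are reduced to counters, locking is omitted, and the helper semantics are approximations of (not the actual) kernel code.

	#include <stdbool.h>
	#include <stdio.h>

	#define ODEBUG_BATCH_SIZE	16			/* assumed value */
	#define ODEBUG_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)

	struct pool {
		unsigned int cnt;
		unsigned int max_cnt;
	};

	/* Move one full batch from src to dst if src can supply it and dst has room. */
	static bool pool_move_batch(struct pool *dst, struct pool *src)
	{
		if (src->cnt < ODEBUG_BATCH_SIZE ||
		    dst->cnt + ODEBUG_BATCH_SIZE > dst->max_cnt)
			return false;
		src->cnt -= ODEBUG_BATCH_SIZE;
		dst->cnt += ODEBUG_BATCH_SIZE;
		return true;
	}

	/* Model of the per-batch decision loop in the reworked free_obj_work(). */
	static void free_obj_work_model(struct pool *global, struct pool *to_free)
	{
		bool free = true;

		for (unsigned int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
			if (!to_free->cnt)
				return;

			if (pool_move_batch(global, to_free)) {
				/* Global pool refilled: demand seems to be rising, stop freeing. */
				free = false;
			} else if (free) {
				/* Hand one batch back to the allocator (modeled as dropping it). */
				unsigned int n = to_free->cnt < ODEBUG_BATCH_SIZE ?
						 to_free->cnt : ODEBUG_BATCH_SIZE;
				to_free->cnt -= n;
			} else {
				return;
			}
		}
	}

	int main(void)
	{
		/* Scenario 1: global pool is full, so to-free objects are released. */
		struct pool global1 = { .cnt = 1024, .max_cnt = 1024 };
		struct pool to_free1 = { .cnt = 300 };

		free_obj_work_model(&global1, &to_free1);
		printf("full global:   global=%u to_free=%u\n", global1.cnt, to_free1.cnt);

		/* Scenario 2: global pool has room, so batches are recycled instead. */
		struct pool global2 = { .cnt = 512, .max_cnt = 1024 };
		struct pool to_free2 = { .cnt = 300 };

		free_obj_work_model(&global2, &to_free2);
		printf("hungry global: global=%u to_free=%u\n", global2.cnt, to_free2.cnt);
		return 0;
	}

In the first scenario the model drains the to-free pool; in the second it recycles batches into the global pool and never frees, mirroring the "don't free as there seems to be demand" branch in the patch.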