author		Shaohua Li <shli@kernel.org>	2013-12-31 11:38:27 +0800
committer	Jens Axboe <axboe@kernel.dk>	2014-01-30 12:57:25 -0700
commit		d835502f3dacad1638d516ab156d66f0ba377cf5 (patch)
tree		1a82543b7c14120251a4bceaa08f5cc9f86ecf43 /lib/percpu_ida.c
parent		53d8ab29f8f6d67e37857b68189b38fa3d87dd8e (diff)
percpu_ida: fix a live lock
steal_tags() only steals when the number of free tags is more than half of the total tags. This is too strict and can cause a live lock: if one cpu has free tags but other cpus can't steal them (their threads are bound to specific cpus), threads which want to allocate tags are left sleeping forever.

I found this while running the next patch, but I think it could happen without it as well. I also ran a performance test with null_blk; in both cases (each cpu has enough percpu tags, or the total tags are limited), no performance change was observed.

Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
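To see why the old guard can live-lock, here is a small standalone sketch of the two loop conditions. The numbers (128 total tags, 8 tags cached per cpu, 2 cpus holding tags) are hypothetical, chosen only to show the effect; they are not taken from the commit.

/* Illustration only: hypothetical pool sizes, not kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned nr_tags = 128;		/* total tags in the pool */
	unsigned percpu_max_size = 8;	/* max tags cached on one cpu's freelist */
	unsigned cpus_have_tags = 2;	/* cpus whose freelists are non-empty */

	/* Old guard: 2 * 8 = 16 is not > 128 / 2 = 64, so stealing never
	 * starts even though two cpus are sitting on free tags. */
	if (cpus_have_tags * percpu_max_size > nr_tags / 2)
		printf("old guard: steal\n");
	else
		printf("old guard: give up, allocator sleeps\n");

	/* New guard: steal whenever any cpu has tags at all. */
	if (cpus_have_tags)
		printf("new guard: steal\n");

	return 0;
}

With the new guard, a single remote cpu holding tags is enough for steal_tags() to make progress, so allocators are no longer stuck waiting for tags that exist but can never be stolen.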
Diffstat (limited to 'lib/percpu_ida.c')
-rw-r--r--	lib/percpu_ida.c	7
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 9d054bf91d0f..85f43b1718d4 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr,
 /*
  * Try to steal tags from a remote cpu's percpu freelist.
  *
- * We first check how many percpu freelists have tags - we don't steal tags
- * unless enough percpu freelists have tags on them that it's possible more than
- * half the total tags could be stuck on remote percpu freelists.
+ * We first check how many percpu freelists have tags
  *
  * Then we iterate through the cpus until we find some tags - we don't attempt
  * to find the "best" cpu to steal from, to keep cacheline bouncing to a
@@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool,
 	struct percpu_ida_cpu *remote;
 
 	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
-	     cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
-	     cpus_have_tags--) {
+	     cpus_have_tags; cpus_have_tags--) {
 		cpu = cpumask_next(cpu, &pool->cpus_have_tags);
 
 		if (cpu >= nr_cpu_ids) {
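For reference, a rough sketch of how the stealing loop reads once this patch is applied. The hunk above is truncated, so the wrap-around of the cpu index and the tag movement at the bottom are simplified assumptions about the surrounding code, not the verbatim kernel source.

	/* Sketch of steal_tags() after the patch (simplified). */
	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
	     cpus_have_tags; cpus_have_tags--) {
		cpu = cpumask_next(cpu, &pool->cpus_have_tags);

		if (cpu >= nr_cpu_ids) {
			/* Assumed wrap-around: restart the scan from the first
			 * cpu that still has tags (not shown in the hunk above). */
			cpu = cpumask_first(&pool->cpus_have_tags);
			if (cpu >= nr_cpu_ids)
				break;
		}
		/* ... lock the remote cpu's freelist and move its tags ... */
	}

The loop now keeps scanning as long as any cpu is marked in cpus_have_tags, instead of bailing out when the remote freelists could not possibly hold more than half of the pool.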