author		Tejun Heo <tj@kernel.org>	2013-01-24 11:01:33 -0800
committer	Tejun Heo <tj@kernel.org>	2013-01-24 11:01:33 -0800
commit		a1056305fa98c7e13b38718658a8b07a5d926460 (patch)
tree		d20ce512fdd0e3f07d972d62ecc9cb357c3db69e /kernel/workqueue.c
parent		94cf58bb2907bd2702fce2266955e29ab5261f53 (diff)
workqueue: make freezing/thawing per-pool
Instead of holding locks from both pools and then processing the pools together, make freezing/thawing per-pool - grab the lock of one pool, process it, release it and then proceed to the next pool.

While this patch changes the processing order across pools, the order within each pool remains the same.  As each pool is independent, this shouldn't break anything.

This is part of an effort to remove global_cwq and make worker_pool the top level abstraction, which in turn will help implement worker pools with user-specified attributes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
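The core of the change is a locking-pattern swap: rather than acquiring every pool's lock up front and releasing them all at the end, each pool is locked, processed and unlocked on its own. Below is a minimal userspace sketch of that before/after pattern, using pthreads in place of kernel spinlocks; struct pool, NR_POOLS and both freeze functions are stand-ins for illustration, not kernel code.

/*
 * Illustrative userspace analogue of the locking-pattern change.
 * All names here are invented; this is not the kernel's code.
 */
#include <pthread.h>

#define NR_POOLS 2

struct pool {
	pthread_mutex_t lock;
	int frozen;
};

static struct pool pools[NR_POOLS] = {
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
};

/* Old pattern: take every pool's lock, process them together, unlock all. */
static void freeze_locked_together(void)
{
	int i;

	for (i = 0; i < NR_POOLS; i++)
		pthread_mutex_lock(&pools[i].lock);
	for (i = 0; i < NR_POOLS; i++)
		pools[i].frozen = 1;
	for (i = 0; i < NR_POOLS; i++)
		pthread_mutex_unlock(&pools[i].lock);
}

/* New pattern: lock one pool, process it, unlock, move to the next. */
static void freeze_per_pool(void)
{
	int i;

	for (i = 0; i < NR_POOLS; i++) {
		pthread_mutex_lock(&pools[i].lock);
		pools[i].frozen = 1;
		pthread_mutex_unlock(&pools[i].lock);
	}
}

int main(void)
{
	freeze_locked_together();
	freeze_per_pool();
	return 0;
}

Relative ordering between pool 0 and pool 1 changes in the second form, but everything guarded by one pool's lock is still processed in its original order, which is why the commit message argues the change is safe.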
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c | 46 ++++++++++++++++++++++--------------------------
 1 file changed, 20 insertions(+), 26 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index fd400f8c9514..b609bfba134b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3686,25 +3686,22 @@ void freeze_workqueues_begin(void)
 		struct worker_pool *pool;
 		struct workqueue_struct *wq;
 
-		local_irq_disable();
-
 		for_each_worker_pool(pool, gcwq) {
-			spin_lock_nested(&pool->lock, pool - gcwq->pools);
+			spin_lock_irq(&pool->lock);
 
 			WARN_ON_ONCE(pool->flags & POOL_FREEZING);
 			pool->flags |= POOL_FREEZING;
-		}
 
-		list_for_each_entry(wq, &workqueues, list) {
-			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+			list_for_each_entry(wq, &workqueues, list) {
+				struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (cwq && wq->flags & WQ_FREEZABLE)
-				cwq->max_active = 0;
-		}
+				if (cwq && cwq->pool == pool &&
+				    (wq->flags & WQ_FREEZABLE))
+					cwq->max_active = 0;
+			}
 
-		for_each_worker_pool(pool, gcwq)
-			spin_unlock(&pool->lock);
-		local_irq_enable();
+			spin_unlock_irq(&pool->lock);
+		}
 	}
 
 	spin_unlock(&workqueue_lock);
@@ -3779,30 +3776,27 @@ void thaw_workqueues(void)
 		struct worker_pool *pool;
 		struct workqueue_struct *wq;
 
-		local_irq_disable();
-
 		for_each_worker_pool(pool, gcwq) {
-			spin_lock_nested(&pool->lock, pool - gcwq->pools);
+			spin_lock_irq(&pool->lock);
 
 			WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
 			pool->flags &= ~POOL_FREEZING;
-		}
 
-		list_for_each_entry(wq, &workqueues, list) {
-			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+			list_for_each_entry(wq, &workqueues, list) {
+				struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZABLE))
-				continue;
+				if (!cwq || cwq->pool != pool ||
+				    !(wq->flags & WQ_FREEZABLE))
+					continue;
 
-			/* restore max_active and repopulate worklist */
-			cwq_set_max_active(cwq, wq->saved_max_active);
-		}
+				/* restore max_active and repopulate worklist */
+				cwq_set_max_active(cwq, wq->saved_max_active);
+			}
 
-		for_each_worker_pool(pool, gcwq) {
 			wake_up_worker(pool);
-			spin_unlock(&pool->lock);
+
+			spin_unlock_irq(&pool->lock);
 		}
-		local_irq_enable();
 	}
 
 	workqueue_freezing = false;
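Two details in the diff are worth spelling out. First, the old code held the pool->lock of every pool in a gcwq at once; since those locks share one lockdep class, each acquisition had to use spin_lock_nested() with a distinct subclass (pool - gcwq->pools, the pool's index) to avoid a false recursive-locking report. With only one pool lock held at a time, plain spin_lock_irq() suffices, and the manual local_irq_disable()/local_irq_enable() bracketing folds into the _irq lock variants. Second, the freeze/thaw mechanism itself works through cwq->max_active: freezing forces it to 0 so no new work item may start, and thawing restores wq->saved_max_active and re-dispatches parked work. A hypothetical, simplified model of that gate follows; fake_cwq and its helpers are invented names, not the kernel's API.

/* Hypothetical model of the max_active gate; not the kernel's types. */
struct fake_cwq {
	int nr_active;   /* work items currently executing */
	int max_active;  /* 0 while frozen: nothing new may start */
	int nr_delayed;  /* work items parked until a slot opens */
};

/* Freeze analogue: close the gate so no further work is dispatched. */
static void fake_freeze(struct fake_cwq *cwq)
{
	cwq->max_active = 0;
}

/* Thaw analogue: reopen the gate, then re-dispatch parked work. */
static void fake_set_max_active(struct fake_cwq *cwq, int max_active)
{
	cwq->max_active = max_active;
	while (cwq->nr_delayed > 0 && cwq->nr_active < cwq->max_active) {
		cwq->nr_delayed--;
		cwq->nr_active++;  /* kernel: move work to the pool's worklist */
	}
}

int main(void)
{
	struct fake_cwq cwq = { .nr_active = 0, .max_active = 0, .nr_delayed = 3 };

	fake_freeze(&cwq);
	fake_set_max_active(&cwq, 2);  /* drains 2 of the 3 parked items */
	return 0;
}

In the kernel, cwq_set_max_active() performs the equivalent restore-and-drain under the pool lock, and wake_up_worker() then kicks a worker to run the repopulated worklist.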