author		Tejun Heo <tj@kernel.org>	2013-03-12 11:29:58 -0700
committer	Tejun Heo <tj@kernel.org>	2013-03-12 11:29:58 -0700
commit		171169695555831e8cc41dbc1783700868631ea5 (patch)
tree		62e774a7298eadb486a7148796b2e8cd627d6e77 /kernel/workqueue.c
parent		49e3cf44df0663a521aa71e7667c52a9dbd0fce9 (diff)
workqueue: introduce for_each_pool()
With the scheduled unbound pools with custom attributes, there will be multiple unbound pools, so the for_each_wq_cpu() + for_each_std_worker_pool() combination can no longer be used to iterate through all pools.

Introduce for_each_pool(), which iterates through all pools using worker_pool_idr, and use it in place of the for_each_wq_cpu() + for_each_std_worker_pool() combination in freeze_workqueues_begin().

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
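For illustration only, not part of the patch: a minimal sketch of what the new iterator means for a caller, using a hypothetical helper count_all_pools() (the helper name is an assumption, and locking is elided for brevity). The old scheme nested the per-standard-pool walk inside the per-CPU walk and so could not reach the additional unbound pools that custom attributes will introduce; the new macro simply walks every pool registered in worker_pool_idr.

/* Hypothetical helper for illustration; not in the kernel tree. */
static int count_all_pools(void)
{
	struct worker_pool *pool;
	int id, nr = 0;

	/*
	 * Old style (pre-patch), shown only as a comment:
	 *
	 *	unsigned int cpu;
	 *
	 *	for_each_wq_cpu(cpu)
	 *		for_each_std_worker_pool(pool, cpu)
	 *			nr++;
	 */

	/* New style: visit every pool installed in worker_pool_idr. */
	for_each_pool(pool, id)
		nr++;

	return nr;
}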
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	36
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2db1532b09dc..55494e3f9f3b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -295,6 +295,14 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 	(cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
 
 /**
+ * for_each_pool - iterate through all worker_pools in the system
+ * @pool: iteration cursor
+ * @id: integer used for iteration
+ */
+#define for_each_pool(pool, id)						\
+	idr_for_each_entry(&worker_pool_idr, pool, id)
+
+/**
  * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
  * @pwq: iteration cursor
  * @wq: the target workqueue
@@ -3586,33 +3594,31 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  */
 void freeze_workqueues_begin(void)
 {
-	unsigned int cpu;
+	struct worker_pool *pool;
+	int id;
 
 	spin_lock_irq(&workqueue_lock);
 
 	WARN_ON_ONCE(workqueue_freezing);
 	workqueue_freezing = true;
 
-	for_each_wq_cpu(cpu) {
-		struct worker_pool *pool;
+	for_each_pool(pool, id) {
 		struct workqueue_struct *wq;
 
-		for_each_std_worker_pool(pool, cpu) {
-			spin_lock(&pool->lock);
-
-			WARN_ON_ONCE(pool->flags & POOL_FREEZING);
-			pool->flags |= POOL_FREEZING;
+		spin_lock(&pool->lock);
 
-			list_for_each_entry(wq, &workqueues, list) {
-				struct pool_workqueue *pwq = get_pwq(cpu, wq);
+		WARN_ON_ONCE(pool->flags & POOL_FREEZING);
+		pool->flags |= POOL_FREEZING;
 
-				if (pwq && pwq->pool == pool &&
-				    (wq->flags & WQ_FREEZABLE))
-					pwq->max_active = 0;
-			}
+		list_for_each_entry(wq, &workqueues, list) {
+			struct pool_workqueue *pwq = get_pwq(pool->cpu, wq);
 
-			spin_unlock(&pool->lock);
+			if (pwq && pwq->pool == pool &&
+			    (wq->flags & WQ_FREEZABLE))
+				pwq->max_active = 0;
 		}
+
+		spin_unlock(&pool->lock);
 	}
 
 	spin_unlock_irq(&workqueue_lock);
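For reference, and not part of this patch: for_each_pool() is a thin wrapper around the generic IDR iterator, so the converted freeze path visits whatever pools have been installed in worker_pool_idr, standard or unbound, without caring how many exist per CPU. A simplified, approximate sketch of how idr_for_each_entry() from include/linux/idr.h expands around the time of this commit:

/* Simplified/approximate expansion of the underlying IDR iterator. */
#define idr_for_each_entry(idp, entry, id)				\
	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)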