author | Tejun Heo <tj@kernel.org> | 2012-07-12 14:46:37 -0700
committer | Tejun Heo <tj@kernel.org> | 2012-07-12 14:46:37 -0700
commit | 974271c485a4d8bb801decc616748f90aafb07ec (patch)
tree | ed3e41b86293b207127a03bde638e4dd6acf6d58 /kernel
parent | 918227bb1b59444a2c467711fd50cc22bb4a897b (diff)
download | lwn-974271c485a4d8bb801decc616748f90aafb07ec.tar.gz lwn-974271c485a4d8bb801decc616748f90aafb07ec.zip
workqueue: don't use WQ_HIGHPRI for unbound workqueues
Unbound wqs aren't concurrency-managed and try to execute work items
as soon as possible. This is currently achieved by implicitly setting
%WQ_HIGHPRI on all unbound workqueues; however, the WQ_HIGHPRI
implementation is about to be restructured and this usage will no
longer be valid.
Add an explicit chain-wakeup path for unbound workqueues in
process_one_work() instead of piggybacking on %WQ_HIGHPRI.
Signed-off-by: Tejun Heo <tj@kernel.org>
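For context, the wakeup predicate the new path relies on looks roughly like this (a simplified sketch based on the workqueue code of this era, not part of the patch; the helper `get_gcwq_nr_running()` and exact field names are assumptions drawn from the surrounding source):

```c
/*
 * Sketch: unbound workers never increment the per-gcwq nr_running
 * counter, so __need_more_worker() is always true for an unbound gcwq
 * and need_more_worker() reduces to "is the worklist non-empty?".
 */
static bool __need_more_worker(struct global_cwq *gcwq)
{
	return !atomic_read(get_gcwq_nr_running(gcwq->cpu));
}

static bool need_more_worker(struct global_cwq *gcwq)
{
	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
}
```

This is why the explicit wakeup added below is sufficient: for an unbound gcwq, need_more_worker() is true whenever pending work remains, so chaining a wakeup from each executing worker keeps the worklist draining without any priority flag.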
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/workqueue.c | 18
1 file changed, 11 insertions(+), 7 deletions(-)
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9a3128dc67df..27637c284cb9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -580,6 +580,10 @@ static bool __need_more_worker(struct global_cwq *gcwq)
 /*
  * Need to wake up a worker? Called from anything but currently
  * running workers.
+ *
+ * Note that, because unbound workers never contribute to nr_running, this
+ * function will always return %true for unbound gcwq as long as the
+ * worklist isn't empty.
  */
 static bool need_more_worker(struct global_cwq *gcwq)
 {
@@ -1867,6 +1871,13 @@ __acquires(&gcwq->lock)
 	if (unlikely(cpu_intensive))
 		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
 
+	/*
+	 * Unbound gcwq isn't concurrency managed and work items should be
+	 * executed ASAP. Wake up another worker if necessary.
+	 */
+	if ((worker->flags & WORKER_UNBOUND) && need_more_worker(gcwq))
+		wake_up_worker(gcwq);
+
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
@@ -2984,13 +2995,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	if (flags & WQ_MEM_RECLAIM)
 		flags |= WQ_RESCUER;
 
-	/*
-	 * Unbound workqueues aren't concurrency managed and should be
-	 * dispatched to workers immediately.
-	 */
-	if (flags & WQ_UNBOUND)
-		flags |= WQ_HIGHPRI;
-
 	max_active = max_active ?: WQ_DFL_ACTIVE;
 	max_active = wq_clamp_max_active(max_active, flags, wq->name);
 
```
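As a caller-side illustration (a hypothetical module, not part of the patch; the `example_*` names are made up): after this change an unbound workqueue no longer carries an implicit %WQ_HIGHPRI, yet its work items are still dispatched promptly, because process_one_work() wakes another worker whenever more unbound work is pending.

```c
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
	/* Runs in an unbound worker thread; may sleep. */
}

static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
	/* No WQ_HIGHPRI needed (or implied) for prompt execution. */
	example_wq = alloc_workqueue("example_unbound", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;

	/*
	 * Even if example_fn() blocks, the chain wakeup added in
	 * process_one_work() wakes another unbound worker to start
	 * any further queued items.
	 */
	queue_work(example_wq, &example_work);
	return 0;
}
```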