author    Tejun Heo <tj@kernel.org>	2013-03-12 11:29:59 -0700
committer Tejun Heo <tj@kernel.org>	2013-03-12 11:29:59 -0700
commit    420c0ddb1f205a3511b766d0dfee2cc87ed9dae0 (patch)
tree      b40f69b265def3d67d2ea67b06584bc2e7437678 /kernel/workqueue.c
parent    d84ff0512f1bfc0d8c864efadb4523fce68919cc (diff)
workqueue: remove workqueue_struct->pool_wq.single
The workqueue->pool_wq union is used to point to either the percpu pwqs
(pool_workqueues) or the single unbound pwq.  As the first pwq can be
accessed via the workqueue->pwqs list, there's no reason for the single
pointer anymore.

Use list_first_entry(workqueue->pwqs) to access the unbound pwq, and drop
the workqueue->pool_wq.single pointer along with the pool_wq union.  This
simplifies the code and eases implementing multiple unbound pools with
custom attributes.

This patch doesn't introduce any visible behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
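For reference, the list_first_entry() pattern the message relies on can be seen in a minimal, self-contained userspace sketch. This is not kernel code: the list helpers below are simplified stand-ins for the kernel's <linux/list.h>, and the two structs are trimmed to just the fields the pattern needs.

#include <stddef.h>
#include <stdio.h>

/* Simplified userspace stand-ins for the kernel's list helpers. */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* list_first_entry(): the helper the patch uses instead of the dropped
 * pool_wq.single pointer -- the first node on wq->pwqs *is* the single
 * unbound pwq, so a dedicated pointer is redundant. */
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

/* Toy versions of the two structs, reduced for illustration. */
struct pool_workqueue {
	int id;
	struct list_head pwqs_node;	/* link on wq->pwqs */
};

struct workqueue_struct {
	struct list_head pwqs;		/* all pwqs of this wq */
};

int main(void)
{
	struct workqueue_struct wq = { .pwqs = LIST_HEAD_INIT(wq.pwqs) };
	struct pool_workqueue pwq = { .id = 42 };

	/* What alloc_and_link_pwqs() does for the unbound case ... */
	list_add_tail(&pwq.pwqs_node, &wq.pwqs);

	/* ... and how get_pwq()/free_pwqs() find the pwq again. */
	struct pool_workqueue *first =
		list_first_entry(&wq.pwqs, struct pool_workqueue, pwqs_node);
	printf("first pwq id = %d\n", first->id);	/* prints 42 */
	return 0;
}

One consequence of dropping the dedicated pointer is that an empty list must now be handled explicitly; that is why the patch below guards the unbound branch of free_pwqs() with list_empty().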
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c | 26 ++++++++++++--------------
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 73c5f68065b5..acee7b525d51 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -188,11 +188,7 @@ struct wq_flusher {
  */
 struct workqueue_struct {
 	unsigned int		flags;		/* W: WQ_* flags */
-	union {
-		struct pool_workqueue __percpu	*pcpu;
-		struct pool_workqueue		*single;
-		unsigned long			v;
-	} pool_wq;				/* I: pwq's */
+	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwq's */
 	struct list_head	pwqs;		/* I: all pwqs of this wq */
 	struct list_head	list;		/* W: list of all workqueues */
@@ -471,9 +467,11 @@ static struct pool_workqueue *get_pwq(int cpu, struct workqueue_struct *wq)
 {
 	if (!(wq->flags & WQ_UNBOUND)) {
 		if (likely(cpu < nr_cpu_ids))
-			return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
-	} else if (likely(cpu == WORK_CPU_UNBOUND))
-		return wq->pool_wq.single;
+			return per_cpu_ptr(wq->cpu_pwqs, cpu);
+	} else if (likely(cpu == WORK_CPU_UNBOUND)) {
+		return list_first_entry(&wq->pwqs, struct pool_workqueue,
+					pwqs_node);
+	}
 	return NULL;
 }
@@ -3085,8 +3083,8 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 	int cpu;
 
 	if (!(wq->flags & WQ_UNBOUND)) {
-		wq->pool_wq.pcpu = alloc_percpu(struct pool_workqueue);
-		if (!wq->pool_wq.pcpu)
+		wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
+		if (!wq->cpu_pwqs)
 			return -ENOMEM;
 
 		for_each_possible_cpu(cpu) {
@@ -3102,7 +3100,6 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 		if (!pwq)
 			return -ENOMEM;
 
-		wq->pool_wq.single = pwq;
 		pwq->pool = get_std_worker_pool(WORK_CPU_UNBOUND, highpri);
 		list_add_tail(&pwq->pwqs_node, &wq->pwqs);
 	}
@@ -3113,9 +3110,10 @@
 static void free_pwqs(struct workqueue_struct *wq)
 {
 	if (!(wq->flags & WQ_UNBOUND))
-		free_percpu(wq->pool_wq.pcpu);
-	else
-		kmem_cache_free(pwq_cache, wq->pool_wq.single);
+		free_percpu(wq->cpu_pwqs);
+	else if (!list_empty(&wq->pwqs))
+		kmem_cache_free(pwq_cache, list_first_entry(&wq->pwqs,
+					struct pool_workqueue, pwqs_node));
 }
 
 static int wq_clamp_max_active(int max_active, unsigned int flags,