author    Tejun Heo <tj@kernel.org>    2013-04-01 11:23:35 -0700
committer Tejun Heo <tj@kernel.org>    2013-04-01 11:23:35 -0700
commit e50aba9aea63b7617887b4d9694184f478731c82 (patch)
tree   68649f81762c3088c535d3bdb4743c2d96f6c7eb /kernel/workqueue.c
parent f147f29eb7c4959e5f8be604ce2d23979c86378c (diff)
workqueue: use NUMA-aware allocation for pool_workqueues
Use kmem_cache_alloc_node() with @pool->node instead of
kmem_cache_zalloc() when allocating a pool_workqueue so that it's
allocated on the same node as the associated worker_pool.  As there's
no kmem_cache_zalloc_node(), move zeroing to init_pwq().

This was suggested by Lai Jiangshan.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
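Taken in isolation, the pattern the message describes can be sketched as a
hypothetical helper (cache_zalloc_on_node() is not a real slab API; its name
and shape are assumptions for illustration): allocate from the cache on the
requested NUMA node, then zero the object by hand, since the slab layer
offers kmem_cache_zalloc() and kmem_cache_alloc_node() but no combined
kmem_cache_zalloc_node().

#include <linux/slab.h>
#include <linux/string.h>

/*
 * Hypothetical helper, for illustration only: node-local allocation
 * followed by explicit zeroing, standing in for the missing
 * kmem_cache_zalloc_node().
 */
static void *cache_zalloc_on_node(struct kmem_cache *cache, gfp_t gfp, int node)
{
	void *p = kmem_cache_alloc_node(cache, gfp, node);

	if (p)
		memset(p, 0, kmem_cache_size(cache));	/* zero the whole object */
	return p;
}

The patch takes a slightly different route: the memset() moves into
init_pwq(), so every pwq passed through init_pwq() is zeroed no matter how
it was allocated, and the allocation site below stays a plain
kmem_cache_alloc_node() call.  Passing __GFP_ZERO to kmem_cache_alloc_node()
would be another way to get a zeroed node-local object (kmem_cache_zalloc()
is implemented in those terms), but the explicit memset() keeps the zeroing
visible at initialization time.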
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c8d047b6c895..07ec57459457 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3626,12 +3626,14 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 	spin_unlock_irq(&pwq->pool->lock);
 }
 
-/* initialize newly zalloced @pwq which is associated with @wq and @pool */
+/* initialize newly alloced @pwq which is associated with @wq and @pool */
 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
 		     struct worker_pool *pool)
 {
 	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
 
+	memset(pwq, 0, sizeof(*pwq));
+
 	pwq->pool = pool;
 	pwq->wq = wq;
 	pwq->flush_color = -1;
@@ -3677,7 +3679,7 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
 	if (!pool)
 		return NULL;
 
-	pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
+	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
 	if (!pwq) {
 		put_unbound_pool(pool);
 		return NULL;