author		Jens Axboe <axboe@suse.de>	2006-07-19 23:39:40 +0200
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2006-09-30 20:29:39 +0200
commit		b5deef901282628d88c784f4c9d2f0583ec3b355 (patch)
tree		1d3be92f18c9afd9426b06739c8f76931acbf03f /block/ll_rw_blk.c
parent		a3b05e8f58c95dfccbf2c824d0c68e5990571f24 (diff)
download	lwn-b5deef901282628d88c784f4c9d2f0583ec3b355.tar.gz
		lwn-b5deef901282628d88c784f4c9d2f0583ec3b355.zip
[PATCH] Make sure all block/io scheduler setups are node aware
Some were kmalloc_node(), some were still kmalloc(). Change them all to
kmalloc_node().

Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--	block/ll_rw_blk.c	13
1 files changed, 7 insertions, 6 deletions
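
For context, a minimal sketch (not part of this patch) of the kind of change the
commit message describes: replacing a plain kmalloc() with kmalloc_node() so that
per-queue data is allocated on the queue's home NUMA node (request_queue_t carries
a ->node field set when the queue is created with blk_alloc_queue_node()). The
io_stats structure and alloc_queue_stats() helper below are made up purely for
illustration.

#include <linux/blkdev.h>
#include <linux/slab.h>

/* Illustrative only: a made-up per-queue structure. */
struct io_stats {
	unsigned long reads;
	unsigned long writes;
};

static struct io_stats *alloc_queue_stats(request_queue_t *q, gfp_t gfp_flags)
{
	/*
	 * Node-unaware variant: the memory may land on whatever NUMA node
	 * the allocating CPU happens to be running on:
	 *
	 *	return kmalloc(sizeof(struct io_stats), gfp_flags);
	 *
	 * Node-aware variant: allocate on the node the queue was set up
	 * for, so the hot path touches node-local memory.
	 */
	return kmalloc_node(sizeof(struct io_stats), gfp_flags, q->node);
}
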
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 4b7b4461e8d6..c6dfa889206c 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -39,6 +39,7 @@ static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
static int __make_request(request_queue_t *q, struct bio *bio);
+static struct io_context *current_io_context(gfp_t gfp_flags, int node);
/*
* For the allocated request tables
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
if (rl->count[rw]+1 >= q->nr_requests) {
- ioc = current_io_context(GFP_ATOMIC);
+ ioc = current_io_context(GFP_ATOMIC, q->node);
/*
* The queue will fill after this allocation, so set
* it as full, and mark this process as "batching".
@@ -2234,7 +2235,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
* up to a big batch of them for a small period time.
* See ioc_batching, ioc_set_batching
*/
- ioc = current_io_context(GFP_NOIO);
+ ioc = current_io_context(GFP_NOIO, q->node);
ioc_set_batching(q, ioc);
spin_lock_irq(q->queue_lock);
@@ -3641,7 +3642,7 @@ void exit_io_context(void)
* but since the current task itself holds a reference, the context can be
* used in general code, so long as it stays within `current` context.
*/
-struct io_context *current_io_context(gfp_t gfp_flags)
+static struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
struct task_struct *tsk = current;
struct io_context *ret;
@@ -3650,7 +3651,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
if (likely(ret))
return ret;
- ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
+ ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
if (ret) {
atomic_set(&ret->refcount, 1);
ret->task = current;
@@ -3674,10 +3675,10 @@ EXPORT_SYMBOL(current_io_context);
*
* This is always called in the context of the task which submitted the I/O.
*/
-struct io_context *get_io_context(gfp_t gfp_flags)
+struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
struct io_context *ret;
- ret = current_io_context(gfp_flags);
+ ret = current_io_context(gfp_flags, node);
if (likely(ret))
atomic_inc(&ret->refcount);
return ret;
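
With this change, get_io_context() and current_io_context() take the target NUMA
node as an extra argument, so callers are expected to pass the node of the queue
they are operating on. A hypothetical caller (not shown in the hunks above) might
look like the sketch below; put_io_context() is the existing release helper for
the reference taken by get_io_context().

	/* Hypothetical caller, for illustration only. */
	struct io_context *ioc;

	ioc = get_io_context(GFP_NOIO, q->node);
	if (ioc) {
		/* ... use the io_context for this task's I/O ... */
		put_io_context(ioc);	/* drop the reference taken above */
	}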