author | Tejun Heo <tj@kernel.org> | 2012-03-05 13:15:24 -0800 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2012-03-06 21:27:24 +0100 |
commit | 24acfc34fba0b4f62ef9d5c2616eb0faa802b606 (patch) | |
tree | 42d07b0e4ad922b24853fe542cb9ab543aa8174c /block/blk-ioc.c | |
parent | b679281a6410676a41b175c5a185150a1ae42f9d (diff) | |
block: interface update for ioc/icq creation functions
Make the following interface updates to prepare for future ioc related
changes.
* create_io_context() returning ioc only works for %current because it
doesn't increment ref on the ioc. Drop @task parameter from it and
always assume %current.
* Make create_io_context_slowpath() return 0 or -errno and rename it
to create_task_io_context().
* Make ioc_create_icq() take @ioc as parameter instead of assuming
that of %current. The caller, get_request(), is updated to create
ioc explicitly and then pass it into ioc_create_icq().
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
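The get_request() side of this change lives in block/blk-core.c and is not shown in the diff below, which is limited to block/blk-ioc.c. As a rough, non-authoritative sketch of the calling convention the changelog describes, a caller is now expected to create %current's io_context first and then hand it to ioc_create_icq(); the helper name and error handling here are illustrative assumptions, not the actual patch.

```c
/*
 * Illustrative sketch only -- not from this patch.  Shows the intended
 * calling convention: create %current's io_context first, then pass it
 * explicitly to ioc_create_icq().  Assumes the declarations from
 * block/blk.h and block/blk-ioc.c at this point in the tree.
 */
static struct io_cq *example_prepare_icq(struct request_queue *q,
					 gfp_t gfp_mask)
{
	struct io_context *ioc;

	/*
	 * After this patch create_io_context() takes no @task argument;
	 * it always operates on %current and returns the (unreferenced)
	 * ioc, or NULL if allocation failed.  The returned pointer is
	 * safe to use here because it belongs to %current.
	 */
	ioc = create_io_context(gfp_mask, q->node);
	if (!ioc)
		return NULL;

	/* icq creation now takes @ioc explicitly instead of using %current */
	return ioc_create_icq(ioc, q, gfp_mask);
}
```

Splitting ioc creation from icq creation this way keeps ioc_create_icq() free of any %current assumptions, which is the preparation the changelog refers to for the future ioc-related changes.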
Diffstat (limited to 'block/blk-ioc.c')
-rw-r--r-- | block/blk-ioc.c | 22 |
1 file changed, 10 insertions, 12 deletions
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 92bf55540d87..10928740b5da 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -212,15 +212,14 @@ void ioc_clear_queue(struct request_queue *q)
 	}
 }
 
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
-				int node)
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 {
 	struct io_context *ioc;
 
 	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
 				    node);
 	if (unlikely(!ioc))
-		return;
+		return -ENOMEM;
 
 	/* initialize */
 	atomic_long_set(&ioc->refcount, 1);
@@ -244,6 +243,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
 	else
 		kmem_cache_free(iocontext_cachep, ioc);
 	task_unlock(task);
+
+	return 0;
 }
 
 /**
@@ -275,7 +276,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
 			return ioc;
 		}
 		task_unlock(task);
-	} while (create_io_context(task, gfp_flags, node));
+	} while (!create_task_io_context(task, gfp_flags, node));
 
 	return NULL;
 }
@@ -319,26 +320,23 @@ EXPORT_SYMBOL(ioc_lookup_icq);
 
 /**
  * ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
  * @q: request_queue of interest
  * @gfp_mask: allocation mask
  *
- * Make sure io_cq linking %current->io_context and @q exists. If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ * Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, they
+ * will be created using @gfp_mask.
 *
  * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
  */
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask)
 {
 	struct elevator_type *et = q->elevator->type;
-	struct io_context *ioc;
 	struct io_cq *icq;
 
 	/* allocate stuff */
-	ioc = create_io_context(current, gfp_mask, q->node);
-	if (!ioc)
-		return NULL;
-
 	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
 				    q->node);
 	if (!icq)