| | | |
|---|---|---|
| author | Tejun Heo <tj@kernel.org> | 2011-12-14 00:33:40 +0100 |
| committer | Jens Axboe <axboe@kernel.dk> | 2011-12-14 00:33:40 +0100 |
| commit | f2dbd76a0a994bc1d5a3d0e7c844cc373832e86c (patch) | |
| tree | cae6a4333ee6e5eb76ef133dfdee95e1943c0ab1 /block/blk-core.c | |
| parent | 1238033c79e92e5c315af12e45396f1a78c73dec (diff) | |
block, cfq: replace current_io_context() with create_io_context()
When called under queue_lock, current_io_context() triggers a lockdep
warning if it hits the allocation path. This is because io_context
installation is protected by task_lock, which is not IRQ safe, so it
triggers an irq-unsafe-lock -> irq -> irq-safe-lock -> irq-unsafe-lock
deadlock warning.
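As a rough illustration of the nesting lockdep objects to (not code from this patch; the wrapper function is hypothetical, while current_io_context() and the locking calls match the old call site shown in the diff below):

```c
/* Hypothetical illustration of the old, problematic nesting. */
static void old_style_example(struct request_queue *q)
{
	struct io_context *ioc;

	spin_lock_irq(q->queue_lock);		/* queue_lock is IRQ-safe */

	/*
	 * If current has no io_context yet, current_io_context() allocates
	 * one and installs it under task_lock(), an IRQ-unsafe spinlock.
	 * lockdep then sees an IRQ-unsafe lock nested inside an IRQ-safe
	 * one and reports the potential deadlock chain described above.
	 */
	ioc = current_io_context(GFP_ATOMIC, q->node);
	(void)ioc;				/* request allocation elided */

	spin_unlock_irq(q->queue_lock);
}
```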
Given this restriction, an accessor and creator rolled into one doesn't
work well. Drop current_io_context() and let users access
task->io_context directly inside queue_lock, combined with explicit
creation using create_io_context().
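A minimal sketch of the resulting caller-side pattern, assuming only the interfaces visible in the diff below (the wrapper function and its flow are illustrative, not taken from the patch):

```c
/* Illustrative only: explicit creation outside queue_lock, plain access inside. */
static void new_style_example(struct request_queue *q)
{
	struct io_context *ioc;

	spin_lock_irq(q->queue_lock);
	ioc = current->io_context;	/* plain pointer access, no allocation */

	if (!ioc) {
		/* Creation may take task_lock(), so drop queue_lock first. */
		spin_unlock_irq(q->queue_lock);
		create_io_context(current, GFP_NOIO, q->node);
		spin_lock_irq(q->queue_lock);

		/* Queue state may have changed meanwhile; callers retest. */
		ioc = current->io_context;
	}

	spin_unlock_irq(q->queue_lock);
}
```

The key point is that dropping queue_lock forces callers to retest queue state afterwards, which is exactly what the retry loop added to get_request() below does.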
Future ioc updates will further consolidate ioc access and the create
interface will be unexported.
While at it, relocate ioc internal interface declarations in blk.h and
add section comments before and after.
This patch does not introduce any functional change.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 25 |
1 file changed, 20 insertions, 5 deletions
```diff
diff --git a/block/blk-core.c b/block/blk-core.c
index fd4749391e17..6804fdf27eff 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -771,9 +771,12 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
-	struct io_context *ioc = NULL;
+	struct io_context *ioc;
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	bool retried = false;
 	int may_queue;
+retry:
+	ioc = current->io_context;
 
 	if (unlikely(blk_queue_dead(q)))
 		return NULL;
@@ -784,7 +787,20 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 
 	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
 		if (rl->count[is_sync]+1 >= q->nr_requests) {
-			ioc = current_io_context(GFP_ATOMIC, q->node);
+			/*
+			 * We want ioc to record batching state. If it's
+			 * not already there, creating a new one requires
+			 * dropping queue_lock, which in turn requires
+			 * retesting conditions to avoid queue hang.
+			 */
+			if (!ioc && !retried) {
+				spin_unlock_irq(q->queue_lock);
+				create_io_context(current, gfp_mask, q->node);
+				spin_lock_irq(q->queue_lock);
+				retried = true;
+				goto retry;
+			}
+
 			/*
 			 * The queue will fill after this allocation, so set
 			 * it as full, and mark this process as "batching".
@@ -892,7 +908,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
-		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
 		if (unlikely(blk_queue_dead(q)))
@@ -912,8 +927,8 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		 * up to a big batch of them for a small period time.
 		 * See ioc_batching, ioc_set_batching
 		 */
-		ioc = current_io_context(GFP_NOIO, q->node);
-		ioc_set_batching(q, ioc);
+		create_io_context(current, GFP_NOIO, q->node);
+		ioc_set_batching(q, current->io_context);
 
 		spin_lock_irq(q->queue_lock);
 		finish_wait(&rl->wait[is_sync], &wait);
```
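The definition of create_io_context() itself lives outside block/blk-core.c and is therefore not part of this diffstat. As a hedged sketch inferred purely from the call sites above (the slow-path helper name is hypothetical and the real definition may differ):

```c
/*
 * Hedged sketch, not the kernel's actual code: a creation helper shaped
 * by its call sites above.  alloc_and_install_io_context() is a
 * hypothetical name for the slow path that takes task_lock().
 */
static inline void create_io_context(struct task_struct *task,
				     gfp_t gfp_mask, int node)
{
	/* Installation takes IRQ-unsafe task_lock(); never call under queue_lock. */
	WARN_ON_ONCE(irqs_disabled());

	if (unlikely(!task->io_context))
		alloc_and_install_io_context(task, gfp_mask, node);
}
```

Note that allocation can still fail and leave task->io_context NULL, which is presumably why get_request_wait() passes current->io_context straight to ioc_set_batching() rather than assuming the pointer is now valid.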