author    | Mike Snitzer <snitzer@redhat.com> | 2016-01-31 17:22:27 -0500
committer | Mike Snitzer <snitzer@redhat.com> | 2016-02-22 11:06:22 -0500
commit    | 16f122661dbb3dfefc60788b528b54ad702005aa (patch)
tree      | 500f1868d0342e7d61eeb86337d0120532d37d78 /drivers/md/dm.c
parent    | f083b09b7819c785db4f82a81f68da3bccfb04bf (diff)
download  | lwn-16f122661dbb3dfefc60788b528b54ad702005aa.tar.gz
          | lwn-16f122661dbb3dfefc60788b528b54ad702005aa.zip
dm: optimize dm_mq_queue_rq()
DM multipath is the only dm-mq target. But that aside, request-based DM
only supports tables with a single target that is immutable. Leverage
this fact in dm_mq_queue_rq() by using the 'immutable_target' stored in
the mapped_device when the table was made active. This avoids the need to
take even the SRCU read-side lock via dm_{get,put}_live_table.
If the active DM table does not have an immutable target (e.g. the "error"
target was swapped in), then fall back to the slow path, where the target
is looked up from the live table.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
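
For readability, here is the fast path of dm_mq_queue_rq() as it reads after
this patch, reassembled from the hunks below (no new code, just the result of
the diff):

```c
	/*
	 * Fast path: the immutable target was cached in __bind() when the
	 * table was made live, so no SRCU read lock is needed at all.
	 */
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		/*
		 * Slow path: no immutable target (e.g. "error" was swapped
		 * in), so take the SRCU read side and look the target up at
		 * sector 0 - request-based tables have exactly one target.
		 */
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}
```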
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r-- | drivers/md/dm.c | 40
1 file changed, 18 insertions, 22 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d605170a02d9..35ca9d065760 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -154,6 +154,7 @@ struct mapped_device {
 	/* Protect queue and type against concurrent access. */
 	struct mutex type_lock;
 
+	struct dm_target *immutable_target;
 	struct target_type *immutable_target_type;
 
 	struct gendisk *disk;
@@ -2492,8 +2493,15 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 	 * This must be done before setting the queue restrictions,
 	 * because request-based dm may be run just after the setting.
 	 */
-	if (dm_table_request_based(t))
+	if (dm_table_request_based(t)) {
 		stop_queue(q);
+		/*
+		 * Leverage the fact that request-based DM targets are
+		 * immutable singletons and establish md->immutable_target
+		 * - used to optimize both dm_request_fn and dm_mq_queue_rq
+		 */
+		md->immutable_target = dm_table_get_immutable_target(t);
+	}
 
 	__bind_mempools(md, t);
 
@@ -2564,7 +2572,6 @@ void dm_set_md_type(struct mapped_device *md, unsigned type)
 
 unsigned dm_get_md_type(struct mapped_device *md)
 {
-	BUG_ON(!mutex_is_locked(&md->type_lock));
 	return md->type;
 }
 
@@ -2641,28 +2648,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct request *rq = bd->rq;
 	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
 	struct mapped_device *md = tio->md;
-	int srcu_idx;
-	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
-	struct dm_target *ti;
-	sector_t pos;
+	struct dm_target *ti = md->immutable_target;
 
-	/* always use block 0 to find the target for flushes for now */
-	pos = 0;
-	if (!(rq->cmd_flags & REQ_FLUSH))
-		pos = blk_rq_pos(rq);
+	if (unlikely(!ti)) {
+		int srcu_idx;
+		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 
-	ti = dm_table_find_target(map, pos);
-	if (!dm_target_is_valid(ti)) {
+		ti = dm_table_find_target(map, 0);
 		dm_put_live_table(md, srcu_idx);
-		DMERR_LIMIT("request attempted access beyond the end of device");
-		/*
-		 * Must perform setup, that rq_completed() requires,
-		 * before returning BLK_MQ_RQ_QUEUE_ERROR
-		 */
-		dm_start_request(md, rq);
-		return BLK_MQ_RQ_QUEUE_ERROR;
 	}
-	dm_put_live_table(md, srcu_idx);
 
 	if (ti->type->busy && ti->type->busy(ti))
 		return BLK_MQ_RQ_QUEUE_BUSY;
@@ -2678,8 +2672,10 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 */
 	tio->ti = ti;
 
-	/* Clone the request if underlying devices aren't blk-mq */
-	if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
+	/*
+	 * Both the table and md type cannot change after initial table load
+	 */
+	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
 		/* clone request is allocated at the end of the pdu */
 		tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
 		(void) clone_rq(rq, md, tio, GFP_ATOMIC);
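
For context, the dm_{get,put}_live_table() helpers that the slow path still
uses are thin wrappers around the SRCU read side. Roughly (a simplified
sketch of the drivers/md/dm.c code of this era; the real functions also carry
sparse/lockdep annotations):

```c
/*
 * Enter an SRCU read-side critical section and fetch the RCU-protected
 * live table pointer. mapped_device embeds 'struct srcu_struct io_barrier'
 * and 'struct dm_table __rcu *map'.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);
	return srcu_dereference(md->map, &md->io_barrier);
}

/* Leave the SRCU read-side critical section opened above. */
void dm_put_live_table(struct mapped_device *md, int srcu_idx)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}
```

Skipping this pair entirely on the fast path is the whole point of caching
md->immutable_target at __bind() time.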