| author | Keith Busch <keith.busch@intel.com> | 2014-10-17 17:46:36 -0600 |
|---|---|---|
| committer | Mike Snitzer <snitzer@redhat.com> | 2015-02-09 13:06:47 -0500 |
| commit | 2eb6e1e3aa873f2bb62075bebe17fa108ee07374 (patch) | |
| tree | baa8b4216daa574b9e5ee7591f85a8beb7758d6b /drivers/md/dm-mpath.c | |
| parent | 1ae49ea2cf3ef097d4496981261a400f1f988b84 (diff) | |
dm: submit stacked requests in irq enabled context
Switch to having request-based DM enqueue all prep'ed requests into work
processed by another thread. This allows request-based DM to invoke
block APIs that assume an interrupt-enabled context (e.g. blk_get_request)
and is a prerequisite for adding blk-mq support to request-based DM.
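The dm.c side of this change is not shown here (the diffstat below is limited to dm-mpath.c). As a rough illustration of the mechanism described above, the sketch below uses the kthread_worker API of this era (init_kthread_worker(), queue_kthread_work(), kthread_worker_fn) to hand prep'ed clones from the irq-disabled ->request_fn path to a per-device kernel thread; the example_* names are illustrative, not the actual DM identifiers.

```c
/*
 * Illustrative sketch only, not the actual dm.c change: a kthread_worker
 * lets the irq-disabled ->request_fn path hand each prep'ed clone to a
 * kernel thread running with interrupts enabled, where APIs such as
 * blk_get_request() can be used safely.
 */
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>

struct example_md {
	struct kthread_worker kworker;		/* per-device work queue */
	struct task_struct *kworker_task;	/* thread running kthread_worker_fn */
};

struct example_tio {
	struct kthread_work work;		/* queued from ->request_fn */
	struct request *clone;
};

/* Runs in the kernel thread: process context, interrupts enabled. */
static void example_map_request(struct kthread_work *work)
{
	struct example_tio *tio = container_of(work, struct example_tio, work);

	/* map and dispatch tio->clone here */
}

static int example_init_worker(struct example_md *md)
{
	init_kthread_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "example_kdmwork");
	return PTR_ERR_OR_ZERO(md->kworker_task);
}

/* Called from the request_fn path with interrupts disabled: only enqueue. */
static void example_queue_map(struct example_md *md, struct example_tio *tio)
{
	init_kthread_work(&tio->work, example_map_request);
	queue_kthread_work(&md->kworker, &tio->work);
}
```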
The new kernel thread is only initialized for request-based DM devices.

multipath_map() is now always called in an interrupt-enabled context, so
change the multipath spinlock (m->lock) locking to always disable
interrupts.
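Because the caller is now guaranteed to have interrupts enabled, the irqsave/irqrestore form is unnecessary. The generic sketch below (not the dm-mpath code itself; example_lock stands in for m->lock) contrasts the two idioms: spin_unlock_irq() re-enables interrupts unconditionally, so the irq variant is only correct when the lock is taken from a context known to have them enabled.

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stand-in for m->lock */

/* Safe from any context: saves the caller's IRQ state and restores it,
 * so a caller that already had interrupts disabled stays that way. */
static void update_from_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Only correct when the caller runs with interrupts enabled (e.g. the
 * new per-device kernel thread): the unlock re-enables interrupts
 * unconditionally, regardless of the state on entry. */
static void update_with_irqs_enabled(void)
{
	spin_lock_irq(&example_lock);
	/* ... critical section ... */
	spin_unlock_irq(&example_lock);
}
```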
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md/dm-mpath.c')
-rw-r--r-- | drivers/md/dm-mpath.c | 18 |
1 file changed, 11 insertions, 7 deletions
```diff
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7b6b0f0f831a..2552b88f8953 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -384,12 +384,11 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
 	struct multipath *m = (struct multipath *) ti->private;
 	int r = DM_MAPIO_REQUEUE;
 	size_t nr_bytes = blk_rq_bytes(clone);
-	unsigned long flags;
 	struct pgpath *pgpath;
 	struct block_device *bdev;
 	struct dm_mpath_io *mpio;
 
-	spin_lock_irqsave(&m->lock, flags);
+	spin_lock_irq(&m->lock);
 
 	/* Do we need to select a new pgpath? */
 	if (!m->current_pgpath ||
@@ -411,21 +410,26 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
 		/* ENOMEM, requeue */
 		goto out_unlock;
 
+	mpio = map_context->ptr;
+	mpio->pgpath = pgpath;
+	mpio->nr_bytes = nr_bytes;
+
 	bdev = pgpath->path.dev->bdev;
+
 	clone->q = bdev_get_queue(bdev);
 	clone->rq_disk = bdev->bd_disk;
 	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-	mpio = map_context->ptr;
-	mpio->pgpath = pgpath;
-	mpio->nr_bytes = nr_bytes;
+
+	spin_unlock_irq(&m->lock);
+
 	if (pgpath->pg->ps.type->start_io)
 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 					      &pgpath->path,
 					      nr_bytes);
-	r = DM_MAPIO_REMAPPED;
+	return DM_MAPIO_REMAPPED;
 
 out_unlock:
-	spin_unlock_irqrestore(&m->lock, flags);
+	spin_unlock_irq(&m->lock);
 
 	return r;
 }
```