author | Mike Snitzer <snitzer@redhat.com> | 2017-12-10 15:37:21 -0500
committer | Mike Snitzer <snitzer@redhat.com> | 2018-01-06 11:23:24 -0500
commit | 848b8aefd44df99b3e38a872acb8d54d3530bebf (patch)
tree | 0bcdfea974072cf295304ac3890d167c7840bae8 /drivers/md/dm-mpath.c
parent | cd025384455715525a296e54999349e540850301 (diff)
download | lwn-848b8aefd44df99b3e38a872acb8d54d3530bebf.tar.gz lwn-848b8aefd44df99b3e38a872acb8d54d3530bebf.zip
dm mpath: optimize NVMe bio-based support
None of the code that deals with pg_init is used in bio-based NVMe mode, so
initialization of the pg_init-related variables is now skipped in that mode.
Also, the pg_init-related members of 'struct multipath' have been grouped
together.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
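
Before diving into the diff, the shape of the change is worth spelling out: pg_init is hardware-handler (SCSI) machinery, so every piece of pg_init state can be skipped when the table is NVMe bio-based. Below is a minimal, self-contained userspace sketch of that gating pattern — not kernel code: the enum and field names echo the kernel's, but the types, the `stage2_init()` helper, and the `PG_INIT_DELAY_DEFAULT` value are simplified stand-ins.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's types; illustration only. */
enum dm_queue_mode { DM_TYPE_BIO_BASED, DM_TYPE_REQUEST_BASED, DM_TYPE_NVME_BIO_BASED };

#define PG_INIT_DELAY_DEFAULT 2000   /* stand-in for DM_PG_INIT_DELAY_DEFAULT */

struct multipath {
	enum dm_queue_mode queue_mode;
	bool queue_io;                 /* models the MPATHF_QUEUE_IO flag bit */
	unsigned pg_init_delay_msecs;
	int pg_init_in_progress;
	int pg_init_count;
};

/* Mirrors the initialization moved into alloc_multipath_stage2():
 * once the queue mode is known, pg_init state is set up only for
 * non-NVMe tables, since NVMe has no SCSI hardware handler. */
static void stage2_init(struct multipath *m)
{
	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
		m->queue_io = true;
		m->pg_init_in_progress = 0;
		m->pg_init_count = 0;
		m->pg_init_delay_msecs = PG_INIT_DELAY_DEFAULT;
	}
}

int main(void)
{
	struct multipath nvme = { .queue_mode = DM_TYPE_NVME_BIO_BASED };
	struct multipath scsi = { .queue_mode = DM_TYPE_REQUEST_BASED };

	stage2_init(&nvme);
	stage2_init(&scsi);
	printf("nvme queue_io=%d, scsi queue_io=%d\n", nvme.queue_io, scsi.queue_io);
	return 0;
}
```

The same reasoning drives the new setup_scsi_dh() split in parse_path() below: SCSI device-handler attachment becomes a separate function that NVMe bio-based tables simply never call.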
Diffstat (limited to 'drivers/md/dm-mpath.c')
-rw-r--r-- | drivers/md/dm-mpath.c | 171
1 file changed, 95 insertions(+), 76 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d3813b1e74e2..fa5ee78c69c9 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -64,36 +64,30 @@ struct priority_group {
 /* Multipath context */
 struct multipath {
-	struct list_head list;
-	struct dm_target *ti;
-
-	const char *hw_handler_name;
-	char *hw_handler_params;
+	unsigned long flags;		/* Multipath state flags */
 
 	spinlock_t lock;
-
-	unsigned nr_priority_groups;
-	struct list_head priority_groups;
-
-	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
+	enum dm_queue_mode queue_mode;
 
 	struct pgpath *current_pgpath;
 	struct priority_group *current_pg;
 	struct priority_group *next_pg;	/* Switch to this PG if set */
 
-	unsigned long flags;		/* Multipath state flags */
+	atomic_t nr_valid_paths;	/* Total number of usable paths */
+	unsigned nr_priority_groups;
+	struct list_head priority_groups;
 
+	const char *hw_handler_name;
+	char *hw_handler_params;
+	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
 	unsigned pg_init_retries;	/* Number of times to retry pg_init */
 	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
-
-	atomic_t nr_valid_paths;	/* Total number of usable paths */
 	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
 	atomic_t pg_init_count;		/* Number of times pg_init called */
 
-	enum dm_queue_mode queue_mode;
-
 	struct mutex work_mutex;
 	struct work_struct trigger_event;
+	struct dm_target *ti;
 
 	struct work_struct process_queued_bios;
 	struct bio_list queued_bios;
@@ -135,10 +129,10 @@ static struct pgpath *alloc_pgpath(void)
 {
 	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 
-	if (pgpath) {
-		pgpath->is_active = true;
-		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
-	}
+	if (!pgpath)
+		return NULL;
+
+	pgpath->is_active = true;
 
 	return pgpath;
 }
@@ -193,13 +187,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 	if (m) {
 		INIT_LIST_HEAD(&m->priority_groups);
 		spin_lock_init(&m->lock);
-		set_bit(MPATHF_QUEUE_IO, &m->flags);
 		atomic_set(&m->nr_valid_paths, 0);
-		atomic_set(&m->pg_init_in_progress, 0);
-		atomic_set(&m->pg_init_count, 0);
-		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 		INIT_WORK(&m->trigger_event, trigger_event);
-		init_waitqueue_head(&m->pg_init_wait);
 		mutex_init(&m->work_mutex);
 
 		m->queue_mode = DM_TYPE_NONE;
@@ -235,6 +224,14 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 		}
 	}
 
+	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+		set_bit(MPATHF_QUEUE_IO, &m->flags);
+		atomic_set(&m->pg_init_in_progress, 0);
+		atomic_set(&m->pg_init_count, 0);
+		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+		init_waitqueue_head(&m->pg_init_wait);
+	}
+
 	dm_table_set_type(ti->table, m->queue_mode);
 
 	return 0;
@@ -339,6 +336,9 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
 {
 	m->current_pg = pg;
 
+	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+		return;
+
 	/* Must we initialise the PG first, and queue I/O till it's ready? */
 	if (m->hw_handler_name) {
 		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
@@ -384,7 +384,8 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	unsigned bypassed = 1;
 
 	if (!atomic_read(&m->nr_valid_paths)) {
-		clear_bit(MPATHF_QUEUE_IO, &m->flags);
+		if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
+			clear_bit(MPATHF_QUEUE_IO, &m->flags);
 		goto failed;
 	}
@@ -528,8 +529,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
 	if (IS_ERR(clone)) {
 		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
-		bool queue_dying = blk_queue_dying(q);
-		if (queue_dying) {
+		if (blk_queue_dying(q)) {
 			atomic_inc(&m->pg_init_in_progress);
 			activate_or_offline_path(pgpath);
 		}
@@ -563,21 +563,28 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 
 	/* Do we need to select a new pgpath? */
 	pgpath = READ_ONCE(m->current_pgpath);
+	/* MPATHF_QUEUE_IO will never be set for NVMe */
 	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
 	if (!pgpath || !queue_io)
 		pgpath = choose_pgpath(m, mpio->nr_bytes);
 
-	if ((pgpath && queue_io) ||
-	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
+	if ((!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
+	    (pgpath && queue_io)) {
 		/* Queue for the daemon to resubmit */
 		spin_lock_irqsave(&m->lock, flags);
 		bio_list_add(&m->queued_bios, bio);
 		spin_unlock_irqrestore(&m->lock, flags);
-		/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
-		if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
-			pg_init_all_paths(m);
-		else if (!queue_io)
+
+		if (m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
 			queue_work(kmultipathd, &m->process_queued_bios);
+		} else {
+			/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
+			if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
+				pg_init_all_paths(m);
+			else if (!queue_io)
+				queue_work(kmultipathd, &m->process_queued_bios);
+		}
+
 		return DM_MAPIO_SUBMITTED;
 	}
@@ -750,34 +757,11 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 	return 0;
 }
 
-static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
-				 struct dm_target *ti)
+static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error)
 {
-	int r;
-	struct pgpath *p;
-	struct multipath *m = ti->private;
-	struct request_queue *q = NULL;
+	struct request_queue *q = bdev_get_queue(bdev);
 	const char *attached_handler_name;
-
-	/* we need at least a path arg */
-	if (as->argc < 1) {
-		ti->error = "no device given";
-		return ERR_PTR(-EINVAL);
-	}
-
-	p = alloc_pgpath();
-	if (!p)
-		return ERR_PTR(-ENOMEM);
-
-	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
-			  &p->path.dev);
-	if (r) {
-		ti->error = "error getting device";
-		goto bad;
-	}
-
-	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
-		q = bdev_get_queue(p->path.dev->bdev);
+	int r;
 
 	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 retain:
@@ -809,26 +793,59 @@ retain:
 			char b[BDEVNAME_SIZE];
 
 			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
-			       bdevname(p->path.dev->bdev, b));
+			       bdevname(bdev, b));
 			goto retain;
 		}
 		if (r < 0) {
-			ti->error = "error attaching hardware handler";
-			dm_put_device(ti, p->path.dev);
-			goto bad;
+			*error = "error attaching hardware handler";
+			return r;
 		}
 
 		if (m->hw_handler_params) {
 			r = scsi_dh_set_params(q, m->hw_handler_params);
 			if (r < 0) {
-				ti->error = "unable to set hardware "
-							"handler parameters";
-				dm_put_device(ti, p->path.dev);
-				goto bad;
+				*error = "unable to set hardware handler parameters";
+				return r;
 			}
 		}
 	}
 
+	return 0;
+}
+
+static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
+				 struct dm_target *ti)
+{
+	int r;
+	struct pgpath *p;
+	struct multipath *m = ti->private;
+
+	/* we need at least a path arg */
+	if (as->argc < 1) {
+		ti->error = "no device given";
+		return ERR_PTR(-EINVAL);
+	}
+
+	p = alloc_pgpath();
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
+			  &p->path.dev);
+	if (r) {
+		ti->error = "error getting device";
+		goto bad;
+	}
+
+	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
+		r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
+		if (r) {
+			dm_put_device(ti, p->path.dev);
+			goto bad;
+		}
+	}
+
 	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 	if (r) {
 		dm_put_device(ti, p->path.dev);
@@ -836,7 +853,6 @@ retain:
 	}
 
 	return p;
-
 bad:
 	free_pgpath(p);
 	return ERR_PTR(r);
@@ -1152,16 +1168,19 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
 
 static void flush_multipath_work(struct multipath *m)
 {
-	set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
-	smp_mb__after_atomic();
+	if (m->hw_handler_name) {
+		set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+		smp_mb__after_atomic();
+
+		flush_workqueue(kmpath_handlerd);
+		multipath_wait_for_pg_init_completion(m);
+
+		clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+		smp_mb__after_atomic();
+	}
 
-	flush_workqueue(kmpath_handlerd);
-	multipath_wait_for_pg_init_completion(m);
 	flush_workqueue(kmultipathd);
 	flush_work(&m->trigger_event);
-
-	clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
-	smp_mb__after_atomic();
 }
 
 static void multipath_dtr(struct dm_target *ti)
@@ -1537,7 +1556,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 }
 
 static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
-				blk_status_t *error)
+		blk_status_t *error)
 {
 	struct multipath *m = ti->private;
 	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
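
The __multipath_map_bio() hunk above applies the same split on the hot path: once a bio has been queued, an NVMe bio-based table can kick the resubmission worker directly, because MPATHF_QUEUE_IO and MPATHF_PG_INIT_REQUIRED are never set in that mode; only the SCSI case still has to consider starting pg_init first. Here is a hedged userspace sketch of that dispatch decision, with `kick_requeue_worker()` and the boolean fields as illustrative stand-ins for the kernel's workqueue call and flag bits:

```c
#include <stdbool.h>
#include <stdio.h>

enum dm_queue_mode { DM_TYPE_BIO_BASED, DM_TYPE_NVME_BIO_BASED };

struct multipath {
	enum dm_queue_mode queue_mode;
	bool queue_io;         /* models MPATHF_QUEUE_IO */
	bool pg_init_required; /* models MPATHF_PG_INIT_REQUIRED */
};

/* Stand-ins for queue_work(kmultipathd, ...) and pg_init_all_paths(). */
static void kick_requeue_worker(void) { puts("resubmit queued bios"); }
static void pg_init_all_paths(void) { puts("initialize path groups first"); }

/* Models the branch taken after a bio is added to m->queued_bios. */
static void dispatch_queued_bio(struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
		/* NVMe: no hardware handler, so there is never pg_init to wait for. */
		kick_requeue_worker();
	} else if (m->queue_io || m->pg_init_required) {
		/* SCSI: path groups may need initialization before I/O can flow. */
		pg_init_all_paths();
	} else {
		kick_requeue_worker();
	}
}

int main(void)
{
	struct multipath nvme = { .queue_mode = DM_TYPE_NVME_BIO_BASED };
	struct multipath scsi = { .queue_mode = DM_TYPE_BIO_BASED, .queue_io = true };

	dispatch_queued_bio(&nvme); /* goes straight to the requeue worker */
	dispatch_queued_bio(&scsi); /* must start pg_init first */
	return 0;
}
```

flush_multipath_work() gets the same treatment at teardown: the pg_init quiescing steps now run only when a hardware handler is configured, which is never the case for NVMe bio-based tables.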