Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c   |  7
-rw-r--r--  block/blk-mq.c     | 60
-rw-r--r--  block/blk-sysfs.c  |  2
-rw-r--r--  block/bounce.c     |  2
-rw-r--r--  block/elevator.c   |  6
5 files changed, 43 insertions(+), 34 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index aa819a58ea24..f6ab750060fe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -554,6 +554,8 @@ void blk_cleanup_queue(struct request_queue *q)
 		q->queue_lock = &q->__queue_lock;
 	spin_unlock_irq(lock);
 
+	bdi_destroy(&q->backing_dev_info);
+
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
@@ -734,6 +736,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
+static void blk_queue_bio(struct request_queue *q, struct bio *bio);
+
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 			 spinlock_t *lock)
@@ -1587,7 +1591,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
-void blk_queue_bio(struct request_queue *q, struct bio *bio)
+static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	struct blk_plug *plug;
@@ -1695,7 +1699,6 @@ out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
 }
-EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */
 
 /*
  * If bio->bi_dev is a partition, remap the location
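Note on the blk-core.c hunks above: besides moving bdi_destroy() forward into blk_cleanup_queue(), they un-export blk_queue_bio() by making it static. Because blk_init_allocated_queue() uses blk_queue_bio() but sits earlier in the file than its definition, a forward declaration is added above it. A minimal standalone sketch of that C pattern, with hypothetical names rather than kernel code:

#include <stdio.h>

/* forward declaration: code earlier in the file can call the
 * helper even though its definition appears later */
static void submit_one(int req);

/* stands in for blk_init_allocated_queue(), which references
 * blk_queue_bio before its definition in the same file */
void submit_all(const int *reqs, int n)
{
	for (int i = 0; i < n; i++)
		submit_one(reqs[i]);
}

/* 'static' hides the symbol from other translation units,
 * matching the removal of the EXPORT_SYMBOL_GPL() line */
static void submit_one(int req)
{
	printf("submitted request %d\n", req);
}

int main(void)
{
	const int reqs[] = { 1, 2, 3 };

	submit_all(reqs, 3);
	return 0;
}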
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c382a34fe5ac..87e901501de2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -673,8 +673,11 @@ static void blk_mq_rq_timer(unsigned long priv)
 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
 		mod_timer(&q->timeout, data.next);
 	} else {
-		queue_for_each_hw_ctx(q, hctx, i)
-			blk_mq_tag_idle(hctx);
+		queue_for_each_hw_ctx(q, hctx, i) {
+			/* the hctx may be unmapped, so check it here */
+			if (blk_mq_hw_queue_mapped(hctx))
+				blk_mq_tag_idle(hctx);
+		}
 	}
 }
 
@@ -851,6 +854,16 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 		spin_lock(&hctx->lock);
 		list_splice(&rq_list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
+		/*
+		 * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but
+		 * it's possible the queue is stopped and restarted again
+		 * before this. Queue restart will dispatch requests. And since
+		 * requests in rq_list aren't added into hctx->dispatch yet,
+		 * the requests in rq_list might get lost.
+		 *
+		 * blk_mq_run_hw_queue() already checks the STOPPED bit
+		 **/
+		blk_mq_run_hw_queue(hctx, true);
 	}
 }
 
@@ -1601,22 +1614,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 	return NOTIFY_OK;
 }
 
-static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
-{
-	struct request_queue *q = hctx->queue;
-	struct blk_mq_tag_set *set = q->tag_set;
-
-	if (set->tags[hctx->queue_num])
-		return NOTIFY_OK;
-
-	set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
-	if (!set->tags[hctx->queue_num])
-		return NOTIFY_STOP;
-
-	hctx->tags = set->tags[hctx->queue_num];
-	return NOTIFY_OK;
-}
-
 static int blk_mq_hctx_notify(void *data, unsigned long action,
 			      unsigned int cpu)
 {
@@ -1624,8 +1621,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
 
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
 		return blk_mq_hctx_cpu_offline(hctx, cpu);
-	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-		return blk_mq_hctx_cpu_online(hctx, cpu);
+
+	/*
+	 * In case of CPU online, tags may be reallocated
+	 * in blk_mq_map_swqueue() after mapping is updated.
+	 */
 
 	return NOTIFY_OK;
 }
@@ -1805,6 +1805,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	unsigned int i;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
+	struct blk_mq_tag_set *set = q->tag_set;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		cpumask_clear(hctx->cpumask);
@@ -1833,16 +1834,20 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		 * disable it and free the request entries.
 		 */
 		if (!hctx->nr_ctx) {
-			struct blk_mq_tag_set *set = q->tag_set;
-
 			if (set->tags[i]) {
 				blk_mq_free_rq_map(set, set->tags[i], i);
 				set->tags[i] = NULL;
-				hctx->tags = NULL;
 			}
+			hctx->tags = NULL;
 			continue;
 		}
 
+		/* unmapped hw queue can be remapped after CPU topo changed */
+		if (!set->tags[i])
+			set->tags[i] = blk_mq_init_rq_map(set, i);
+		hctx->tags = set->tags[i];
+		WARN_ON(!hctx->tags);
+
 		/*
 		 * Set the map size to the number of mapped software queues.
 		 * This is more accurate and more efficient than looping
@@ -2120,9 +2125,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 	 */
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_freeze_queue_start(q);
-	list_for_each_entry(q, &all_q_list, all_q_node)
+	list_for_each_entry(q, &all_q_list, all_q_node) {
 		blk_mq_freeze_queue_wait(q);
 
+		/*
+		 * timeout handler can't touch hw queue during the
+		 * reinitialization
+		 */
+		del_timer_sync(&q->timeout);
+	}
+
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_queue_reinit(q);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index faaf36ade7eb..2b8fd302f677 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_trace_shutdown(q);
 
-	bdi_destroy(&q->backing_dev_info);
-
 	ida_simple_remove(&blk_queue_ida, q->id);
 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
diff --git a/block/bounce.c b/block/bounce.c
index 4bac72579c1f..3ab0bce1c947 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -218,8 +218,8 @@ bounce:
 		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
 			continue;
 
-		inc_zone_page_state(to->bv_page, NR_BOUNCE);
 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
+		inc_zone_page_state(to->bv_page, NR_BOUNCE);
 
 		if (rw == WRITE) {
 			char *vto, *vfrom;
diff --git a/block/elevator.c b/block/elevator.c
index 59794d0d38e3..8985038f398c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -157,7 +157,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
 
 	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
 	if (unlikely(!eq))
-		goto err;
+		return NULL;
 
 	eq->type = e;
 	kobject_init(&eq->kobj, &elv_ktype);
@@ -165,10 +165,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
 	hash_init(eq->hash);
 
 	return eq;
-err:
-	kfree(eq);
-	elevator_put(e);
-	return NULL;
 }
 EXPORT_SYMBOL(elevator_alloc);
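Note on the elevator.c hunks at the end: they fix a double release. On allocation failure the old err label called kfree(eq) on a NULL pointer (harmless) and elevator_put(e), dropping a module reference that the caller releases again in its own error path. A minimal sketch of the bug pattern and the fixed contract, using hypothetical refcount helpers rather than the kernel API:

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for a refcounted struct elevator_type */
struct elv_type { int refcount; };

static void elv_get(struct elv_type *e) { e->refcount++; }
static void elv_put(struct elv_type *e) { e->refcount--; }

struct elv_queue { struct elv_type *type; };

/* fixed contract: never drop the caller's reference; on failure
 * just return NULL and let the caller do its own elv_put() */
static struct elv_queue *elv_queue_alloc(struct elv_type *e, int simulate_oom)
{
	struct elv_queue *eq = simulate_oom ? NULL : calloc(1, sizeof(*eq));

	if (!eq)
		return NULL;	/* buggy version also called elv_put(e) here */

	eq->type = e;
	return eq;
}

int main(void)
{
	struct elv_type mod = { .refcount = 0 };

	elv_get(&mod);				/* caller pins the module */
	if (!elv_queue_alloc(&mod, 1))		/* force the failure path */
		elv_put(&mod);			/* the caller's single release */

	/* prints 0; with the old double elv_put() it would be -1 */
	printf("refcount = %d\n", mod.refcount);
	return 0;
}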