-rw-r--r--	block/bfq-cgroup.c	| 16
-rw-r--r--	block/bfq-iosched.c	| 13
-rw-r--r--	block/bfq-iosched.h	|  4
-rw-r--r--	block/bfq-wf2q.c	|  8
-rw-r--r--	block/blk-cgroup.c	|  2
-rw-r--r--	block/blk-core.c	|  9
-rw-r--r--	block/blk-iocost.c	|  2
-rw-r--r--	drivers/block/loop.c	| 28
-rw-r--r--	include/linux/bio.h	|  3
9 files changed, 47 insertions, 38 deletions
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 627476bc6495..1b2829e99dad 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -724,19 +724,19 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  * sure that the reference to cgroup is valid across the call (see
  * comments in bfq_bic_update_cgroup on this issue)
  */
-static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
-				     struct bfq_io_cq *bic,
-				     struct bfq_group *bfqg)
+static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
+				    struct bfq_io_cq *bic,
+				    struct bfq_group *bfqg)
 {
-	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
-	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
+	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false);
+	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true);
 	struct bfq_entity *entity;
 
 	if (async_bfqq) {
 		entity = &async_bfqq->entity;
 
 		if (entity->sched_data != &bfqg->sched_data) {
-			bic_set_bfqq(bic, NULL, 0);
+			bic_set_bfqq(bic, NULL, false);
 			bfq_release_process_ref(bfqd, async_bfqq);
 		}
 	}
@@ -772,12 +772,10 @@ static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
 				 */
 				bfq_put_cooperator(sync_bfqq);
 				bfq_release_process_ref(bfqd, sync_bfqq);
-				bic_set_bfqq(bic, NULL, 1);
+				bic_set_bfqq(bic, NULL, true);
 			}
 		}
 	}
-
-	return bfqg;
 }
 
 void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index a72304c728fc..16f43bbc575a 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -386,6 +386,12 @@ static void bfq_put_stable_ref(struct bfq_queue *bfqq);
 
 void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
 {
+	struct bfq_queue *old_bfqq = bic->bfqq[is_sync];
+
+	/* Clear bic pointer if bfqq is detached from this bic */
+	if (old_bfqq && old_bfqq->bic == bic)
+		old_bfqq->bic = NULL;
+
 	/*
 	 * If bfqq != NULL, then a non-stable queue merge between
 	 * bic->bfqq and bfqq is happening here. This causes troubles
@@ -3108,7 +3114,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 	/*
 	 * Merge queues (that is, let bic redirect its requests to new_bfqq)
 	 */
-	bic_set_bfqq(bic, new_bfqq, 1);
+	bic_set_bfqq(bic, new_bfqq, true);
 	bfq_mark_bfqq_coop(new_bfqq);
 	/*
 	 * new_bfqq now belongs to at least two bics (it is a shared queue):
@@ -5311,7 +5317,6 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
 		unsigned long flags;
 
 		spin_lock_irqsave(&bfqd->lock, flags);
-		bfqq->bic = NULL;
 		bfq_exit_bfqq(bfqd, bfqq);
 		bic_set_bfqq(bic, NULL, is_sync);
 		spin_unlock_irqrestore(&bfqd->lock, flags);
@@ -6557,7 +6562,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
 		return bfqq;
 	}
 
-	bic_set_bfqq(bic, NULL, 1);
+	bic_set_bfqq(bic, NULL, true);
 
 	bfq_put_cooperator(bfqq);
 
@@ -7058,7 +7063,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 	bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
 
 	bfqd->queue_weights_tree = RB_ROOT_CACHED;
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	bfqd->num_groups_with_pending_reqs = 0;
+#endif
 
 	INIT_LIST_HEAD(&bfqd->active_list);
 	INIT_LIST_HEAD(&bfqd->idle_list);
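The bic_set_bfqq() hunk above is the substance of these two files' changes: bfqq->bic used to be cleared only in bfq_exit_icq_bfqq(), so a queue detached from its bfq_io_cq on any other path (such as the cgroup moves handled in __bfq_bic_change_cgroup()) could be left holding a stale back-pointer. Clearing it inside bic_set_bfqq() covers every detach path. Below is a minimal, self-contained sketch of that back-pointer discipline; io_ctx, queue and ctx_set_queue() are hypothetical stand-ins, not the kernel's structures.

#include <stdbool.h>
#include <stddef.h>

struct io_ctx;

struct queue {
	struct io_ctx *owner;	/* back-pointer; goes stale if not cleared on detach */
};

struct io_ctx {
	struct queue *q[2];	/* [false] = async queue, [true] = sync queue */
};

/* Every attach/detach funnels through here, so the old queue's
 * back-pointer can never outlive its association with ctx. */
static void ctx_set_queue(struct io_ctx *ctx, struct queue *q, bool is_sync)
{
	struct queue *old = ctx->q[is_sync];

	if (old && old->owner == ctx)
		old->owner = NULL;

	ctx->q[is_sync] = q;
}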
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 9fa89577322d..41aa151ccc22 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -197,8 +197,10 @@ struct bfq_entity {
 	/* flag, set to request a weight, ioprio or ioprio_class change */
 	int prio_changed;
 
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	/* flag, set if the entity is counted in groups_with_pending_reqs */
 	bool in_groups_with_pending_reqs;
+#endif
 
 	/* last child queue of entity created (for non-leaf entities) */
 	struct bfq_queue *last_bfqq_created;
@@ -491,6 +493,7 @@ struct bfq_data {
 	 */
 	struct rb_root_cached queue_weights_tree;
 
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	/*
 	 * Number of groups with at least one process that
 	 * has at least one request waiting for completion. Note that
@@ -538,6 +541,7 @@ struct bfq_data {
 	 * with no request waiting for completion.
 	 */
 	unsigned int num_groups_with_pending_reqs;
+#endif
 
 	/*
 	 * Per-class (RT, BE, IDLE) number of bfq_queues containing
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index b02b53658ed4..ea4c3d757fdd 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1612,28 +1612,28 @@ void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 
 void bfq_add_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq)
 {
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	struct bfq_entity *entity = &bfqq->entity;
 
 	if (!entity->in_groups_with_pending_reqs) {
 		entity->in_groups_with_pending_reqs = true;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
 		if (!(bfqq_group(bfqq)->num_queues_with_pending_reqs++))
 			bfqq->bfqd->num_groups_with_pending_reqs++;
-#endif
 	}
+#endif
 }
 
 void bfq_del_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq)
 {
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	struct bfq_entity *entity = &bfqq->entity;
 
 	if (entity->in_groups_with_pending_reqs) {
 		entity->in_groups_with_pending_reqs = false;
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
 		if (!(--bfqq_group(bfqq)->num_queues_with_pending_reqs))
 			bfqq->bfqd->num_groups_with_pending_reqs--;
-#endif
 	}
+#endif
 }
 
 /*
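The bfq-iosched.h and bfq-wf2q.c hunks (together with the bfq_init_queue() hunk earlier) move the CONFIG_BFQ_GROUP_IOSCHED guard outward: without group I/O scheduling there are no groups to count, so the in_groups_with_pending_reqs flag and the num_groups_with_pending_reqs counter now disappear entirely instead of the flag being maintained for nothing. A compilable sketch of the pattern follows; CONFIG_EXAMPLE_GROUPS stands in for CONFIG_BFQ_GROUP_IOSCHED, and the per-group queue counter of the real code is omitted for brevity.

#include <stdbool.h>

#define CONFIG_EXAMPLE_GROUPS 1	/* remove to compile the gated code away */

struct entity {
	int weight;
#ifdef CONFIG_EXAMPLE_GROUPS
	bool in_groups_with_pending_reqs;	/* exists only with group scheduling */
#endif
};

struct sched_data {
#ifdef CONFIG_EXAMPLE_GROUPS
	unsigned int num_groups_with_pending_reqs;
#endif
};

static void add_entity_in_groups_with_pending_reqs(struct sched_data *sd,
						   struct entity *entity)
{
#ifdef CONFIG_EXAMPLE_GROUPS
	/* Flag and counter live and die together, under a single guard. */
	if (!entity->in_groups_with_pending_reqs) {
		entity->in_groups_with_pending_reqs = true;
		sd->num_groups_with_pending_reqs++;
	}
#else
	(void)sd;
	(void)entity;
#endif
}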
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 50ac0dce95b8..ce6a2b7d3dfb 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -33,6 +33,7 @@
 #include "blk-cgroup.h"
 #include "blk-ioprio.h"
 #include "blk-throttle.h"
+#include "blk-rq-qos.h"
 
 /*
  * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
@@ -1322,6 +1323,7 @@ err_unlock:
 void blkcg_exit_disk(struct gendisk *disk)
 {
 	blkg_destroy_all(disk);
+	rq_qos_exit(disk->queue);
 	blk_throtl_exit(disk);
 }
 
diff --git a/block/blk-core.c b/block/blk-core.c
index 3866b6c4cd88..9321767470dc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -254,14 +254,15 @@ EXPORT_SYMBOL_GPL(blk_clear_pm_only);
 
 static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 {
-	kmem_cache_free(blk_requestq_cachep,
-			container_of(rcu_head, struct request_queue, rcu_head));
+	struct request_queue *q = container_of(rcu_head,
+			struct request_queue, rcu_head);
+
+	percpu_ref_exit(&q->q_usage_counter);
+	kmem_cache_free(blk_requestq_cachep, q);
 }
 
 static void blk_free_queue(struct request_queue *q)
 {
-	percpu_ref_exit(&q->q_usage_counter);
-
 	if (q->poll_stat)
 		blk_stat_remove_callback(q, q->poll_cb);
 	blk_stat_free_callback(q->poll_cb);
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index d1bdc12deaa7..549ddc9e0c6f 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -232,7 +232,9 @@ enum {
 
 	/* 1/64k is granular enough and can easily be handled w/ u32 */
 	WEIGHT_ONE = 1 << 16,
+};
 
+enum {
 	/*
 	 * As vtime is used to calculate the cost of each IO, it needs to
 	 * be fairly high precision. For example, it should be able to
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1f8f3b87bdfa..df628e30bca4 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1773,7 +1773,16 @@ static const struct block_device_operations lo_fops = {
 /*
  * And now the modules code and kernel interface.
  */
-static int max_loop;
+
+/*
+ * If max_loop is specified, create that many devices upfront.
+ * This also becomes a hard limit. If max_loop is not specified,
+ * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
+ * init time. Loop devices can be requested on-demand with the
+ * /dev/loop-control interface, or be instantiated by accessing
+ * a 'dead' device node.
+ */
+static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
 module_param(max_loop, int, 0444);
 MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
 module_param(max_part, int, 0444);
@@ -2181,7 +2190,7 @@ MODULE_ALIAS("devname:loop-control");
 
 static int __init loop_init(void)
 {
-	int i, nr;
+	int i;
 	int err;
 
 	part_shift = 0;
@@ -2209,19 +2218,6 @@ static int __init loop_init(void)
 		goto err_out;
 	}
 
-	/*
-	 * If max_loop is specified, create that many devices upfront.
-	 * This also becomes a hard limit. If max_loop is not specified,
-	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
-	 * init time. Loop devices can be requested on-demand with the
-	 * /dev/loop-control interface, or be instantiated by accessing
-	 * a 'dead' device node.
-	 */
-	if (max_loop)
-		nr = max_loop;
-	else
-		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
-
 	err = misc_register(&loop_misc);
 	if (err < 0)
 		goto err_out;
@@ -2233,7 +2229,7 @@ static int __init loop_init(void)
 	}
 
 	/* pre-create number of devices given by config or max_loop */
-	for (i = 0; i < nr; i++)
+	for (i = 0; i < max_loop; i++)
 		loop_add(i);
 
 	printk(KERN_INFO "loop: module loaded\n");
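The loop.c hunks trade a computed local for a parameter default: max_loop now starts at CONFIG_BLK_DEV_LOOP_MIN_COUNT, so loop_init() no longer needs the nr intermediary, and the explanatory comment moves up to the variable it describes. A userspace sketch of the same simplification, with MIN_COUNT standing in for the Kconfig value:

#include <stdio.h>

#define MIN_COUNT 8	/* stand-in for CONFIG_BLK_DEV_LOOP_MIN_COUNT */

/*
 * Before: "static int max_loop;" plus, at init time,
 * "nr = max_loop ? max_loop : MIN_COUNT;". Folding the default into
 * the parameter itself removes the branch and the local variable.
 */
static int max_loop = MIN_COUNT;	/* overridable, e.g. by module_param() */

int main(void)
{
	/* pre-create the number of devices given by config or max_loop */
	for (int i = 0; i < max_loop; i++)
		printf("loop%d created\n", i);
	return 0;
}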
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b231a665682a..22078a28d7cb 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -782,8 +782,7 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
 
 static inline void bio_clear_polled(struct bio *bio)
 {
-	/* can't support alloc cache if we turn off polling */
-	bio->bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE);
+	bio->bi_opf &= ~REQ_POLLED;
 }
 
 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
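bio_clear_polled() now clears only REQ_POLLED. The removed comment said the allocation cache could not be supported once polling was turned off; dropping REQ_ALLOC_CACHE from the mask leaves that flag set, presumably because the bio allocation cache no longer depends on polled completion, so a bio falling back to non-polled submission can keep using it. A toy illustration, with made-up flag values rather than the kernel's:

#include <stdio.h>

#define REQ_POLLED	(1u << 0)	/* illustrative values only */
#define REQ_ALLOC_CACHE	(1u << 1)

static unsigned int bio_clear_polled(unsigned int bi_opf)
{
	/* before: bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE); */
	return bi_opf & ~REQ_POLLED;
}

int main(void)
{
	unsigned int opf = REQ_POLLED | REQ_ALLOC_CACHE;

	opf = bio_clear_polled(opf);
	printf("alloc cache kept: %s\n",
	       opf & REQ_ALLOC_CACHE ? "yes" : "no");
	return 0;
}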