From 1ba64edef6051d2ec79bb2fbd3a0c8f0df00ab55 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:37 +0100 Subject: block, sx8: kill blk_insert_request() The only user left for blk_insert_request() is sx8 and it can be trivially switched to use blk_execute_rq_nowait() - special requests aren't included in io stat and sx8 doesn't use block layer tagging. Switch sx8 and kill blk_insert_request(). This patch doesn't introduce any functional difference. Only compile tested. Signed-off-by: Tejun Heo Acked-by: Jeff Garzik Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux/blkdev.h') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c7a6d3b5bc7b..8a6b51b13a1c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -660,7 +660,6 @@ extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); extern struct request *blk_make_request(struct request_queue *, struct bio *, gfp_t); -extern void blk_insert_request(struct request_queue *, struct request *, int, void *); extern void blk_requeue_request(struct request_queue *, struct request *); extern void blk_add_request_payload(struct request *rq, struct page *page, unsigned int len); -- cgit v1.2.3 From 34f6055c80285e4efb3f602a9119db75239744dc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:37 +0100 Subject: block: add blk_queue_dead() There are a number of QUEUE_FLAG_DEAD tests. Add blk_queue_dead() macro and use it. This patch doesn't introduce any functional difference. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-core.c | 6 +++--- block/blk-exec.c | 2 +- block/blk-sysfs.c | 4 ++-- block/blk-throttle.c | 4 ++-- block/blk.h | 2 +- include/linux/blkdev.h | 1 + 6 files changed, 10 insertions(+), 9 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-core.c b/block/blk-core.c index 435af2378614..b5ed4f4a8d96 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -608,7 +608,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue_node); int blk_get_queue(struct request_queue *q) { - if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { + if (likely(!blk_queue_dead(q))) { kobject_get(&q->kobj); return 0; } @@ -755,7 +755,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags, const bool is_sync = rw_is_sync(rw_flags) != 0; int may_queue; - if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) + if (unlikely(blk_queue_dead(q))) return NULL; may_queue = elv_may_queue(q, rw_flags); @@ -875,7 +875,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags, struct io_context *ioc; struct request_list *rl = &q->rq; - if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) + if (unlikely(blk_queue_dead(q))) return NULL; prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, diff --git a/block/blk-exec.c b/block/blk-exec.c index a1ebceb332f9..60532852b3ab 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -50,7 +50,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, { int where = at_head ?
ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; - if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { + if (unlikely(blk_queue_dead(q))) { rq->errors = -ENXIO; if (rq->end_io) rq->end_io(rq, rq->errors); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index e7f9f657f105..f0b2ca8f66d0 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -425,7 +425,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) if (!entry->show) return -EIO; mutex_lock(&q->sysfs_lock); - if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { + if (blk_queue_dead(q)) { mutex_unlock(&q->sysfs_lock); return -ENOENT; } @@ -447,7 +447,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr, q = container_of(kobj, struct request_queue, kobj); mutex_lock(&q->sysfs_lock); - if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { + if (blk_queue_dead(q)) { mutex_unlock(&q->sysfs_lock); return -ENOENT; } diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 4553245d9317..5eed6a76721d 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td) struct request_queue *q = td->queue; /* no throttling for dead queue */ - if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) + if (unlikely(blk_queue_dead(q))) return NULL; rcu_read_lock(); @@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td) spin_lock_irq(q->queue_lock); /* Make sure @q is still alive */ - if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { + if (unlikely(blk_queue_dead(q))) { kfree(tg); return NULL; } diff --git a/block/blk.h b/block/blk.h index 3f6551b3c92d..e38691dbb329 100644 --- a/block/blk.h +++ b/block/blk.h @@ -85,7 +85,7 @@ static inline struct request *__elv_next_request(struct request_queue *q) q->flush_queue_delayed = 1; return NULL; } - if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) || + if (unlikely(blk_queue_dead(q)) || !q->elevator->ops->elevator_dispatch_fn(q, 0)) return NULL; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8a6b51b13a1c..783f97c14d0a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -481,6 +481,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) +#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_noxmerges(q) \ test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) -- cgit v1.2.3 From a73f730d013ff2788389fd0c46ad3e5510f124e6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:37 +0100 Subject: block, cfq: move cfqd->cic_index to q->id cfq allocates per-queue id using ida and uses it to index cic radix tree from io_context. Move it to q->id and allocate on queue init and free on queue release. This simplifies cfq a bit and will allow for further improvements of io context life-cycle management. This patch doesn't introduce any functional difference. 
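For readers who have not used the ida API, the allocate-on-init / free-on-release pattern the queue code adopts below can be sketched as a stand-alone example (illustrative only: example_ida, example_queue and the two helpers are invented names; only the ida_simple_get()/ida_simple_remove() calls mirror the actual diff):

#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDA(example_ida);		/* stands in for blk_queue_ida */

struct example_queue {
	int id;				/* stands in for request_queue->id */
};

/* allocate an id while setting the object up ... */
static struct example_queue *example_alloc(void)
{
	struct example_queue *eq = kzalloc(sizeof(*eq), GFP_KERNEL);

	if (!eq)
		return NULL;

	eq->id = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
	if (eq->id < 0) {
		kfree(eq);
		return NULL;
	}
	return eq;
}

/* ... and return it when the object is released */
static void example_release(struct example_queue *eq)
{
	ida_simple_remove(&example_ida, eq->id);
	kfree(eq);
}

Error unwinding runs in the reverse order of setup, which is what the new fail_id/fail_q labels in blk_alloc_queue_node() below implement.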
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-core.c | 24 +++++++++++++++-------- block/blk-sysfs.c | 2 ++ block/blk.h | 3 +++ block/cfq-iosched.c | 52 +++++--------------------------------------------- include/linux/blkdev.h | 6 ++++++ 5 files changed, 32 insertions(+), 55 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-core.c b/block/blk-core.c index 30add45a87ef..af7301581172 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -39,6 +39,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); +DEFINE_IDA(blk_queue_ida); + /* * For the allocated request tables */ @@ -474,6 +476,10 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) if (!q) return NULL; + q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL); + if (q->id < 0) + goto fail_q; + q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; q->backing_dev_info.state = 0; @@ -481,15 +487,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) q->backing_dev_info.name = "block"; err = bdi_init(&q->backing_dev_info); - if (err) { - kmem_cache_free(blk_requestq_cachep, q); - return NULL; - } + if (err) + goto fail_id; - if (blk_throtl_init(q)) { - kmem_cache_free(blk_requestq_cachep, q); - return NULL; - } + if (blk_throtl_init(q)) + goto fail_id; setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, laptop_mode_timer_fn, (unsigned long) q); @@ -512,6 +514,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) q->queue_lock = &q->__queue_lock; return q; + +fail_id: + ida_simple_remove(&blk_queue_ida, q->id); +fail_q: + kmem_cache_free(blk_requestq_cachep, q); + return NULL; } EXPORT_SYMBOL(blk_alloc_queue_node); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index f0b2ca8f66d0..5b4b4ab5e785 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -494,6 +494,8 @@ static void blk_release_queue(struct kobject *kobj) blk_trace_shutdown(q); bdi_destroy(&q->backing_dev_info); + + ida_simple_remove(&blk_queue_ida, q->id); kmem_cache_free(blk_requestq_cachep, q); } diff --git a/block/blk.h b/block/blk.h index e38691dbb329..aae4d88fc523 100644 --- a/block/blk.h +++ b/block/blk.h @@ -1,6 +1,8 @@ #ifndef BLK_INTERNAL_H #define BLK_INTERNAL_H +#include + /* Amount of time in which a process may batch requests */ #define BLK_BATCH_TIME (HZ/50UL) @@ -9,6 +11,7 @@ extern struct kmem_cache *blk_requestq_cachep; extern struct kobj_type blk_queue_ktype; +extern struct ida blk_queue_ida; void init_request_from_bio(struct request *req, struct bio *bio); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 16ace89613bc..ec3f5e8ba564 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -65,9 +65,6 @@ static DEFINE_PER_CPU(unsigned long, cfq_ioc_count); static struct completion *ioc_gone; static DEFINE_SPINLOCK(ioc_gone_lock); -static DEFINE_SPINLOCK(cic_index_lock); -static DEFINE_IDA(cic_index_ida); - #define CFQ_PRIO_LISTS IOPRIO_BE_NR #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) @@ -290,7 +287,6 @@ struct cfq_data { unsigned int cfq_group_idle; unsigned int cfq_latency; - unsigned int cic_index; struct list_head cic_list; /* @@ -484,7 +480,7 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic, static inline void *cfqd_dead_key(struct 
cfq_data *cfqd) { - return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY); + return (void *)(cfqd->queue->id << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY); } static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic) @@ -3105,7 +3101,7 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc, BUG_ON(rcu_dereference_check(ioc->ioc_data, lockdep_is_held(&ioc->lock)) == cic); - radix_tree_delete(&ioc->radix_root, cfqd->cic_index); + radix_tree_delete(&ioc->radix_root, cfqd->queue->id); hlist_del_rcu(&cic->cic_list); spin_unlock_irqrestore(&ioc->lock, flags); @@ -3133,7 +3129,7 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) } do { - cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index); + cic = radix_tree_lookup(&ioc->radix_root, cfqd->queue->id); rcu_read_unlock(); if (!cic) break; @@ -3169,8 +3165,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, cic->key = cfqd; spin_lock_irqsave(&ioc->lock, flags); - ret = radix_tree_insert(&ioc->radix_root, - cfqd->cic_index, cic); + ret = radix_tree_insert(&ioc->radix_root, cfqd->queue->id, cic); if (!ret) hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list); spin_unlock_irqrestore(&ioc->lock, flags); @@ -3944,10 +3939,6 @@ static void cfq_exit_queue(struct elevator_queue *e) cfq_shutdown_timer_wq(cfqd); - spin_lock(&cic_index_lock); - ida_remove(&cic_index_ida, cfqd->cic_index); - spin_unlock(&cic_index_lock); - /* * Wait for cfqg->blkg->key accessors to exit their grace periods. * Do this wait only if there are other unlinked groups out @@ -3969,24 +3960,6 @@ static void cfq_exit_queue(struct elevator_queue *e) kfree(cfqd); } -static int cfq_alloc_cic_index(void) -{ - int index, error; - - do { - if (!ida_pre_get(&cic_index_ida, GFP_KERNEL)) - return -ENOMEM; - - spin_lock(&cic_index_lock); - error = ida_get_new(&cic_index_ida, &index); - spin_unlock(&cic_index_lock); - if (error && error != -EAGAIN) - return error; - } while (error); - - return index; -} - static void *cfq_init_queue(struct request_queue *q) { struct cfq_data *cfqd; @@ -3994,23 +3967,9 @@ static void *cfq_init_queue(struct request_queue *q) struct cfq_group *cfqg; struct cfq_rb_root *st; - i = cfq_alloc_cic_index(); - if (i < 0) - return NULL; - cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); - if (!cfqd) { - spin_lock(&cic_index_lock); - ida_remove(&cic_index_ida, i); - spin_unlock(&cic_index_lock); + if (!cfqd) return NULL; - } - - /* - * Don't need take queue_lock in the routine, since we are - * initializing the ioscheduler, and nobody is using cfqd - */ - cfqd->cic_index = i; /* Init root service tree */ cfqd->grp_service_tree = CFQ_RB_ROOT; @@ -4294,7 +4253,6 @@ static void __exit cfq_exit(void) */ if (elv_ioc_count_read(cfq_ioc_count)) wait_for_completion(&all_gone); - ida_destroy(&cic_index_ida); cfq_slab_kill(); } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 783f97c14d0a..8c8dbc4738ea 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -310,6 +310,12 @@ struct request_queue { */ unsigned long queue_flags; + /* + * ida allocated id for this queue. Used to index queues from + * ioctx. 
+ */ + int id; + /* * queue needs bounce pages for pages above this limit */ -- cgit v1.2.3 From 09ac46c429464c919d04bb737b27edd84d944f02 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:38 +0100 Subject: block: misc updates to blk_get_queue() * blk_get_queue() is peculiar in that it returns 0 on success and 1 on failure instead of 0 / -errno or boolean. Update it such that it returns %true on success and %false on failure. * Make sure the caller checks for the return value. * Separate out __blk_get_queue() which doesn't check whether @q is dead and put it in blk.h. This will be used later. This patch doesn't introduce any functional changes. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-core.c | 8 ++++---- block/blk.h | 5 +++++ block/bsg.c | 4 +--- block/genhd.c | 2 +- drivers/scsi/scsi_scan.c | 2 +- include/linux/blkdev.h | 2 +- 6 files changed, 13 insertions(+), 10 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-core.c b/block/blk-core.c index af7301581172..fd4749391e17 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -626,14 +626,14 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn, } EXPORT_SYMBOL(blk_init_allocated_queue_node); -int blk_get_queue(struct request_queue *q) +bool blk_get_queue(struct request_queue *q) { if (likely(!blk_queue_dead(q))) { - kobject_get(&q->kobj); - return 0; + __blk_get_queue(q); + return true; } - return 1; + return false; } EXPORT_SYMBOL(blk_get_queue); diff --git a/block/blk.h b/block/blk.h index fc3c41b2fd24..8d421156fefb 100644 --- a/block/blk.h +++ b/block/blk.h @@ -13,6 +13,11 @@ extern struct kmem_cache *blk_requestq_cachep; extern struct kobj_type blk_queue_ktype; extern struct ida blk_queue_ida; +static inline void __blk_get_queue(struct request_queue *q) +{ + kobject_get(&q->kobj); +} + void init_request_from_bio(struct request *req, struct bio *bio); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio); diff --git a/block/bsg.c b/block/bsg.c index 702f1316bb8f..167d586cece6 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -769,12 +769,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode, struct file *file) { struct bsg_device *bd; - int ret; #ifdef BSG_DEBUG unsigned char buf[32]; #endif - ret = blk_get_queue(rq); - if (ret) + if (!blk_get_queue(rq)) return ERR_PTR(-ENXIO); bd = bsg_alloc_device(); diff --git a/block/genhd.c b/block/genhd.c index 02e9fca80825..c958169d24f0 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -615,7 +615,7 @@ void add_disk(struct gendisk *disk) * Take an extra ref on queue which will be put on disk_release() * so that it sticks around as long as @disk is there. 
*/ - WARN_ON_ONCE(blk_get_queue(disk->queue)); + WARN_ON_ONCE(!blk_get_queue(disk->queue)); retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj, "bdi"); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index b3c6d957fbd8..89da43f73c00 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -297,7 +297,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, kfree(sdev); goto out; } - blk_get_queue(sdev->request_queue); + WARN_ON_ONCE(!blk_get_queue(sdev->request_queue)); sdev->request_queue->queuedata = sdev; scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8c8dbc4738ea..d1b6f4ed1f96 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -865,7 +865,7 @@ extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatte extern void blk_dump_rq_flags(struct request *, char *); extern long nr_blockdev_pages(void); -int blk_get_queue(struct request_queue *); +bool __must_check blk_get_queue(struct request_queue *); struct request_queue *blk_alloc_queue(gfp_t); struct request_queue *blk_alloc_queue_node(gfp_t, int); extern void blk_put_queue(struct request_queue *); -- cgit v1.2.3 From b2efa05265d62bc29f3a64400fad4b44340eedb8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:39 +0100 Subject: block, cfq: unlink cfq_io_context's immediately cic is association between io_context and request_queue. A cic is linked from both ioc and q and should be destroyed when either one goes away. As ioc and q both have their own locks, locking becomes a bit complex - both orders work for removal from one but not from the other. Currently, cfq tries to circumvent this locking order issue with RCU. ioc->lock nests inside queue_lock but the radix tree and cic's are also protected by RCU allowing either side to walk their lists without grabbing lock. This rather unconventional use of RCU quickly devolves into extremely fragile convolution. e.g. The following is from cfqd going away too soon after ioc and q exits raced. general protection fault: 0000 [#1] PREEMPT SMP CPU 2 Modules linked in: [ 88.503444] Pid: 599, comm: hexdump Not tainted 3.1.0-rc10-work+ #158 Bochs Bochs RIP: 0010:[] [] cfq_exit_single_io_context+0x58/0xf0 ... Call Trace: [] call_for_each_cic+0x5a/0x90 [] cfq_exit_io_context+0x15/0x20 [] exit_io_context+0x100/0x140 [] do_exit+0x579/0x850 [] do_group_exit+0x5b/0xd0 [] sys_exit_group+0x17/0x20 [] system_call_fastpath+0x16/0x1b The only real hot path here is cic lookup during request initialization and avoiding extra locking requires very confined use of RCU. This patch makes cic removal from both ioc and request_queue perform double-locking and unlink immediately. * From q side, the change is almost trivial as ioc->lock nests inside queue_lock. It just needs to grab each ioc->lock as it walks cic_list and unlink it. * From ioc side, it's a bit more difficult because of inversed lock order. ioc needs its lock to walk its cic_list but can't grab the matching queue_lock and needs to perform unlock-relock dancing. Unlinking is now wholly done from put_io_context() and fast path is optimized by using the queue_lock the caller already holds, which is by far the most common case. If the ioc accessed multiple devices, it tries with trylock. In unlikely cases of fast path failure, it falls back to full double-locking dance from workqueue. 
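Stripped of the cic specifics, the fast path described above has roughly the following shape (a sketch under invented names -- link_entry, struct inner, release_fast_path(); the real code operates on each cic's queue_lock, ioc->lock and the release_work introduced below):

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/workqueue.h>

/* "inner" plays the role of the io_context, each link_entry the role of
 * a cic, and entry->outer_lock the role of the matching queue_lock */
struct link_entry {
	struct list_head	node;
	spinlock_t		*outer_lock;
};

struct inner {
	spinlock_t		lock;
	struct list_head	links;
	struct work_struct	release_work;	/* does the full double-lock dance */
};

static void release_fast_path(struct inner *in)
{
	struct link_entry *entry, *tmp;
	unsigned long flags;

	/*
	 * Correct nesting order is outer_lock -> in->lock, but this path
	 * starts out holding only the inner lock, so each outer lock may
	 * only be taken opportunistically.
	 */
	spin_lock_irqsave(&in->lock, flags);

	list_for_each_entry_safe(entry, tmp, &in->links, node) {
		if (spin_trylock(entry->outer_lock)) {
			list_del_init(&entry->node);	/* fast path: unlink now */
			spin_unlock(entry->outer_lock);
		} else {
			/* slow path: a worker can take the locks in order */
			schedule_work(&in->release_work);
			break;
		}
	}

	spin_unlock_irqrestore(&in->lock, flags);
}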
Double-locking isn't the prettiest thing in the world but it's *far* simpler and more understandable than RCU trick without adding any meaningful overhead. This still leaves a lot of now unnecessary RCU logics. Future patches will trim them. -v2: Vivek pointed out that cic->q was being dereferenced after cic->release() was called. Updated to use local variable @this_q instead. Signed-off-by: Tejun Heo Cc: Vivek Goyal Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 2 +- block/blk-ioc.c | 166 ++++++++++++++++++++++++++++++++++++++-------- block/cfq-iosched.c | 44 +++--------- fs/ioprio.c | 2 +- include/linux/blkdev.h | 3 + include/linux/iocontext.h | 12 ++-- kernel/fork.c | 2 +- 7 files changed, 159 insertions(+), 72 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index dc00835aab6a..278869358049 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1649,7 +1649,7 @@ static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk) ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE); if (ioc) { ioc_cgroup_changed(ioc); - put_io_context(ioc); + put_io_context(ioc, NULL); } } diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 6f59fbad93d9..fb23965595da 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -29,55 +29,164 @@ void get_io_context(struct io_context *ioc) } EXPORT_SYMBOL(get_io_context); -static void cfq_dtor(struct io_context *ioc) +/* + * Releasing ioc may nest into another put_io_context() leading to nested + * fast path release. As the ioc's can't be the same, this is okay but + * makes lockdep whine. Keep track of nesting and use it as subclass. + */ +#ifdef CONFIG_LOCKDEP +#define ioc_release_depth(q) ((q) ? (q)->ioc_release_depth : 0) +#define ioc_release_depth_inc(q) (q)->ioc_release_depth++ +#define ioc_release_depth_dec(q) (q)->ioc_release_depth-- +#else +#define ioc_release_depth(q) 0 +#define ioc_release_depth_inc(q) do { } while (0) +#define ioc_release_depth_dec(q) do { } while (0) +#endif + +/* + * Slow path for ioc release in put_io_context(). Performs double-lock + * dancing to unlink all cic's and then frees ioc. + */ +static void ioc_release_fn(struct work_struct *work) { - if (!hlist_empty(&ioc->cic_list)) { - struct cfq_io_context *cic; + struct io_context *ioc = container_of(work, struct io_context, + release_work); + struct request_queue *last_q = NULL; + + spin_lock_irq(&ioc->lock); + + while (!hlist_empty(&ioc->cic_list)) { + struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first, + struct cfq_io_context, + cic_list); + struct request_queue *this_q = cic->q; + + if (this_q != last_q) { + /* + * Need to switch to @this_q. Once we release + * @ioc->lock, it can go away along with @cic. + * Hold on to it. + */ + __blk_get_queue(this_q); + + /* + * blk_put_queue() might sleep thanks to kobject + * idiocy. Always release both locks, put and + * restart. 
+ */ + if (last_q) { + spin_unlock(last_q->queue_lock); + spin_unlock_irq(&ioc->lock); + blk_put_queue(last_q); + } else { + spin_unlock_irq(&ioc->lock); + } + + last_q = this_q; + spin_lock_irq(this_q->queue_lock); + spin_lock(&ioc->lock); + continue; + } + ioc_release_depth_inc(this_q); + cic->exit(cic); + cic->release(cic); + ioc_release_depth_dec(this_q); + } - cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context, - cic_list); - cic->dtor(ioc); + if (last_q) { + spin_unlock(last_q->queue_lock); + spin_unlock_irq(&ioc->lock); + blk_put_queue(last_q); + } else { + spin_unlock_irq(&ioc->lock); } + + kmem_cache_free(iocontext_cachep, ioc); } /** * put_io_context - put a reference of io_context * @ioc: io_context to put + * @locked_q: request_queue the caller is holding queue_lock of (hint) * * Decrement reference count of @ioc and release it if the count reaches - * zero. + * zero. If the caller is holding queue_lock of a queue, it can indicate + * that with @locked_q. This is an optimization hint and the caller is + * allowed to pass in %NULL even when it's holding a queue_lock. */ -void put_io_context(struct io_context *ioc) +void put_io_context(struct io_context *ioc, struct request_queue *locked_q) { + struct request_queue *last_q = locked_q; + unsigned long flags; + if (ioc == NULL) return; BUG_ON(atomic_long_read(&ioc->refcount) <= 0); + if (locked_q) + lockdep_assert_held(locked_q->queue_lock); if (!atomic_long_dec_and_test(&ioc->refcount)) return; - rcu_read_lock(); - cfq_dtor(ioc); - rcu_read_unlock(); - - kmem_cache_free(iocontext_cachep, ioc); -} -EXPORT_SYMBOL(put_io_context); + /* + * Destroy @ioc. This is a bit messy because cic's are chained + * from both ioc and queue, and ioc->lock nests inside queue_lock. + * The inner ioc->lock should be held to walk our cic_list and then + * for each cic the outer matching queue_lock should be grabbed. + * ie. We need to do reverse-order double lock dancing. + * + * Another twist is that we are often called with one of the + * matching queue_locks held as indicated by @locked_q, which + * prevents performing double-lock dance for other queues. + * + * So, we do it in two stages. The fast path uses the queue_lock + * the caller is holding and, if other queues need to be accessed, + * uses trylock to avoid introducing locking dependency. This can + * handle most cases, especially if @ioc was performing IO on only + * single device. + * + * If trylock doesn't cut it, we defer to @ioc->release_work which + * can do all the double-locking dancing. 
+ */ + spin_lock_irqsave_nested(&ioc->lock, flags, + ioc_release_depth(locked_q)); + + while (!hlist_empty(&ioc->cic_list)) { + struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first, + struct cfq_io_context, + cic_list); + struct request_queue *this_q = cic->q; + + if (this_q != last_q) { + if (last_q && last_q != locked_q) + spin_unlock(last_q->queue_lock); + last_q = NULL; + + if (!spin_trylock(this_q->queue_lock)) + break; + last_q = this_q; + continue; + } + ioc_release_depth_inc(this_q); + cic->exit(cic); + cic->release(cic); + ioc_release_depth_dec(this_q); + } -static void cfq_exit(struct io_context *ioc) -{ - rcu_read_lock(); + if (last_q && last_q != locked_q) + spin_unlock(last_q->queue_lock); - if (!hlist_empty(&ioc->cic_list)) { - struct cfq_io_context *cic; + spin_unlock_irqrestore(&ioc->lock, flags); - cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context, - cic_list); - cic->exit(ioc); - } - rcu_read_unlock(); + /* if no cic's left, we're done; otherwise, kick release_work */ + if (hlist_empty(&ioc->cic_list)) + kmem_cache_free(iocontext_cachep, ioc); + else + schedule_work(&ioc->release_work); } +EXPORT_SYMBOL(put_io_context); /* Called by the exiting task */ void exit_io_context(struct task_struct *task) @@ -92,10 +201,8 @@ void exit_io_context(struct task_struct *task) task->io_context = NULL; task_unlock(task); - if (atomic_dec_and_test(&ioc->nr_tasks)) - cfq_exit(ioc); - - put_io_context(ioc); + atomic_dec(&ioc->nr_tasks); + put_io_context(ioc, NULL); } static struct io_context *create_task_io_context(struct task_struct *task, @@ -115,6 +222,7 @@ static struct io_context *create_task_io_context(struct task_struct *task, spin_lock_init(&ioc->lock); INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH); INIT_HLIST_HEAD(&ioc->cic_list); + INIT_WORK(&ioc->release_work, ioc_release_fn); /* try to install, somebody might already have beaten us to it */ task_lock(task); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index e617b088c59b..6cc606560402 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -1778,7 +1778,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfqd->active_queue = NULL; if (cfqd->active_cic) { - put_io_context(cfqd->active_cic->ioc); + put_io_context(cfqd->active_cic->ioc, cfqd->queue); cfqd->active_cic = NULL; } } @@ -2812,38 +2812,6 @@ static void cfq_exit_cic(struct cfq_io_context *cic) } } -static void cfq_exit_single_io_context(struct io_context *ioc, - struct cfq_io_context *cic) -{ - struct cfq_data *cfqd = cic_to_cfqd(cic); - - if (cfqd) { - struct request_queue *q = cfqd->queue; - unsigned long flags; - - spin_lock_irqsave(q->queue_lock, flags); - - /* - * Ensure we get a fresh copy of the ->key to prevent - * race between exiting task and queue - */ - smp_read_barrier_depends(); - if (cic->key == cfqd) - cfq_exit_cic(cic); - - spin_unlock_irqrestore(q->queue_lock, flags); - } -} - -/* - * The process that ioc belongs to has exited, we need to clean up - * and put the internal structures we have that belongs to that process. 
- */ -static void cfq_exit_io_context(struct io_context *ioc) -{ - call_for_each_cic(ioc, cfq_exit_single_io_context); -} - static struct cfq_io_context * cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) { @@ -2855,8 +2823,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) cic->ttime.last_end_request = jiffies; INIT_LIST_HEAD(&cic->queue_list); INIT_HLIST_NODE(&cic->cic_list); - cic->dtor = cfq_free_io_context; - cic->exit = cfq_exit_io_context; + cic->exit = cfq_exit_cic; + cic->release = cfq_release_cic; elv_ioc_count_inc(cfq_ioc_count); } @@ -3726,7 +3694,7 @@ static void cfq_put_request(struct request *rq) BUG_ON(!cfqq->allocated[rw]); cfqq->allocated[rw]--; - put_io_context(RQ_CIC(rq)->ioc); + put_io_context(RQ_CIC(rq)->ioc, cfqq->cfqd->queue); rq->elevator_private[0] = NULL; rq->elevator_private[1] = NULL; @@ -3937,8 +3905,12 @@ static void cfq_exit_queue(struct elevator_queue *e) struct cfq_io_context *cic = list_entry(cfqd->cic_list.next, struct cfq_io_context, queue_list); + struct io_context *ioc = cic->ioc; + spin_lock(&ioc->lock); cfq_exit_cic(cic); + cfq_release_cic(cic); + spin_unlock(&ioc->lock); } cfq_put_async_queues(cfqd); diff --git a/fs/ioprio.c b/fs/ioprio.c index 0f1b9515213b..f84b380d65e5 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c @@ -51,7 +51,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio) ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); if (ioc) { ioc_ioprio_changed(ioc, ioprio); - put_io_context(ioc); + put_io_context(ioc, NULL); } return err; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d1b6f4ed1f96..65c2f8c70089 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -393,6 +393,9 @@ struct request_queue { /* Throttle data */ struct throtl_data *td; #endif +#ifdef CONFIG_LOCKDEP + int ioc_release_depth; +#endif }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 2c2b6da96b3c..01e863128780 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -3,6 +3,7 @@ #include #include +#include struct cfq_queue; struct cfq_ttime { @@ -33,8 +34,8 @@ struct cfq_io_context { unsigned long changed; - void (*dtor)(struct io_context *); /* destructor */ - void (*exit)(struct io_context *); /* called on task exit */ + void (*exit)(struct cfq_io_context *); + void (*release)(struct cfq_io_context *); struct rcu_head rcu_head; }; @@ -61,6 +62,8 @@ struct io_context { struct radix_tree_root radix_root; struct hlist_head cic_list; void __rcu *ioc_data; + + struct work_struct release_work; }; static inline struct io_context *ioc_task_link(struct io_context *ioc) @@ -79,7 +82,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc) struct task_struct; #ifdef CONFIG_BLOCK -void put_io_context(struct io_context *ioc); +void put_io_context(struct io_context *ioc, struct request_queue *locked_q); void exit_io_context(struct task_struct *task); struct io_context *get_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node); @@ -87,7 +90,8 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio); void ioc_cgroup_changed(struct io_context *ioc); #else struct io_context; -static inline void put_io_context(struct io_context *ioc) { } +static inline void put_io_context(struct io_context *ioc, + struct request_queue *locked_q) { } static inline void exit_io_context(struct task_struct *task) { } #endif diff --git a/kernel/fork.c b/kernel/fork.c index 5bcfc739bb7c..2753449f2038 
100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -887,7 +887,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk) return -ENOMEM; new_ioc->ioprio = ioc->ioprio; - put_io_context(new_ioc); + put_io_context(new_ioc, NULL); } #endif return 0; -- cgit v1.2.3 From a612fddf0d8090f2877305c9168b6c1a34fb5d90 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:41 +0100 Subject: block, cfq: move cfqd->icq_list to request_queue and add request->elv.icq Most of icq management is about to be moved out of cfq into blk-ioc. This patch prepares for it. * Move cfqd->icq_list to request_queue->icq_list * Make request explicitly point to icq instead of through elevator private data. ->elevator_private[3] is replaced with sub struct elv which contains icq pointer and priv[2]. cfq is updated accordingly. * Meaningless clearing of ->elevator_private[0] removed from elv_set_request(). At that point in code, the field was guaranteed to be %NULL anyway. This patch doesn't introduce any functional change. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-core.c | 1 + block/cfq-iosched.c | 28 +++++++++++----------------- block/elevator.c | 2 -- include/linux/blkdev.h | 10 ++++++++-- 4 files changed, 20 insertions(+), 21 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-core.c b/block/blk-core.c index 6804fdf27eff..3c26c7f48703 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -497,6 +497,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) laptop_mode_timer_fn, (unsigned long) q); setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); INIT_LIST_HEAD(&q->timeout_list); + INIT_LIST_HEAD(&q->icq_list); INIT_LIST_HEAD(&q->flush_queue[0]); INIT_LIST_HEAD(&q->flush_queue[1]); INIT_LIST_HEAD(&q->flush_data_in_flight); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index d2f16fcdec7f..9bc5ecc1b336 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -54,9 +54,9 @@ static const int cfq_hist_divisor = 4; #define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32) #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) -#define RQ_CIC(rq) icq_to_cic((rq)->elevator_private[0]) -#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private[1]) -#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elevator_private[2]) +#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq) +#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0]) +#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1]) static struct kmem_cache *cfq_pool; static struct kmem_cache *cfq_icq_pool; @@ -297,8 +297,6 @@ struct cfq_data { unsigned int cfq_group_idle; unsigned int cfq_latency; - struct list_head icq_list; - /* * Fallback dummy cfqq for extreme OOM conditions */ @@ -3053,7 +3051,7 @@ static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask) ret = radix_tree_insert(&ioc->icq_tree, q->id, icq); if (likely(!ret)) { hlist_add_head(&icq->ioc_node, &ioc->icq_list); - list_add(&icq->q_node, &cfqd->icq_list); + list_add(&icq->q_node, &q->icq_list); icq = NULL; } else if (ret == -EEXIST) { /* someone else already did it */ @@ -3605,12 +3603,10 @@ static void cfq_put_request(struct request *rq) put_io_context(RQ_CIC(rq)->icq.ioc, cfqq->cfqd->queue); - rq->elevator_private[0] = NULL; - rq->elevator_private[1] = NULL; - /* Put down rq reference on cfqg */ cfq_put_cfqg(RQ_CFQG(rq)); - rq->elevator_private[2] = NULL; + rq->elv.priv[0] = NULL; + rq->elv.priv[1] = NULL; cfq_put_queue(cfqq); } @@ -3696,9 +3692,9 @@ new_queue: 
cfqq->allocated[rw]++; cfqq->ref++; - rq->elevator_private[0] = &cic->icq; - rq->elevator_private[1] = cfqq; - rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg); + rq->elv.icq = &cic->icq; + rq->elv.priv[0] = cfqq; + rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg); spin_unlock_irq(q->queue_lock); return 0; @@ -3810,8 +3806,8 @@ static void cfq_exit_queue(struct elevator_queue *e) if (cfqd->active_queue) __cfq_slice_expired(cfqd, cfqd->active_queue, 0); - while (!list_empty(&cfqd->icq_list)) { - struct io_cq *icq = list_entry(cfqd->icq_list.next, + while (!list_empty(&q->icq_list)) { + struct io_cq *icq = list_entry(q->icq_list.next, struct io_cq, q_node); struct io_context *ioc = icq->ioc; @@ -3922,8 +3918,6 @@ static void *cfq_init_queue(struct request_queue *q) cfqd->oom_cfqq.ref++; cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group); - INIT_LIST_HEAD(&cfqd->icq_list); - cfqd->queue = q; init_timer(&cfqd->idle_slice_timer); diff --git a/block/elevator.c b/block/elevator.c index 31ffe76aed3d..c5c6214829cb 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -745,8 +745,6 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) if (e->type->ops.elevator_set_req_fn) return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask); - - rq->elevator_private[0] = NULL; return 0; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 65c2f8c70089..8bca04873f53 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -111,10 +111,14 @@ struct request { * Three pointers are available for the IO schedulers, if they need * more they have to dynamically allocate it. Flush requests are * never put on the IO scheduler. So let the flush fields share - * space with the three elevator_private pointers. + * space with the elevator data. */ union { - void *elevator_private[3]; + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + struct { unsigned int seq; struct list_head list; @@ -357,6 +361,8 @@ struct request_queue { struct timer_list timeout; struct list_head timeout_list; + struct list_head icq_list; + struct queue_limits limits; /* -- cgit v1.2.3 From b1bd055d397e09f99dcef9b138ed104ff1812fcb Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Wed, 11 Jan 2012 16:27:11 +0100 Subject: block: Introduce blk_set_stacking_limits function Stacking driver queue limits are typically bounded exclusively by the capabilities of the low level devices, not by the stacking driver itself. This patch introduces blk_set_stacking_limits() which has more liberal metrics than the default queue limits function. This allows us to inherit topology parameters from bottom devices without manually tweaking the default limits in each driver prior to calling the stacking function. Since there is now a clear distinction between stacking and low-level devices, blk_set_default_limits() has been modified to carry the more conservative values that we used to manually set in blk_queue_make_request(). Signed-off-by: Martin K. 
Petersen Acked-by: Mike Snitzer Signed-off-by: Jens Axboe --- block/blk-settings.c | 32 ++++++++++++++++++++++++-------- drivers/md/dm-table.c | 6 +++--- drivers/md/md.c | 1 + include/linux/blkdev.h | 1 + 4 files changed, 29 insertions(+), 11 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-settings.c b/block/blk-settings.c index fa1eb0449a05..d3234fc494ad 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -104,9 +104,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy); * @lim: the queue_limits structure to reset * * Description: - * Returns a queue_limit struct to its default state. Can be used by - * stacking drivers like DM that stage table swaps and reuse an - * existing device queue. + * Returns a queue_limit struct to its default state. */ void blk_set_default_limits(struct queue_limits *lim) { @@ -114,13 +112,12 @@ void blk_set_default_limits(struct queue_limits *lim) lim->max_integrity_segments = 0; lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; - lim->max_sectors = BLK_DEF_MAX_SECTORS; - lim->max_hw_sectors = INT_MAX; + lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; lim->max_discard_sectors = 0; lim->discard_granularity = 0; lim->discard_alignment = 0; lim->discard_misaligned = 0; - lim->discard_zeroes_data = 1; + lim->discard_zeroes_data = 0; lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); lim->alignment_offset = 0; @@ -130,6 +127,27 @@ void blk_set_default_limits(struct queue_limits *lim) } EXPORT_SYMBOL(blk_set_default_limits); +/** + * blk_set_stacking_limits - set default limits for stacking devices + * @lim: the queue_limits structure to reset + * + * Description: + * Returns a queue_limit struct to its default state. Should be used + * by stacking drivers like DM that have no internal limits. 
+ */ +void blk_set_stacking_limits(struct queue_limits *lim) +{ + blk_set_default_limits(lim); + + /* Inherit limits from component devices */ + lim->discard_zeroes_data = 1; + lim->max_segments = USHRT_MAX; + lim->max_hw_sectors = UINT_MAX; + + lim->max_sectors = BLK_DEF_MAX_SECTORS; +} +EXPORT_SYMBOL(blk_set_stacking_limits); + /** * blk_queue_make_request - define an alternate make_request function for a device * @q: the request queue for the device to be affected @@ -165,8 +183,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) q->nr_batching = BLK_BATCH_REQ; blk_set_default_limits(&q->limits); - blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); - q->limits.discard_zeroes_data = 0; /* * by default assume old behaviour and bounce for any highmem page diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 8e9132130142..63cc54289aff 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -699,7 +699,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table, while (i < dm_table_get_num_targets(table)) { ti = dm_table_get_target(table, i++); - blk_set_default_limits(&ti_limits); + blk_set_stacking_limits(&ti_limits); /* combine all target devices' limits */ if (ti->type->iterate_devices) @@ -1221,10 +1221,10 @@ int dm_calculate_queue_limits(struct dm_table *table, struct queue_limits ti_limits; unsigned i = 0; - blk_set_default_limits(limits); + blk_set_stacking_limits(limits); while (i < dm_table_get_num_targets(table)) { - blk_set_default_limits(&ti_limits); + blk_set_stacking_limits(&ti_limits); ti = dm_table_get_target(table, i++); diff --git a/drivers/md/md.c b/drivers/md/md.c index ee981737edfc..114ba155af87 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4622,6 +4622,7 @@ static int md_alloc(dev_t dev, char *name) mddev->queue->queuedata = mddev; blk_queue_make_request(mddev->queue, md_make_request); + blk_set_stacking_limits(&mddev->queue->limits); disk = alloc_disk(1 << shift); if (!disk) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8bca04873f53..adc34133a56a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -844,6 +844,7 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min); extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); extern void blk_set_default_limits(struct queue_limits *lim); +extern void blk_set_stacking_limits(struct queue_limits *lim); extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t offset); extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, -- cgit v1.2.3 From fd83240a60ecc59849420df3393e9e6d35c77683 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 12 Jan 2012 09:17:30 +0100 Subject: blockdev: convert some macros to static inlines We prefer to program in C rather than preprocessor and it fixes this warning when CONFIG_BLK_DEV_INTEGRITY is not set: drivers/md/dm-table.c: In function 'dm_table_set_integrity': drivers/md/dm-table.c:1285:3: warning: statement with no effect [-Wunused-value] Signed-off-by: Stephen Rothwell Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 77 +++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 64 insertions(+), 13 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index adc34133a56a..5cfb9b22627f 100644 --- a/include/linux/blkdev.h +++ 
b/include/linux/blkdev.h @@ -1298,19 +1298,70 @@ queue_max_integrity_segments(struct request_queue *q) #else /* CONFIG_BLK_DEV_INTEGRITY */ -#define blk_integrity_rq(rq) (0) -#define blk_rq_count_integrity_sg(a, b) (0) -#define blk_rq_map_integrity_sg(a, b, c) (0) -#define bdev_get_integrity(a) (0) -#define blk_get_integrity(a) (0) -#define blk_integrity_compare(a, b) (0) -#define blk_integrity_register(a, b) (0) -#define blk_integrity_unregister(a) do { } while (0) -#define blk_queue_max_integrity_segments(a, b) do { } while (0) -#define queue_max_integrity_segments(a) (0) -#define blk_integrity_merge_rq(a, b, c) (0) -#define blk_integrity_merge_bio(a, b, c) (0) -#define blk_integrity_is_initialized(a) (0) +struct bio; +struct block_device; +struct gendisk; +struct blk_integrity; + +static inline int blk_integrity_rq(struct request *rq) +{ + return 0; +} +static inline int blk_rq_count_integrity_sg(struct request_queue *q, + struct bio *b) +{ + return 0; +} +static inline int blk_rq_map_integrity_sg(struct request_queue *q, + struct bio *b, + struct scatterlist *s) +{ + return 0; +} +static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) +{ + return 0; +} +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + return NULL; +} +static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) +{ + return 0; +} +static inline int blk_integrity_register(struct gendisk *d, + struct blk_integrity *b) +{ + return 0; +} +static inline void blk_integrity_unregister(struct gendisk *d) +{ +} +static inline void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ +} +static inline unsigned short queue_max_integrity_segments(struct request_queue *q) +{ + return 0; +} +static inline int blk_integrity_merge_rq(struct request_queue *rq, + struct request *r1, + struct request *r2) +{ + return 0; +} +static inline int blk_integrity_merge_bio(struct request_queue *rq, + struct request *r, + struct bio *b) +{ + return 0; +} +static inline bool blk_integrity_is_initialized(struct gendisk *g) +{ + return 0; +} #endif /* CONFIG_BLK_DEV_INTEGRITY */ -- cgit v1.2.3
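As a footnote to the last patch, the warning it silences is easy to reproduce in isolation. A minimal sketch (invented names, independent of blkdev.h) showing why a macro stub trips -Wunused-value while a static inline stub does not:

struct gendisk;

/* stub as a macro: the call below expands to the bare statement "(0);",
 * which gcc reports as "statement with no effect" (-Wunused-value), and
 * the argument is never type-checked */
#define disk_has_feature_macro(d)	(0)

/* stub as a static inline: an ordinary call expression, so no warning,
 * and the argument must really be a struct gendisk pointer */
static inline int disk_has_feature_inline(struct gendisk *d)
{
	return 0;
}

void configure(struct gendisk *disk)
{
	disk_has_feature_macro(disk);	/* warning: statement with no effect */
	disk_has_feature_inline(disk);	/* compiles quietly */
}

This is the situation dm_table_set_integrity() hit in dm-table.c: with CONFIG_BLK_DEV_INTEGRITY unset, the blk_integrity_* stubs it calls expanded to bare constants whose values were discarded.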