author | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-13 09:15:34 -0800
committer | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-13 09:15:34 -0800
commit | 5faad620264290b17e80a8b0996b039ea0d5ac73 (patch)
tree | 3adf5ccbd2794f9fd3b81f75031e4621a67137b1
parent | bbc7610c062074468f362b37d33603536e87fb96 (diff)
parent | 2fc2c60df3d2b3a557eb8d750779def9d51934b1 (diff)
download | lwn-5faad620264290b17e80a8b0996b039ea0d5ac73.tar.gz, lwn-5faad620264290b17e80a8b0996b039ea0d5ac73.zip
Merge branch 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block
* 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block:
[PATCH] Fixup cciss error handling
[PATCH] Allow as-iosched to be unloaded
[PATCH 2/2] cciss: remove calls to pci_disable_device
[PATCH 1/2] cciss: map out more memory for config table
[PATCH] Propagate down request sync flag
Resolve trivial whitespace conflict in drivers/block/cciss.c manually.
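The sync-flag patch in this merge has a structural idea worth spelling out: the synchronous hint rides in the same `rw_flags` word that already carries the data direction. Bit 0 encodes READ vs. WRITE (the diff below recovers it with `rw_flags & 0x01`), and `REQ_RW_SYNC` is OR-ed in when the bio is synchronous, so the request allocator and the I/O schedulers can see the hint without growing new function parameters. Here is a minimal user-space sketch of that bit-packing convention, using made-up `SKETCH_*` flag values rather than the kernel's real `REQ_*` definitions:

```c
#include <stdio.h>

/* Illustrative values only; the kernel's actual REQ_* bit layout differs. */
#define SKETCH_WRITE	(1 << 0)	/* bit 0: data direction */
#define SKETCH_RW_SYNC	(1 << 1)	/* synchronous-request hint */

/* Mirrors the diff's "const int rw = rw_flags & 0x01;" extraction. */
static int rw_dir(int rw_flags)
{
	return rw_flags & 0x01;
}

int main(void)
{
	int sync = 1;			/* what bio_sync(bio) would report */
	int rw_flags = SKETCH_WRITE;	/* what bio_data_dir(bio) would report */

	if (sync)
		rw_flags |= SKETCH_RW_SYNC;	/* propagate the hint downward */

	/* Callers that only care about direction still read bit 0. */
	printf("dir=%s sync=%d\n",
	       rw_dir(rw_flags) ? "WRITE" : "READ",
	       !!(rw_flags & SKETCH_RW_SYNC));
	return 0;
}
```

Because the direction stays in bit 0, existing users such as the `rl->wait[rw]` wait-queue indexing in `get_request_wait()` keep working unchanged.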
-rw-r--r-- | block/as-iosched.c    | 15
-rw-r--r-- | block/cfq-iosched.c   | 18
-rw-r--r-- | block/ll_rw_blk.c     | 28
-rw-r--r-- | drivers/block/cciss.c |  2

4 files changed, 34 insertions, 29 deletions
```diff
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 5934c4bfd52a..ef126277b4b3 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1462,20 +1462,7 @@ static struct elevator_type iosched_as = {
 
 static int __init as_init(void)
 {
-	int ret;
-
-	ret = elv_register(&iosched_as);
-	if (!ret) {
-		/*
-		 * don't allow AS to get unregistered, since we would have
-		 * to browse all tasks in the system and release their
-		 * as_io_context first
-		 */
-		__module_get(THIS_MODULE);
-		return 0;
-	}
-
-	return ret;
+	return elv_register(&iosched_as);
 }
 
 static void __exit as_exit(void)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 78c6b312bd30..533a2938ffd6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -219,9 +219,12 @@ static int cfq_queue_empty(request_queue_t *q)
 	return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
 {
-	if (rw == READ || rw == WRITE_SYNC)
+	/*
+	 * Use the per-process queue, for read requests and syncronous writes
+	 */
+	if (!(rw & REQ_RW) || is_sync)
 		return task->pid;
 
 	return CFQ_KEY_ASYNC;
@@ -473,7 +476,7 @@ static struct request *
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
-	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
 	struct cfq_queue *cfqq;
 
 	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
@@ -1748,6 +1751,9 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
 	struct cfq_queue *cfqq;
+	unsigned int key;
+
+	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
 	/*
 	 * don't force setup of a queue from here, as a call to may_queue
@@ -1755,7 +1761,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
 	if (cfqq) {
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
@@ -1798,10 +1804,10 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	struct task_struct *tsk = current;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	pid_t key = cfq_queue_pid(tsk, rw);
+	const int is_sync = rq_is_sync(rq);
+	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
-	int is_sync = key != CFQ_KEY_ASYNC;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a541b42c08e3..79807dbc306e 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2058,15 +2058,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
  * Returns NULL on failure, with queue_lock held.
  * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-				   gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
+	const int rw = rw_flags & 0x01;
 	int may_queue, priv;
 
-	may_queue = elv_may_queue(q, rw);
+	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -2162,12 +2163,13 @@ out:
  *
 * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
 					struct bio *bio)
 {
+	const int rw = rw_flags & 0x01;
 	struct request *rq;
 
-	rq = get_request(q, rw, bio, GFP_NOIO);
+	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
 		struct request_list *rl = &q->rq;
@@ -2175,7 +2177,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw, bio, GFP_NOIO);
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 
 		if (!rq) {
 			struct io_context *ioc;
@@ -2910,6 +2912,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	int el_ret, nr_sectors, barrier, err;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
+	int rw_flags;
 
 	nr_sectors = bio_sectors(bio);
 
@@ -2984,10 +2987,19 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 
 get_rq:
 	/*
+	 * This sync check and mask will be re-done in init_request_from_bio(),
+	 * but we need to set it earlier to expose the sync flag to the
+	 * rq allocator and io schedulers.
+	 */
+	rw_flags = bio_data_dir(bio);
+	if (sync)
+		rw_flags |= REQ_RW_SYNC;
+
+	/*
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, bio_data_dir(bio), bio);
+	req = get_request_wait(q, rw_flags, bio);
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index acb2fa9cf6b1..d719a5d8f435 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3004,7 +3004,7 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
 	}
 	return 0;
 
-  err_out_free_res:
+err_out_free_res:
 	/*
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Smart Array controllers that pci_enable_device does not undo
```
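On the CFQ side, the reworked `cfq_queue_pid()` above is the whole policy in one test: reads and synchronous writes are charged to the submitting task's per-process queue, while all asynchronous writes collapse onto one shared key, since async writeback is typically issued on behalf of many processes at once. A standalone sketch of that decision, with hypothetical `SKETCH_*` stand-ins for `REQ_RW` and `CFQ_KEY_ASYNC`:

```c
#include <stdio.h>
#include <sys/types.h>

#define SKETCH_KEY_ASYNC ((pid_t)0)	/* stand-in for CFQ_KEY_ASYNC */
#define SKETCH_REQ_RW	 (1 << 0)	/* stand-in for REQ_RW: set => write */

/*
 * Same shape as the reworked cfq_queue_pid(): reads and synchronous
 * writes get the submitter's per-process key, async writes share one.
 */
static pid_t queue_key(pid_t pid, int rw, int is_sync)
{
	if (!(rw & SKETCH_REQ_RW) || is_sync)
		return pid;
	return SKETCH_KEY_ASYNC;
}

int main(void)
{
	pid_t pid = 4242;	/* hypothetical submitting task */

	printf("read        -> key %d\n", (int)queue_key(pid, 0, 0));
	printf("sync write  -> key %d\n", (int)queue_key(pid, SKETCH_REQ_RW, 1));
	printf("async write -> key %d\n", (int)queue_key(pid, SKETCH_REQ_RW, 0));
	return 0;
}
```

Note how `cfq_set_request()` in the diff now derives `is_sync` from `rq_is_sync(rq)` instead of inferring it backwards from the key, which is exactly what the propagated request sync flag makes possible.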