author     James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-11-22 12:06:44 -0600
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-11-22 12:06:44 -0600
commit     0bd2af46839ad6262d25714a6ec0365db9d6b98f
tree       dcced72d230d69fd0c5816ac6dd03ab84799a93e /block
parent     e138a5d2356729b8752e88520cc1525fae9794ac
parent     f26b90440cd74c78fe10c9bd5160809704a9627c
Merge ../scsi-rc-fixes-2.6
Diffstat (limited to 'block')
-rw-r--r--  block/cfq-iosched.c |  16
-rw-r--r--  block/elevator.c    |  17
-rw-r--r--  block/ll_rw_blk.c   | 108
-rw-r--r--  block/scsi_ioctl.c  |   5
4 files changed, 45 insertions(+), 101 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d3d76136f53a..1d9c3c70a9a0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -456,6 +456,9 @@ static void cfq_add_rq_rb(struct request *rq)
 	 */
 	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
 		cfq_dispatch_insert(cfqd->queue, __alias);
+
+	if (!cfq_cfqq_on_rr(cfqq))
+		cfq_add_cfqq_rr(cfqd, cfqq);
 }
 
 static inline void
@@ -1215,11 +1218,12 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 {
 	struct cfq_data *cfqd = cic->key;
 	struct cfq_queue *cfqq;
+	unsigned long flags;
 
 	if (unlikely(!cfqd))
 		return;
 
-	spin_lock(cfqd->queue->queue_lock);
+	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
 	cfqq = cic->cfqq[ASYNC];
 	if (cfqq) {
@@ -1236,7 +1240,7 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 	if (cfqq)
 		cfq_mark_cfqq_prio_changed(cfqq);
 
-	spin_unlock(cfqd->queue->queue_lock);
+	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
 
 static void cfq_ioc_set_ioprio(struct io_context *ioc)
@@ -1362,6 +1366,7 @@ cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 	struct rb_node **p;
 	struct rb_node *parent;
 	struct cfq_io_context *__cic;
+	unsigned long flags;
 	void *k;
 
 	cic->ioc = ioc;
@@ -1391,9 +1396,9 @@ restart:
 	rb_link_node(&cic->rb_node, parent, p);
 	rb_insert_color(&cic->rb_node, &ioc->cic_root);
 
-	spin_lock_irq(cfqd->queue->queue_lock);
+	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 	list_add(&cic->queue_list, &cfqd->cic_list);
-	spin_unlock_irq(cfqd->queue->queue_lock);
+	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
 
 /*
@@ -1650,9 +1655,6 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq)
 
 	cfq_add_rq_rb(rq);
 
-	if (!cfq_cfqq_on_rr(cfqq))
-		cfq_add_cfqq_rr(cfqd, cfqq);
-
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 
 	cfq_rq_enqueued(cfqd, cfqq, rq);
diff --git a/block/elevator.c b/block/elevator.c
index 487dd3da8853..8ccd163254b8 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -93,21 +93,18 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
 
 static struct elevator_type *elevator_find(const char *name)
 {
-	struct elevator_type *e = NULL;
+	struct elevator_type *e;
 	struct list_head *entry;
 
 	list_for_each(entry, &elv_list) {
-		struct elevator_type *__e;
 
-		__e = list_entry(entry, struct elevator_type, list);
+		e = list_entry(entry, struct elevator_type, list);
 
-		if (!strcmp(__e->elevator_name, name)) {
-			e = __e;
-			break;
-		}
+		if (!strcmp(e->elevator_name, name))
+			return e;
 	}
 
-	return e;
+	return NULL;
 }
 
 static void elevator_put(struct elevator_type *e)
@@ -1088,7 +1085,7 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 	struct list_head *entry;
 	int len = 0;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&elv_list_lock);
 	list_for_each(entry, &elv_list) {
 		struct elevator_type *__e;
 
@@ -1098,7 +1095,7 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 		else
 			len += sprintf(name+len, "%s ", __e->elevator_name);
 	}
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&elv_list_lock);
 
 	len += sprintf(len+name, "\n");
 	return len;
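The cfq-iosched.c hunks above do two things: they move the on-rr check from cfq_insert_request() into cfq_add_rq_rb(), and they convert plain spin_lock()/spin_lock_irq() on the queue lock to the irqsave variants, presumably because changed_ioprio() and cfq_cic_link() can be reached with interrupts in a state the function cannot assume. A minimal kernel-style sketch of the irqsave pattern follows; my_lock, my_count and my_update are hypothetical names, not from this patch.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static int my_count;

/* May be called with IRQs enabled or disabled. */
static void my_update(int delta)
{
	unsigned long flags;

	/*
	 * spin_unlock_irq() would unconditionally re-enable interrupts,
	 * breaking a caller that needs them to stay off; the
	 * irqsave/irqrestore pair preserves the caller's IRQ state.
	 */
	spin_lock_irqsave(&my_lock, flags);
	my_count += delta;
	spin_unlock_irqrestore(&my_lock, flags);
}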
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c847e17e5caa..9eaee6640535 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -56,11 +56,6 @@ static kmem_cache_t *requestq_cachep;
  */
 static kmem_cache_t *iocontext_cachep;
 
-static wait_queue_head_t congestion_wqh[2] = {
-		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
-		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
-	};
-
 /*
  * Controlling structure to kblockd
  */
@@ -112,35 +107,6 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
 	q->nr_congestion_off = nr;
 }
 
-/*
- * A queue has just exitted congestion.  Note this in the global counter of
- * congested queues, and wake up anyone who was waiting for requests to be
- * put back.
- */
-static void clear_queue_congested(request_queue_t *q, int rw)
-{
-	enum bdi_state bit;
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-	clear_bit(bit, &q->backing_dev_info.state);
-	smp_mb__after_clear_bit();
-	if (waitqueue_active(wqh))
-		wake_up(wqh);
-}
-
-/*
- * A queue has just entered congestion.  Flag that in the queue's VM-visible
- * state flags and increment the global gounter of congested queues.
- */
-static void set_queue_congested(request_queue_t *q, int rw)
-{
-	enum bdi_state bit;
-
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-	set_bit(bit, &q->backing_dev_info.state);
-}
-
 /**
  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
  * @bdev:	device
@@ -159,7 +125,6 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 		ret = &q->backing_dev_info;
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
 void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
@@ -167,7 +132,6 @@ void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
 	q->activity_fn = fn;
 	q->activity_data = data;
 }
-
 EXPORT_SYMBOL(blk_queue_activity_fn);
 
 /**
@@ -2067,7 +2031,7 @@ static void __freed_request(request_queue_t *q, int rw)
 	struct request_list *rl = &q->rq;
 
 	if (rl->count[rw] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, rw);
+		blk_clear_queue_congested(q, rw);
 
 	if (rl->count[rw] + 1 <= q->nr_requests) {
 		if (waitqueue_active(&rl->wait[rw]))
@@ -2137,7 +2101,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 			}
 		}
 	}
-	set_queue_congested(q, rw);
+	blk_set_queue_congested(q, rw);
 }
 
 /*
@@ -2755,41 +2719,6 @@ void blk_end_sync_rq(struct request *rq, int error)
 }
 EXPORT_SYMBOL(blk_end_sync_rq);
 
-/**
- * blk_congestion_wait - wait for a queue to become uncongested
- * @rw: READ or WRITE
- * @timeout: timeout in jiffies
- *
- * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
- * If no queues are congested then just wait for the next request to be
- * returned.
- */
-long blk_congestion_wait(int rw, long timeout)
-{
-	long ret;
-	DEFINE_WAIT(wait);
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
-	ret = io_schedule_timeout(timeout);
-	finish_wait(wqh, &wait);
-	return ret;
-}
-
-EXPORT_SYMBOL(blk_congestion_wait);
-
-/**
- * blk_congestion_end - wake up sleepers on a congestion queue
- * @rw: READ or WRITE
- */
-void blk_congestion_end(int rw)
-{
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	if (waitqueue_active(wqh))
-		wake_up(wqh);
-}
-
 /*
  * Has to be called with the request spinlock acquired
  */
@@ -3070,6 +2999,7 @@ void generic_make_request(struct bio *bio)
 {
 	request_queue_t *q;
 	sector_t maxsector;
+	sector_t old_sector;
 	int ret, nr_sectors = bio_sectors(bio);
 	dev_t old_dev;
 
@@ -3098,7 +3028,7 @@ void generic_make_request(struct bio *bio)
 	 * NOTE: we don't repeat the blk_size check for each new device.
 	 * Stacking drivers are expected to know what they are doing.
 	 */
-	maxsector = -1;
+	old_sector = -1;
 	old_dev = 0;
 	do {
 		char b[BDEVNAME_SIZE];
@@ -3132,15 +3062,31 @@ end_io:
 		 */
 		blk_partition_remap(bio);
 
-		if (maxsector != -1)
+		if (old_sector != -1)
 			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
-					    maxsector);
+					    old_sector);
 
 		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
 
-		maxsector = bio->bi_sector;
+		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;
 
+		maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+		if (maxsector) {
+			sector_t sector = bio->bi_sector;
+
+			if (maxsector < nr_sectors ||
+			    maxsector - nr_sectors < sector) {
+				/*
+				 * This may well happen - partitions are not
+				 * checked to make sure they are within the size
+				 * of the whole device.
+				 */
+				handle_bad_sector(bio);
+				goto end_io;
+			}
+		}
+
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
 }
@@ -3765,14 +3711,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	blk_queue_congestion_threshold(q);
 
 	if (rl->count[READ] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, READ);
+		blk_set_queue_congested(q, READ);
 	else if (rl->count[READ] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, READ);
+		blk_clear_queue_congested(q, READ);
 
 	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, WRITE);
+		blk_set_queue_congested(q, WRITE);
 	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, WRITE);
+		blk_clear_queue_congested(q, WRITE);
 
 	if (rl->count[READ] >= q->nr_requests) {
 		blk_set_queue_full(q, READ);
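Two of the ll_rw_blk.c changes are worth spelling out. The local congestion helpers (set_queue_congested(), clear_queue_congested(), blk_congestion_wait(), blk_congestion_end()) are removed, with callers switched to blk_set_queue_congested()/blk_clear_queue_congested(). And generic_make_request() renames maxsector to old_sector for the blktrace remap bookkeeping, then reuses maxsector to re-check the bio against the device size after partition remapping. Note that the new bounds test is written with subtraction so the arithmetic cannot wrap: maxsector < nr_sectors || maxsector - nr_sectors < sector, rather than sector + nr_sectors > maxsector. A hedged, standalone C sketch of that idiom (range_ok and its parameters are hypothetical names, not from the patch):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/*
 * Returns 1 if [sector, sector + nr_sectors) fits within a device of
 * maxsector sectors, without risking wrap-around in the addition.
 */
static int range_ok(sector_t sector, sector_t nr_sectors, sector_t maxsector)
{
	/* "sector + nr_sectors > maxsector" could overflow; subtract instead. */
	if (maxsector < nr_sectors || maxsector - nr_sectors < sector)
		return 0;
	return 1;
}

int main(void)
{
	/* A 1 GiB device holds 2097152 512-byte sectors. */
	printf("%d\n", range_ok(0, 8, 2097152));              /* 1: in range */
	printf("%d\n", range_ok(2097150, 8, 2097152));        /* 0: runs past the end */
	printf("%d\n", range_ok(UINT64_MAX - 4, 8, 2097152)); /* 0: addition would wrap */
	return 0;
}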
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index ac63964b7242..dcd9c71fe8d3 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -246,10 +246,10 @@ static int sg_io(struct file *file, request_queue_t *q,
 	switch (hdr->dxfer_direction) {
 	default:
 		return -EINVAL;
-	case SG_DXFER_TO_FROM_DEV:
 	case SG_DXFER_TO_DEV:
 		writing = 1;
 		break;
+	case SG_DXFER_TO_FROM_DEV:
 	case SG_DXFER_FROM_DEV:
 		break;
 	}
@@ -286,9 +286,8 @@ static int sg_io(struct file *file, request_queue_t *q,
 	 * fill in request structure
 	 */
 	rq->cmd_len = hdr->cmd_len;
+	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
 	memcpy(rq->cmd, cmd, hdr->cmd_len);
-	if (sizeof(rq->cmd) != hdr->cmd_len)
-		memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);
 
 	memset(sense, 0, sizeof(sense));
 	rq->sense = sense;
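The scsi_ioctl.c change moves SG_DXFER_TO_FROM_DEV out of the write-direction cases and swaps the CDB handling from copy-then-zero-the-tail to zero-then-copy. The effect on the buffer is the same, but the order is harder to get wrong, and as the new comment says, ATAPI devices misbehave when stale bytes follow the CDB. A standalone sketch of the zero-then-copy idiom (fill_cdb, user_cdb and user_len are hypothetical stand-ins for the sg_io() fields; I am assuming BLK_MAX_CDB is 16, its value in kernels of this era):

#include <string.h>

#define BLK_MAX_CDB 16

static void fill_cdb(unsigned char cmd[BLK_MAX_CDB],
		     const unsigned char *user_cdb, size_t user_len)
{
	/* Zero the whole buffer first so bytes past user_len are never stale. */
	memset(cmd, 0, BLK_MAX_CDB);
	memcpy(cmd, user_cdb, user_len);
}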