author     Mike Christie <michaelc@cs.wisc.edu>    2005-12-05 02:37:06 -0600
committer  James Bottomley <jejb@mulgrave.(none)>  2005-12-15 15:11:40 -0800
commit     defd94b75409b983f94548ea2f52ff5787ddb848
tree       0138b2dae748de88edaee4da23431f1a9dd347a1 /block
parent     8b05b773b6030de5b1bab1cbb0bf1ff8c34cdbe0
[SCSI] separate max_sectors from max_hw_sectors
- export __blk_put_request and blk_execute_rq_nowait, which are needed
  for async REQ_BLOCK_PC requests (see the sketch after this list)
- separate max_hw_sectors and max_sectors for block/scsi_ioctl.c and the
  SG_IO bio.c helpers, per Jens's last comments. Since block/scsi_ioctl.c
  SG_IO was already testing against max_sectors, and SCSI-ml was setting
  max_sectors and max_hw_sectors to the same value, this does not change
  any SCSI SG_IO behavior. It only prepares ll_rw_blk.c, scsi_ioctl.c and
  bio.c for when SCSI-ml begins to set a valid max_hw_sectors for all
  LLDs. Today, if an LLD does not set it, SCSI-ml sets it to a safe
  default, and some LLDs set it to an artificially low value to work
  around memory and feedback issues.
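As a rough sketch of the async usage these exports enable: the helper
names below are hypothetical, the pattern is modeled on what the SCSI-ml
async execute path does, and the callback signature matches rq_end_io_fn
as it stands in this era's tree (it takes only the request pointer):

#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/string.h>

/*
 * end_io callbacks run with the queue lock already held, which is why
 * the lock-free __blk_put_request() had to be exported rather than
 * blk_put_request(), which takes the lock itself.
 */
static void example_pc_done(struct request *rq)
{
	__blk_put_request(rq->q, rq);
}

/* issue a SCSI CDB as an async REQ_BLOCK_PC request (hypothetical helper) */
static int example_send_pc(request_queue_t *q, unsigned char *cdb,
			   unsigned int cdb_len, void *buf, unsigned int len)
{
	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);

	if (!rq)
		return -ENOMEM;

	rq->flags |= REQ_BLOCK_PC;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;

	/*
	 * REQ_BLOCK_PC payloads are now limited by max_hw_sectors rather
	 * than the (possibly smaller) max_sectors cap on fs requests.
	 */
	if (len && blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL)) {
		blk_put_request(rq);
		return -EINVAL;
	}

	blk_execute_rq_nowait(q, NULL, rq, 0, example_pc_done);
	return 0;
}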
Note: since we now cap max_sectors to BLK_DEF_MAX_SECTORS (1024),
drivers that used to call blk_queue_max_sectors with a larger value will
now see filesystem requests capped to BLK_DEF_MAX_SECTORS.
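For illustration, under the new blk_queue_max_sectors() logic (shown in
the diff below) the two limits diverge once a driver passes a value above
BLK_DEF_MAX_SECTORS; the surrounding function is hypothetical:

#include <linux/blkdev.h>

/* hypothetical driver init fragment, illustrating the new split */
static void example_set_limits(request_queue_t *q)
{
	blk_queue_max_sectors(q, 2048);
	/*
	 * After this patch:
	 *   q->max_sectors    == 1024 (BLK_DEF_MAX_SECTORS, caps fs requests)
	 *   q->max_hw_sectors == 2048 (full hw limit, used for REQ_BLOCK_PC)
	 * Before this patch, both fields would have been set to 2048.
	 */
}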
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'block')

 block/ll_rw_blk.c  | 34 ++++++++++++++++++++++++++--------
 block/scsi_ioctl.c |  2 +-
 2 files changed, 27 insertions(+), 9 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c525b5a2b598..d4beb9a89ee0 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -239,7 +239,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-	blk_queue_max_sectors(q, MAX_SECTORS);
+	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 	blk_queue_hardsect_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
 	blk_queue_congestion_threshold(q);
@@ -555,7 +555,12 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
 		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
 	}
 
-	q->max_sectors = q->max_hw_sectors = max_sectors;
+	if (BLK_DEF_MAX_SECTORS > max_sectors)
+		q->max_hw_sectors = q->max_sectors = max_sectors;
+	else {
+		q->max_sectors = BLK_DEF_MAX_SECTORS;
+		q->max_hw_sectors = max_sectors;
+	}
 }
 
 EXPORT_SYMBOL(blk_queue_max_sectors);
@@ -657,8 +662,8 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = t->max_hw_sectors =
-		min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
 
 	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
 	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
@@ -1293,9 +1298,15 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 			    struct bio *bio)
 {
+	unsigned short max_sectors;
 	int len;
 
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+	if (unlikely(blk_pc_request(req)))
+		max_sectors = q->max_hw_sectors;
+	else
+		max_sectors = q->max_sectors;
+
+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
 		req->flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -1325,9 +1336,16 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			     struct bio *bio)
 {
+	unsigned short max_sectors;
 	int len;
 
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+	if (unlikely(blk_pc_request(req)))
+		max_sectors = q->max_hw_sectors;
+	else
+		max_sectors = q->max_sectors;
+
+
+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
 		req->flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -2144,7 +2162,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	struct bio *bio;
 	int reading;
 
-	if (len > (q->max_sectors << 9))
+	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !ubuf)
 		return -EINVAL;
@@ -2259,7 +2277,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 {
 	struct bio *bio;
 
-	if (len > (q->max_sectors << 9))
+	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 382dea7b224c..4e390dfd3157 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -233,7 +233,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (verify_command(file, cmd))
 		return -EPERM;
 
-	if (hdr->dxfer_len > (q->max_sectors << 9))
+	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
 		return -EIO;
 
 	if (hdr->dxfer_len)
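For completeness, a hypothetical user-space sketch of the sg_io() path
changed above: the SG_IO ioctl's dxfer_len is now validated against
max_hw_sectors << 9 rather than max_sectors << 9. This issues a READ(10),
assumes 512-byte logical blocks, and elides error handling:

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

/* read len bytes starting at logical block lba via SG_IO */
int sg_read10(int fd, void *buf, unsigned int len, unsigned int lba)
{
	unsigned int blocks = len >> 9;		/* assumes 512-byte sectors */
	unsigned char cdb[10] = { 0x28, 0,
				  lba >> 24, lba >> 16, lba >> 8, lba,
				  0, blocks >> 8, blocks, 0 };
	unsigned char sense[32];
	struct sg_io_hdr hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = buf;
	hdr.dxfer_len = len;	/* now checked against max_hw_sectors << 9 */
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.timeout = 20000;	/* milliseconds */

	return ioctl(fd, SG_IO, &hdr);
}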