author		NeilBrown <neilb@suse.de>	2007-09-25 12:35:59 +0200
committer	Jens Axboe <axboe@carl.home.kernel.dk>	2007-10-10 09:25:56 +0200
commit		5705f7021748a69d84d6567e68e8851dab551464
tree		5a6dbc8fc6055c0334f4a97540e36a7844b9c482 /block/ll_rw_blk.c
parent		9dfa52831e96194b8649613e3131baa2c109f7dc
Introduce rq_for_each_segment replacing rq_for_each_bio
Every usage of rq_for_each_bio wraps a usage of bio_for_each_segment, so
these can be combined into rq_for_each_segment.

We define "struct req_iterator" to hold the 'bio' and 'index' that are
needed for the double iteration.

Signed-off-by: Neil Brown <neilb@suse.de>

Various compile fixes by me...

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
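The iterator itself is introduced elsewhere in the full patch (this page is limited to block/ll_rw_blk.c), but based on the description above it can be pictured as a small struct carrying the current bio and segment index, plus a macro that nests the two existing loops. The following is a rough sketch of that idea, not the literal definition:

struct req_iterator {
	int i;			/* segment index within the current bio */
	struct bio *bio;	/* bio currently being walked */
};

/* visit every bio_vec segment of every bio in the request */
#define rq_for_each_segment(bvl, rq, _iter)			\
	rq_for_each_bio(_iter.bio, rq)				\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)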
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--	block/ll_rw_blk.c	19
1 file changed, 6 insertions, 13 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e35119a72a44..094c0fa5c405 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1244,8 +1244,7 @@ static void blk_recalc_rq_segments(struct request *rq)
 	int seg_size;
 	int hw_seg_size;
 	int cluster;
-	struct bio *bio;
-	int i;
+	struct req_iterator iter;
 	int high, highprv = 1;
 	struct request_queue *q = rq->q;
 
@@ -1255,8 +1254,7 @@ static void blk_recalc_rq_segments(struct request *rq)
 	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
 	hw_seg_size = seg_size = 0;
 	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
-	rq_for_each_bio(bio, rq)
-	    bio_for_each_segment(bv, bio, i) {
+	rq_for_each_segment(bv, rq, iter) {
 		/*
 		 * the trick here is making sure that a high page is never
 		 * considered part of another segment, since that might
@@ -1353,8 +1351,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		  struct scatterlist *sg)
 {
 	struct bio_vec *bvec, *bvprv;
-	struct bio *bio;
-	int nsegs, i, cluster;
+	struct req_iterator iter;
+	int nsegs, cluster;
 
 	nsegs = 0;
 	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
@@ -1363,11 +1361,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	 * for each bio in rq
 	 */
 	bvprv = NULL;
-	rq_for_each_bio(bio, rq) {
-		/*
-		 * for each segment in bio
-		 */
-		bio_for_each_segment(bvec, bio, i) {
+	rq_for_each_segment(bvec, rq, iter) {
 			int nbytes = bvec->bv_len;
 
 			if (bvprv && cluster) {
@@ -1390,8 +1384,7 @@ new_segment:
 				nsegs++;
 			}
 			bvprv = bvec;
-		} /* segments in bio */
-	} /* bios in rq */
+	} /* segments in rq */
 
 	return nsegs;
 }
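Callers outside the block core convert the same way. As a minimal, hypothetical example (the request 'rq' and the per-segment handler 'do_something' are illustrative only, not part of this patch), walking a request's data with the new iterator would look roughly like:

	struct req_iterator iter;
	struct bio_vec *bvec;

	rq_for_each_segment(bvec, rq, iter) {
		/* each bio_vec describes one contiguous chunk of the request */
		void *buf = page_address(bvec->bv_page) + bvec->bv_offset;

		do_something(buf, bvec->bv_len);	/* hypothetical per-segment handler */
	}

A real driver would need kmap()/kunmap() for highmem pages rather than page_address().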