From 458b76ed2f9517becb74dcc8eedd70d3068ea6e4 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Tue, 24 Sep 2013 16:26:05 -0700
Subject: block: Kill bio_segments()/bi_vcnt usage

When we start sharing biovecs, keeping bi_vcnt accurate for splits is
going to be error prone - and unnecessary, if we refactor some code.

So bio_segments() has to go - but most of the existing users just
needed to know if the bio had multiple segments, which is easier - add
a bio_multiple_segments() for them.

(Two of the current uses of bio_segments() are going to go away in a
couple patches, but the current implementation of bio_segments() is
unsafe as soon as we start doing driver conversions for immutable
biovecs - so implement a dumb version for bisectability, it'll go away
in a couple patches)

Signed-off-by: Kent Overstreet
Cc: Jens Axboe
Cc: Neil Brown
Cc: Nagalakshmi Nandigama
Cc: Sreekanth Reddy
Cc: "James E.J. Bottomley"
---
 drivers/md/bcache/io.c | 53 ++++++++++++++++++++++----------------------
 1 file changed, 23 insertions(+), 30 deletions(-)

(limited to 'drivers/md/bcache/io.c')

diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9b5b6a4..6e04f3b 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -24,7 +24,8 @@ static void bch_generic_make_request_hack(struct bio *bio)
 	if (bio->bi_iter.bi_idx) {
 		struct bio_vec bv;
 		struct bvec_iter iter;
-		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
+		unsigned segs = bio_segments(bio);
+		struct bio *clone = bio_alloc(GFP_NOIO, segs);
 
 		bio_for_each_segment(bv, bio, iter)
 			clone->bi_io_vec[clone->bi_vcnt++] = bv;
@@ -32,7 +33,7 @@
 		clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 		clone->bi_bdev		= bio->bi_bdev;
 		clone->bi_rw		= bio->bi_rw;
-		clone->bi_vcnt		= bio_segments(bio);
+		clone->bi_vcnt		= segs;
 		clone->bi_iter.bi_size	= bio->bi_iter.bi_size;
 
 		clone->bi_private	= bio;
@@ -133,40 +134,32 @@ out:
 
 static unsigned bch_bio_max_sectors(struct bio *bio)
 {
-	unsigned ret = bio_sectors(bio);
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
-				      queue_max_segments(q));
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	unsigned ret = 0, seg = 0;
 
 	if (bio->bi_rw & REQ_DISCARD)
-		return min(ret, q->limits.max_discard_sectors);
-
-	if (bio_segments(bio) > max_segments ||
-	    q->merge_bvec_fn) {
-		struct bio_vec bv;
-		struct bvec_iter iter;
-		unsigned seg = 0;
-
-		ret = 0;
+		return min(bio_sectors(bio), q->limits.max_discard_sectors);
 
-		bio_for_each_segment(bv, bio, iter) {
-			struct bvec_merge_data bvm = {
-				.bi_bdev	= bio->bi_bdev,
-				.bi_sector	= bio->bi_iter.bi_sector,
-				.bi_size	= ret << 9,
-				.bi_rw		= bio->bi_rw,
-			};
-
-			if (seg == max_segments)
-				break;
+	bio_for_each_segment(bv, bio, iter) {
+		struct bvec_merge_data bvm = {
+			.bi_bdev	= bio->bi_bdev,
+			.bi_sector	= bio->bi_iter.bi_sector,
+			.bi_size	= ret << 9,
+			.bi_rw		= bio->bi_rw,
+		};
+
+		if (seg == min_t(unsigned, BIO_MAX_PAGES,
+				 queue_max_segments(q)))
+			break;
 
-			if (q->merge_bvec_fn &&
-			    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
-				break;
+		if (q->merge_bvec_fn &&
+		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+			break;
 
-			seg++;
-			ret += bv.bv_len >> 9;
-		}
+		seg++;
+		ret += bv.bv_len >> 9;
 	}
 
 	ret = min(ret, queue_max_sectors(q));
--
cgit v1.1
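
Note: this view is limited to drivers/md/bcache/io.c, so the bio_multiple_segments()
helper and the temporary "dumb" bio_segments() that the commit message describes are
not visible in the hunks above; they belong in the generic bio header. The following
is only a sketch of what such helpers could look like, assuming the bvec_iter fields
(bi_size, bi_idx) and the bio_for_each_segment() iterator already used in the hunks
above - it is an illustration, not the exact upstream code.

/* Sketch only: does this bio span more than one biovec segment?
 * A single-segment bio has all of bi_size contained in the bvec
 * that bi_idx currently points at, so comparing the two sizes is
 * enough and bi_vcnt never needs to be consulted. */
static inline bool bio_multiple_segments(struct bio *bio)
{
	return bio->bi_iter.bi_size !=
		bio->bi_io_vec[bio->bi_iter.bi_idx].bv_len;
}

/* Sketch of the interim "dumb" bio_segments(): walk the bio with the
 * immutable-biovec iterator and count segments instead of trusting
 * bi_vcnt/bi_idx. Slower, but safe during driver conversions and kept
 * only for bisectability. */
static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

Callers that only need to know "does this bio have more than one segment?" can then
switch to bio_multiple_segments() rather than comparing a full bio_segments() count
against 1, which is the conversion the commit message is pointing at.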