author     Linus Torvalds <torvalds@linux-foundation.org>   2014-01-30 11:19:05 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-01-30 11:19:05 -0800
commit     f568849edac8611d603e00bd6cbbcfea09395ae6 (patch)
tree       b9472d640fe5d87426d38c9d81d946cf197ad3fb /drivers
parent     d9894c228b11273e720bb63ba120d1d326fe9d94 (diff)
parent     675675ada486dde5bf9aa51665e90706bff11a35 (diff)
Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe:
"The major piece in here is the immutable bio_ve series from Kent, the
rest is fairly minor. It was supposed to go in last round, but
various issues pushed it to this release instead. The pull request
contains:
- Various smaller blk-mq fixes from different folks. Nothing major
here, just minor fixes and cleanups.
- Fix for a memory leak in the error path in the block ioctl code
from Christian Engelmayer.
- Header export fix from CaiZhiyong.
- Finally the immutable biovec changes from Kent Overstreet. This
enables some nice future work on making arbitrarily sized bios
possible, and splitting more efficient. Related fixes to immutable
bio_vecs:
- dm-cache immutable fixup from Mike Snitzer.
- btrfs immutable fixup from Muthu Kumar.
- bio-integrity fix from Nic Bellinger, which is also going to stable"
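For context, the core of the immutable biovec interface is that a bio's vector is never edited in place: all position state lives in a struct bvec_iter, and bio_for_each_segment() now hands back a struct bio_vec by value instead of a pointer plus index. Below is a minimal sketch of the new iteration style (the helper name is illustrative, not part of this merge); a second sketch of the new splitting idiom follows the commit list further down.

    #include <linux/bio.h>

    /* Illustrative helper: sum the payload of a bio using the new iterator. */
    static unsigned int example_bio_payload(struct bio *bio)
    {
            struct bio_vec bv;      /* each segment is copied by value */
            struct bvec_iter iter;  /* all position state lives here */
            unsigned int bytes = 0;

            /* Walks a private copy of bio->bi_iter; the bio itself is not modified. */
            bio_for_each_segment(bv, bio, iter)
                    bytes += bv.bv_len;

            /* With immutable biovecs this always equals bio->bi_iter.bi_size. */
            return bytes;
    }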
* 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits)
xtensa: fixup simdisk driver to work with immutable bio_vecs
block/blk-mq-cpu.c: use hotcpu_notifier()
blk-mq: for_each_* macro correctness
block: Fix memory leak in rw_copy_check_uvector() handling
bio-integrity: Fix bio_integrity_verify segment start bug
block: remove unrelated header files and export symbol
blk-mq: uses page->list incorrectly
blk-mq: use __smp_call_function_single directly
btrfs: fix missing increment of bi_remaining
Revert "block: Warn and free bio if bi_end_io is not set"
block: Warn and free bio if bi_end_io is not set
blk-mq: fix initializing request's start time
block: blk-mq: don't export blk_mq_free_queue()
block: blk-mq: make blk_sync_queue support mq
block: blk-mq: support draining mq queue
dm cache: increment bi_remaining when bi_end_io is restored
block: fixup for generic bio chaining
block: Really silence spurious compiler warnings
block: Silence spurious compiler warnings
block: Kill bio_pair_split()
...
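As the "Kill bio_pair_split()" and bio chaining entries above suggest, splitting is now done by carving a front piece off a bio with bio_split(), tying its completion to the parent with bio_chain(), and resubmitting what remains; the pktcdvd and bcache hunks in the diff below follow this loop. A hedged sketch, where max_sectors and split_bioset are hypothetical stand-ins for a driver's own limit and bio_set:

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Sketch of the split-and-chain idiom introduced by this series. */
    static void example_submit_split(struct bio *bio, unsigned int max_sectors,
                                     struct bio_set *split_bioset)
    {
            struct bio *split;

            do {
                    if (bio_sectors(bio) <= max_sectors) {
                            split = bio;
                    } else {
                            split = bio_split(bio, max_sectors, GFP_NOIO,
                                              split_bioset);
                            /* parent completes only after the split piece does */
                            bio_chain(split, bio);
                    }

                    generic_make_request(split);
            } while (split != bio);
    }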
Diffstat (limited to 'drivers')
77 files changed, 1080 insertions, 1596 deletions
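Several of the driver conversions that follow (aoe and umem in particular) replace a trio of cached fields — a bio_vec pointer, an index, and a residual byte count — with one private struct bvec_iter that is advanced as the hardware consumes data. A small sketch of that pattern, with hypothetical names:

    #include <linux/bio.h>

    /* Hypothetical per-command state, mirroring what aoe/umem keep after this merge. */
    struct example_cursor {
            struct bio       *bio;
            struct bvec_iter  iter;   /* private position within *bio */
    };

    static void example_cursor_init(struct example_cursor *c, struct bio *bio)
    {
            c->bio  = bio;
            c->iter = bio->bi_iter;   /* snapshot; the bio itself stays untouched */
    }

    /* Return the next segment and advance the private cursor past it. */
    static bool example_cursor_next(struct example_cursor *c, struct bio_vec *out)
    {
            if (!c->iter.bi_size)
                    return false;

            *out = bio_iter_iovec(c->bio, c->iter);
            bio_advance_iter(c->bio, &c->iter, out->bv_len);
            return true;
    }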
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index 14a9d19..9220f8e 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h @@ -100,11 +100,8 @@ enum { struct buf { ulong nframesout; - ulong resid; - ulong bv_resid; - sector_t sector; struct bio *bio; - struct bio_vec *bv; + struct bvec_iter iter; struct request *rq; }; @@ -120,13 +117,10 @@ struct frame { ulong waited; ulong waited_total; struct aoetgt *t; /* parent target I belong to */ - sector_t lba; struct sk_buff *skb; /* command skb freed on module exit */ struct sk_buff *r_skb; /* response skb for async processing */ struct buf *buf; - struct bio_vec *bv; - ulong bcnt; - ulong bv_off; + struct bvec_iter iter; char flags; }; diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index d251543..8184451 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -196,8 +196,7 @@ aoe_freetframe(struct frame *f) t = f->t; f->buf = NULL; - f->lba = 0; - f->bv = NULL; + memset(&f->iter, 0, sizeof(f->iter)); f->r_skb = NULL; f->flags = 0; list_add(&f->head, &t->ffree); @@ -295,21 +294,14 @@ newframe(struct aoedev *d) } static void -skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt) +skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter) { int frag = 0; - ulong fcnt; -loop: - fcnt = bv->bv_len - (off - bv->bv_offset); - if (fcnt > cnt) - fcnt = cnt; - skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt); - cnt -= fcnt; - if (cnt <= 0) - return; - bv++; - off = bv->bv_offset; - goto loop; + struct bio_vec bv; + + __bio_for_each_segment(bv, bio, iter, iter) + skb_fill_page_desc(skb, frag++, bv.bv_page, + bv.bv_offset, bv.bv_len); } static void @@ -346,12 +338,10 @@ ata_rw_frameinit(struct frame *f) t->nout++; f->waited = 0; f->waited_total = 0; - if (f->buf) - f->lba = f->buf->sector; /* set up ata header */ - ah->scnt = f->bcnt >> 9; - put_lba(ah, f->lba); + ah->scnt = f->iter.bi_size >> 9; + put_lba(ah, f->iter.bi_sector); if (t->d->flags & DEVFL_EXT) { ah->aflags |= AOEAFL_EXT; } else { @@ -360,11 +350,11 @@ ata_rw_frameinit(struct frame *f) ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */ } if (f->buf && bio_data_dir(f->buf->bio) == WRITE) { - skb_fillup(skb, f->bv, f->bv_off, f->bcnt); + skb_fillup(skb, f->buf->bio, f->iter); ah->aflags |= AOEAFL_WRITE; - skb->len += f->bcnt; - skb->data_len = f->bcnt; - skb->truesize += f->bcnt; + skb->len += f->iter.bi_size; + skb->data_len = f->iter.bi_size; + skb->truesize += f->iter.bi_size; t->wpkts++; } else { t->rpkts++; @@ -382,7 +372,6 @@ aoecmd_ata_rw(struct aoedev *d) struct buf *buf; struct sk_buff *skb; struct sk_buff_head queue; - ulong bcnt, fbcnt; buf = nextbuf(d); if (buf == NULL) @@ -390,39 +379,22 @@ aoecmd_ata_rw(struct aoedev *d) f = newframe(d); if (f == NULL) return 0; - bcnt = d->maxbcnt; - if (bcnt == 0) - bcnt = DEFAULTBCNT; - if (bcnt > buf->resid) - bcnt = buf->resid; - fbcnt = bcnt; - f->bv = buf->bv; - f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid); - do { - if (fbcnt < buf->bv_resid) { - buf->bv_resid -= fbcnt; - buf->resid -= fbcnt; - break; - } - fbcnt -= buf->bv_resid; - buf->resid -= buf->bv_resid; - if (buf->resid == 0) { - d->ip.buf = NULL; - break; - } - buf->bv++; - buf->bv_resid = buf->bv->bv_len; - WARN_ON(buf->bv_resid == 0); - } while (fbcnt); /* initialize the headers & frame */ f->buf = buf; - f->bcnt = bcnt; - ata_rw_frameinit(f); + f->iter = buf->iter; + f->iter.bi_size = min_t(unsigned long, + d->maxbcnt ?: DEFAULTBCNT, + f->iter.bi_size); + 
bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size); + + if (!buf->iter.bi_size) + d->ip.buf = NULL; /* mark all tracking fields and load out */ buf->nframesout += 1; - buf->sector += bcnt >> 9; + + ata_rw_frameinit(f); skb = skb_clone(f->skb, GFP_ATOMIC); if (skb) { @@ -613,10 +585,7 @@ reassign_frame(struct frame *f) skb = nf->skb; nf->skb = f->skb; nf->buf = f->buf; - nf->bcnt = f->bcnt; - nf->lba = f->lba; - nf->bv = f->bv; - nf->bv_off = f->bv_off; + nf->iter = f->iter; nf->waited = 0; nf->waited_total = f->waited_total; nf->sent = f->sent; @@ -648,19 +617,19 @@ probe(struct aoetgt *t) } f->flags |= FFL_PROBE; ifrotate(t); - f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT; + f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT; ata_rw_frameinit(f); skb = f->skb; - for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) { + for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) { if (n < PAGE_SIZE) m = n; else m = PAGE_SIZE; skb_fill_page_desc(skb, frag, empty_page, 0, m); } - skb->len += f->bcnt; - skb->data_len = f->bcnt; - skb->truesize += f->bcnt; + skb->len += f->iter.bi_size; + skb->data_len = f->iter.bi_size; + skb->truesize += f->iter.bi_size; skb = skb_clone(f->skb, GFP_ATOMIC); if (skb) { @@ -897,15 +866,15 @@ rqbiocnt(struct request *r) static void bio_pageinc(struct bio *bio) { - struct bio_vec *bv; + struct bio_vec bv; struct page *page; - int i; + struct bvec_iter iter; - bio_for_each_segment(bv, bio, i) { + bio_for_each_segment(bv, bio, iter) { /* Non-zero page count for non-head members of * compound pages is no longer allowed by the kernel. */ - page = compound_trans_head(bv->bv_page); + page = compound_trans_head(bv.bv_page); atomic_inc(&page->_count); } } @@ -913,12 +882,12 @@ bio_pageinc(struct bio *bio) static void bio_pagedec(struct bio *bio) { - struct bio_vec *bv; struct page *page; - int i; + struct bio_vec bv; + struct bvec_iter iter; - bio_for_each_segment(bv, bio, i) { - page = compound_trans_head(bv->bv_page); + bio_for_each_segment(bv, bio, iter) { + page = compound_trans_head(bv.bv_page); atomic_dec(&page->_count); } } @@ -929,12 +898,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio) memset(buf, 0, sizeof(*buf)); buf->rq = rq; buf->bio = bio; - buf->resid = bio->bi_size; - buf->sector = bio->bi_sector; + buf->iter = bio->bi_iter; bio_pageinc(bio); - buf->bv = bio_iovec(bio); - buf->bv_resid = buf->bv->bv_len; - WARN_ON(buf->bv_resid == 0); } static struct buf * @@ -1119,24 +1084,18 @@ gettgt(struct aoedev *d, char *addr) } static void -bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt) +bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt) { - ulong fcnt; - char *p; int soff = 0; -loop: - fcnt = bv->bv_len - (off - bv->bv_offset); - if (fcnt > cnt) - fcnt = cnt; - p = page_address(bv->bv_page) + off; - skb_copy_bits(skb, soff, p, fcnt); - soff += fcnt; - cnt -= fcnt; - if (cnt <= 0) - return; - bv++; - off = bv->bv_offset; - goto loop; + struct bio_vec bv; + + iter.bi_size = cnt; + + __bio_for_each_segment(bv, bio, iter, iter) { + char *p = page_address(bv.bv_page) + bv.bv_offset; + skb_copy_bits(skb, soff, p, bv.bv_len); + soff += bv.bv_len; + } } void @@ -1152,7 +1111,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail) do { bio = rq->bio; bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags); - } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size)); + } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size)); /* cf. 
http://lkml.org/lkml/2006/10/31/28 */ if (!fastfail) @@ -1229,7 +1188,15 @@ noskb: if (buf) clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); break; } - bvcpy(f->bv, f->bv_off, skb, n); + if (n > f->iter.bi_size) { + pr_err_ratelimited("%s e%ld.%d. bytes=%ld need=%u\n", + "aoe: too-large data size in read from", + (long) d->aoemajor, d->aoeminor, + n, f->iter.bi_size); + clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); + break; + } + bvcpy(skb, f->buf->bio, f->iter, n); case ATA_CMD_PIO_WRITE: case ATA_CMD_PIO_WRITE_EXT: spin_lock_irq(&d->lock); @@ -1272,7 +1239,7 @@ out: aoe_freetframe(f); - if (buf && --buf->nframesout == 0 && buf->resid == 0) + if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0) aoe_end_buf(d, buf); spin_unlock_irq(&d->lock); @@ -1727,7 +1694,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf) { if (buf == NULL) return; - buf->resid = 0; + buf->iter.bi_size = 0; clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); if (buf->nframesout == 0) aoe_end_buf(d, buf); diff --git a/drivers/block/brd.c b/drivers/block/brd.c index d91f1a5..e73b85c 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -328,18 +328,18 @@ static void brd_make_request(struct request_queue *q, struct bio *bio) struct block_device *bdev = bio->bi_bdev; struct brd_device *brd = bdev->bd_disk->private_data; int rw; - struct bio_vec *bvec; + struct bio_vec bvec; sector_t sector; - int i; + struct bvec_iter iter; int err = -EIO; - sector = bio->bi_sector; + sector = bio->bi_iter.bi_sector; if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) goto out; if (unlikely(bio->bi_rw & REQ_DISCARD)) { err = 0; - discard_from_brd(brd, sector, bio->bi_size); + discard_from_brd(brd, sector, bio->bi_iter.bi_size); goto out; } @@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio) if (rw == READA) rw = READ; - bio_for_each_segment(bvec, bio, i) { - unsigned int len = bvec->bv_len; - err = brd_do_bvec(brd, bvec->bv_page, len, - bvec->bv_offset, rw, sector); + bio_for_each_segment(bvec, bio, iter) { + unsigned int len = bvec.bv_len; + err = brd_do_bvec(brd, bvec.bv_page, len, + bvec.bv_offset, rw, sector); if (err) break; sector += len >> SECTOR_SHIFT; diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 28c73ca..a9b13f2 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, bio = bio_alloc_drbd(GFP_NOIO); bio->bi_bdev = bdev->md_bdev; - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; err = -EIO; if (bio_add_page(bio, page, size, 0) != size) goto out; diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index b12c11e..597f111 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must } else page = b->bm_pages[page_nr]; bio->bi_bdev = mdev->ldev->md_bdev; - bio->bi_sector = on_disk_sector; + bio->bi_iter.bi_sector = on_disk_sector; /* bio_add_page of a single page to an empty bio will always succeed, * according to api. Do we want to assert that? 
*/ bio_add_page(bio, page, len, 0); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 9e3818b..929468e 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1537,15 +1537,17 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page, static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) { - struct bio_vec *bvec; - int i; + struct bio_vec bvec; + struct bvec_iter iter; + /* hint all but last page with MSG_MORE */ - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, iter) { int err; - err = _drbd_no_send_page(mdev, bvec->bv_page, - bvec->bv_offset, bvec->bv_len, - i == bio->bi_vcnt - 1 ? 0 : MSG_MORE); + err = _drbd_no_send_page(mdev, bvec.bv_page, + bvec.bv_offset, bvec.bv_len, + bio_iter_last(bvec, iter) + ? 0 : MSG_MORE); if (err) return err; } @@ -1554,15 +1556,16 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio) { - struct bio_vec *bvec; - int i; + struct bio_vec bvec; + struct bvec_iter iter; + /* hint all but last page with MSG_MORE */ - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, iter) { int err; - err = _drbd_send_page(mdev, bvec->bv_page, - bvec->bv_offset, bvec->bv_len, - i == bio->bi_vcnt - 1 ? 0 : MSG_MORE); + err = _drbd_send_page(mdev, bvec.bv_page, + bvec.bv_offset, bvec.bv_len, + bio_iter_last(bvec, iter) ? 0 : MSG_MORE); if (err) return err; } diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 6fa6673..d073305 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1333,7 +1333,7 @@ next_bio: goto fail; } /* > peer_req->i.sector, unless this is the first bio */ - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; bio->bi_bdev = mdev->ldev->backing_bdev; bio->bi_rw = rw; bio->bi_private = peer_req; @@ -1353,7 +1353,7 @@ next_bio: dev_err(DEV, "bio_add_page failed for len=%u, " "bi_vcnt=0 (bi_sector=%llu)\n", - len, (unsigned long long)bio->bi_sector); + len, (uint64_t)bio->bi_iter.bi_sector); err = -ENOSPC; goto fail; } @@ -1595,9 +1595,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size) static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, sector_t sector, int data_size) { - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; struct bio *bio; - int dgs, err, i, expect; + int dgs, err, expect; void *dig_in = mdev->tconn->int_dig_in; void *dig_vv = mdev->tconn->int_dig_vv; @@ -1615,13 +1616,13 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, mdev->recv_cnt += data_size>>9; bio = req->master_bio; - D_ASSERT(sector == bio->bi_sector); + D_ASSERT(sector == bio->bi_iter.bi_sector); - bio_for_each_segment(bvec, bio, i) { - void *mapped = kmap(bvec->bv_page) + bvec->bv_offset; - expect = min_t(int, data_size, bvec->bv_len); + bio_for_each_segment(bvec, bio, iter) { + void *mapped = kmap(bvec.bv_page) + bvec.bv_offset; + expect = min_t(int, data_size, bvec.bv_len); err = drbd_recv_all_warn(mdev->tconn, mapped, expect); - kunmap(bvec->bv_page); + kunmap(bvec.bv_page); if (err) return err; data_size -= expect; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index fec7bef..104a040 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev, req->epoch = 0; 
drbd_clear_interval(&req->i); - req->i.sector = bio_src->bi_sector; - req->i.size = bio_src->bi_size; + req->i.sector = bio_src->bi_iter.bi_sector; + req->i.size = bio_src->bi_iter.bi_size; req->i.local = true; req->i.waiting = false; @@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio) /* * what we "blindly" assume: */ - D_ASSERT(IS_ALIGNED(bio->bi_size, 512)); + D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512)); inc_ap_bio(mdev); __drbd_make_request(mdev, bio, start_time); diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 978cb1a..28e15d9 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi /* Short lived temporary struct on the stack. * We could squirrel the error to be returned into - * bio->bi_size, or similar. But that would be too ugly. */ + * bio->bi_iter.bi_size, or similar. But that would be too ugly. */ struct bio_and_error { struct bio *bio; int error; diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 891c0ec..84d3175 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -313,8 +313,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio * { struct hash_desc desc; struct scatterlist sg; - struct bio_vec *bvec; - int i; + struct bio_vec bvec; + struct bvec_iter iter; desc.tfm = tfm; desc.flags = 0; @@ -322,8 +322,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio * sg_init_table(&sg, 1); crypto_hash_init(&desc); - bio_for_each_segment(bvec, bio, i) { - sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset); + bio_for_each_segment(bvec, bio, iter) { + sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); crypto_hash_update(&desc, &sg, sg.length); } crypto_hash_final(&desc, digest); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 000abe2..6b29c44 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2351,7 +2351,7 @@ static void rw_interrupt(void) /* Compute maximal contiguous buffer size. 
*/ static int buffer_chain_size(void) { - struct bio_vec *bv; + struct bio_vec bv; int size; struct req_iterator iter; char *base; @@ -2360,10 +2360,10 @@ static int buffer_chain_size(void) size = 0; rq_for_each_segment(bv, current_req, iter) { - if (page_address(bv->bv_page) + bv->bv_offset != base + size) + if (page_address(bv.bv_page) + bv.bv_offset != base + size) break; - size += bv->bv_len; + size += bv.bv_len; } return size >> 9; @@ -2389,7 +2389,7 @@ static int transfer_size(int ssize, int max_sector, int max_size) static void copy_buffer(int ssize, int max_sector, int max_sector_2) { int remaining; /* number of transferred 512-byte sectors */ - struct bio_vec *bv; + struct bio_vec bv; char *buffer; char *dma_buffer; int size; @@ -2427,10 +2427,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2) if (!remaining) break; - size = bv->bv_len; + size = bv.bv_len; SUPBOUND(size, remaining); - buffer = page_address(bv->bv_page) + bv->bv_offset; + buffer = page_address(bv.bv_page) + bv.bv_offset; if (dma_buffer + size > floppy_track_buffer + (max_buffer_sectors << 10) || dma_buffer < floppy_track_buffer) { @@ -3775,9 +3775,9 @@ static int __floppy_read_block_0(struct block_device *bdev) bio_vec.bv_len = size; bio_vec.bv_offset = 0; bio.bi_vcnt = 1; - bio.bi_size = size; + bio.bi_iter.bi_size = size; bio.bi_bdev = bdev; - bio.bi_sector = 0; + bio.bi_iter.bi_sector = 0; bio.bi_flags = (1 << BIO_QUIET); init_completion(&complete); bio.bi_private = &complete; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index c8dac73..33fde3a 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -288,9 +288,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos) { int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t, struct page *page); - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; struct page *page = NULL; - int i, ret = 0; + int ret = 0; if (lo->transfer != transfer_none) { page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); @@ -302,11 +303,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos) do_lo_send = do_lo_send_direct_write; } - bio_for_each_segment(bvec, bio, i) { - ret = do_lo_send(lo, bvec, pos, page); + bio_for_each_segment(bvec, bio, iter) { + ret = do_lo_send(lo, &bvec, pos, page); if (ret < 0) break; - pos += bvec->bv_len; + pos += bvec.bv_len; } if (page) { kunmap(page); @@ -392,20 +393,20 @@ do_lo_receive(struct loop_device *lo, static int lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) { - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; ssize_t s; - int i; - bio_for_each_segment(bvec, bio, i) { - s = do_lo_receive(lo, bvec, bsize, pos); + bio_for_each_segment(bvec, bio, iter) { + s = do_lo_receive(lo, &bvec, bsize, pos); if (s < 0) return s; - if (s != bvec->bv_len) { + if (s != bvec.bv_len) { zero_fill_bio(bio); break; } - pos += bvec->bv_len; + pos += bvec.bv_len; } return 0; } @@ -415,7 +416,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) loff_t pos; int ret; - pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; + pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset; if (bio_rw(bio) == WRITE) { struct file *file = lo->lo_backing_file; @@ -444,7 +445,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) goto out; } ret = file->f_op->fallocate(file, mode, pos, - bio->bi_size); + bio->bi_iter.bi_size); if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP)) ret = 
-EIO; diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 050c712..52b2f2a 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3962,8 +3962,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) { struct driver_data *dd = queue->queuedata; struct scatterlist *sg; - struct bio_vec *bvec; - int i, nents = 0; + struct bio_vec bvec; + struct bvec_iter iter; + int nents = 0; int tag = 0, unaligned = 0; if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { @@ -3993,7 +3994,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) } if (unlikely(bio->bi_rw & REQ_DISCARD)) { - bio_endio(bio, mtip_send_trim(dd, bio->bi_sector, + bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector, bio_sectors(bio))); return; } @@ -4006,7 +4007,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 && dd->unal_qdepth) { - if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */ + if (bio->bi_iter.bi_sector % 8 != 0) + /* Unaligned on 4k boundaries */ unaligned = 1; else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */ unaligned = 1; @@ -4025,17 +4027,17 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) } /* Create the scatter list for this bio. */ - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, iter) { sg_set_page(&sg[nents], - bvec->bv_page, - bvec->bv_len, - bvec->bv_offset); + bvec.bv_page, + bvec.bv_len, + bvec.bv_offset); nents++; } /* Issue the read/write. */ mtip_hw_submit_io(dd, - bio->bi_sector, + bio->bi_iter.bi_sector, bio_sectors(bio), nents, tag, diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 2dc3b51..55298db 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -271,18 +271,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) if (nbd_cmd(req) == NBD_CMD_WRITE) { struct req_iterator iter; - struct bio_vec *bvec; + struct bio_vec bvec; /* * we are really probing at internals to determine * whether to set MSG_MORE or not... 
*/ rq_for_each_segment(bvec, req, iter) { flags = 0; - if (!rq_iter_last(req, iter)) + if (!rq_iter_last(bvec, iter)) flags = MSG_MORE; dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", - nbd->disk->disk_name, req, bvec->bv_len); - result = sock_send_bvec(nbd, bvec, flags); + nbd->disk->disk_name, req, bvec.bv_len); + result = sock_send_bvec(nbd, &bvec, flags); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", @@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) nbd->disk->disk_name, req); if (nbd_cmd(req) == NBD_CMD_READ) { struct req_iterator iter; - struct bio_vec *bvec; + struct bio_vec bvec; rq_for_each_segment(bvec, req, iter) { - result = sock_recv_bvec(nbd, bvec); + result = sock_recv_bvec(nbd, &bvec); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", result); @@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) return req; } dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", - nbd->disk->disk_name, req, bvec->bv_len); + nbd->disk->disk_name, req, bvec.bv_len); } } return req; diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 26d03fa..1f14ac4 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -441,104 +441,19 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd, return total_len; } -struct nvme_bio_pair { - struct bio b1, b2, *parent; - struct bio_vec *bv1, *bv2; - int err; - atomic_t cnt; -}; - -static void nvme_bio_pair_endio(struct bio *bio, int err) -{ - struct nvme_bio_pair *bp = bio->bi_private; - - if (err) - bp->err = err; - - if (atomic_dec_and_test(&bp->cnt)) { - bio_endio(bp->parent, bp->err); - kfree(bp->bv1); - kfree(bp->bv2); - kfree(bp); - } -} - -static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx, - int len, int offset) -{ - struct nvme_bio_pair *bp; - - BUG_ON(len > bio->bi_size); - BUG_ON(idx > bio->bi_vcnt); - - bp = kmalloc(sizeof(*bp), GFP_ATOMIC); - if (!bp) - return NULL; - bp->err = 0; - - bp->b1 = *bio; - bp->b2 = *bio; - - bp->b1.bi_size = len; - bp->b2.bi_size -= len; - bp->b1.bi_vcnt = idx; - bp->b2.bi_idx = idx; - bp->b2.bi_sector += len >> 9; - - if (offset) { - bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec), - GFP_ATOMIC); - if (!bp->bv1) - goto split_fail_1; - - bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec), - GFP_ATOMIC); - if (!bp->bv2) - goto split_fail_2; - - memcpy(bp->bv1, bio->bi_io_vec, - bio->bi_max_vecs * sizeof(struct bio_vec)); - memcpy(bp->bv2, bio->bi_io_vec, - bio->bi_max_vecs * sizeof(struct bio_vec)); - - bp->b1.bi_io_vec = bp->bv1; - bp->b2.bi_io_vec = bp->bv2; - bp->b2.bi_io_vec[idx].bv_offset += offset; - bp->b2.bi_io_vec[idx].bv_len -= offset; - bp->b1.bi_io_vec[idx].bv_len = offset; - bp->b1.bi_vcnt++; - } else - bp->bv1 = bp->bv2 = NULL; - - bp->b1.bi_private = bp; - bp->b2.bi_private = bp; - - bp->b1.bi_end_io = nvme_bio_pair_endio; - bp->b2.bi_end_io = nvme_bio_pair_endio; - - bp->parent = bio; - atomic_set(&bp->cnt, 2); - - return bp; - - split_fail_2: - kfree(bp->bv1); - split_fail_1: - kfree(bp); - return NULL; -} - static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq, - int idx, int len, int offset) + int len) { - struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset); - if (!bp) + struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL); + if (!split) return -ENOMEM; + bio_chain(split, bio); + if (bio_list_empty(&nvmeq->sq_cong)) 
add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); - bio_list_add(&nvmeq->sq_cong, &bp->b1); - bio_list_add(&nvmeq->sq_cong, &bp->b2); + bio_list_add(&nvmeq->sq_cong, split); + bio_list_add(&nvmeq->sq_cong, bio); return 0; } @@ -550,41 +465,44 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq, static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, struct bio *bio, enum dma_data_direction dma_dir, int psegs) { - struct bio_vec *bvec, *bvprv = NULL; + struct bio_vec bvec, bvprv; + struct bvec_iter iter; struct scatterlist *sg = NULL; - int i, length = 0, nsegs = 0, split_len = bio->bi_size; + int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size; + int first = 1; if (nvmeq->dev->stripe_size) split_len = nvmeq->dev->stripe_size - - ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1)); + ((bio->bi_iter.bi_sector << 9) & + (nvmeq->dev->stripe_size - 1)); sg_init_table(iod->sg, psegs); - bio_for_each_segment(bvec, bio, i) { - if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) { - sg->length += bvec->bv_len; + bio_for_each_segment(bvec, bio, iter) { + if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) { + sg->length += bvec.bv_len; } else { - if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec)) - return nvme_split_and_submit(bio, nvmeq, i, - length, 0); + if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec)) + return nvme_split_and_submit(bio, nvmeq, + length); sg = sg ? sg + 1 : iod->sg; - sg_set_page(sg, bvec->bv_page, bvec->bv_len, - bvec->bv_offset); + sg_set_page(sg, bvec.bv_page, + bvec.bv_len, bvec.bv_offset); nsegs++; } - if (split_len - length < bvec->bv_len) - return nvme_split_and_submit(bio, nvmeq, i, split_len, - split_len - length); - length += bvec->bv_len; + if (split_len - length < bvec.bv_len) + return nvme_split_and_submit(bio, nvmeq, split_len); + length += bvec.bv_len; bvprv = bvec; + first = 0; } iod->nents = nsegs; sg_mark_end(sg); if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0) return -ENOMEM; - BUG_ON(length != bio->bi_size); + BUG_ON(length != bio->bi_iter.bi_size); return length; } @@ -608,8 +526,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, iod->npages = 0; range->cattr = cpu_to_le32(0); - range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift); - range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); + range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift); + range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector)); memset(cmnd, 0, sizeof(*cmnd)); cmnd->dsm.opcode = nvme_cmd_dsm; @@ -674,7 +592,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, } result = -ENOMEM; - iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); + iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC); if (!iod) goto nomem; iod->private = bio; @@ -723,7 +641,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, cmnd->rw.nsid = cpu_to_le32(ns->ns_id); length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, GFP_ATOMIC); - cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); + cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector)); cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); cmnd->rw.control = cpu_to_le16(control); cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index ff8668c..3dda09a 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -651,7 +651,7 @@ static 
struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s for (;;) { tmp = rb_entry(n, struct pkt_rb_node, rb_node); - if (s <= tmp->bio->bi_sector) + if (s <= tmp->bio->bi_iter.bi_sector) next = n->rb_left; else next = n->rb_right; @@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s n = next; } - if (s > tmp->bio->bi_sector) { + if (s > tmp->bio->bi_iter.bi_sector) { tmp = pkt_rbtree_next(tmp); if (!tmp) return NULL; } - BUG_ON(s > tmp->bio->bi_sector); + BUG_ON(s > tmp->bio->bi_iter.bi_sector); return tmp; } @@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod { struct rb_node **p = &pd->bio_queue.rb_node; struct rb_node *parent = NULL; - sector_t s = node->bio->bi_sector; + sector_t s = node->bio->bi_iter.bi_sector; struct pkt_rb_node *tmp; while (*p) { parent = *p; tmp = rb_entry(parent, struct pkt_rb_node, rb_node); - if (s < tmp->bio->bi_sector) + if (s < tmp->bio->bi_iter.bi_sector) p = &(*p)->rb_left; else p = &(*p)->rb_right; @@ -857,7 +857,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) spin_lock(&pd->iosched.lock); bio = bio_list_peek(&pd->iosched.write_queue); spin_unlock(&pd->iosched.lock); - if (bio && (bio->bi_sector == pd->iosched.last_write)) + if (bio && (bio->bi_iter.bi_sector == + pd->iosched.last_write)) need_write_seek = 0; if (need_write_seek && reads_queued) { if (atomic_read(&pd->cdrw.pending_bios) > 0) { @@ -888,7 +889,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) continue; if (bio_data_dir(bio) == READ) - pd->iosched.successive_reads += bio->bi_size >> 10; + pd->iosched.successive_reads += + bio->bi_iter.bi_size >> 10; else { pd->iosched.successive_reads = 0; pd->iosched.last_write = bio_end_sector(bio); @@ -978,7 +980,7 @@ static void pkt_end_io_read(struct bio *bio, int err) pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", bio, (unsigned long long)pkt->sector, - (unsigned long long)bio->bi_sector, err); + (unsigned long long)bio->bi_iter.bi_sector, err); if (err) atomic_inc(&pkt->io_errors); @@ -1026,8 +1028,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) memset(written, 0, sizeof(written)); spin_lock(&pkt->lock); bio_list_for_each(bio, &pkt->orig_bios) { - int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); - int num_frames = bio->bi_size / CD_FRAMESIZE; + int first_frame = (bio->bi_iter.bi_sector - pkt->sector) / + (CD_FRAMESIZE >> 9); + int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE; pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); BUG_ON(first_frame < 0); BUG_ON(first_frame + num_frames > pkt->frames); @@ -1053,7 +1056,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) bio = pkt->r_bios[f]; bio_reset(bio); - bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); + bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); bio->bi_bdev = pd->bdev; bio->bi_end_io = pkt_end_io_read; bio->bi_private = pkt; @@ -1150,8 +1153,8 @@ static int pkt_start_recovery(struct packet_data *pkt) bio_reset(pkt->bio); pkt->bio->bi_bdev = pd->bdev; pkt->bio->bi_rw = REQ_WRITE; - pkt->bio->bi_sector = new_sector; - pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE; + pkt->bio->bi_iter.bi_sector = new_sector; + pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; pkt->bio->bi_vcnt = pkt->frames; pkt->bio->bi_end_io = pkt_end_io_packet_write; @@ -1213,7 +1216,7 @@ static int pkt_handle_queue(struct 
pktcdvd_device *pd) node = first_node; while (node) { bio = node->bio; - zone = get_zone(bio->bi_sector, pd); + zone = get_zone(bio->bi_iter.bi_sector, pd); list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { if (p->sector == zone) { bio = NULL; @@ -1252,14 +1255,14 @@ try_next_bio: pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone); while ((node = pkt_rbtree_find(pd, zone)) != NULL) { bio = node->bio; - pkt_dbg(2, pd, "found zone=%llx\n", - (unsigned long long)get_zone(bio->bi_sector, pd)); - if (get_zone(bio->bi_sector, pd) != zone) + pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long) + get_zone(bio->bi_iter.bi_sector, pd)); + if (get_zone(bio->bi_iter.bi_sector, pd) != zone) break; pkt_rbtree_erase(pd, node); spin_lock(&pkt->lock); bio_list_add(&pkt->orig_bios, bio); - pkt->write_size += bio->bi_size / CD_FRAMESIZE; + pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE; spin_unlock(&pkt->lock); } /* check write congestion marks, and if bio_queue_size is @@ -1293,7 +1296,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) struct bio_vec *bvec = pkt->w_bio->bi_io_vec; bio_reset(pkt->w_bio); - pkt->w_bio->bi_sector = pkt->sector; + pkt->w_bio->bi_iter.bi_sector = pkt->sector; pkt->w_bio->bi_bdev = pd->bdev; pkt->w_bio->bi_end_io = pkt_end_io_packet_write; pkt->w_bio->bi_private = pkt; @@ -2335,75 +2338,29 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err) pkt_bio_finished(pd); } -static void pkt_make_request(struct request_queue *q, struct bio *bio) +static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) { - struct pktcdvd_device *pd; - char b[BDEVNAME_SIZE]; + struct bio *cloned_bio = bio_clone(bio, GFP_NOIO); + struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO); + + psd->pd = pd; + psd->bio = bio; + cloned_bio->bi_bdev = pd->bdev; + cloned_bio->bi_private = psd; + cloned_bio->bi_end_io = pkt_end_io_read_cloned; + pd->stats.secs_r += bio_sectors(bio); + pkt_queue_bio(pd, cloned_bio); +} + +static void pkt_make_request_write(struct request_queue *q, struct bio *bio) +{ + struct pktcdvd_device *pd = q->queuedata; sector_t zone; struct packet_data *pkt; int was_empty, blocked_bio; struct pkt_rb_node *node; - pd = q->queuedata; - if (!pd) { - pr_err("%s incorrect request queue\n", - bdevname(bio->bi_bdev, b)); - goto end_io; - } - - /* - * Clone READ bios so we can have our own bi_end_io callback. 
- */ - if (bio_data_dir(bio) == READ) { - struct bio *cloned_bio = bio_clone(bio, GFP_NOIO); - struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO); - - psd->pd = pd; - psd->bio = bio; - cloned_bio->bi_bdev = pd->bdev; - cloned_bio->bi_private = psd; - cloned_bio->bi_end_io = pkt_end_io_read_cloned; - pd->stats.secs_r += bio_sectors(bio); - pkt_queue_bio(pd, cloned_bio); - return; - } - - if (!test_bit(PACKET_WRITABLE, &pd->flags)) { - pkt_notice(pd, "WRITE for ro device (%llu)\n", - (unsigned long long)bio->bi_sector); - goto end_io; - } - - if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) { - pkt_err(pd, "wrong bio size\n"); - goto end_io; - } - - blk_queue_bounce(q, &bio); - - zone = get_zone(bio->bi_sector, pd); - pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", - (unsigned long long)bio->bi_sector, - (unsigned long long)bio_end_sector(bio)); - - /* Check if we have to split the bio */ - { - struct bio_pair *bp; - sector_t last_zone; - int first_sectors; - - last_zone = get_zone(bio_end_sector(bio) - 1, pd); - if (last_zone != zone) { - BUG_ON(last_zone != zone + pd->settings.size); - first_sectors = last_zone - bio->bi_sector; - bp = bio_split(bio, first_sectors); - BUG_ON(!bp); - pkt_make_request(q, &bp->bio1); - pkt_make_request(q, &bp->bio2); - bio_pair_release(bp); - return; - } - } + zone = get_zone(bio->bi_iter.bi_sector, pd); /* * If we find a matching packet in state WAITING or READ_WAIT, we can @@ -2417,7 +2374,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio) if ((pkt->state == PACKET_WAITING_STATE) || (pkt->state == PACKET_READ_WAIT_STATE)) { bio_list_add(&pkt->orig_bios, bio); - pkt->write_size += bio->bi_size / CD_FRAMESIZE; + pkt->write_size += + bio->bi_iter.bi_size / CD_FRAMESIZE; if ((pkt->write_size >= pkt->frames) && (pkt->state == PACKET_WAITING_STATE)) { atomic_inc(&pkt->run_sm); @@ -2476,6 +2434,64 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio) */ wake_up(&pd->wqueue); } +} + +static void pkt_make_request(struct request_queue *q, struct bio *bio) +{ + struct pktcdvd_device *pd; + char b[BDEVNAME_SIZE]; + struct bio *split; + + pd = q->queuedata; + if (!pd) { + pr_err("%s incorrect request queue\n", + bdevname(bio->bi_bdev, b)); + goto end_io; + } + + pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", + (unsigned long long)bio->bi_iter.bi_sector, + (unsigned long long)bio_end_sector(bio)); + + /* + * Clone READ bios so we can have our own bi_end_io callback. 
+ */ + if (bio_data_dir(bio) == READ) { + pkt_make_request_read(pd, bio); + return; + } + + if (!test_bit(PACKET_WRITABLE, &pd->flags)) { + pkt_notice(pd, "WRITE for ro device (%llu)\n", + (unsigned long long)bio->bi_iter.bi_sector); + goto end_io; + } + + if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) { + pkt_err(pd, "wrong bio size\n"); + goto end_io; + } + + blk_queue_bounce(q, &bio); + + do { + sector_t zone = get_zone(bio->bi_iter.bi_sector, pd); + sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd); + + if (last_zone != zone) { + BUG_ON(last_zone != zone + pd->settings.size); + + split = bio_split(bio, last_zone - + bio->bi_iter.bi_sector, + GFP_NOIO, fs_bio_set); + bio_chain(split, bio); + } else { + split = bio; + } + + pkt_make_request_write(q, split); + } while (split != bio); + return; end_io: bio_io_error(bio); diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index d754a88..c120d70 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -94,26 +94,25 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev, { unsigned int offset = 0; struct req_iterator iter; - struct bio_vec *bvec; + struct bio_vec bvec; unsigned int i = 0; size_t size; void *buf; rq_for_each_segment(bvec, req, iter) { unsigned long flags; - dev_dbg(&dev->sbd.core, - "%s:%u: bio %u: %u segs %u sectors from %lu\n", - __func__, __LINE__, i, bio_segments(iter.bio), - bio_sectors(iter.bio), iter.bio->bi_sector); + dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n", + __func__, __LINE__, i, bio_sectors(iter.bio), + iter.bio->bi_iter.bi_sector); - size = bvec->bv_len; - buf = bvec_kmap_irq(bvec, &flags); + size = bvec.bv_len; + buf = bvec_kmap_irq(&bvec, &flags); if (gather) memcpy(dev->bounce_buf+offset, buf, size); else memcpy(buf, dev->bounce_buf+offset, size); offset += size; - flush_kernel_dcache_page(bvec->bv_page); + flush_kernel_dcache_page(bvec.bv_page); bvec_kunmap_irq(buf, &flags); i++; } @@ -130,7 +129,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev, #ifdef DEBUG unsigned int n = 0; - struct bio_vec *bv; + struct bio_vec bv; struct req_iterator iter; rq_for_each_segment(bv, req, iter) diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 06a2e53..ef45cfb 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -553,16 +553,16 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev, struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); int write = bio_data_dir(bio) == WRITE; const char *op = write ? 
"write" : "read"; - loff_t offset = bio->bi_sector << 9; + loff_t offset = bio->bi_iter.bi_sector << 9; int error = 0; - struct bio_vec *bvec; - unsigned int i; + struct bio_vec bvec; + struct bvec_iter iter; struct bio *next; - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, iter) { /* PS3 is ppc64, so we don't handle highmem */ - char *ptr = page_address(bvec->bv_page) + bvec->bv_offset; - size_t len = bvec->bv_len, retlen; + char *ptr = page_address(bvec.bv_page) + bvec.bv_offset; + size_t len = bvec.bv_len, retlen; dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op, len, offset); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 16cab66..b365e0d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1156,23 +1156,23 @@ static void bio_chain_put(struct bio *chain) */ static void zero_bio_chain(struct bio *chain, int start_ofs) { - struct bio_vec *bv; + struct bio_vec bv; + struct bvec_iter iter; unsigned long flags; void *buf; - int i; int pos = 0; while (chain) { - bio_for_each_segment(bv, chain, i) { - if (pos + bv->bv_len > start_ofs) { + bio_for_each_segment(bv, chain, iter) { + if (pos + bv.bv_len > start_ofs) { int remainder = max(start_ofs - pos, 0); - buf = bvec_kmap_irq(bv, &flags); + buf = bvec_kmap_irq(&bv, &flags); memset(buf + remainder, 0, - bv->bv_len - remainder); - flush_dcache_page(bv->bv_page); + bv.bv_len - remainder); + flush_dcache_page(bv.bv_page); bvec_kunmap_irq(buf, &flags); } - pos += bv->bv_len; + pos += bv.bv_len; } chain = chain->bi_next; @@ -1220,74 +1220,14 @@ static struct bio *bio_clone_range(struct bio *bio_src, unsigned int len, gfp_t gfpmask) { - struct bio_vec *bv; - unsigned int resid; - unsigned short idx; - unsigned int voff; - unsigned short end_idx; - unsigned short vcnt; struct bio *bio; - /* Handle the easy case for the caller */ - - if (!offset && len == bio_src->bi_size) - return bio_clone(bio_src, gfpmask); - - if (WARN_ON_ONCE(!len)) - return NULL; - if (WARN_ON_ONCE(len > bio_src->bi_size)) - return NULL; - if (WARN_ON_ONCE(offset > bio_src->bi_size - len)) - return NULL; - - /* Find first affected segment... */ - - resid = offset; - bio_for_each_segment(bv, bio_src, idx) { - if (resid < bv->bv_len) - break; - resid -= bv->bv_len; - } - voff = resid; - - /* ...and the last affected segment */ - - resid += len; - __bio_for_each_segment(bv, bio_src, end_idx, idx) { - if (resid <= bv->bv_len) - break; - resid -= bv->bv_len; - } - vcnt = end_idx - idx + 1; - - /* Build the clone */ - - bio = bio_alloc(gfpmask, (unsigned int) vcnt); + bio = bio_clone(bio_src, gfpmask); if (!bio) return NULL; /* ENOMEM */ - bio->bi_bdev = bio_src->bi_bdev; - bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT); - bio->bi_rw = bio_src->bi_rw; - bio->bi_flags |= 1 << BIO_CLONED; - - /* - * Copy over our part of the bio_vec, then update the first - * and last (or only) entries. 
- */ - memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx], - vcnt * sizeof (struct bio_vec)); - bio->bi_io_vec[0].bv_offset += voff; - if (vcnt > 1) { - bio->bi_io_vec[0].bv_len -= voff; - bio->bi_io_vec[vcnt - 1].bv_len = resid; - } else { - bio->bi_io_vec[0].bv_len = len; - } - - bio->bi_vcnt = vcnt; - bio->bi_size = len; - bio->bi_idx = 0; + bio_advance(bio, offset); + bio->bi_iter.bi_size = len; return bio; } @@ -1318,7 +1258,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src, /* Build up a chain of clone bios up to the limit */ - if (!bi || off >= bi->bi_size || !len) + if (!bi || off >= bi->bi_iter.bi_size || !len) return NULL; /* Nothing to clone */ end = &chain; @@ -1330,7 +1270,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src, rbd_warn(NULL, "bio_chain exhausted with %u left", len); goto out_err; /* EINVAL; ran out of bio's */ } - bi_size = min_t(unsigned int, bi->bi_size - off, len); + bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len); bio = bio_clone_range(bi, off, bi_size, gfpmask); if (!bio) goto out_err; /* ENOMEM */ @@ -1339,7 +1279,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src, end = &bio->bi_next; off += bi_size; - if (off == bi->bi_size) { + if (off == bi->bi_iter.bi_size) { bi = bi->bi_next; off = 0; } @@ -2227,7 +2167,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request, if (type == OBJ_REQUEST_BIO) { bio_list = data_desc; - rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT); + rbd_assert(img_offset == + bio_list->bi_iter.bi_sector << SECTOR_SHIFT); } else { rbd_assert(type == OBJ_REQUEST_PAGES); pages = data_desc; diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index 2284f5d..2839d37 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio) if (!card) goto req_err; - if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk)) + if (bio_end_sector(bio) > get_capacity(card->gendisk)) goto req_err; if (unlikely(card->halt)) { @@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio) goto req_err; } - if (bio->bi_size == 0) { + if (bio->bi_iter.bi_size == 0) { dev_err(CARD_TO_DEV(card), "size zero BIO!\n"); goto req_err; } @@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio) dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n", bio_data_dir(bio) ? 
'W' : 'R', bio_meta, - (u64)bio->bi_sector << 9, bio->bi_size); + (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size); st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas, bio_dma_done_cb, bio_meta); diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index fc88ba3..cf8cd29 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c @@ -684,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, void *cb_data) { struct list_head dma_list[RSXX_MAX_TARGETS]; - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; unsigned long long addr8; unsigned int laddr; unsigned int bv_len; @@ -696,7 +697,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, int st; int i; - addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */ + addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ atomic_set(n_dmas, 0); for (i = 0; i < card->n_targets; i++) { @@ -705,7 +706,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, } if (bio->bi_rw & REQ_DISCARD) { - bv_len = bio->bi_size; + bv_len = bio->bi_iter.bi_size; while (bv_len > 0) { tgt = rsxx_get_dma_tgt(card, addr8); @@ -722,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, bv_len -= RSXX_HW_BLK_SIZE; } } else { - bio_for_each_segment(bvec, bio, i) { - bv_len = bvec->bv_len; - bv_off = bvec->bv_offset; + bio_for_each_segment(bvec, bio, iter) { + bv_len = bvec.bv_len; + bv_off = bvec.bv_offset; while (bv_len > 0) { tgt = rsxx_get_dma_tgt(card, addr8); @@ -736,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, st = rsxx_queue_dma(card, &dma_list[tgt], bio_data_dir(bio), dma_off, dma_len, - laddr, bvec->bv_page, + laddr, bvec.bv_page, bv_off, cb, cb_data); if (st) goto bvec_err; diff --git a/drivers/block/umem.c b/drivers/block/umem.c index ad70868..4cf81b5 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -108,8 +108,7 @@ struct cardinfo { * have been written */ struct bio *bio, *currentbio, **biotail; - int current_idx; - sector_t current_sector; + struct bvec_iter current_iter; struct request_queue *queue; @@ -118,7 +117,7 @@ struct cardinfo { struct mm_dma_desc *desc; int cnt, headcnt; struct bio *bio, **biotail; - int idx; + struct bvec_iter iter; } mm_pages[2]; #define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc)) @@ -344,16 +343,13 @@ static int add_bio(struct cardinfo *card) dma_addr_t dma_handle; int offset; struct bio *bio; - struct bio_vec *vec; - int idx; + struct bio_vec vec; int rw; - int len; bio = card->currentbio; if (!bio && card->bio) { card->currentbio = card->bio; - card->current_idx = card->bio->bi_idx; - card->current_sector = card->bio->bi_sector; + card->current_iter = card->bio->bi_iter; card->bio = card->bio->bi_next; if (card->bio == NULL) card->biotail = &card->bio; @@ -362,18 +358,17 @@ static int add_bio(struct cardinfo *card) } if (!bio) return 0; - idx = card->current_idx; rw = bio_rw(bio); if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE) return 0; - vec = bio_iovec_idx(bio, idx); - len = vec->bv_len; + vec = bio_iter_iovec(bio, card->current_iter); + dma_handle = pci_map_page(card->dev, - vec->bv_page, - vec->bv_offset, - len, + vec.bv_page, + vec.bv_offset, + vec.bv_len, (rw == READ) ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); @@ -381,7 +376,7 @@ static int add_bio(struct cardinfo *card) desc = &p->desc[p->cnt]; p->cnt++; if (p->bio == NULL) - p->idx = idx; + p->iter = card->current_iter; if ((p->biotail) != &bio->bi_next) { *(p->biotail) = bio; p->biotail = &(bio->bi_next); @@ -391,8 +386,8 @@ static int add_bio(struct cardinfo *card) desc->data_dma_handle = dma_handle; desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle); - desc->local_addr = cpu_to_le64(card->current_sector << 9); - desc->transfer_size = cpu_to_le32(len); + desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9); + desc->transfer_size = cpu_to_le32(vec.bv_len); offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc)); desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset)); desc->zero1 = desc->zero2 = 0; @@ -407,10 +402,9 @@ static int add_bio(struct cardinfo *card) desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ); desc->sem_control_bits = desc->control_bits; - card->current_sector += (len >> 9); - idx++; - card->current_idx = idx; - if (idx >= bio->bi_vcnt) + + bio_advance_iter(bio, &card->current_iter, vec.bv_len); + if (!card->current_iter.bi_size) card->currentbio = NULL; return 1; @@ -439,23 +433,25 @@ static void process_page(unsigned long data) struct mm_dma_desc *desc = &page->desc[page->headcnt]; int control = le32_to_cpu(desc->sem_control_bits); int last = 0; - int idx; + struct bio_vec vec; if (!(control & DMASCR_DMA_COMPLETE)) { control = dma_status; last = 1; } + page->headcnt++; - idx = page->idx; - page->idx++; - if (page->idx >= bio->bi_vcnt) { + vec = bio_iter_iovec(bio, page->iter); + bio_advance_iter(bio, &page->iter, vec.bv_len); + + if (!page->iter.bi_size) { page->bio = bio->bi_next; if (page->bio) - page->idx = page->bio->bi_idx; + page->iter = page->bio->bi_iter; } pci_unmap_page(card->dev, desc->data_dma_handle, - bio_iovec_idx(bio, idx)->bv_len, + vec.bv_len, (control & DMASCR_TRANSFER_READ) ? 
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); if (control & DMASCR_HARD_ERROR) { @@ -532,7 +528,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio) { struct cardinfo *card = q->queuedata; pr_debug("mm_make_request %llu %u\n", - (unsigned long long)bio->bi_sector, bio->bi_size); + (unsigned long long)bio->bi_iter.bi_sector, + bio->bi_iter.bi_size); spin_lock_irq(&card->lock); *card->biotail = bio; diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 6620b73..4b97b86 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1257,7 +1257,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, bio->bi_bdev = preq.bdev; bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; - bio->bi_sector = preq.sector_number; + bio->bi_iter.bi_sector = preq.sector_number; } preq.sector_number += seg[i].nsec; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index f9c43f9..8dcfb54 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info) for (i = 0; i < pending; i++) { offset = (i * segs * PAGE_SIZE) >> 9; size = min((unsigned int)(segs * PAGE_SIZE) >> 9, - (unsigned int)(bio->bi_size >> 9) - offset); + (unsigned int)bio_sectors(bio) - offset); cloned_bio = bio_clone(bio, GFP_NOIO); BUG_ON(cloned_bio == NULL); bio_trim(cloned_bio, offset, size); diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 754f4317..dbdbca5 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -280,7 +280,6 @@ struct bcache_device { unsigned long sectors_dirty_last; long sectors_dirty_derivative; - mempool_t *unaligned_bvec; struct bio_set *bio_split; unsigned data_csum:1; @@ -902,7 +901,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *); void bch_bbio_free(struct bio *, struct cache_set *); struct bio *bch_bbio_alloc(struct cache_set *); -struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *); void bch_generic_make_request(struct bio *, struct bio_split_pool *); void __bch_submit_bbio(struct bio *, struct cache_set *); void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 31bb53f..946ecd3 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b) bio = bch_bbio_alloc(b->c); bio->bi_rw = REQ_META|READ_SYNC; - bio->bi_size = KEY_SIZE(&b->key) << 9; + bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; bio->bi_end_io = btree_node_read_endio; bio->bi_private = &cl; @@ -362,7 +362,7 @@ static void btree_node_write_done(struct closure *cl) struct bio_vec *bv; int n; - __bio_for_each_segment(bv, b->bio, n, 0) + bio_for_each_segment_all(bv, b->bio, n) __free_page(bv->bv_page); __btree_node_write_done(cl); @@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b) b->bio->bi_end_io = btree_node_write_endio; b->bio->bi_private = cl; b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; - b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); + b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c); bch_bio_map(b->bio, i); /* @@ -421,7 +421,7 @@ static void do_btree_node_write(struct btree *b) struct bio_vec *bv; void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); - bio_for_each_segment(bv, b->bio, j) + bio_for_each_segment_all(bv, b->bio, j) 
memcpy(page_address(bv->bv_page), base + j * PAGE_SIZE, PAGE_SIZE); diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 264fcfb..03cb4d1 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -173,7 +173,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) { char name[BDEVNAME_SIZE]; struct bio *check; - struct bio_vec *bv; + struct bio_vec bv, *bv2; + struct bvec_iter iter; int i; check = bio_clone(bio, GFP_NOIO); @@ -185,23 +186,23 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) submit_bio_wait(READ_SYNC, check); - bio_for_each_segment(bv, bio, i) { - void *p1 = kmap_atomic(bv->bv_page); - void *p2 = page_address(check->bi_io_vec[i].bv_page); + bio_for_each_segment(bv, bio, iter) { + void *p1 = kmap_atomic(bv.bv_page); + void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page); - cache_set_err_on(memcmp(p1 + bv->bv_offset, - p2 + bv->bv_offset, - bv->bv_len), + cache_set_err_on(memcmp(p1 + bv.bv_offset, + p2 + bv.bv_offset, + bv.bv_len), dc->disk.c, "verify failed at dev %s sector %llu", bdevname(dc->bdev, name), - (uint64_t) bio->bi_sector); + (uint64_t) bio->bi_iter.bi_sector); kunmap_atomic(p1); } - bio_for_each_segment_all(bv, check, i) - __free_page(bv->bv_page); + bio_for_each_segment_all(bv2, check, i) + __free_page(bv2->bv_page); out_put: bio_put(check); } diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 9056632..fa028fa 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -11,178 +11,40 @@ #include <linux/blkdev.h> -static void bch_bi_idx_hack_endio(struct bio *bio, int error) -{ - struct bio *p = bio->bi_private; - - bio_endio(p, error); - bio_put(bio); -} - -static void bch_generic_make_request_hack(struct bio *bio) -{ - if (bio->bi_idx) { - struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio)); - - memcpy(clone->bi_io_vec, - bio_iovec(bio), - bio_segments(bio) * sizeof(struct bio_vec)); - - clone->bi_sector = bio->bi_sector; - clone->bi_bdev = bio->bi_bdev; - clone->bi_rw = bio->bi_rw; - clone->bi_vcnt = bio_segments(bio); - clone->bi_size = bio->bi_size; - - clone->bi_private = bio; - clone->bi_end_io = bch_bi_idx_hack_endio; - - bio = clone; - } - - /* - * Hack, since drivers that clone bios clone up to bi_max_vecs, but our - * bios might have had more than that (before we split them per device - * limitations). - * - * To be taken out once immutable bvec stuff is in. - */ - bio->bi_max_vecs = bio->bi_vcnt; - - generic_make_request(bio); -} - -/** - * bch_bio_split - split a bio - * @bio: bio to split - * @sectors: number of sectors to split from the front of @bio - * @gfp: gfp mask - * @bs: bio set to allocate from - * - * Allocates and returns a new bio which represents @sectors from the start of - * @bio, and updates @bio to represent the remaining sectors. - * - * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio - * unchanged. - * - * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a - * bvec boundry; it is the caller's responsibility to ensure that @bio is not - * freed before the split. 
- */ -struct bio *bch_bio_split(struct bio *bio, int sectors, - gfp_t gfp, struct bio_set *bs) -{ - unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9; - struct bio_vec *bv; - struct bio *ret = NULL; - - BUG_ON(sectors <= 0); - - if (sectors >= bio_sectors(bio)) - return bio; - - if (bio->bi_rw & REQ_DISCARD) { - ret = bio_alloc_bioset(gfp, 1, bs); - if (!ret) - return NULL; - idx = 0; - goto out; - } - - bio_for_each_segment(bv, bio, idx) { - vcnt = idx - bio->bi_idx; - - if (!nbytes) { - ret = bio_alloc_bioset(gfp, vcnt, bs); - if (!ret) - return NULL; - - memcpy(ret->bi_io_vec, bio_iovec(bio), - sizeof(struct bio_vec) * vcnt); - - break; - } else if (nbytes < bv->bv_len) { - ret = bio_alloc_bioset(gfp, ++vcnt, bs); - if (!ret) - return NULL; - - memcpy(ret->bi_io_vec, bio_iovec(bio), - sizeof(struct bio_vec) * vcnt); - - ret->bi_io_vec[vcnt - 1].bv_len = nbytes; - bv->bv_offset += nbytes; - bv->bv_len -= nbytes; - break; - } - - nbytes -= bv->bv_len; - } -out: - ret->bi_bdev = bio->bi_bdev; - ret->bi_sector = bio->bi_sector; - ret->bi_size = sectors << 9; - ret->bi_rw = bio->bi_rw; - ret->bi_vcnt = vcnt; - ret->bi_max_vecs = vcnt; - - bio->bi_sector += sectors; - bio->bi_size -= sectors << 9; - bio->bi_idx = idx; - - if (bio_integrity(bio)) { - if (bio_integrity_clone(ret, bio, gfp)) { - bio_put(ret); - return NULL; - } - - bio_integrity_trim(ret, 0, bio_sectors(ret)); - bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio)); - } - - return ret; -} - static unsigned bch_bio_max_sectors(struct bio *bio) { - unsigned ret = bio_sectors(bio); struct request_queue *q = bdev_get_queue(bio->bi_bdev); - unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES, - queue_max_segments(q)); + struct bio_vec bv; + struct bvec_iter iter; + unsigned ret = 0, seg = 0; if (bio->bi_rw & REQ_DISCARD) - return min(ret, q->limits.max_discard_sectors); - - if (bio_segments(bio) > max_segments || - q->merge_bvec_fn) { - struct bio_vec *bv; - int i, seg = 0; - - ret = 0; - - bio_for_each_segment(bv, bio, i) { - struct bvec_merge_data bvm = { - .bi_bdev = bio->bi_bdev, - .bi_sector = bio->bi_sector, - .bi_size = ret << 9, - .bi_rw = bio->bi_rw, - }; - - if (seg == max_segments) - break; + return min(bio_sectors(bio), q->limits.max_discard_sectors); + + bio_for_each_segment(bv, bio, iter) { + struct bvec_merge_data bvm = { + .bi_bdev = bio->bi_bdev, + .bi_sector = bio->bi_iter.bi_sector, + .bi_size = ret << 9, + .bi_rw = bio->bi_rw, + }; + + if (seg == min_t(unsigned, BIO_MAX_PAGES, + queue_max_segments(q))) + break; - if (q->merge_bvec_fn && - q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len) - break; + if (q->merge_bvec_fn && + q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len) + break; - seg++; - ret += bv->bv_len >> 9; - } + seg++; + ret += bv.bv_len >> 9; } ret = min(ret, queue_max_sectors(q)); WARN_ON(!ret); - ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9); + ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9); return ret; } @@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl) s->bio->bi_end_io = s->bi_end_io; s->bio->bi_private = s->bi_private; - bio_endio(s->bio, 0); + bio_endio_nodec(s->bio, 0); closure_debug_destroy(&s->cl); mempool_free(s, s->p->bio_split_hook); @@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p) bio_get(bio); do { - n = bch_bio_split(bio, bch_bio_max_sectors(bio), - GFP_NOIO, s->p->bio_split); + n = bio_next_split(bio, bch_bio_max_sectors(bio), + GFP_NOIO, s->p->bio_split); n->bi_end_io = 
bch_bio_submit_split_endio; n->bi_private = &s->cl; closure_get(&s->cl); - bch_generic_make_request_hack(n); + generic_make_request(n); } while (n != bio); continue_at(&s->cl, bch_bio_submit_split_done, NULL); submit: - bch_generic_make_request_hack(bio); + generic_make_request(bio); } /* Bios with headers */ @@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c) { struct bbio *b = container_of(bio, struct bbio, bio); - bio->bi_sector = PTR_OFFSET(&b->key, 0); - bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; + bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); + bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; b->submit_time_us = local_clock_us(); closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0)); diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index ecdaa67..7eafdf0 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -51,10 +51,10 @@ reread: left = ca->sb.bucket_size - offset; len = min_t(unsigned, left, PAGE_SECTORS * 8); bio_reset(bio); - bio->bi_sector = bucket + offset; + bio->bi_iter.bi_sector = bucket + offset; bio->bi_bdev = ca->bdev; bio->bi_rw = READ; - bio->bi_size = len << 9; + bio->bi_iter.bi_size = len << 9; bio->bi_end_io = journal_read_endio; bio->bi_private = &cl; @@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca) atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); bio_init(bio); - bio->bi_sector = bucket_to_sector(ca->set, + bio->bi_iter.bi_sector = bucket_to_sector(ca->set, ca->sb.d[ja->discard_idx]); bio->bi_bdev = ca->bdev; bio->bi_rw = REQ_WRITE|REQ_DISCARD; bio->bi_max_vecs = 1; bio->bi_io_vec = bio->bi_inline_vecs; - bio->bi_size = bucket_bytes(ca); + bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_end_io = journal_discard_endio; closure_get(&ca->set->cl); @@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl) atomic_long_add(sectors, &ca->meta_sectors_written); bio_reset(bio); - bio->bi_sector = PTR_OFFSET(k, i); + bio->bi_iter.bi_sector = PTR_OFFSET(k, i); bio->bi_bdev = ca->bdev; bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; - bio->bi_size = sectors << 9; + bio->bi_iter.bi_size = sectors << 9; bio->bi_end_io = journal_write_endio; bio->bi_private = w; diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index f2f0998..052bd24 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -86,7 +86,7 @@ static void moving_init(struct moving_io *io) bio_get(bio); bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); - bio->bi_size = KEY_SIZE(&io->w->key) << 9; + bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9; bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS); bio->bi_private = &io->cl; @@ -102,7 +102,7 @@ static void write_moving(struct closure *cl) if (!op->error) { moving_init(io); - io->bio.bio.bi_sector = KEY_START(&io->w->key); + io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); op->write_prio = 1; op->bio = &io->bio.bio; diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 61bcfc2..c906571 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -197,14 +197,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio) static void bio_csum(struct bio *bio, struct bkey *k) { - struct bio_vec *bv; + struct bio_vec bv; + struct bvec_iter iter; uint64_t csum = 0; - int i; - bio_for_each_segment(bv, bio, i) { - void *d = kmap(bv->bv_page) + bv->bv_offset; - csum = bch_crc64_update(csum, d, bv->bv_len); 
- kunmap(bv->bv_page); + bio_for_each_segment(bv, bio, iter) { + void *d = kmap(bv.bv_page) + bv.bv_offset; + csum = bch_crc64_update(csum, d, bv.bv_len); + kunmap(bv.bv_page); } k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1); @@ -260,7 +260,7 @@ static void bch_data_invalidate(struct closure *cl) struct bio *bio = op->bio; pr_debug("invalidating %i sectors from %llu", - bio_sectors(bio), (uint64_t) bio->bi_sector); + bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); while (bio_sectors(bio)) { unsigned sectors = min(bio_sectors(bio), @@ -269,11 +269,11 @@ static void bch_data_invalidate(struct closure *cl) if (bch_keylist_realloc(&op->insert_keys, 0, op->c)) goto out; - bio->bi_sector += sectors; - bio->bi_size -= sectors << 9; + bio->bi_iter.bi_sector += sectors; + bio->bi_iter.bi_size -= sectors << 9; bch_keylist_add(&op->insert_keys, - &KEY(op->inode, bio->bi_sector, sectors)); + &KEY(op->inode, bio->bi_iter.bi_sector, sectors)); } op->insert_data_done = true; @@ -363,14 +363,14 @@ static void bch_data_insert_start(struct closure *cl) k = op->insert_keys.top; bkey_init(k); SET_KEY_INODE(k, op->inode); - SET_KEY_OFFSET(k, bio->bi_sector); + SET_KEY_OFFSET(k, bio->bi_iter.bi_sector); if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), op->write_point, op->write_prio, op->writeback)) goto err; - n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split); + n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split); n->bi_end_io = bch_data_insert_endio; n->bi_private = cl; @@ -521,7 +521,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) (bio->bi_rw & REQ_WRITE))) goto skip; - if (bio->bi_sector & (c->sb.block_size - 1) || + if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || bio_sectors(bio) & (c->sb.block_size - 1)) { pr_debug("skipping unaligned io"); goto skip; @@ -545,8 +545,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) spin_lock(&dc->io_lock); - hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) - if (i->last == bio->bi_sector && + hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) + if (i->last == bio->bi_iter.bi_sector && time_before(jiffies, i->jiffies)) goto found; @@ -555,8 +555,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) add_sequential(task); i->sequential = 0; found: - if (i->sequential + bio->bi_size > i->sequential) - i->sequential += bio->bi_size; + if (i->sequential + bio->bi_iter.bi_size > i->sequential) + i->sequential += bio->bi_iter.bi_size; i->last = bio_end_sector(bio); i->jiffies = jiffies + msecs_to_jiffies(5000); @@ -605,7 +605,6 @@ struct search { unsigned insert_bio_sectors; unsigned recoverable:1; - unsigned unaligned_bvec:1; unsigned write:1; unsigned read_dirty_data:1; @@ -649,15 +648,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) struct bkey *bio_key; unsigned ptr; - if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0) + if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) return MAP_CONTINUE; if (KEY_INODE(k) != s->iop.inode || - KEY_START(k) > bio->bi_sector) { + KEY_START(k) > bio->bi_iter.bi_sector) { unsigned bio_sectors = bio_sectors(bio); unsigned sectors = KEY_INODE(k) == s->iop.inode ? 
min_t(uint64_t, INT_MAX, - KEY_START(k) - bio->bi_sector) + KEY_START(k) - bio->bi_iter.bi_sector) : INT_MAX; int ret = s->d->cache_miss(b, s, bio, sectors); @@ -679,14 +678,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) if (KEY_DIRTY(k)) s->read_dirty_data = true; - n = bch_bio_split(bio, min_t(uint64_t, INT_MAX, - KEY_OFFSET(k) - bio->bi_sector), - GFP_NOIO, s->d->bio_split); + n = bio_next_split(bio, min_t(uint64_t, INT_MAX, + KEY_OFFSET(k) - bio->bi_iter.bi_sector), + GFP_NOIO, s->d->bio_split); bio_key = &container_of(n, struct bbio, bio)->key; bch_bkey_copy_single_ptr(bio_key, k, ptr); - bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key); + bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key); bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); n->bi_end_io = bch_cache_read_endio; @@ -713,7 +712,7 @@ static void cache_lookup(struct closure *cl) struct bio *bio = &s->bio.bio; int ret = bch_btree_map_keys(&s->op, s->iop.c, - &KEY(s->iop.inode, bio->bi_sector, 0), + &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), cache_lookup_fn, MAP_END_KEY); if (ret == -EAGAIN) continue_at(cl, cache_lookup, bcache_wq); @@ -758,10 +757,12 @@ static void bio_complete(struct search *s) static void do_bio_hook(struct search *s) { struct bio *bio = &s->bio.bio; - memcpy(bio, s->orig_bio, sizeof(struct bio)); + bio_init(bio); + __bio_clone_fast(bio, s->orig_bio); bio->bi_end_io = request_endio; bio->bi_private = &s->cl; + atomic_set(&bio->bi_cnt, 3); } @@ -773,9 +774,6 @@ static void search_free(struct closure *cl) if (s->iop.bio) bio_put(s->iop.bio); - if (s->unaligned_bvec) - mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec); - closure_debug_destroy(cl); mempool_free(s, s->d->c->search); } @@ -783,7 +781,6 @@ static void search_free(struct closure *cl) static struct search *search_alloc(struct bio *bio, struct bcache_device *d) { struct search *s; - struct bio_vec *bv; s = mempool_alloc(d->c->search, GFP_NOIO); memset(s, 0, offsetof(struct search, iop.insert_keys)); @@ -802,15 +799,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d) s->start_time = jiffies; do_bio_hook(s); - if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) { - bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO); - memcpy(bv, bio_iovec(bio), - sizeof(struct bio_vec) * bio_segments(bio)); - - s->bio.bio.bi_io_vec = bv; - s->unaligned_bvec = 1; - } - return s; } @@ -849,26 +837,13 @@ static void cached_dev_read_error(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); struct bio *bio = &s->bio.bio; - struct bio_vec *bv; - int i; if (s->recoverable) { /* Retry from the backing device: */ trace_bcache_read_retry(s->orig_bio); s->iop.error = 0; - bv = s->bio.bio.bi_io_vec; do_bio_hook(s); - s->bio.bio.bi_io_vec = bv; - - if (!s->unaligned_bvec) - bio_for_each_segment(bv, s->orig_bio, i) - bv->bv_offset = 0, bv->bv_len = PAGE_SIZE; - else - memcpy(s->bio.bio.bi_io_vec, - bio_iovec(s->orig_bio), - sizeof(struct bio_vec) * - bio_segments(s->orig_bio)); /* XXX: invalidate cache */ @@ -893,9 +868,9 @@ static void cached_dev_read_done(struct closure *cl) if (s->iop.bio) { bio_reset(s->iop.bio); - s->iop.bio->bi_sector = s->cache_miss->bi_sector; + s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; - s->iop.bio->bi_size = s->insert_bio_sectors << 9; + s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; bch_bio_map(s->iop.bio, NULL); 
bio_copy_data(s->cache_miss, s->iop.bio); @@ -904,8 +879,7 @@ static void cached_dev_read_done(struct closure *cl) s->cache_miss = NULL; } - if (verify(dc, &s->bio.bio) && s->recoverable && - !s->unaligned_bvec && !s->read_dirty_data) + if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data) bch_data_verify(dc, s->orig_bio); bio_complete(s); @@ -945,7 +919,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, struct bio *miss, *cache_bio; if (s->cache_miss || s->iop.bypass) { - miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); + miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split); ret = miss == bio ? MAP_DONE : MAP_CONTINUE; goto out_submit; } @@ -959,7 +933,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); s->iop.replace_key = KEY(s->iop.inode, - bio->bi_sector + s->insert_bio_sectors, + bio->bi_iter.bi_sector + s->insert_bio_sectors, s->insert_bio_sectors); ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key); @@ -968,7 +942,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, s->iop.replace = true; - miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); + miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split); /* btree_search_recurse()'s btree iterator is no good anymore */ ret = miss == bio ? MAP_DONE : -EINTR; @@ -979,9 +953,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, if (!cache_bio) goto out_submit; - cache_bio->bi_sector = miss->bi_sector; - cache_bio->bi_bdev = miss->bi_bdev; - cache_bio->bi_size = s->insert_bio_sectors << 9; + cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; + cache_bio->bi_bdev = miss->bi_bdev; + cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; cache_bio->bi_end_io = request_endio; cache_bio->bi_private = &s->cl; @@ -1031,7 +1005,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) { struct closure *cl = &s->cl; struct bio *bio = &s->bio.bio; - struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0); + struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0); struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); @@ -1087,8 +1061,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) closure_bio_submit(flush, cl, s->d); } } else { - s->iop.bio = bio_clone_bioset(bio, GFP_NOIO, - dc->disk.bio_split); + s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split); closure_bio_submit(bio, cl, s->d); } @@ -1126,13 +1099,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio) part_stat_unlock(); bio->bi_bdev = dc->bdev; - bio->bi_sector += dc->sb.data_offset; + bio->bi_iter.bi_sector += dc->sb.data_offset; if (cached_dev_get(dc)) { s = search_alloc(bio, d); trace_bcache_request_start(s->d, bio); - if (!bio->bi_size) { + if (!bio->bi_iter.bi_size) { /* * can't call bch_journal_meta from under * generic_make_request @@ -1204,24 +1177,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc) static int flash_dev_cache_miss(struct btree *b, struct search *s, struct bio *bio, unsigned sectors) { - struct bio_vec *bv; - int i; + struct bio_vec bv; + struct bvec_iter iter; /* Zero fill bio */ - bio_for_each_segment(bv, bio, i) { - unsigned j = min(bv->bv_len >> 9, sectors); + bio_for_each_segment(bv, bio, iter) { + unsigned j = min(bv.bv_len >> 9, sectors); - void *p = 
kmap(bv->bv_page); - memset(p + bv->bv_offset, 0, j << 9); - kunmap(bv->bv_page); + void *p = kmap(bv.bv_page); + memset(p + bv.bv_offset, 0, j << 9); + kunmap(bv.bv_page); sectors -= j; } - bio_advance(bio, min(sectors << 9, bio->bi_size)); + bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size)); - if (!bio->bi_size) + if (!bio->bi_iter.bi_size) return MAP_DONE; return MAP_CONTINUE; @@ -1255,7 +1228,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) trace_bcache_request_start(s->d, bio); - if (!bio->bi_size) { + if (!bio->bi_iter.bi_size) { /* * can't call bch_journal_meta from under * generic_make_request @@ -1265,7 +1238,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) bcache_wq); } else if (rw) { bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, - &KEY(d->id, bio->bi_sector, 0), + &KEY(d->id, bio->bi_iter.bi_sector, 0), &KEY(d->id, bio_end_sector(bio), 0)); s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index c57bfa0..93d593f 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio) struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); unsigned i; - bio->bi_sector = SB_SECTOR; - bio->bi_rw = REQ_SYNC|REQ_META; - bio->bi_size = SB_SIZE; + bio->bi_iter.bi_sector = SB_SECTOR; + bio->bi_rw = REQ_SYNC|REQ_META; + bio->bi_iter.bi_size = SB_SIZE; bch_bio_map(bio, NULL); out->offset = cpu_to_le64(sb->offset); @@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw, struct bio *bio = bch_bbio_alloc(c); bio->bi_rw = REQ_SYNC|REQ_META|rw; - bio->bi_size = KEY_SIZE(k) << 9; + bio->bi_iter.bi_size = KEY_SIZE(k) << 9; bio->bi_end_io = uuid_endio; bio->bi_private = cl; @@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) closure_init_stack(cl); - bio->bi_sector = bucket * ca->sb.bucket_size; - bio->bi_bdev = ca->bdev; - bio->bi_rw = REQ_SYNC|REQ_META|rw; - bio->bi_size = bucket_bytes(ca); + bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; + bio->bi_bdev = ca->bdev; + bio->bi_rw = REQ_SYNC|REQ_META|rw; + bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_end_io = prio_endio; bio->bi_private = ca; @@ -739,8 +739,6 @@ static void bcache_device_free(struct bcache_device *d) } bio_split_pool_free(&d->bio_split_hook); - if (d->unaligned_bvec) - mempool_destroy(d->unaligned_bvec); if (d->bio_split) bioset_free(d->bio_split); if (is_vmalloc_addr(d->full_dirty_stripes)) @@ -793,8 +791,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, return minor; if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || - !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, - sizeof(struct bio_vec) * BIO_MAX_PAGES)) || bio_split_pool_init(&d->bio_split_hook) || !(d->disk = alloc_disk(1))) { ida_simple_remove(&bcache_minor, minor); diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index bb37618..db3ae4c 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -224,10 +224,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) void bch_bio_map(struct bio *bio, void *base) { - size_t size = bio->bi_size; + size_t size = bio->bi_iter.bi_size; struct bio_vec *bv = bio->bi_io_vec; - BUG_ON(!bio->bi_size); + BUG_ON(!bio->bi_iter.bi_size); BUG_ON(bio->bi_vcnt); bv->bv_offset = base ? 
((unsigned long) base) % PAGE_SIZE : 0; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 6c44fe0..f4300e4 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -111,7 +111,7 @@ static void dirty_init(struct keybuf_key *w) if (!io->dc->writeback_percent) bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); - bio->bi_size = KEY_SIZE(&w->key) << 9; + bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9; bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS); bio->bi_private = w; bio->bi_io_vec = bio->bi_inline_vecs; @@ -184,7 +184,7 @@ static void write_dirty(struct closure *cl) dirty_init(w); io->bio.bi_rw = WRITE; - io->bio.bi_sector = KEY_START(&w->key); + io->bio.bi_iter.bi_sector = KEY_START(&w->key); io->bio.bi_bdev = io->dc->bdev; io->bio.bi_end_io = dirty_endio; @@ -253,7 +253,7 @@ static void read_dirty(struct cached_dev *dc) io->dc = dc; dirty_init(w); - io->bio.bi_sector = PTR_OFFSET(&w->key, 0); + io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); io->bio.bi_bdev = PTR_CACHE(dc->disk.c, &w->key, 0)->bdev; io->bio.bi_rw = READ; diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index c9ddcf4..e2f8598 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, return false; if (dc->partial_stripes_expensive && - bcache_dev_stripe_dirty(dc, bio->bi_sector, + bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, bio_sectors(bio))) return true; diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h index 3a8cfa2..dd36461 100644 --- a/drivers/md/dm-bio-record.h +++ b/drivers/md/dm-bio-record.h @@ -17,55 +17,24 @@ * original bio state. 
*/ -struct dm_bio_vec_details { -#if PAGE_SIZE < 65536 - __u16 bv_len; - __u16 bv_offset; -#else - unsigned bv_len; - unsigned bv_offset; -#endif -}; - struct dm_bio_details { - sector_t bi_sector; struct block_device *bi_bdev; - unsigned int bi_size; - unsigned short bi_idx; unsigned long bi_flags; - struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES]; + struct bvec_iter bi_iter; }; static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) { - unsigned i; - - bd->bi_sector = bio->bi_sector; bd->bi_bdev = bio->bi_bdev; - bd->bi_size = bio->bi_size; - bd->bi_idx = bio->bi_idx; bd->bi_flags = bio->bi_flags; - - for (i = 0; i < bio->bi_vcnt; i++) { - bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len; - bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset; - } + bd->bi_iter = bio->bi_iter; } static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) { - unsigned i; - - bio->bi_sector = bd->bi_sector; bio->bi_bdev = bd->bi_bdev; - bio->bi_size = bd->bi_size; - bio->bi_idx = bd->bi_idx; bio->bi_flags = bd->bi_flags; - - for (i = 0; i < bio->bi_vcnt; i++) { - bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len; - bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset; - } + bio->bi_iter = bd->bi_iter; } #endif diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 9ed4212..66c5d13 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -540,7 +540,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, bio_init(&b->bio); b->bio.bi_io_vec = b->bio_vec; b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; - b->bio.bi_sector = block << b->c->sectors_per_block_bits; + b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; b->bio.bi_bdev = b->c->bdev; b->bio.bi_end_io = end_io; diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 930e8c3..1e018e9 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t) static void iot_update_stats(struct io_tracker *t, struct bio *bio) { - if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1) + if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1) t->nr_seq_samples++; else { /* @@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio) t->nr_rand_samples++; } - t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1); + t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1); } static void iot_check_for_pattern_switch(struct io_tracker *t) diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 09334c2..ffd472e 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -85,6 +85,12 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) { bio->bi_end_io = h->bi_end_io; bio->bi_private = h->bi_private; + + /* + * Must bump bi_remaining to allow bio to complete with + * restored bi_end_io. 
+ */ + atomic_inc(&bio->bi_remaining); } /*----------------------------------------------------------------*/ @@ -664,15 +670,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio) static void remap_to_cache(struct cache *cache, struct bio *bio, dm_cblock_t cblock) { - sector_t bi_sector = bio->bi_sector; + sector_t bi_sector = bio->bi_iter.bi_sector; bio->bi_bdev = cache->cache_dev->bdev; if (!block_size_is_power_of_two(cache)) - bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + - sector_div(bi_sector, cache->sectors_per_block); + bio->bi_iter.bi_sector = + (from_cblock(cblock) * cache->sectors_per_block) + + sector_div(bi_sector, cache->sectors_per_block); else - bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | - (bi_sector & (cache->sectors_per_block - 1)); + bio->bi_iter.bi_sector = + (from_cblock(cblock) << cache->sectors_per_block_shift) | + (bi_sector & (cache->sectors_per_block - 1)); } static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) @@ -712,7 +720,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) { - sector_t block_nr = bio->bi_sector; + sector_t block_nr = bio->bi_iter.bi_sector; if (!block_size_is_power_of_two(cache)) (void) sector_div(block_nr, cache->sectors_per_block); @@ -1027,7 +1035,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio) static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) { return (bio_data_dir(bio) == WRITE) && - (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); + (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); } static void avoid_copy(struct dm_cache_migration *mg) @@ -1252,7 +1260,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) size_t pb_data_size = get_per_bio_data_size(cache); struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); - BUG_ON(bio->bi_size); + BUG_ON(bio->bi_iter.bi_size); if (!pb->req_nr) remap_to_origin(cache, bio); else @@ -1275,9 +1283,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) */ static void process_discard_bio(struct cache *cache, struct bio *bio) { - dm_block_t start_block = dm_sector_div_up(bio->bi_sector, + dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector, cache->discard_block_size); - dm_block_t end_block = bio->bi_sector + bio_sectors(bio); + dm_block_t end_block = bio_end_sector(bio); dm_block_t b; end_block = block_div(end_block, cache->discard_block_size); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 81b0fa6..784695d 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -39,10 +39,8 @@ struct convert_context { struct completion restart; struct bio *bio_in; struct bio *bio_out; - unsigned int offset_in; - unsigned int offset_out; - unsigned int idx_in; - unsigned int idx_out; + struct bvec_iter iter_in; + struct bvec_iter iter_out; sector_t cc_sector; atomic_t cc_pending; }; @@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc, { ctx->bio_in = bio_in; ctx->bio_out = bio_out; - ctx->offset_in = 0; - ctx->offset_out = 0; - ctx->idx_in = bio_in ? bio_in->bi_idx : 0; - ctx->idx_out = bio_out ? 
bio_out->bi_idx : 0; + if (bio_in) + ctx->iter_in = bio_in->bi_iter; + if (bio_out) + ctx->iter_out = bio_out->bi_iter; ctx->cc_sector = sector + cc->iv_offset; init_completion(&ctx->restart); } @@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc, struct convert_context *ctx, struct ablkcipher_request *req) { - struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); - struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); + struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); + struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); struct dm_crypt_request *dmreq; u8 *iv; int r; @@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc, dmreq->iv_sector = ctx->cc_sector; dmreq->ctx = ctx; sg_init_table(&dmreq->sg_in, 1); - sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, - bv_in->bv_offset + ctx->offset_in); + sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT, + bv_in.bv_offset); sg_init_table(&dmreq->sg_out, 1); - sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, - bv_out->bv_offset + ctx->offset_out); + sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT, + bv_out.bv_offset); - ctx->offset_in += 1 << SECTOR_SHIFT; - if (ctx->offset_in >= bv_in->bv_len) { - ctx->offset_in = 0; - ctx->idx_in++; - } - - ctx->offset_out += 1 << SECTOR_SHIFT; - if (ctx->offset_out >= bv_out->bv_len) { - ctx->offset_out = 0; - ctx->idx_out++; - } + bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT); + bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT); if (cc->iv_gen_ops) { r = cc->iv_gen_ops->generator(cc, iv, dmreq); @@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc, atomic_set(&ctx->cc_pending, 1); - while(ctx->idx_in < ctx->bio_in->bi_vcnt && - ctx->idx_out < ctx->bio_out->bi_vcnt) { + while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) { crypt_alloc_req(cc, ctx); @@ -1021,7 +1009,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, size -= len; } - if (!clone->bi_size) { + if (!clone->bi_iter.bi_size) { bio_put(clone); return NULL; } @@ -1161,7 +1149,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) crypt_inc_pending(io); clone_init(io, clone); - clone->bi_sector = cc->start + io->sector; + clone->bi_iter.bi_sector = cc->start + io->sector; generic_make_request(clone); return 0; @@ -1207,9 +1195,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) } /* crypt_convert should have filled the clone bio */ - BUG_ON(io->ctx.idx_out < clone->bi_vcnt); + BUG_ON(io->ctx.iter_out.bi_size); - clone->bi_sector = cc->start + io->sector; + clone->bi_iter.bi_sector = cc->start + io->sector; if (async) kcryptd_queue_io(io); @@ -1224,7 +1212,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) struct dm_crypt_io *new_io; int crypt_finished; unsigned out_of_pages = 0; - unsigned remaining = io->base_bio->bi_size; + unsigned remaining = io->base_bio->bi_iter.bi_size; sector_t sector = io->sector; int r; @@ -1246,9 +1234,9 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) } io->ctx.bio_out = clone; - io->ctx.idx_out = 0; + io->ctx.iter_out = clone->bi_iter; - remaining -= clone->bi_size; + remaining -= clone->bi_iter.bi_size; sector += bio_sectors(clone); crypt_inc_pending(io); @@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) crypt_inc_pending(new_io); crypt_convert_init(cc, &new_io->ctx, NULL, 
io->base_bio, sector); - new_io->ctx.idx_in = io->ctx.idx_in; - new_io->ctx.offset_in = io->ctx.offset_in; + new_io->ctx.iter_in = io->ctx.iter_in; /* * Fragments after the first use the base_io @@ -1869,11 +1856,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { bio->bi_bdev = cc->dev->bdev; if (bio_sectors(bio)) - bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = cc->start + + dm_target_offset(ti, bio->bi_iter.bi_sector); return DM_MAPIO_REMAPPED; } - io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector)); + io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); if (bio_data_dir(io->base_bio) == READ) { if (kcryptd_io_read(io, GFP_NOWAIT)) diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index a8a511c..42c3a27 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -277,14 +277,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio) if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { bio->bi_bdev = dc->dev_write->bdev; if (bio_sectors(bio)) - bio->bi_sector = dc->start_write + - dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = dc->start_write + + dm_target_offset(ti, bio->bi_iter.bi_sector); return delay_bio(dc, dc->write_delay, bio); } bio->bi_bdev = dc->dev_read->bdev; - bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = dc->start_read + + dm_target_offset(ti, bio->bi_iter.bi_sector); return delay_bio(dc, dc->read_delay, bio); } diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index c80a0ec..b257e46 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio) bio->bi_bdev = fc->dev->bdev; if (bio_sectors(bio)) - bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); + bio->bi_iter.bi_sector = + flakey_map_sector(ti, bio->bi_iter.bi_sector); } static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) @@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, - (bio_data_dir(bio) == WRITE) ? 'w' : 'r', - bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes); + (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw, + (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); } } diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 2a20986..b2b8a10 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -201,26 +201,29 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse /* * Functions for getting the pages from a bvec. 
*/ -static void bvec_get_page(struct dpages *dp, +static void bio_get_page(struct dpages *dp, struct page **p, unsigned long *len, unsigned *offset) { - struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; - *p = bvec->bv_page; - *len = bvec->bv_len; - *offset = bvec->bv_offset; + struct bio *bio = dp->context_ptr; + struct bio_vec bvec = bio_iovec(bio); + *p = bvec.bv_page; + *len = bvec.bv_len; + *offset = bvec.bv_offset; } -static void bvec_next_page(struct dpages *dp) +static void bio_next_page(struct dpages *dp) { - struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; - dp->context_ptr = bvec + 1; + struct bio *bio = dp->context_ptr; + struct bio_vec bvec = bio_iovec(bio); + + bio_advance(bio, bvec.bv_len); } -static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec) +static void bio_dp_init(struct dpages *dp, struct bio *bio) { - dp->get_page = bvec_get_page; - dp->next_page = bvec_next_page; - dp->context_ptr = bvec; + dp->get_page = bio_get_page; + dp->next_page = bio_next_page; + dp->context_ptr = bio; } /* @@ -304,14 +307,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); - bio->bi_sector = where->sector + (where->count - remaining); + bio->bi_iter.bi_sector = where->sector + (where->count - remaining); bio->bi_bdev = where->bdev; bio->bi_end_io = endio; store_io_and_region_in_bio(bio, io, region); if (rw & REQ_DISCARD) { num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); - bio->bi_size = num_sectors << SECTOR_SHIFT; + bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; remaining -= num_sectors; } else if (rw & REQ_WRITE_SAME) { /* @@ -320,7 +323,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, dp->get_page(dp, &page, &len, &offset); bio_add_page(bio, page, logical_block_size, offset); num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); - bio->bi_size = num_sectors << SECTOR_SHIFT; + bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; offset = 0; remaining -= num_sectors; @@ -457,8 +460,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp, list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); break; - case DM_IO_BVEC: - bvec_dp_init(dp, io_req->mem.ptr.bvec); + case DM_IO_BIO: + bio_dp_init(dp, io_req->mem.ptr.bio); break; case DM_IO_VMA: diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 4f99d26..53e848c 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio) bio->bi_bdev = lc->dev->bdev; if (bio_sectors(bio)) - bio->bi_sector = linear_map_sector(ti, bio->bi_sector); + bio->bi_iter.bi_sector = + linear_map_sector(ti, bio->bi_iter.bi_sector); } static int linear_map(struct dm_target *ti, struct bio *bio) diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 9584443..f284e0b 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio) region_t region = dm_rh_bio_to_region(ms->rh, bio); if (log->type->in_sync(log, region, 0)) - return choose_mirror(ms, bio->bi_sector) ? 1 : 0; + return choose_mirror(ms, bio->bi_iter.bi_sector) ? 
1 : 0; return 0; } @@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio) */ static sector_t map_sector(struct mirror *m, struct bio *bio) { - if (unlikely(!bio->bi_size)) + if (unlikely(!bio->bi_iter.bi_size)) return 0; - return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector); + return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); } static void map_bio(struct mirror *m, struct bio *bio) { bio->bi_bdev = m->dev->bdev; - bio->bi_sector = map_sector(m, bio); + bio->bi_iter.bi_sector = map_sector(m, bio); } static void map_region(struct dm_io_region *io, struct mirror *m, @@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio) struct dm_io_region io; struct dm_io_request io_req = { .bi_rw = READ, - .mem.type = DM_IO_BVEC, - .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, + .mem.type = DM_IO_BIO, + .mem.ptr.bio = bio, .notify.fn = read_callback, .notify.context = bio, .client = m->ms->io_client, @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) * We can only read balance if the region is in sync. */ if (likely(region_in_sync(ms, region, 1))) - m = choose_mirror(ms, bio->bi_sector); + m = choose_mirror(ms, bio->bi_iter.bi_sector); else if (m && atomic_read(&m->error_count)) m = NULL; @@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio) struct mirror *m; struct dm_io_request io_req = { .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), - .mem.type = DM_IO_BVEC, - .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, + .mem.type = DM_IO_BIO, + .mem.ptr.bio = bio, .notify.fn = write_callback, .notify.context = bio, .client = ms->io_client, @@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio) * The region is in-sync and we can perform reads directly. * Store enough information so we can retry if it fails. 
*/ - m = choose_mirror(ms, bio->bi_sector); + m = choose_mirror(ms, bio->bi_iter.bi_sector); if (unlikely(!m)) return -EIO; diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 69732e0..b929fd5 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector); region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) { - return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); + return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - + rh->target_begin); } EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 7177185..ebddef5 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1438,6 +1438,7 @@ out: if (full_bio) { full_bio->bi_end_io = pe->full_bio_end_io; full_bio->bi_private = pe->full_bio_private; + atomic_inc(&full_bio->bi_remaining); } free_pending_exception(pe); @@ -1619,11 +1620,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, struct bio *bio, chunk_t chunk) { bio->bi_bdev = s->cow->bdev; - bio->bi_sector = chunk_to_sector(s->store, - dm_chunk_number(e->new_chunk) + - (chunk - e->old_chunk)) + - (bio->bi_sector & - s->store->chunk_mask); + bio->bi_iter.bi_sector = + chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + + (chunk - e->old_chunk)) + + (bio->bi_iter.bi_sector & s->store->chunk_mask); } static int snapshot_map(struct dm_target *ti, struct bio *bio) @@ -1641,7 +1641,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } - chunk = sector_to_chunk(s->store, bio->bi_sector); + chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); /* Full snapshots are not usable */ /* To get here the table must be live so s->active is always set. 
*/ @@ -1702,7 +1702,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) r = DM_MAPIO_SUBMITTED; if (!pe->started && - bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) { + bio->bi_iter.bi_size == + (s->store->chunk_size << SECTOR_SHIFT)) { pe->started = 1; up_write(&s->lock); start_full_bio(pe, bio); @@ -1758,7 +1759,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } - chunk = sector_to_chunk(s->store, bio->bi_sector); + chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); down_write(&s->lock); @@ -2095,7 +2096,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio) down_read(&_origins_lock); o = __lookup_origin(origin->bdev); if (o) - r = __origin_write(&o->snapshots, bio->bi_sector, bio); + r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); up_read(&_origins_lock); return r; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 73c1712..d1600d2 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio, { sector_t begin, end; - stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin); + stripe_map_range_sector(sc, bio->bi_iter.bi_sector, + target_stripe, &begin); stripe_map_range_sector(sc, bio_end_sector(bio), target_stripe, &end); if (begin < end) { bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; - bio->bi_sector = begin + sc->stripe[target_stripe].physical_start; - bio->bi_size = to_bytes(end - begin); + bio->bi_iter.bi_sector = begin + + sc->stripe[target_stripe].physical_start; + bio->bi_iter.bi_size = to_bytes(end - begin); return DM_MAPIO_REMAPPED; } else { /* The range doesn't map to the target stripe */ @@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio) return stripe_map_range(sc, bio, target_bio_nr); } - stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); + stripe_map_sector(sc, bio->bi_iter.bi_sector, + &stripe, &bio->bi_iter.bi_sector); - bio->bi_sector += sc->stripe[stripe].physical_start; + bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; bio->bi_bdev = sc->stripe[stripe].dev->bdev; return DM_MAPIO_REMAPPED; diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index ff9ac4b..09a688b 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -311,11 +311,11 @@ error: static int switch_map(struct dm_target *ti, struct bio *bio) { struct switch_ctx *sctx = ti->private; - sector_t offset = dm_target_offset(ti, bio->bi_sector); + sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); unsigned path_nr = switch_get_path_nr(sctx, offset); bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; - bio->bi_sector = sctx->path_list[path_nr].start + offset; + bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; return DM_MAPIO_REMAPPED; } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 726228b..faaf944 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -414,7 +414,7 @@ static bool block_size_is_power_of_two(struct pool *pool) static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) { struct pool *pool = tc->pool; - sector_t block_nr = bio->bi_sector; + sector_t block_nr = bio->bi_iter.bi_sector; if (block_size_is_power_of_two(pool)) block_nr >>= pool->sectors_per_block_shift; @@ -427,14 +427,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) { 
struct pool *pool = tc->pool; - sector_t bi_sector = bio->bi_sector; + sector_t bi_sector = bio->bi_iter.bi_sector; bio->bi_bdev = tc->pool_dev->bdev; if (block_size_is_power_of_two(pool)) - bio->bi_sector = (block << pool->sectors_per_block_shift) | - (bi_sector & (pool->sectors_per_block - 1)); + bio->bi_iter.bi_sector = + (block << pool->sectors_per_block_shift) | + (bi_sector & (pool->sectors_per_block - 1)); else - bio->bi_sector = (block * pool->sectors_per_block) + + bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + sector_div(bi_sector, pool->sectors_per_block); } @@ -612,8 +613,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) { - if (m->bio) + if (m->bio) { m->bio->bi_end_io = m->saved_bi_end_io; + atomic_inc(&m->bio->bi_remaining); + } cell_error(m->tc->pool, m->cell); list_del(&m->list); mempool_free(m, m->tc->pool->mapping_pool); @@ -627,8 +630,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) int r; bio = m->bio; - if (bio) + if (bio) { bio->bi_end_io = m->saved_bi_end_io; + atomic_inc(&bio->bi_remaining); + } if (m->err) { cell_error(pool, m->cell); @@ -731,7 +736,8 @@ static void process_prepared(struct pool *pool, struct list_head *head, */ static int io_overlaps_block(struct pool *pool, struct bio *bio) { - return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT); + return bio->bi_iter.bi_size == + (pool->sectors_per_block << SECTOR_SHIFT); } static int io_overwrites_block(struct pool *pool, struct bio *bio) @@ -1136,7 +1142,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, if (bio_detain(pool, &key, bio, &cell)) return; - if (bio_data_dir(bio) == WRITE && bio->bi_size) + if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) break_sharing(tc, bio, block, &key, lookup_result, cell); else { struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); @@ -1159,7 +1165,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block /* * Remap empty bios (flushes) immediately, without provisioning. */ - if (!bio->bi_size) { + if (!bio->bi_iter.bi_size) { inc_all_io_entry(pool, bio); cell_defer_no_holder(tc, cell); @@ -1258,7 +1264,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio) r = dm_thin_find_block(tc->td, block, 1, &lookup_result); switch (r) { case 0: - if (lookup_result.shared && (rw == WRITE) && bio->bi_size) + if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) handle_unserviceable_bio(tc->pool, bio); else { inc_all_io_entry(tc->pool, bio); @@ -2939,7 +2945,7 @@ out_unlock: static int thin_map(struct dm_target *ti, struct bio *bio) { - bio->bi_sector = dm_target_offset(ti, bio->bi_sector); + bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); return thin_bio_map(ti, bio); } diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 4b7941d..796007a 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -73,15 +73,10 @@ struct dm_verity_io { sector_t block; unsigned n_blocks; - /* saved bio vector */ - struct bio_vec *io_vec; - unsigned io_vec_size; + struct bvec_iter iter; struct work_struct work; - /* A space for short vectors; longer vectors are allocated separately. 
*/ - struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE]; - /* * Three variably-size fields follow this struct: * @@ -284,9 +279,10 @@ release_ret_r: static int verity_verify_io(struct dm_verity_io *io) { struct dm_verity *v = io->v; + struct bio *bio = dm_bio_from_per_bio_data(io, + v->ti->per_bio_data_size); unsigned b; int i; - unsigned vector = 0, offset = 0; for (b = 0; b < io->n_blocks; b++) { struct shash_desc *desc; @@ -336,31 +332,22 @@ test_block_hash: } todo = 1 << v->data_dev_block_bits; - do { - struct bio_vec *bv; + while (io->iter.bi_size) { u8 *page; - unsigned len; - - BUG_ON(vector >= io->io_vec_size); - bv = &io->io_vec[vector]; - page = kmap_atomic(bv->bv_page); - len = bv->bv_len - offset; - if (likely(len >= todo)) - len = todo; - r = crypto_shash_update(desc, - page + bv->bv_offset + offset, len); + struct bio_vec bv = bio_iter_iovec(bio, io->iter); + + page = kmap_atomic(bv.bv_page); + r = crypto_shash_update(desc, page + bv.bv_offset, + bv.bv_len); kunmap_atomic(page); + if (r < 0) { DMERR("crypto_shash_update failed: %d", r); return r; } - offset += len; - if (likely(offset == bv->bv_len)) { - offset = 0; - vector++; - } - todo -= len; - } while (todo); + + bio_advance_iter(bio, &io->iter, bv.bv_len); + } if (!v->version) { r = crypto_shash_update(desc, v->salt, v->salt_size); @@ -383,8 +370,6 @@ test_block_hash: return -EIO; } } - BUG_ON(vector != io->io_vec_size); - BUG_ON(offset); return 0; } @@ -400,10 +385,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error) bio->bi_end_io = io->orig_bi_end_io; bio->bi_private = io->orig_bi_private; - if (io->io_vec != io->io_vec_inline) - mempool_free(io->io_vec, v->vec_mempool); - - bio_endio(bio, error); + bio_endio_nodec(bio, error); } static void verity_work(struct work_struct *w) @@ -493,9 +475,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio) struct dm_verity_io *io; bio->bi_bdev = v->data_dev->bdev; - bio->bi_sector = verity_map_sector(v, bio->bi_sector); + bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); - if (((unsigned)bio->bi_sector | bio_sectors(bio)) & + if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) { DMERR_LIMIT("unaligned io"); return -EIO; @@ -514,18 +496,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio) io->v = v; io->orig_bi_end_io = bio->bi_end_io; io->orig_bi_private = bio->bi_private; - io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); - io->n_blocks = bio->bi_size >> v->data_dev_block_bits; + io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); + io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits; bio->bi_end_io = verity_end_io; bio->bi_private = io; - io->io_vec_size = bio_segments(bio); - if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE) - io->io_vec = io->io_vec_inline; - else - io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO); - memcpy(io->io_vec, bio_iovec(bio), - io->io_vec_size * sizeof(struct bio_vec)); + io->iter = bio->bi_iter; verity_submit_prefetch(v, io); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b49c762..8c53b09 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io) atomic_inc_return(&md->pending[rw])); if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, + dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, bio_sectors(bio), false, 0, &io->stats_aux); } @@ 
-593,7 +593,7 @@ static void end_io_acct(struct dm_io *io) part_stat_unlock(); if (unlikely(dm_stats_used(&md->stats))) - dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, + dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, bio_sectors(bio), true, duration, &io->stats_aux); /* @@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error) if (io_error == DM_ENDIO_REQUEUE) return; - if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) { + if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { /* * Preflush done for flush with data, reissue * without REQ_FLUSH. @@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error) struct dm_rq_clone_bio_info *info = clone->bi_private; struct dm_rq_target_io *tio = info->tio; struct bio *bio = info->orig; - unsigned int nr_bytes = info->orig->bi_size; + unsigned int nr_bytes = info->orig->bi_iter.bi_size; bio_put(clone); @@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio) * this io. */ atomic_inc(&tio->io->io_count); - sector = clone->bi_sector; + sector = clone->bi_iter.bi_sector; r = ti->type->map(ti, clone); if (r == DM_MAPIO_REMAPPED) { /* the bio has been remapped so dispatch it */ @@ -1155,76 +1155,32 @@ struct clone_info { struct dm_io *io; sector_t sector; sector_t sector_count; - unsigned short idx; }; static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) { - bio->bi_sector = sector; - bio->bi_size = to_bytes(len); -} - -static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count) -{ - bio->bi_idx = idx; - bio->bi_vcnt = idx + bv_count; - bio->bi_flags &= ~(1 << BIO_SEG_VALID); -} - -static void clone_bio_integrity(struct bio *bio, struct bio *clone, - unsigned short idx, unsigned len, unsigned offset, - unsigned trim) -{ - if (!bio_integrity(bio)) - return; - - bio_integrity_clone(clone, bio, GFP_NOIO); - - if (trim) - bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len); -} - -/* - * Creates a little bio that just does part of a bvec. - */ -static void clone_split_bio(struct dm_target_io *tio, struct bio *bio, - sector_t sector, unsigned short idx, - unsigned offset, unsigned len) -{ - struct bio *clone = &tio->clone; - struct bio_vec *bv = bio->bi_io_vec + idx; - - *clone->bi_io_vec = *bv; - - bio_setup_sector(clone, sector, len); - - clone->bi_bdev = bio->bi_bdev; - clone->bi_rw = bio->bi_rw; - clone->bi_vcnt = 1; - clone->bi_io_vec->bv_offset = offset; - clone->bi_io_vec->bv_len = clone->bi_size; - clone->bi_flags |= 1 << BIO_CLONED; - - clone_bio_integrity(bio, clone, idx, len, offset, 1); + bio->bi_iter.bi_sector = sector; + bio->bi_iter.bi_size = to_bytes(len); } /* * Creates a bio that consists of range of complete bvecs. 
*/ static void clone_bio(struct dm_target_io *tio, struct bio *bio, - sector_t sector, unsigned short idx, - unsigned short bv_count, unsigned len) + sector_t sector, unsigned len) { struct bio *clone = &tio->clone; - unsigned trim = 0; - __bio_clone(clone, bio); - bio_setup_sector(clone, sector, len); - bio_setup_bv(clone, idx, bv_count); + __bio_clone_fast(clone, bio); + + if (bio_integrity(bio)) + bio_integrity_clone(clone, bio, GFP_NOIO); + + bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); + clone->bi_iter.bi_size = to_bytes(len); - if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) - trim = 1; - clone_bio_integrity(bio, clone, idx, len, 0, trim); + if (bio_integrity(bio)) + bio_integrity_trim(clone, 0, len); } static struct dm_target_io *alloc_tio(struct clone_info *ci, @@ -1257,7 +1213,7 @@ static void __clone_and_map_simple_bio(struct clone_info *ci, * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush * and discard, so no need for concern about wasted bvec allocations. */ - __bio_clone(clone, ci->bio); + __bio_clone_fast(clone, ci->bio); if (len) bio_setup_sector(clone, ci->sector, len); @@ -1286,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci) } static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, - sector_t sector, int nr_iovecs, - unsigned short idx, unsigned short bv_count, - unsigned offset, unsigned len, - unsigned split_bvec) + sector_t sector, unsigned len) { struct bio *bio = ci->bio; struct dm_target_io *tio; @@ -1303,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti num_target_bios = ti->num_write_bios(ti, bio); for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { - tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr); - if (split_bvec) - clone_split_bio(tio, bio, sector, idx, offset, len); - else - clone_bio(tio, bio, sector, idx, bv_count, len); + tio = alloc_tio(ci, ti, 0, target_bio_nr); + clone_bio(tio, bio, sector, len); __map_bio(tio); } } @@ -1379,68 +1329,13 @@ static int __send_write_same(struct clone_info *ci) } /* - * Find maximum number of sectors / bvecs we can process with a single bio. - */ -static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx) -{ - struct bio *bio = ci->bio; - sector_t bv_len, total_len = 0; - - for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) { - bv_len = to_sector(bio->bi_io_vec[*idx].bv_len); - - if (bv_len > max) - break; - - max -= bv_len; - total_len += bv_len; - } - - return total_len; -} - -static int __split_bvec_across_targets(struct clone_info *ci, - struct dm_target *ti, sector_t max) -{ - struct bio *bio = ci->bio; - struct bio_vec *bv = bio->bi_io_vec + ci->idx; - sector_t remaining = to_sector(bv->bv_len); - unsigned offset = 0; - sector_t len; - - do { - if (offset) { - ti = dm_table_find_target(ci->map, ci->sector); - if (!dm_target_is_valid(ti)) - return -EIO; - - max = max_io_len(ci->sector, ti); - } - - len = min(remaining, max); - - __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0, - bv->bv_offset + offset, len, 1); - - ci->sector += len; - ci->sector_count -= len; - offset += to_bytes(len); - } while (remaining -= len); - - ci->idx++; - - return 0; -} - -/* * Select the correct strategy for processing a non-flush bio. 
*/ static int __split_and_process_non_flush(struct clone_info *ci) { struct bio *bio = ci->bio; struct dm_target *ti; - sector_t len, max; - int idx; + unsigned len; if (unlikely(bio->bi_rw & REQ_DISCARD)) return __send_discard(ci); @@ -1451,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci) if (!dm_target_is_valid(ti)) return -EIO; - max = max_io_len(ci->sector, ti); - - /* - * Optimise for the simple case where we can do all of - * the remaining io with a single clone. - */ - if (ci->sector_count <= max) { - __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs, - ci->idx, bio->bi_vcnt - ci->idx, 0, - ci->sector_count, 0); - ci->sector_count = 0; - return 0; - } - - /* - * There are some bvecs that don't span targets. - * Do as many of these as possible. - */ - if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) { - len = __len_within_target(ci, max, &idx); - - __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs, - ci->idx, idx - ci->idx, 0, len, 0); + len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); - ci->sector += len; - ci->sector_count -= len; - ci->idx = idx; + __clone_and_map_data_bio(ci, ti, ci->sector, len); - return 0; - } + ci->sector += len; + ci->sector_count -= len; - /* - * Handle a bvec that must be split between two or more targets. - */ - return __split_bvec_across_targets(ci, ti, max); + return 0; } /* @@ -1510,8 +1378,7 @@ static void __split_and_process_bio(struct mapped_device *md, ci.io->bio = bio; ci.io->md = md; spin_lock_init(&ci.io->endio_lock); - ci.sector = bio->bi_sector; - ci.idx = bio->bi_idx; + ci.sector = bio->bi_iter.bi_sector; start_io_acct(ci.io); diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 3193aef..e8b4574 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error) { struct bio *b = bio->bi_private; - b->bi_size = bio->bi_size; - b->bi_sector = bio->bi_sector; + b->bi_iter.bi_size = bio->bi_iter.bi_size; + b->bi_iter.bi_sector = bio->bi_iter.bi_sector; bio_put(bio); @@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio) return; } - if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE)) + if (check_sector(conf, bio->bi_iter.bi_sector, + bio_end_sector(bio), WRITE)) failit = 1; if (check_mode(conf, WritePersistent)) { - add_sector(conf, bio->bi_sector, WritePersistent); + add_sector(conf, bio->bi_iter.bi_sector, + WritePersistent); failit = 1; } if (check_mode(conf, WriteTransient)) failit = 1; } else { /* read request */ - if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ)) + if (check_sector(conf, bio->bi_iter.bi_sector, + bio_end_sector(bio), READ)) failit = 1; if (check_mode(conf, ReadTransient)) failit = 1; if (check_mode(conf, ReadPersistent)) { - add_sector(conf, bio->bi_sector, ReadPersistent); + add_sector(conf, bio->bi_iter.bi_sector, + ReadPersistent); failit = 1; } if (check_mode(conf, ReadFixable)) { - add_sector(conf, bio->bi_sector, ReadFixable); + add_sector(conf, bio->bi_iter.bi_sector, + ReadFixable); failit = 1; } } diff --git a/drivers/md/linear.c b/drivers/md/linear.c index f03fabd..56f534b 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -288,65 +288,65 @@ static int linear_stop (struct mddev *mddev) static void linear_make_request(struct mddev *mddev, struct bio *bio) { + char b[BDEVNAME_SIZE]; struct dev_info *tmp_dev; - sector_t start_sector; + struct bio *split; + sector_t start_sector, end_sector, 
data_offset; if (unlikely(bio->bi_rw & REQ_FLUSH)) { md_flush_request(mddev, bio); return; } - rcu_read_lock(); - tmp_dev = which_dev(mddev, bio->bi_sector); - start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; - - - if (unlikely(bio->bi_sector >= (tmp_dev->end_sector) - || (bio->bi_sector < start_sector))) { - char b[BDEVNAME_SIZE]; - - printk(KERN_ERR - "md/linear:%s: make_request: Sector %llu out of bounds on " - "dev %s: %llu sectors, offset %llu\n", - mdname(mddev), - (unsigned long long)bio->bi_sector, - bdevname(tmp_dev->rdev->bdev, b), - (unsigned long long)tmp_dev->rdev->sectors, - (unsigned long long)start_sector); - rcu_read_unlock(); - bio_io_error(bio); - return; - } - if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) { - /* This bio crosses a device boundary, so we have to - * split it. - */ - struct bio_pair *bp; - sector_t end_sector = tmp_dev->end_sector; + do { + rcu_read_lock(); - rcu_read_unlock(); - - bp = bio_split(bio, end_sector - bio->bi_sector); + tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); + start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; + end_sector = tmp_dev->end_sector; + data_offset = tmp_dev->rdev->data_offset; + bio->bi_bdev = tmp_dev->rdev->bdev; - linear_make_request(mddev, &bp->bio1); - linear_make_request(mddev, &bp->bio2); - bio_pair_release(bp); - return; - } - - bio->bi_bdev = tmp_dev->rdev->bdev; - bio->bi_sector = bio->bi_sector - start_sector - + tmp_dev->rdev->data_offset; - rcu_read_unlock(); + rcu_read_unlock(); - if (unlikely((bio->bi_rw & REQ_DISCARD) && - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { - /* Just ignore it */ - bio_endio(bio, 0); - return; - } + if (unlikely(bio->bi_iter.bi_sector >= end_sector || + bio->bi_iter.bi_sector < start_sector)) + goto out_of_bounds; + + if (unlikely(bio_end_sector(bio) > end_sector)) { + /* This bio crosses a device boundary, so we have to + * split it. + */ + split = bio_split(bio, end_sector - + bio->bi_iter.bi_sector, + GFP_NOIO, fs_bio_set); + bio_chain(split, bio); + } else { + split = bio; + } - generic_make_request(bio); + split->bi_iter.bi_sector = split->bi_iter.bi_sector - + start_sector + data_offset; + + if (unlikely((split->bi_rw & REQ_DISCARD) && + !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { + /* Just ignore it */ + bio_endio(split, 0); + } else + generic_make_request(split); + } while (split != bio); + return; + +out_of_bounds: + printk(KERN_ERR + "md/linear:%s: make_request: Sector %llu out of bounds on " + "dev %s: %llu sectors, offset %llu\n", + mdname(mddev), + (unsigned long long)bio->bi_iter.bi_sector, + bdevname(tmp_dev->rdev->bdev, b), + (unsigned long long)tmp_dev->rdev->sectors, + (unsigned long long)start_sector); + bio_io_error(bio); } static void linear_status (struct seq_file *seq, struct mddev *mddev) diff --git a/drivers/md/md.c b/drivers/md/md.c index 40c5313..4ad5cc4 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws) struct mddev *mddev = container_of(ws, struct mddev, flush_work); struct bio *bio = mddev->flush_bio; - if (bio->bi_size == 0) + if (bio->bi_iter.bi_size == 0) /* an empty barrier - all done */ bio_endio(bio, 0); else { @@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); bio->bi_bdev = rdev->meta_bdev ? 
rdev->meta_bdev : rdev->bdev; - bio->bi_sector = sector; + bio->bi_iter.bi_sector = sector; bio_add_page(bio, page, size, 0); bio->bi_private = rdev; bio->bi_end_io = super_written; @@ -782,18 +782,16 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); int ret; - rw |= REQ_SYNC; - bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev; if (metadata_op) - bio->bi_sector = sector + rdev->sb_start; + bio->bi_iter.bi_sector = sector + rdev->sb_start; else if (rdev->mddev->reshape_position != MaxSector && (rdev->mddev->reshape_backwards == (sector >= rdev->mddev->reshape_position))) - bio->bi_sector = sector + rdev->new_data_offset; + bio->bi_iter.bi_sector = sector + rdev->new_data_offset; else - bio->bi_sector = sector + rdev->data_offset; + bio->bi_iter.bi_sector = sector + rdev->data_offset; bio_add_page(bio, page, size, 0); submit_bio_wait(rw, bio); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 1642eae..849ad39 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error) md_error (mp_bh->mddev, rdev); printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", bdevname(rdev->bdev,b), - (unsigned long long)bio->bi_sector); + (unsigned long long)bio->bi_iter.bi_sector); multipath_reschedule_retry(mp_bh); } else multipath_end_bh_io(mp_bh, error); @@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) multipath = conf->multipaths + mp_bh->path; mp_bh->bio = *bio; - mp_bh->bio.bi_sector += multipath->rdev->data_offset; + mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; mp_bh->bio.bi_bdev = multipath->rdev->bdev; mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT; mp_bh->bio.bi_end_io = multipath_end_request; @@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread) spin_unlock_irqrestore(&conf->device_lock, flags); bio = &mp_bh->bio; - bio->bi_sector = mp_bh->master_bio->bi_sector; + bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; if ((mp_bh->path = multipath_map (conf))<0) { printk(KERN_ALERT "multipath: %s: unrecoverable IO read" " error for block %llu\n", bdevname(bio->bi_bdev,b), - (unsigned long long)bio->bi_sector); + (unsigned long long)bio->bi_iter.bi_sector); multipath_end_bh_io(mp_bh, -EIO); } else { printk(KERN_ERR "multipath: %s: redirecting sector %llu" " to another IO path\n", bdevname(bio->bi_bdev,b), - (unsigned long long)bio->bi_sector); + (unsigned long long)bio->bi_iter.bi_sector); *bio = *(mp_bh->master_bio); - bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; + bio->bi_iter.bi_sector += + conf->multipaths[mp_bh->path].rdev->data_offset; bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; bio->bi_rw |= REQ_FAILFAST_TRANSPORT; bio->bi_end_io = multipath_end_request; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index c4d420b..407a99e 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev, unsigned int chunk_sects, struct bio *bio) { if (likely(is_power_of_2(chunk_sects))) { - return chunk_sects >= ((bio->bi_sector & (chunk_sects-1)) + return chunk_sects >= + ((bio->bi_iter.bi_sector & (chunk_sects-1)) + bio_sectors(bio)); } else{ - sector_t sector = bio->bi_sector; + sector_t sector = bio->bi_iter.bi_sector; return chunk_sects >= (sector_div(sector, chunk_sects) + bio_sectors(bio)); } 
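The raid0_make_request rewrite below (like the linear_make_request change above) drops the old single-page bio_pair splitting in favour of a loop around bio_split() and bio_chain(): each pass carves off the largest prefix that still fits inside one chunk, chains it to the parent bio so the parent only completes once every piece has completed, remaps the piece, and resubmits the remainder. A minimal sketch of that pattern, assuming a power-of-two chunk size and a hypothetical remap_child() helper standing in for the driver's real zone/device mapping (neither is part of this patch):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch only: the split-and-chain pattern adopted by the hunks below.
 * remap_child() is a hypothetical placeholder for the driver's mapping logic. */
static void remap_child(struct bio *split);

static void split_and_submit(struct bio *bio, unsigned chunk_sects)
{
	struct bio *split;

	do {
		sector_t sector = bio->bi_iter.bi_sector;
		/* sectors remaining before the next chunk boundary
		 * (chunk_sects assumed to be a power of two) */
		unsigned sectors = chunk_sects - (sector & (chunk_sects - 1));

		if (sectors < bio_sectors(bio)) {
			/* carve off a prefix; the parent bio keeps the remainder */
			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
			/* parent completes only after all chained children complete */
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		remap_child(split);	/* hypothetical: set bi_bdev and bi_iter.bi_sector */
		generic_make_request(split);
	} while (split != bio);
}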
@@ -512,64 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev, static void raid0_make_request(struct mddev *mddev, struct bio *bio) { - unsigned int chunk_sects; - sector_t sector_offset; struct strip_zone *zone; struct md_rdev *tmp_dev; + struct bio *split; if (unlikely(bio->bi_rw & REQ_FLUSH)) { md_flush_request(mddev, bio); return; } - chunk_sects = mddev->chunk_sectors; - if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) { - sector_t sector = bio->bi_sector; - struct bio_pair *bp; - /* Sanity check -- queue functions should prevent this happening */ - if (bio_segments(bio) > 1) - goto bad_map; - /* This is a one page bio that upper layers - * refuse to split for us, so we need to split it. - */ - if (likely(is_power_of_2(chunk_sects))) - bp = bio_split(bio, chunk_sects - (sector & - (chunk_sects-1))); - else - bp = bio_split(bio, chunk_sects - - sector_div(sector, chunk_sects)); - raid0_make_request(mddev, &bp->bio1); - raid0_make_request(mddev, &bp->bio2); - bio_pair_release(bp); - return; - } + do { + sector_t sector = bio->bi_iter.bi_sector; + unsigned chunk_sects = mddev->chunk_sectors; - sector_offset = bio->bi_sector; - zone = find_zone(mddev->private, &sector_offset); - tmp_dev = map_sector(mddev, zone, bio->bi_sector, - &sector_offset); - bio->bi_bdev = tmp_dev->bdev; - bio->bi_sector = sector_offset + zone->dev_start + - tmp_dev->data_offset; - - if (unlikely((bio->bi_rw & REQ_DISCARD) && - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { - /* Just ignore it */ - bio_endio(bio, 0); - return; - } + unsigned sectors = chunk_sects - + (likely(is_power_of_2(chunk_sects)) + ? (sector & (chunk_sects-1)) + : sector_div(sector, chunk_sects)); - generic_make_request(bio); - return; - -bad_map: - printk("md/raid0:%s: make_request bug: can't convert block across chunks" - " or bigger than %dk %llu %d\n", - mdname(mddev), chunk_sects / 2, - (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); + if (sectors < bio_sectors(bio)) { + split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set); + bio_chain(split, bio); + } else { + split = bio; + } - bio_io_error(bio); - return; + zone = find_zone(mddev->private, &sector); + tmp_dev = map_sector(mddev, zone, sector, &sector); + split->bi_bdev = tmp_dev->bdev; + split->bi_iter.bi_sector = sector + zone->dev_start + + tmp_dev->data_offset; + + if (unlikely((split->bi_rw & REQ_DISCARD) && + !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { + /* Just ignore it */ + bio_endio(split, 0); + } else + generic_make_request(split); + } while (split != bio); } static void raid0_status(struct seq_file *seq, struct mddev *mddev) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a49cfcc..fd3a2a1 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio) int done; struct r1conf *conf = r1_bio->mddev->private; sector_t start_next_window = r1_bio->start_next_window; - sector_t bi_sector = bio->bi_sector; + sector_t bi_sector = bio->bi_iter.bi_sector; if (bio->bi_phys_segments) { unsigned long flags; @@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio) if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { pr_debug("raid1: sync end %s on sectors %llu-%llu\n", (bio_data_dir(bio) == WRITE) ? 
"write" : "read", - (unsigned long long) bio->bi_sector, - (unsigned long long) bio->bi_sector + - bio_sectors(bio) - 1); + (unsigned long long) bio->bi_iter.bi_sector, + (unsigned long long) bio_end_sector(bio) - 1); call_bio_endio(r1_bio); } @@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error) struct bio *mbio = r1_bio->master_bio; pr_debug("raid1: behind end write sectors" " %llu-%llu\n", - (unsigned long long) mbio->bi_sector, - (unsigned long long) mbio->bi_sector + - bio_sectors(mbio) - 1); + (unsigned long long) mbio->bi_iter.bi_sector, + (unsigned long long) bio_end_sector(mbio) - 1); call_bio_endio(r1_bio); } } @@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) else if ((conf->next_resync - RESYNC_WINDOW_SECTORS >= bio_end_sector(bio)) || (conf->next_resync + NEXT_NORMALIO_DISTANCE - <= bio->bi_sector)) + <= bio->bi_iter.bi_sector)) wait = false; else wait = true; @@ -913,14 +911,14 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) if (bio && bio_data_dir(bio) == WRITE) { if (conf->next_resync + NEXT_NORMALIO_DISTANCE - <= bio->bi_sector) { + <= bio->bi_iter.bi_sector) { if (conf->start_next_window == MaxSector) conf->start_next_window = conf->next_resync + NEXT_NORMALIO_DISTANCE; if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) - <= bio->bi_sector) + <= bio->bi_iter.bi_sector) conf->next_window_requests++; else conf->current_window_requests++; @@ -1027,7 +1025,8 @@ do_sync_io: if (bvecs[i].bv_page) put_page(bvecs[i].bv_page); kfree(bvecs); - pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); + pr_debug("%dB behind alloc failed, doing sync I/O\n", + bio->bi_iter.bi_size); } struct raid1_plug_cb { @@ -1107,7 +1106,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) if (bio_data_dir(bio) == WRITE && bio_end_sector(bio) > mddev->suspend_lo && - bio->bi_sector < mddev->suspend_hi) { + bio->bi_iter.bi_sector < mddev->suspend_hi) { /* As the suspend_* range is controlled by * userspace, we want an interruptible * wait. 
@@ -1118,7 +1117,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) prepare_to_wait(&conf->wait_barrier, &w, TASK_INTERRUPTIBLE); if (bio_end_sector(bio) <= mddev->suspend_lo || - bio->bi_sector >= mddev->suspend_hi) + bio->bi_iter.bi_sector >= mddev->suspend_hi) break; schedule(); } @@ -1140,7 +1139,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) r1_bio->sectors = bio_sectors(bio); r1_bio->state = 0; r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_sector; + r1_bio->sector = bio->bi_iter.bi_sector; /* We might need to issue multiple reads to different * devices if there are bad blocks around, so we keep @@ -1180,12 +1179,13 @@ read_again: r1_bio->read_disk = rdisk; read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(read_bio, r1_bio->sector - bio->bi_sector, + bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); r1_bio->bios[rdisk] = read_bio; - read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; + read_bio->bi_iter.bi_sector = r1_bio->sector + + mirror->rdev->data_offset; read_bio->bi_bdev = mirror->rdev->bdev; read_bio->bi_end_io = raid1_end_read_request; read_bio->bi_rw = READ | do_sync; @@ -1197,7 +1197,7 @@ read_again: */ sectors_handled = (r1_bio->sector + max_sectors - - bio->bi_sector); + - bio->bi_iter.bi_sector); r1_bio->sectors = max_sectors; spin_lock_irq(&conf->device_lock); if (bio->bi_phys_segments == 0) @@ -1218,7 +1218,8 @@ read_again: r1_bio->sectors = bio_sectors(bio) - sectors_handled; r1_bio->state = 0; r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_sector + sectors_handled; + r1_bio->sector = bio->bi_iter.bi_sector + + sectors_handled; goto read_again; } else generic_make_request(read_bio); @@ -1321,7 +1322,7 @@ read_again: if (r1_bio->bios[j]) rdev_dec_pending(conf->mirrors[j].rdev, mddev); r1_bio->state = 0; - allow_barrier(conf, start_next_window, bio->bi_sector); + allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector); md_wait_for_blocked_rdev(blocked_rdev, mddev); start_next_window = wait_barrier(conf, bio); /* @@ -1348,7 +1349,7 @@ read_again: bio->bi_phys_segments++; spin_unlock_irq(&conf->device_lock); } - sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector; + sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector; atomic_set(&r1_bio->remaining, 1); atomic_set(&r1_bio->behind_remaining, 0); @@ -1360,7 +1361,7 @@ read_again: continue; mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors); + bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); if (first_clone) { /* do behind I/O ? 
@@ -1394,7 +1395,7 @@ read_again: r1_bio->bios[i] = mbio; - mbio->bi_sector = (r1_bio->sector + + mbio->bi_iter.bi_sector = (r1_bio->sector + conf->mirrors[i].rdev->data_offset); mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_end_io = raid1_end_write_request; @@ -1434,7 +1435,7 @@ read_again: r1_bio->sectors = bio_sectors(bio) - sectors_handled; r1_bio->state = 0; r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_sector + sectors_handled; + r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled; goto retry_write; } @@ -1958,14 +1959,14 @@ static int process_checks(struct r1bio *r1_bio) /* fixup the bio for reuse */ bio_reset(b); b->bi_vcnt = vcnt; - b->bi_size = r1_bio->sectors << 9; - b->bi_sector = r1_bio->sector + + b->bi_iter.bi_size = r1_bio->sectors << 9; + b->bi_iter.bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; b->bi_bdev = conf->mirrors[i].rdev->bdev; b->bi_end_io = end_sync_read; b->bi_private = r1_bio; - size = b->bi_size; + size = b->bi_iter.bi_size; for (j = 0; j < vcnt ; j++) { struct bio_vec *bi; bi = &b->bi_io_vec[j]; @@ -2220,11 +2221,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) } wbio->bi_rw = WRITE; - wbio->bi_sector = r1_bio->sector; - wbio->bi_size = r1_bio->sectors << 9; + wbio->bi_iter.bi_sector = r1_bio->sector; + wbio->bi_iter.bi_size = r1_bio->sectors << 9; bio_trim(wbio, sector - r1_bio->sector, sectors); - wbio->bi_sector += rdev->data_offset; + wbio->bi_iter.bi_sector += rdev->data_offset; wbio->bi_bdev = rdev->bdev; if (submit_bio_wait(WRITE, wbio) == 0) /* failure! */ @@ -2338,7 +2339,8 @@ read_more: } r1_bio->read_disk = disk; bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); - bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors); + bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, + max_sectors); r1_bio->bios[r1_bio->read_disk] = bio; rdev = conf->mirrors[disk].rdev; printk_ratelimited(KERN_ERR @@ -2347,7 +2349,7 @@ read_more: mdname(mddev), (unsigned long long)r1_bio->sector, bdevname(rdev->bdev, b)); - bio->bi_sector = r1_bio->sector + rdev->data_offset; + bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; bio->bi_bdev = rdev->bdev; bio->bi_end_io = raid1_end_read_request; bio->bi_rw = READ | do_sync; @@ -2356,7 +2358,7 @@ read_more: /* Drat - have to split this up more */ struct bio *mbio = r1_bio->master_bio; int sectors_handled = (r1_bio->sector + max_sectors - - mbio->bi_sector); + - mbio->bi_iter.bi_sector); r1_bio->sectors = max_sectors; spin_lock_irq(&conf->device_lock); if (mbio->bi_phys_segments == 0) @@ -2374,7 +2376,8 @@ read_more: r1_bio->state = 0; set_bit(R1BIO_ReadError, &r1_bio->state); r1_bio->mddev = mddev; - r1_bio->sector = mbio->bi_sector + sectors_handled; + r1_bio->sector = mbio->bi_iter.bi_sector + + sectors_handled; goto read_more; } else @@ -2598,7 +2601,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp } if (bio->bi_end_io) { atomic_inc(&rdev->nr_pending); - bio->bi_sector = sector_nr + rdev->data_offset; + bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; bio->bi_bdev = rdev->bdev; bio->bi_private = r1_bio; } @@ -2698,7 +2701,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp continue; /* remove last page from this bio */ bio->bi_vcnt--; - bio->bi_size -= len; + bio->bi_iter.bi_size -= len; bio->bi_flags &= ~(1<< BIO_SEG_VALID); } goto bio_full; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8d39d63..33fc408 100644 --- a/drivers/md/raid10.c +++ 
b/drivers/md/raid10.c @@ -1152,14 +1152,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) kfree(plug); } -static void make_request(struct mddev *mddev, struct bio * bio) +static void __make_request(struct mddev *mddev, struct bio *bio) { struct r10conf *conf = mddev->private; struct r10bio *r10_bio; struct bio *read_bio; int i; - sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); - int chunk_sects = chunk_mask + 1; const int rw = bio_data_dir(bio); const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); const unsigned long do_fua = (bio->bi_rw & REQ_FUA); @@ -1174,88 +1172,27 @@ static void make_request(struct mddev *mddev, struct bio * bio) int max_sectors; int sectors; - if (unlikely(bio->bi_rw & REQ_FLUSH)) { - md_flush_request(mddev, bio); - return; - } - - /* If this request crosses a chunk boundary, we need to - * split it. This will only happen for 1 PAGE (or less) requests. - */ - if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio) - > chunk_sects - && (conf->geo.near_copies < conf->geo.raid_disks - || conf->prev.near_copies < conf->prev.raid_disks))) { - struct bio_pair *bp; - /* Sanity check -- queue functions should prevent this happening */ - if (bio_segments(bio) > 1) - goto bad_map; - /* This is a one page bio that upper layers - * refuse to split for us, so we need to split it. - */ - bp = bio_split(bio, - chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); - - /* Each of these 'make_request' calls will call 'wait_barrier'. - * If the first succeeds but the second blocks due to the resync - * thread raising the barrier, we will deadlock because the - * IO to the underlying device will be queued in generic_make_request - * and will never complete, so will never reduce nr_pending. - * So increment nr_waiting here so no new raise_barriers will - * succeed, and so the second wait_barrier cannot block. - */ - spin_lock_irq(&conf->resync_lock); - conf->nr_waiting++; - spin_unlock_irq(&conf->resync_lock); - - make_request(mddev, &bp->bio1); - make_request(mddev, &bp->bio2); - - spin_lock_irq(&conf->resync_lock); - conf->nr_waiting--; - wake_up(&conf->wait_barrier); - spin_unlock_irq(&conf->resync_lock); - - bio_pair_release(bp); - return; - bad_map: - printk("md/raid10:%s: make_request bug: can't convert block across chunks" - " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2, - (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); - - bio_io_error(bio); - return; - } - - md_write_start(mddev, bio); - - /* - * Register the new request and wait if the reconstruction - * thread has put up a bar for new requests. - * Continue immediately if no resync is active currently. - */ - wait_barrier(conf); - sectors = bio_sectors(bio); while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && - bio->bi_sector < conf->reshape_progress && - bio->bi_sector + sectors > conf->reshape_progress) { + bio->bi_iter.bi_sector < conf->reshape_progress && + bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { /* IO spans the reshape position. Need to wait for * reshape to pass */ allow_barrier(conf); wait_event(conf->wait_barrier, - conf->reshape_progress <= bio->bi_sector || - conf->reshape_progress >= bio->bi_sector + sectors); + conf->reshape_progress <= bio->bi_iter.bi_sector || + conf->reshape_progress >= bio->bi_iter.bi_sector + + sectors); wait_barrier(conf); } if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && bio_data_dir(bio) == WRITE && (mddev->reshape_backwards - ? 
(bio->bi_sector < conf->reshape_safe && - bio->bi_sector + sectors > conf->reshape_progress) - : (bio->bi_sector + sectors > conf->reshape_safe && - bio->bi_sector < conf->reshape_progress))) { + ? (bio->bi_iter.bi_sector < conf->reshape_safe && + bio->bi_iter.bi_sector + sectors > conf->reshape_progress) + : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && + bio->bi_iter.bi_sector < conf->reshape_progress))) { /* Need to update reshape_position in metadata */ mddev->reshape_position = conf->reshape_progress; set_bit(MD_CHANGE_DEVS, &mddev->flags); @@ -1273,7 +1210,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) r10_bio->sectors = sectors; r10_bio->mddev = mddev; - r10_bio->sector = bio->bi_sector; + r10_bio->sector = bio->bi_iter.bi_sector; r10_bio->state = 0; /* We might need to issue multiple reads to different @@ -1302,13 +1239,13 @@ read_again: slot = r10_bio->read_slot; read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(read_bio, r10_bio->sector - bio->bi_sector, + bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); r10_bio->devs[slot].bio = read_bio; r10_bio->devs[slot].rdev = rdev; - read_bio->bi_sector = r10_bio->devs[slot].addr + + read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + choose_data_offset(r10_bio, rdev); read_bio->bi_bdev = rdev->bdev; read_bio->bi_end_io = raid10_end_read_request; @@ -1320,7 +1257,7 @@ read_again: * need another r10_bio. */ sectors_handled = (r10_bio->sector + max_sectors - - bio->bi_sector); + - bio->bi_iter.bi_sector); r10_bio->sectors = max_sectors; spin_lock_irq(&conf->device_lock); if (bio->bi_phys_segments == 0) @@ -1341,7 +1278,8 @@ read_again: r10_bio->sectors = bio_sectors(bio) - sectors_handled; r10_bio->state = 0; r10_bio->mddev = mddev; - r10_bio->sector = bio->bi_sector + sectors_handled; + r10_bio->sector = bio->bi_iter.bi_sector + + sectors_handled; goto read_again; } else generic_make_request(read_bio); @@ -1499,7 +1437,8 @@ retry_write: bio->bi_phys_segments++; spin_unlock_irq(&conf->device_lock); } - sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector; + sectors_handled = r10_bio->sector + max_sectors - + bio->bi_iter.bi_sector; atomic_set(&r10_bio->remaining, 1); bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); @@ -1510,11 +1449,11 @@ retry_write: if (r10_bio->devs[i].bio) { struct md_rdev *rdev = conf->mirrors[d].rdev; mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(mbio, r10_bio->sector - bio->bi_sector, + bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); r10_bio->devs[i].bio = mbio; - mbio->bi_sector = (r10_bio->devs[i].addr+ + mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ choose_data_offset(r10_bio, rdev)); mbio->bi_bdev = rdev->bdev; @@ -1553,11 +1492,11 @@ retry_write: rdev = conf->mirrors[d].rdev; } mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(mbio, r10_bio->sector - bio->bi_sector, + bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); r10_bio->devs[i].repl_bio = mbio; - mbio->bi_sector = (r10_bio->devs[i].addr + + mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + choose_data_offset( r10_bio, rdev)); mbio->bi_bdev = rdev->bdev; @@ -1591,11 +1530,57 @@ retry_write: r10_bio->sectors = bio_sectors(bio) - sectors_handled; r10_bio->mddev = mddev; - r10_bio->sector = bio->bi_sector + sectors_handled; + r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; r10_bio->state = 0; goto retry_write; } one_write_done(r10_bio); +} + +static void make_request(struct 
mddev *mddev, struct bio *bio) +{ + struct r10conf *conf = mddev->private; + sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); + int chunk_sects = chunk_mask + 1; + + struct bio *split; + + if (unlikely(bio->bi_rw & REQ_FLUSH)) { + md_flush_request(mddev, bio); + return; + } + + md_write_start(mddev, bio); + + /* + * Register the new request and wait if the reconstruction + * thread has put up a bar for new requests. + * Continue immediately if no resync is active currently. + */ + wait_barrier(conf); + + do { + + /* + * If this request crosses a chunk boundary, we need to split + * it. + */ + if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + + bio_sectors(bio) > chunk_sects + && (conf->geo.near_copies < conf->geo.raid_disks + || conf->prev.near_copies < + conf->prev.raid_disks))) { + split = bio_split(bio, chunk_sects - + (bio->bi_iter.bi_sector & + (chunk_sects - 1)), + GFP_NOIO, fs_bio_set); + bio_chain(split, bio); + } else { + split = bio; + } + + __make_request(mddev, split); + } while (split != bio); /* In case raid10d snuck in to freeze_array */ wake_up(&conf->wait_barrier); @@ -2124,10 +2109,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) bio_reset(tbio); tbio->bi_vcnt = vcnt; - tbio->bi_size = r10_bio->sectors << 9; + tbio->bi_iter.bi_size = r10_bio->sectors << 9; tbio->bi_rw = WRITE; tbio->bi_private = r10_bio; - tbio->bi_sector = r10_bio->devs[i].addr; + tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; for (j=0; j < vcnt ; j++) { tbio->bi_io_vec[j].bv_offset = 0; @@ -2144,7 +2129,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) atomic_inc(&r10_bio->remaining); md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); - tbio->bi_sector += conf->mirrors[d].rdev->data_offset; + tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; tbio->bi_bdev = conf->mirrors[d].rdev->bdev; generic_make_request(tbio); } @@ -2614,8 +2599,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i) sectors = sect_to_write; /* Write at 'sector' for 'sectors' */ wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(wbio, sector - bio->bi_sector, sectors); - wbio->bi_sector = (r10_bio->devs[i].addr+ + bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); + wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ choose_data_offset(r10_bio, rdev) + (sector - r10_bio->sector)); wbio->bi_bdev = rdev->bdev; @@ -2687,10 +2672,10 @@ read_more: (unsigned long long)r10_bio->sector); bio = bio_clone_mddev(r10_bio->master_bio, GFP_NOIO, mddev); - bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors); + bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); r10_bio->devs[slot].bio = bio; r10_bio->devs[slot].rdev = rdev; - bio->bi_sector = r10_bio->devs[slot].addr + bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + choose_data_offset(r10_bio, rdev); bio->bi_bdev = rdev->bdev; bio->bi_rw = READ | do_sync; @@ -2701,7 +2686,7 @@ read_more: struct bio *mbio = r10_bio->master_bio; int sectors_handled = r10_bio->sector + max_sectors - - mbio->bi_sector; + - mbio->bi_iter.bi_sector; r10_bio->sectors = max_sectors; spin_lock_irq(&conf->device_lock); if (mbio->bi_phys_segments == 0) @@ -2719,7 +2704,7 @@ read_more: set_bit(R10BIO_ReadError, &r10_bio->state); r10_bio->mddev = mddev; - r10_bio->sector = mbio->bi_sector + r10_bio->sector = mbio->bi_iter.bi_sector + sectors_handled; goto read_more; @@ -3157,7 +3142,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, 
bio->bi_end_io = end_sync_read; bio->bi_rw = READ; from_addr = r10_bio->devs[j].addr; - bio->bi_sector = from_addr + rdev->data_offset; + bio->bi_iter.bi_sector = from_addr + + rdev->data_offset; bio->bi_bdev = rdev->bdev; atomic_inc(&rdev->nr_pending); /* and we write to 'i' (if not in_sync) */ @@ -3181,7 +3167,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_private = r10_bio; bio->bi_end_io = end_sync_write; bio->bi_rw = WRITE; - bio->bi_sector = to_addr + bio->bi_iter.bi_sector = to_addr + rdev->data_offset; bio->bi_bdev = rdev->bdev; atomic_inc(&r10_bio->remaining); @@ -3210,7 +3196,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_private = r10_bio; bio->bi_end_io = end_sync_write; bio->bi_rw = WRITE; - bio->bi_sector = to_addr + rdev->data_offset; + bio->bi_iter.bi_sector = to_addr + + rdev->data_offset; bio->bi_bdev = rdev->bdev; atomic_inc(&r10_bio->remaining); break; @@ -3328,7 +3315,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_private = r10_bio; bio->bi_end_io = end_sync_read; bio->bi_rw = READ; - bio->bi_sector = sector + + bio->bi_iter.bi_sector = sector + conf->mirrors[d].rdev->data_offset; bio->bi_bdev = conf->mirrors[d].rdev->bdev; count++; @@ -3350,7 +3337,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_private = r10_bio; bio->bi_end_io = end_sync_write; bio->bi_rw = WRITE; - bio->bi_sector = sector + + bio->bi_iter.bi_sector = sector + conf->mirrors[d].replacement->data_offset; bio->bi_bdev = conf->mirrors[d].replacement->bdev; count++; @@ -3397,7 +3384,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, bio2 = bio2->bi_next) { /* remove last page from this bio */ bio2->bi_vcnt--; - bio2->bi_size -= len; + bio2->bi_iter.bi_size -= len; bio2->bi_flags &= ~(1<< BIO_SEG_VALID); } goto bio_full; @@ -4418,7 +4405,7 @@ read_more: read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); read_bio->bi_bdev = rdev->bdev; - read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr + read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr + rdev->data_offset); read_bio->bi_private = r10_bio; read_bio->bi_end_io = end_sync_read; @@ -4426,7 +4413,7 @@ read_more: read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); read_bio->bi_flags |= 1 << BIO_UPTODATE; read_bio->bi_vcnt = 0; - read_bio->bi_size = 0; + read_bio->bi_iter.bi_size = 0; r10_bio->master_bio = read_bio; r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; @@ -4452,7 +4439,8 @@ read_more: bio_reset(b); b->bi_bdev = rdev2->bdev; - b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset; + b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + + rdev2->new_data_offset; b->bi_private = r10_bio; b->bi_end_io = end_reshape_write; b->bi_rw = WRITE; @@ -4479,7 +4467,7 @@ read_more: bio2 = bio2->bi_next) { /* Remove last page from this bio */ bio2->bi_vcnt--; - bio2->bi_size -= len; + bio2->bi_iter.bi_size -= len; bio2->bi_flags &= ~(1<<BIO_SEG_VALID); } goto bio_full; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 03f82ab..67ca9c3 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) { int sectors = bio_sectors(bio); - if (bio->bi_sector + sectors < sector + STRIPE_SECTORS) + if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) return bio->bi_next; else return 
NULL; @@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi) return_bi = bi->bi_next; bi->bi_next = NULL; - bi->bi_size = 0; + bi->bi_iter.bi_size = 0; trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), bi, 0); bio_endio(bi, 0); @@ -852,10 +852,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) bi->bi_rw, i); atomic_inc(&sh->count); if (use_new_offset(conf, sh)) - bi->bi_sector = (sh->sector + bi->bi_iter.bi_sector = (sh->sector + rdev->new_data_offset); else - bi->bi_sector = (sh->sector + bi->bi_iter.bi_sector = (sh->sector + rdev->data_offset); if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) bi->bi_rw |= REQ_NOMERGE; @@ -863,7 +863,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) bi->bi_vcnt = 1; bi->bi_io_vec[0].bv_len = STRIPE_SIZE; bi->bi_io_vec[0].bv_offset = 0; - bi->bi_size = STRIPE_SIZE; + bi->bi_iter.bi_size = STRIPE_SIZE; /* * If this is discard request, set bi_vcnt 0. We don't * want to confuse SCSI because SCSI will replace payload @@ -899,15 +899,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) rbi->bi_rw, i); atomic_inc(&sh->count); if (use_new_offset(conf, sh)) - rbi->bi_sector = (sh->sector + rbi->bi_iter.bi_sector = (sh->sector + rrdev->new_data_offset); else - rbi->bi_sector = (sh->sector + rbi->bi_iter.bi_sector = (sh->sector + rrdev->data_offset); rbi->bi_vcnt = 1; rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; rbi->bi_io_vec[0].bv_offset = 0; - rbi->bi_size = STRIPE_SIZE; + rbi->bi_iter.bi_size = STRIPE_SIZE; /* * If this is discard request, set bi_vcnt 0. We don't * want to confuse SCSI because SCSI will replace payload @@ -935,24 +935,24 @@ static struct dma_async_tx_descriptor * async_copy_data(int frombio, struct bio *bio, struct page *page, sector_t sector, struct dma_async_tx_descriptor *tx) { - struct bio_vec *bvl; + struct bio_vec bvl; + struct bvec_iter iter; struct page *bio_page; - int i; int page_offset; struct async_submit_ctl submit; enum async_tx_flags flags = 0; - if (bio->bi_sector >= sector) - page_offset = (signed)(bio->bi_sector - sector) * 512; + if (bio->bi_iter.bi_sector >= sector) + page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; else - page_offset = (signed)(sector - bio->bi_sector) * -512; + page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; if (frombio) flags |= ASYNC_TX_FENCE; init_async_submit(&submit, flags, tx, NULL, NULL, NULL); - bio_for_each_segment(bvl, bio, i) { - int len = bvl->bv_len; + bio_for_each_segment(bvl, bio, iter) { + int len = bvl.bv_len; int clen; int b_offset = 0; @@ -968,8 +968,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, clen = len; if (clen > 0) { - b_offset += bvl->bv_offset; - bio_page = bvl->bv_page; + b_offset += bvl.bv_offset; + bio_page = bvl.bv_page; if (frombio) tx = async_memcpy(page, bio_page, page_offset, b_offset, clen, &submit); @@ -1012,7 +1012,7 @@ static void ops_complete_biofill(void *stripe_head_ref) BUG_ON(!dev->read); rbi = dev->read; dev->read = NULL; - while (rbi && rbi->bi_sector < + while (rbi && rbi->bi_iter.bi_sector < dev->sector + STRIPE_SECTORS) { rbi2 = r5_next_bio(rbi, dev->sector); if (!raid5_dec_bi_active_stripes(rbi)) { @@ -1048,7 +1048,7 @@ static void ops_run_biofill(struct stripe_head *sh) dev->read = rbi = dev->toread; dev->toread = NULL; spin_unlock_irq(&sh->stripe_lock); - while (rbi && rbi->bi_sector < + while (rbi && rbi->bi_iter.bi_sector < dev->sector + STRIPE_SECTORS) { tx = async_copy_data(0, rbi, dev->page, 
dev->sector, tx); @@ -1390,7 +1390,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) wbi = dev->written = chosen; spin_unlock_irq(&sh->stripe_lock); - while (wbi && wbi->bi_sector < + while (wbi && wbi->bi_iter.bi_sector < dev->sector + STRIPE_SECTORS) { if (wbi->bi_rw & REQ_FUA) set_bit(R5_WantFUA, &dev->flags); @@ -2615,7 +2615,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in int firstwrite=0; pr_debug("adding bi b#%llu to stripe s#%llu\n", - (unsigned long long)bi->bi_sector, + (unsigned long long)bi->bi_iter.bi_sector, (unsigned long long)sh->sector); /* @@ -2633,12 +2633,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in firstwrite = 1; } else bip = &sh->dev[dd_idx].toread; - while (*bip && (*bip)->bi_sector < bi->bi_sector) { - if (bio_end_sector(*bip) > bi->bi_sector) + while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { + if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) goto overlap; bip = & (*bip)->bi_next; } - if (*bip && (*bip)->bi_sector < bio_end_sector(bi)) + if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) goto overlap; BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); @@ -2652,7 +2652,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in sector_t sector = sh->dev[dd_idx].sector; for (bi=sh->dev[dd_idx].towrite; sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && - bi && bi->bi_sector <= sector; + bi && bi->bi_iter.bi_sector <= sector; bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { if (bio_end_sector(bi) >= sector) sector = bio_end_sector(bi); @@ -2662,7 +2662,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in } pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", - (unsigned long long)(*bip)->bi_sector, + (unsigned long long)(*bip)->bi_iter.bi_sector, (unsigned long long)sh->sector, dd_idx); spin_unlock_irq(&sh->stripe_lock); @@ -2737,7 +2737,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) wake_up(&conf->wait_for_overlap); - while (bi && bi->bi_sector < + while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); @@ -2756,7 +2756,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, bi = sh->dev[i].written; sh->dev[i].written = NULL; if (bi) bitmap_end = 1; - while (bi && bi->bi_sector < + while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); @@ -2780,7 +2780,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, spin_unlock_irq(&sh->stripe_lock); if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) wake_up(&conf->wait_for_overlap); - while (bi && bi->bi_sector < + while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); @@ -3004,7 +3004,7 @@ static void handle_stripe_clean_event(struct r5conf *conf, clear_bit(R5_UPTODATE, &dev->flags); wbi = dev->written; dev->written = NULL; - while (wbi && wbi->bi_sector < + while (wbi && wbi->bi_iter.bi_sector < dev->sector + STRIPE_SECTORS) { wbi2 = r5_next_bio(wbi, dev->sector); if (!raid5_dec_bi_active_stripes(wbi)) { @@ -4096,7 +4096,7 @@ static int raid5_mergeable_bvec(struct request_queue *q, static int 
in_chunk_boundary(struct mddev *mddev, struct bio *bio) { - sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); + sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); unsigned int chunk_sectors = mddev->chunk_sectors; unsigned int bio_sectors = bio_sectors(bio); @@ -4233,9 +4233,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) /* * compute position */ - align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, - 0, - &dd_idx, NULL); + align_bi->bi_iter.bi_sector = + raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, + 0, &dd_idx, NULL); end_sector = bio_end_sector(align_bi); rcu_read_lock(); @@ -4260,7 +4260,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); if (!bio_fits_rdev(align_bi) || - is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi), + is_badblock(rdev, align_bi->bi_iter.bi_sector, + bio_sectors(align_bi), &first_bad, &bad_sectors)) { /* too big in some way, or has a known bad block */ bio_put(align_bi); @@ -4269,7 +4270,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) } /* No reshape active, so we can trust rdev->data_offset */ - align_bi->bi_sector += rdev->data_offset; + align_bi->bi_iter.bi_sector += rdev->data_offset; spin_lock_irq(&conf->device_lock); wait_event_lock_irq(conf->wait_for_stripe, @@ -4281,7 +4282,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) if (mddev->gendisk) trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), align_bi, disk_devt(mddev->gendisk), - raid_bio->bi_sector); + raid_bio->bi_iter.bi_sector); generic_make_request(align_bi); return 1; } else { @@ -4464,8 +4465,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) /* Skip discard while reshape is happening */ return; - logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); - last_sector = bi->bi_sector + (bi->bi_size>>9); + logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); + last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); bi->bi_next = NULL; bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ @@ -4569,7 +4570,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) return; } - logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); + logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); last_sector = bio_end_sector(bi); bi->bi_next = NULL; bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ @@ -5053,7 +5054,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) int remaining; int handled = 0; - logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); + logical_sector = raid_bio->bi_iter.bi_sector & + ~((sector_t)STRIPE_SECTORS-1); sector = raid5_compute_sector(conf, logical_sector, 0, &dd_idx, NULL); last_sector = bio_end_sector(raid_bio); diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index dd239bd..00d339c 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -2235,10 +2235,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, } /* do we need to support multiple segments? 
*/ - if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) { - printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", - ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req), - bio_segments(rsp->bio), blk_rq_bytes(rsp)); + if (bio_multiple_segments(req->bio) || + bio_multiple_segments(rsp->bio)) { + printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n", + ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp)); return -EINVAL; } diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 92bd22c..9cbc567 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -504,7 +504,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, struct dasd_diag_req *dreq; struct dasd_diag_bio *dbio; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; unsigned int count, datasize; sector_t recid, first_rec, last_rec; @@ -525,10 +525,10 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, /* Check struct bio and count the number of blocks for the request. */ count = 0; rq_for_each_segment(bv, req, iter) { - if (bv->bv_len & (blksize - 1)) + if (bv.bv_len & (blksize - 1)) /* Fba can only do full blocks. */ return ERR_PTR(-EINVAL); - count += bv->bv_len >> (block->s2b_shift + 9); + count += bv.bv_len >> (block->s2b_shift + 9); } /* Paranoia. */ if (count != last_rec - first_rec + 1) @@ -545,8 +545,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, dbio = dreq->bio; recid = first_rec; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - for (off = 0; off < bv->bv_len; off += blksize) { + dst = page_address(bv.bv_page) + bv.bv_offset; + for (off = 0; off < bv.bv_len; off += blksize) { memset(dbio, 0, sizeof (struct dasd_diag_bio)); dbio->type = rw_cmd; dbio->block_number = recid + 1; diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 95e4578..2e8e075 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -2551,7 +2551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; unsigned int off; int count, cidaw, cplength, datasize; @@ -2573,13 +2573,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( count = 0; cidaw = 0; rq_for_each_segment(bv, req, iter) { - if (bv->bv_len & (blksize - 1)) + if (bv.bv_len & (blksize - 1)) /* Eckd can only do full blocks. */ return ERR_PTR(-EINVAL); - count += bv->bv_len >> (block->s2b_shift + 9); + count += bv.bv_len >> (block->s2b_shift + 9); #if defined(CONFIG_64BIT) - if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) - cidaw += bv->bv_len >> (block->s2b_shift + 9); + if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) + cidaw += bv.bv_len >> (block->s2b_shift + 9); #endif } /* Paranoia. 
*/ @@ -2650,16 +2650,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( last_rec - recid + 1, cmd, basedev, blksize); } rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; + dst = page_address(bv.bv_page) + bv.bv_offset; if (dasd_page_cache) { char *copy = kmem_cache_alloc(dasd_page_cache, GFP_DMA | __GFP_NOWARN); if (copy && rq_data_dir(req) == WRITE) - memcpy(copy + bv->bv_offset, dst, bv->bv_len); + memcpy(copy + bv.bv_offset, dst, bv.bv_len); if (copy) - dst = copy + bv->bv_offset; + dst = copy + bv.bv_offset; } - for (off = 0; off < bv->bv_len; off += blksize) { + for (off = 0; off < bv.bv_len; off += blksize) { sector_t trkid = recid; unsigned int recoffs = sector_div(trkid, blk_per_trk); rcmd = cmd; @@ -2735,7 +2735,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst, *idaw_dst; unsigned int cidaw, cplength, datasize; unsigned int tlf; @@ -2813,8 +2813,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( idaw_dst = NULL; idaw_len = 0; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - seg_len = bv->bv_len; + dst = page_address(bv.bv_page) + bv.bv_offset; + seg_len = bv.bv_len; while (seg_len) { if (new_track) { trkid = recid; @@ -3039,7 +3039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( { struct dasd_ccw_req *cqr; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; unsigned int trkcount, ctidaw; unsigned char cmd; @@ -3125,8 +3125,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( new_track = 1; recid = first_rec; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - seg_len = bv->bv_len; + dst = page_address(bv.bv_page) + bv.bv_offset; + seg_len = bv.bv_len; while (seg_len) { if (new_track) { trkid = recid; @@ -3158,9 +3158,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( } } else { rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; + dst = page_address(bv.bv_page) + bv.bv_offset; last_tidaw = itcw_add_tidaw(itcw, 0x00, - dst, bv->bv_len); + dst, bv.bv_len); if (IS_ERR(last_tidaw)) { ret = -EINVAL; goto out_error; @@ -3278,7 +3278,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; unsigned char cmd; unsigned int trkcount; @@ -3378,8 +3378,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); } rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - seg_len = bv->bv_len; + dst = page_address(bv.bv_page) + bv.bv_offset; + seg_len = bv.bv_len; if (cmd == DASD_ECKD_CCW_READ_TRACK) memset(dst, 0, seg_len); if (!len_to_track_end) { @@ -3424,7 +3424,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) struct dasd_eckd_private *private; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst, *cda; unsigned int blksize, blk_per_trk, off; sector_t recid; @@ -3442,8 +3442,8 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) if (private->uses_cdl == 0 || recid > 2*blk_per_trk) ccw++; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - for (off 
= 0; off < bv->bv_len; off += blksize) { + dst = page_address(bv.bv_page) + bv.bv_offset; + for (off = 0; off < bv.bv_len; off += blksize) { /* Skip locate record. */ if (private->uses_cdl && recid <= 2*blk_per_trk) ccw++; @@ -3454,7 +3454,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) cda = (char *)((addr_t) ccw->cda); if (dst != cda) { if (rq_data_dir(req) == READ) - memcpy(dst, cda, bv->bv_len); + memcpy(dst, cda, bv.bv_len); kmem_cache_free(dasd_page_cache, (void *)((addr_t)cda & PAGE_MASK)); } diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 9cbc8c3..2c8e68b 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -260,7 +260,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, struct dasd_ccw_req *cqr; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst; int count, cidaw, cplength, datasize; sector_t recid, first_rec, last_rec; @@ -283,13 +283,13 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, count = 0; cidaw = 0; rq_for_each_segment(bv, req, iter) { - if (bv->bv_len & (blksize - 1)) + if (bv.bv_len & (blksize - 1)) /* Fba can only do full blocks. */ return ERR_PTR(-EINVAL); - count += bv->bv_len >> (block->s2b_shift + 9); + count += bv.bv_len >> (block->s2b_shift + 9); #if defined(CONFIG_64BIT) - if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) - cidaw += bv->bv_len / blksize; + if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) + cidaw += bv.bv_len / blksize; #endif } /* Paranoia. */ @@ -326,16 +326,16 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, } recid = first_rec; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; + dst = page_address(bv.bv_page) + bv.bv_offset; if (dasd_page_cache) { char *copy = kmem_cache_alloc(dasd_page_cache, GFP_DMA | __GFP_NOWARN); if (copy && rq_data_dir(req) == WRITE) - memcpy(copy + bv->bv_offset, dst, bv->bv_len); + memcpy(copy + bv.bv_offset, dst, bv.bv_len); if (copy) - dst = copy + bv->bv_offset; + dst = copy + bv.bv_offset; } - for (off = 0; off < bv->bv_len; off += blksize) { + for (off = 0; off < bv.bv_len; off += blksize) { /* Locate record for stupid devices. */ if (private->rdc_data.mode.bits.data_chain == 0) { ccw[-1].flags |= CCW_FLAG_CC; @@ -384,7 +384,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) struct dasd_fba_private *private; struct ccw1 *ccw; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; char *dst, *cda; unsigned int blksize, off; int status; @@ -399,8 +399,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) if (private->rdc_data.mode.bits.data_chain != 0) ccw++; rq_for_each_segment(bv, req, iter) { - dst = page_address(bv->bv_page) + bv->bv_offset; - for (off = 0; off < bv->bv_len; off += blksize) { + dst = page_address(bv.bv_page) + bv.bv_offset; + for (off = 0; off < bv.bv_len; off += blksize) { /* Skip locate record. 
*/ if (private->rdc_data.mode.bits.data_chain == 0) ccw++; @@ -411,7 +411,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) cda = (char *)((addr_t) ccw->cda); if (dst != cda) { if (rq_data_dir(req) == READ) - memcpy(dst, cda, bv->bv_len); + memcpy(dst, cda, bv.bv_len); kmem_cache_free(dasd_page_cache, (void *)((addr_t)cda & PAGE_MASK)); } diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 6eca019..ebf41e2 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -808,18 +808,19 @@ static void dcssblk_make_request(struct request_queue *q, struct bio *bio) { struct dcssblk_dev_info *dev_info; - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; unsigned long index; unsigned long page_addr; unsigned long source_addr; unsigned long bytes_done; - int i; bytes_done = 0; dev_info = bio->bi_bdev->bd_disk->private_data; if (dev_info == NULL) goto fail; - if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) + if ((bio->bi_iter.bi_sector & 7) != 0 || + (bio->bi_iter.bi_size & 4095) != 0) /* Request is not page-aligned. */ goto fail; if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { @@ -842,22 +843,22 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) } } - index = (bio->bi_sector >> 3); - bio_for_each_segment(bvec, bio, i) { + index = (bio->bi_iter.bi_sector >> 3); + bio_for_each_segment(bvec, bio, iter) { page_addr = (unsigned long) - page_address(bvec->bv_page) + bvec->bv_offset; + page_address(bvec.bv_page) + bvec.bv_offset; source_addr = dev_info->start + (index<<12) + bytes_done; - if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0) + if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0) // More paranoia. 
goto fail; if (bio_data_dir(bio) == READ) { memcpy((void*)page_addr, (void*)source_addr, - bvec->bv_len); + bvec.bv_len); } else { memcpy((void*)source_addr, (void*)page_addr, - bvec->bv_len); + bvec.bv_len); } - bytes_done += bvec->bv_len; + bytes_done += bvec.bv_len; } bio_endio(bio, 0); return; diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index d0ab501..76bed17 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -130,7 +130,7 @@ static void scm_request_prepare(struct scm_request *scmrq) struct aidaw *aidaw = scmrq->aidaw; struct msb *msb = &scmrq->aob->msb[0]; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; msb->bs = MSB_BS_4K; scmrq->aob->request.msb_count = 1; @@ -142,9 +142,9 @@ static void scm_request_prepare(struct scm_request *scmrq) msb->data_addr = (u64) aidaw; rq_for_each_segment(bv, scmrq->request, iter) { - WARN_ON(bv->bv_offset); - msb->blk_count += bv->bv_len >> 12; - aidaw->data_addr = (u64) page_address(bv->bv_page); + WARN_ON(bv.bv_offset); + msb->blk_count += bv.bv_len >> 12; + aidaw->data_addr = (u64) page_address(bv.bv_page); aidaw++; } } diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index 27f930c..9aae909 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c @@ -122,7 +122,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq) struct aidaw *aidaw = scmrq->aidaw; struct msb *msb = &scmrq->aob->msb[0]; struct req_iterator iter; - struct bio_vec *bv; + struct bio_vec bv; int i = 0; u64 addr; @@ -163,7 +163,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq) i++; } rq_for_each_segment(bv, req, iter) { - aidaw->data_addr = (u64) page_address(bv->bv_page); + aidaw->data_addr = (u64) page_address(bv.bv_page); aidaw++; i++; } diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 58141f0..6969d39 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -184,25 +184,26 @@ static unsigned long xpram_highest_page_index(void) static void xpram_make_request(struct request_queue *q, struct bio *bio) { xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; unsigned int index; unsigned long page_addr; unsigned long bytes; - int i; - if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) + if ((bio->bi_iter.bi_sector & 7) != 0 || + (bio->bi_iter.bi_size & 4095) != 0) /* Request is not page-aligned. */ goto fail; - if ((bio->bi_size >> 12) > xdev->size) + if ((bio->bi_iter.bi_size >> 12) > xdev->size) /* Request size is no page-aligned. */ goto fail; - if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset) + if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) goto fail; - index = (bio->bi_sector >> 3) + xdev->offset; - bio_for_each_segment(bvec, bio, i) { + index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; + bio_for_each_segment(bvec, bio, iter) { page_addr = (unsigned long) - kmap(bvec->bv_page) + bvec->bv_offset; - bytes = bvec->bv_len; + kmap(bvec.bv_page) + bvec.bv_offset; + bytes = bvec.bv_len; if ((page_addr & 4095) != 0 || (bytes & 4095) != 0) /* More paranoia. 
*/ goto fail; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 446b851..0cac7d8 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -2163,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, } /* do we need to support multiple segments? */ - if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) { - printk("%s: multiple segments req %u %u, rsp %u %u\n", - __func__, bio_segments(req->bio), blk_rq_bytes(req), - bio_segments(rsp->bio), blk_rq_bytes(rsp)); + if (bio_multiple_segments(req->bio) || + bio_multiple_segments(rsp->bio)) { + printk("%s: multiple segments req %u, rsp %u\n", + __func__, blk_rq_bytes(req), blk_rq_bytes(rsp)); return -EINVAL; } diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index 9d26637..410f4a3 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -1901,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); Mpi2SmpPassthroughRequest_t *mpi_request; Mpi2SmpPassthroughReply_t *mpi_reply; - int rc, i; + int rc; u16 smid; u32 ioc_state; unsigned long timeleft; @@ -1916,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, void *pci_addr_out = NULL; u16 wait_state_count; struct request *rsp = req->next_rq; - struct bio_vec *bvec = NULL; + struct bio_vec bvec; + struct bvec_iter iter; if (!rsp) { printk(MPT2SAS_ERR_FMT "%s: the smp response space is " @@ -1942,7 +1943,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, ioc->transport_cmds.status = MPT2_CMD_PENDING; /* Check if the request is split across multiple segments */ - if (bio_segments(req->bio) > 1) { + if (bio_multiple_segments(req->bio)) { u32 offset = 0; /* Allocate memory and copy the request */ @@ -1955,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, goto out; } - bio_for_each_segment(bvec, req->bio, i) { + bio_for_each_segment(bvec, req->bio, iter) { memcpy(pci_addr_out + offset, - page_address(bvec->bv_page) + bvec->bv_offset, - bvec->bv_len); - offset += bvec->bv_len; + page_address(bvec.bv_page) + bvec.bv_offset, + bvec.bv_len); + offset += bvec.bv_len; } } else { dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), @@ -1974,7 +1975,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, /* Check if the response needs to be populated across * multiple segments */ - if (bio_segments(rsp->bio) > 1) { + if (bio_multiple_segments(rsp->bio)) { pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), &pci_dma_in); if (!pci_addr_in) { @@ -2041,7 +2042,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; - if (bio_segments(req->bio) > 1) { + if (bio_multiple_segments(req->bio)) { ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4), pci_dma_out); } else { @@ -2057,7 +2058,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST); sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; - if (bio_segments(rsp->bio) > 1) { + if (bio_multiple_segments(rsp->bio)) { ioc->base_add_sg_single(psge, sgl_flags | 
(blk_rq_bytes(rsp) + 4), pci_dma_in); } else { @@ -2102,23 +2103,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, le16_to_cpu(mpi_reply->ResponseDataLength); /* check if the resp needs to be copied from the allocated * pci mem */ - if (bio_segments(rsp->bio) > 1) { + if (bio_multiple_segments(rsp->bio)) { u32 offset = 0; u32 bytes_to_copy = le16_to_cpu(mpi_reply->ResponseDataLength); - bio_for_each_segment(bvec, rsp->bio, i) { - if (bytes_to_copy <= bvec->bv_len) { - memcpy(page_address(bvec->bv_page) + - bvec->bv_offset, pci_addr_in + + bio_for_each_segment(bvec, rsp->bio, iter) { + if (bytes_to_copy <= bvec.bv_len) { + memcpy(page_address(bvec.bv_page) + + bvec.bv_offset, pci_addr_in + offset, bytes_to_copy); break; } else { - memcpy(page_address(bvec->bv_page) + - bvec->bv_offset, pci_addr_in + - offset, bvec->bv_len); - bytes_to_copy -= bvec->bv_len; + memcpy(page_address(bvec.bv_page) + + bvec.bv_offset, pci_addr_in + + offset, bvec.bv_len); + bytes_to_copy -= bvec.bv_len; } - offset += bvec->bv_len; + offset += bvec.bv_len; } } } else { diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index e771a88..65170cb 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -1884,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); Mpi2SmpPassthroughRequest_t *mpi_request; Mpi2SmpPassthroughReply_t *mpi_reply; - int rc, i; + int rc; u16 smid; u32 ioc_state; unsigned long timeleft; @@ -1898,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, void *pci_addr_out = NULL; u16 wait_state_count; struct request *rsp = req->next_rq; - struct bio_vec *bvec = NULL; + struct bio_vec bvec; + struct bvec_iter iter; if (!rsp) { pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n", @@ -1925,7 +1926,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, ioc->transport_cmds.status = MPT3_CMD_PENDING; /* Check if the request is split across multiple segments */ - if (req->bio->bi_vcnt > 1) { + if (bio_multiple_segments(req->bio)) { u32 offset = 0; /* Allocate memory and copy the request */ @@ -1938,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, goto out; } - bio_for_each_segment(bvec, req->bio, i) { + bio_for_each_segment(bvec, req->bio, iter) { memcpy(pci_addr_out + offset, - page_address(bvec->bv_page) + bvec->bv_offset, - bvec->bv_len); - offset += bvec->bv_len; + page_address(bvec.bv_page) + bvec.bv_offset, + bvec.bv_len); + offset += bvec.bv_len; } } else { dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), @@ -1957,7 +1958,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, /* Check if the response needs to be populated across * multiple segments */ - if (rsp->bio->bi_vcnt > 1) { + if (bio_multiple_segments(rsp->bio)) { pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), &pci_dma_in); if (!pci_addr_in) { @@ -2018,7 +2019,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); psge = &mpi_request->SGL; - if (req->bio->bi_vcnt > 1) + if (bio_multiple_segments(req->bio)) ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4), pci_dma_in, (blk_rq_bytes(rsp) + 4)); else @@ -2063,23 +2064,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, /* check if 
the resp needs to be copied from the allocated * pci mem */ - if (rsp->bio->bi_vcnt > 1) { + if (bio_multiple_segments(rsp->bio)) { u32 offset = 0; u32 bytes_to_copy = le16_to_cpu(mpi_reply->ResponseDataLength); - bio_for_each_segment(bvec, rsp->bio, i) { - if (bytes_to_copy <= bvec->bv_len) { - memcpy(page_address(bvec->bv_page) + - bvec->bv_offset, pci_addr_in + + bio_for_each_segment(bvec, rsp->bio, iter) { + if (bytes_to_copy <= bvec.bv_len) { + memcpy(page_address(bvec.bv_page) + + bvec.bv_offset, pci_addr_in + offset, bytes_to_copy); break; } else { - memcpy(page_address(bvec->bv_page) + - bvec->bv_offset, pci_addr_in + - offset, bvec->bv_len); - bytes_to_copy -= bvec->bv_len; + memcpy(page_address(bvec.bv_page) + + bvec.bv_offset, pci_addr_in + + offset, bvec.bv_len); + bytes_to_copy -= bvec.bv_len; } - offset += bvec->bv_len; + offset += bvec.bv_len; } } } else { diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index aa66361..bac04c2 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or, bio->bi_rw &= ~REQ_WRITE; or->in.bio = bio; - or->in.total_bytes = bio->bi_size; + or->in.total_bytes = bio->bi_iter.bi_size; return 0; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 9846c6a..470954a 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -801,7 +801,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq) if (sdkp->device->no_write_same) return BLKPREP_KILL; - BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size); + BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); sector >>= ilog2(sdp->sector_size) - 9; nr_sectors >>= ilog2(sdp->sector_size) - 9; diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 6174ca4..a7a691d 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -365,7 +365,6 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector, struct bio *bio; struct scsi_disk *sdkp; struct sd_dif_tuple *sdt; - unsigned int i, j; u32 phys, virt; sdkp = rq->bio->bi_bdev->bd_disk->private_data; @@ -376,19 +375,21 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector, phys = hw_sector & 0xffffffff; __rq_for_each_bio(bio, rq) { - struct bio_vec *iv; + struct bio_vec iv; + struct bvec_iter iter; + unsigned int j; /* Already remapped? 
*/ if (bio_flagged(bio, BIO_MAPPED_INTEGRITY)) break; - virt = bio->bi_integrity->bip_sector & 0xffffffff; + virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff; - bip_for_each_vec(iv, bio->bi_integrity, i) { - sdt = kmap_atomic(iv->bv_page) - + iv->bv_offset; + bip_for_each_vec(iv, bio->bi_integrity, iter) { + sdt = kmap_atomic(iv.bv_page) + + iv.bv_offset; - for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { + for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) { if (be32_to_cpu(sdt->ref_tag) == virt) sdt->ref_tag = cpu_to_be32(phys); @@ -414,7 +415,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes) struct scsi_disk *sdkp; struct bio *bio; struct sd_dif_tuple *sdt; - unsigned int i, j, sectors, sector_sz; + unsigned int j, sectors, sector_sz; u32 phys, virt; sdkp = scsi_disk(scmd->request->rq_disk); @@ -430,15 +431,16 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes) phys >>= 3; __rq_for_each_bio(bio, scmd->request) { - struct bio_vec *iv; + struct bio_vec iv; + struct bvec_iter iter; - virt = bio->bi_integrity->bip_sector & 0xffffffff; + virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff; - bip_for_each_vec(iv, bio->bi_integrity, i) { - sdt = kmap_atomic(iv->bv_page) - + iv->bv_offset; + bip_for_each_vec(iv, bio->bi_integrity, iter) { + sdt = kmap_atomic(iv.bv_page) + + iv.bv_offset; - for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { + for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) { if (sectors == 0) { kunmap_atomic(sdt); diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c index 5338e8d..0718905 100644 --- a/drivers/staging/lustre/lustre/llite/lloop.c +++ b/drivers/staging/lustre/lustre/llite/lloop.c @@ -194,10 +194,10 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) struct cl_object *obj = ll_i2info(inode)->lli_clob; pgoff_t offset; int ret; - int i; int rw; obd_count page_count = 0; - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; struct bio *bio; ssize_t bytes; @@ -220,15 +220,15 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) for (bio = head; bio != NULL; bio = bio->bi_next) { LASSERT(rw == bio->bi_rw); - offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset; - bio_for_each_segment(bvec, bio, i) { - BUG_ON(bvec->bv_offset != 0); - BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE); + offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; + bio_for_each_segment(bvec, bio, iter) { + BUG_ON(bvec.bv_offset != 0); + BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE); - pages[page_count] = bvec->bv_page; + pages[page_count] = bvec.bv_page; offsets[page_count] = offset; page_count++; - offset += bvec->bv_len; + offset += bvec.bv_len; } LASSERT(page_count <= LLOOP_MAX_SEGMENTS); } @@ -313,7 +313,8 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req) bio = &lo->lo_bio; while (*bio && (*bio)->bi_rw == rw) { CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n", - (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size, + (unsigned long long)(*bio)->bi_iter.bi_sector, + (*bio)->bi_iter.bi_size, page_count, (*bio)->bi_vcnt); if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS) break; @@ -347,7 +348,8 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio) goto err; CDEBUG(D_INFO, "submit bio sector %llu size %u\n", - (unsigned long long)old_bio->bi_sector, old_bio->bi_size); + (unsigned long long)old_bio->bi_iter.bi_sector, + old_bio->bi_iter.bi_size); 
spin_lock_irq(&lo->lo_lock); inactive = (lo->lo_state != LLOOP_BOUND); @@ -367,7 +369,7 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio) loop_add_bio(lo, old_bio); return; err: - cfs_bio_io_error(old_bio, old_bio->bi_size); + cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size); } @@ -378,7 +380,7 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio) while (bio) { struct bio *tmp = bio->bi_next; bio->bi_next = NULL; - cfs_bio_endio(bio, bio->bi_size, ret); + cfs_bio_endio(bio, bio->bi_iter.bi_size, ret); bio = tmp; } } diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 3277d98..108f273 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -171,13 +171,14 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio) u64 start, end, bound; /* unaligned request */ - if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))) + if (unlikely(bio->bi_iter.bi_sector & + (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))) return 0; - if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1))) + if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1))) return 0; - start = bio->bi_sector; - end = start + (bio->bi_size >> SECTOR_SHIFT); + start = bio->bi_iter.bi_sector; + end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT); bound = zram->disksize >> SECTOR_SHIFT; /* out of range range */ if (unlikely(start >= bound || end > bound || start > end)) @@ -680,9 +681,10 @@ out: static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) { - int i, offset; + int offset; u32 index; - struct bio_vec *bvec; + struct bio_vec bvec; + struct bvec_iter iter; switch (rw) { case READ: @@ -693,36 +695,37 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) break; } - index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT; - offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; + index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; + offset = (bio->bi_iter.bi_sector & + (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; - bio_for_each_segment(bvec, bio, i) { + bio_for_each_segment(bvec, bio, iter) { int max_transfer_size = PAGE_SIZE - offset; - if (bvec->bv_len > max_transfer_size) { + if (bvec.bv_len > max_transfer_size) { /* * zram_bvec_rw() can only make operation on a single * zram page. Split the bio vector. */ struct bio_vec bv; - bv.bv_page = bvec->bv_page; + bv.bv_page = bvec.bv_page; bv.bv_len = max_transfer_size; - bv.bv_offset = bvec->bv_offset; + bv.bv_offset = bvec.bv_offset; if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0) goto out; - bv.bv_len = bvec->bv_len - max_transfer_size; + bv.bv_len = bvec.bv_len - max_transfer_size; bv.bv_offset += max_transfer_size; if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0) goto out; } else - if (zram_bvec_rw(zram, bvec, index, offset, bio, rw) + if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw) < 0) goto out; - update_position(&index, &offset, bvec); + update_position(&index, &offset, &bvec); } set_bit(BIO_UPTODATE, &bio->bi_flags); diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index c87959f..2d29356 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -319,7 +319,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) bio->bi_bdev = ib_dev->ibd_bd; bio->bi_private = cmd; bio->bi_end_io = &iblock_bio_done; - bio->bi_sector = lba; + bio->bi_iter.bi_sector = lba; return bio; } |
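
The s390 conversions above (dasd_eckd, dasd_fba, dcssblk, scm_blk, xpram) all follow one shape: the segment loop now fills an on-stack struct bio_vec through a struct bvec_iter instead of walking bio_vec pointers by index, and the starting sector and size are read from bio->bi_iter. Below is a minimal sketch of that pattern for a hypothetical memory-backed device; my_dev and my_make_request are invented names for illustration, and only the iterator usage mirrors the hunks.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical memory-backed device; only the iterator usage below
 * mirrors the converted drivers, the rest is illustrative. */
struct my_dev {
	void *base;			/* backing store */
};

static void my_make_request(struct my_dev *dev, struct bio *bio)
{
	struct bio_vec bvec;		/* on-stack copy, not a pointer */
	struct bvec_iter iter;		/* replaces the old integer index */
	unsigned long done = 0;
	/* Start sector and remaining size now live under bio->bi_iter. */
	unsigned long devoff = (unsigned long)bio->bi_iter.bi_sector << 9;

	bio_for_each_segment(bvec, bio, iter) {
		void *dst = page_address(bvec.bv_page) + bvec.bv_offset;

		if (bio_data_dir(bio) == READ)
			memcpy(dst, dev->base + devoff + done, bvec.bv_len);
		else
			memcpy(dev->base + devoff + done, dst, bvec.bv_len);

		done += bvec.bv_len;	/* members accessed with '.', not '->' */
	}

	bio_endio(bio, 0);		/* 3.14-era two-argument form */
}

Because the loop works on a private copy and a private iterator, a driver can advance or restart iteration without touching the bio's own vector table, which is why helpers that used to take a bio_vec pointer now take the address of the local copy (as in the zram hunk, where zram_bvec_rw() receives &bvec).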
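
The same hunks also show bi_sector and bi_size moving under bio->bi_iter. A short sketch of the alignment and bounds check that dcssblk, xpram and zram perform; my_valid_io_request and disk_sectors are invented stand-ins, and the constants encode the 4K alignment those drivers require.

#include <linux/bio.h>

/* Sketch of the request-validation pattern in the zram/xpram hunks above. */
static int my_valid_io_request(struct bio *bio, sector_t disk_sectors)
{
	sector_t start, end;

	if (bio->bi_iter.bi_sector & 7)		/* start not 4K-aligned */
		return 0;
	if (bio->bi_iter.bi_size & 4095)	/* size not whole pages */
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> 9);	/* bytes -> sectors */

	return end <= disk_sectors;
}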
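
In the libsas and mpt2sas/mpt3sas SMP handlers, the open-coded tests bio_segments(bio) > 1 and bio->bi_vcnt > 1 become bio_multiple_segments(bio), which asks whether the bio's remaining bytes extend past its current vector. A hedged sketch of the branch those handlers take when staging the payload; my_flatten_request and buf are invented names, and the copy loop mirrors the request path shown above.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: flatten a possibly multi-segment request into a
 * caller-provided bounce buffer of at least blk_rq_bytes(req) bytes,
 * the way the SMP passthrough paths above stage their payload. */
static void my_flatten_request(struct request *req, void *buf)
{
	if (bio_multiple_segments(req->bio)) {
		struct bio_vec bvec;
		struct bvec_iter iter;
		u32 offset = 0;

		bio_for_each_segment(bvec, req->bio, iter) {
			memcpy(buf + offset,
			       page_address(bvec.bv_page) + bvec.bv_offset,
			       bvec.bv_len);
			offset += bvec.bv_len;
		}
	} else {
		/* Single segment: the payload is virtually contiguous. */
		memcpy(buf, bio_data(req->bio), blk_rq_bytes(req));
	}
}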
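
The sd_dif hunks apply the pattern to the protection-information path: bip_for_each_vec() now yields struct bio_vec copies through a bvec_iter, and the starting virtual reference tag is read from bip_iter.bi_sector rather than bip_sector. The following sketch assumes CONFIG_BLK_DEV_INTEGRITY and an 8-byte DIF tuple; my_dif_tuple and my_dif_remap are invented names that mirror the remapping loop in sd_dif_prepare() above.

#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/types.h>

/* Stand-in for the 8-byte T10 DIF tuple sd_dif.c operates on. */
struct my_dif_tuple {
	__be16 guard_tag;
	__be16 app_tag;
	__be32 ref_tag;
};

/* Remap virtual to physical reference tags across one bio's integrity
 * vectors, following the shape of sd_dif_prepare(). */
static void my_dif_remap(struct bio *bio, u32 phys)
{
	struct bio_vec iv;
	struct bvec_iter iter;
	u32 virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;

	bip_for_each_vec(iv, bio->bi_integrity, iter) {
		struct my_dif_tuple *sdt;
		unsigned int j;

		sdt = kmap_atomic(iv.bv_page) + iv.bv_offset;

		for (j = 0; j < iv.bv_len; j += sizeof(*sdt), sdt++) {
			if (be32_to_cpu(sdt->ref_tag) == virt)
				sdt->ref_tag = cpu_to_be32(phys);
			virt++;
			phys++;
		}

		/* Unmapping with the advanced pointer matches the driver. */
		kunmap_atomic(sdt);
	}
}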