Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/kern_physio.c    |  4
-rw-r--r--   sys/kern/subr_devstat.c   |  2
-rw-r--r--   sys/kern/subr_disk.c      |  2
-rw-r--r--   sys/kern/subr_disklabel.c |  2
-rw-r--r--   sys/kern/subr_diskslice.c |  4
-rw-r--r--   sys/kern/vfs_aio.c        |  6
-rw-r--r--   sys/kern/vfs_bio.c        | 64
-rw-r--r--   sys/kern/vfs_cluster.c    | 16
-rw-r--r--   sys/kern/vfs_default.c    |  6
9 files changed, 61 insertions, 45 deletions
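Every hunk below makes the same mechanical change: per-I/O status moves out of the general-purpose b_flags word into the new b_ioflags word, so B_ERROR becomes BIO_ERROR and B_ORDERED becomes BIO_ORDERED, while buffer-management flags such as B_INVAL, B_ASYNC and B_DONE stay in b_flags. As a minimal sketch of the resulting error-path idiom in a strategy routine — the device and its validity check are hypothetical, only the b_error / b_ioflags / BIO_ERROR / biodone() usage is taken from the hunks themselves (compare diskstrategy() and vop_nostrategy() below):

    /* Hypothetical driver fragment; headers as used by the files in this diff. */
    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/buf.h>

    static void
    exampledev_strategy(struct buf *bp)
    {
            /* Hypothetical sanity check; the point is the error path below. */
            if (bp->b_blkno < 0) {
                    bp->b_error = EINVAL;           /* error code still lives in b_error */
                    bp->b_ioflags |= BIO_ERROR;     /* was: bp->b_flags |= B_ERROR */
                    biodone(bp);                    /* complete the request with the error */
                    return;
            }
            /* ... otherwise queue the request to the (hypothetical) hardware ... */
    }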
diff --git a/sys/kern/kern_physio.c b/sys/kern/kern_physio.c
index f5b81e8..62df11c 100644
--- a/sys/kern/kern_physio.c
+++ b/sys/kern/kern_physio.c
@@ -118,13 +118,13 @@ physio(dev_t dev, struct uio *uio, int ioflag)
 		if (uio->uio_segflg == UIO_USERSPACE)
 			vunmapbuf(bp);
 		iolen = bp->b_bcount - bp->b_resid;
-		if (iolen == 0 && !(bp->b_flags & B_ERROR))
+		if (iolen == 0 && !(bp->b_ioflags & BIO_ERROR))
 			goto doerror;	/* EOF */
 		uio->uio_iov[i].iov_len -= iolen;
 		uio->uio_iov[i].iov_base += iolen;
 		uio->uio_resid -= iolen;
 		uio->uio_offset += iolen;
-		if( bp->b_flags & B_ERROR) {
+		if( bp->b_ioflags & BIO_ERROR) {
 			error = bp->b_error;
 			goto doerror;
 		}
diff --git a/sys/kern/subr_devstat.c b/sys/kern/subr_devstat.c
index 7ebc257..81740ed 100644
--- a/sys/kern/subr_devstat.c
+++ b/sys/kern/subr_devstat.c
@@ -241,7 +241,7 @@ devstat_end_transaction_buf(struct devstat *ds, struct buf *bp)
 		flg = DEVSTAT_WRITE;
 
 	devstat_end_transaction(ds, bp->b_bcount - bp->b_resid,
-			      (bp->b_flags & B_ORDERED) ?
+			      (bp->b_ioflags & BIO_ORDERED) ?
 			      DEVSTAT_TAG_ORDERED : DEVSTAT_TAG_SIMPLE, flg);
 }
 
diff --git a/sys/kern/subr_disk.c b/sys/kern/subr_disk.c
index 23ec003..6d812e5 100644
--- a/sys/kern/subr_disk.c
+++ b/sys/kern/subr_disk.c
@@ -191,7 +191,7 @@ diskstrategy(struct buf *bp)
 
 	if (!dp) {
 		bp->b_error = ENXIO;
-		bp->b_flags |= B_ERROR;
+		bp->b_ioflags |= BIO_ERROR;
 		biodone(bp);
 		return;
 	}
diff --git a/sys/kern/subr_disklabel.c b/sys/kern/subr_disklabel.c
index c18b984..fb2064a 100644
--- a/sys/kern/subr_disklabel.c
+++ b/sys/kern/subr_disklabel.c
@@ -77,7 +77,7 @@ bufqdisksort(bufq, bp)
 	 * ordered transaction, then it's easy.
 	 */
 	if ((bq = bufq_first(bufq)) == NULL
-	    || (bp->b_flags & B_ORDERED) != 0) {
+	    || (bp->b_ioflags & BIO_ORDERED) != 0) {
 		bufq_insert_tail(bufq, bp);
 		return;
 	} else if (bufq->insert_point != NULL) {
diff --git a/sys/kern/subr_diskslice.c b/sys/kern/subr_diskslice.c
index 73f735e..fda1901 100644
--- a/sys/kern/subr_diskslice.c
+++ b/sys/kern/subr_diskslice.c
@@ -307,7 +307,7 @@ bad_blkno:
 
 bad:
 	bp->b_resid = bp->b_bcount;
-	bp->b_flags |= B_ERROR;
+	bp->b_ioflags |= BIO_ERROR;
 	return (-1);
 }
 
@@ -540,7 +540,7 @@ dsiodone(bp)
 	bp->b_iodone = ic->ic_prev_iodone;
 	bp->b_iodone_chain = ic->ic_prev_iodone_chain;
 	if (!(bp->b_iocmd == BIO_READ)
-	    || (!(bp->b_flags & B_ERROR) && bp->b_error == 0)) {
+	    || (!(bp->b_ioflags & BIO_ERROR) && bp->b_error == 0)) {
 		msg = fixlabel((char *)NULL, ic->ic_args[1].ia_ptr,
 			       (struct disklabel *)
 			       (bp->b_data + ic->ic_args[0].ia_long),
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index 3930d54..28c33cc 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -1043,7 +1043,7 @@ aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
 	 * but is returned using the aio_error mechanism. In this case,
 	 * aio_suspend will return immediately.
 	 */
-	if (bp->b_error || (bp->b_flags & B_ERROR)) {
+	if (bp->b_error || (bp->b_ioflags & BIO_ERROR)) {
 		struct aiocb *job = aiocbe->uuaiocb;
 
 		aiocbe->uaiocb._aiocb_private.status = 0;
@@ -1110,7 +1110,7 @@ aio_fphysio(struct proc *p, struct aiocblist *iocb, int flgwait)
 	error = 0;
 
 	/* Check for an error. */
-	if (bp->b_flags & B_ERROR)
+	if (bp->b_ioflags & BIO_ERROR)
 		error = bp->b_error;
 
 	relpbuf(bp, NULL);
@@ -2143,7 +2143,7 @@ aio_physwakeup(struct buf *bp)
 		aiocbe->uaiocb._aiocb_private.error = 0;
 		aiocbe->jobflags |= AIOCBLIST_DONE;
 
-		if (bp->b_flags & B_ERROR)
+		if (bp->b_ioflags & BIO_ERROR)
 			aiocbe->uaiocb._aiocb_private.error = bp->b_error;
 
 		lj = aiocbe->lio;
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 3b11402..be40650 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -445,7 +445,7 @@ bremfree(struct buf * bp)
 
 /*
  * Get a buffer with the specified data. Look in the cache first. We
- * must clear B_ERROR and B_INVAL prior to initiating I/O. If B_CACHE
+ * must clear BIO_ERROR and B_INVAL prior to initiating I/O. If B_CACHE
  * is set, the buffer is valid and we do not have to do anything ( see
  * getblk() ).
  */
@@ -464,7 +464,8 @@ bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
 			curproc->p_stats->p_ru.ru_inblock++;
 		KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
 		bp->b_iocmd = BIO_READ;
-		bp->b_flags &= ~(B_ERROR | B_INVAL);
+		bp->b_flags &= ~B_INVAL;
+		bp->b_ioflags &= ~BIO_ERROR;
 		if (bp->b_rcred == NOCRED) {
 			if (cred != NOCRED)
 				crhold(cred);
@@ -479,7 +480,7 @@
 
 /*
  * Operates like bread, but also starts asynchronous I/O on
- * read-ahead blocks. We must clear B_ERROR and B_INVAL prior
+ * read-ahead blocks. We must clear BIO_ERROR and B_INVAL prior
  * to initiating I/O . If B_CACHE is set, the buffer is valid
  * and we do not have to do anything.
  */
@@ -499,7 +500,8 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
 		if (curproc != NULL)
 			curproc->p_stats->p_ru.ru_inblock++;
 		bp->b_iocmd = BIO_READ;
-		bp->b_flags &= ~(B_ERROR | B_INVAL);
+		bp->b_flags &= ~B_INVAL;
+		bp->b_ioflags &= ~BIO_ERROR;
 		if (bp->b_rcred == NOCRED) {
 			if (cred != NOCRED)
 				crhold(cred);
@@ -519,7 +521,8 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
 			if (curproc != NULL)
 				curproc->p_stats->p_ru.ru_inblock++;
 			rabp->b_flags |= B_ASYNC;
-			rabp->b_flags &= ~(B_ERROR | B_INVAL);
+			rabp->b_flags &= ~B_INVAL;
+			rabp->b_ioflags &= ~BIO_ERROR;
 			rabp->b_iocmd = BIO_READ;
 			if (rabp->b_rcred == NOCRED) {
 				if (cred != NOCRED)
@@ -629,7 +632,8 @@ bwrite(struct buf * bp)
 		bp = newbp;
 	}
 
-	bp->b_flags &= ~(B_DONE | B_ERROR);
+	bp->b_flags &= ~B_DONE;
+	bp->b_ioflags &= ~BIO_ERROR;
 	bp->b_flags |= B_WRITEINPROG | B_CACHE;
 	bp->b_iocmd = BIO_WRITE;
@@ -862,7 +866,8 @@ bawrite(struct buf * bp)
 int
 bowrite(struct buf * bp)
 {
-	bp->b_flags |= B_ORDERED | B_ASYNC;
+	bp->b_ioflags |= BIO_ORDERED;
+	bp->b_flags |= B_ASYNC;
 	return (BUF_WRITE(bp));
 }
 
@@ -911,20 +916,22 @@ brelse(struct buf * bp)
 	s = splbio();
 
 	if (bp->b_flags & B_LOCKED)
-		bp->b_flags &= ~B_ERROR;
+		bp->b_ioflags &= ~BIO_ERROR;
 
 	if (bp->b_iocmd == BIO_WRITE &&
-	    (bp->b_flags & (B_ERROR | B_INVAL)) == B_ERROR) {
+	    (bp->b_ioflags & BIO_ERROR) &&
+	    !(bp->b_flags & B_INVAL)) {
 		/*
-		 * Failed write, redirty. Must clear B_ERROR to prevent
+		 * Failed write, redirty. Must clear BIO_ERROR to prevent
 		 * pages from being scrapped. If B_INVAL is set then
 		 * this case is not run and the next case is run to
 		 * destroy the buffer. B_INVAL can occur if the buffer
 		 * is outside the range supported by the underlying device.
 		 */
-		bp->b_flags &= ~B_ERROR;
+		bp->b_ioflags &= ~BIO_ERROR;
 		bdirty(bp);
-	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
+	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
+	    (bp->b_ioflags & BIO_ERROR) ||
 	    bp->b_iocmd == BIO_DELETE || (bp->b_bufsize <= 0)) {
 		/*
 		 * Either a failed I/O or we were asked to free or not
@@ -965,8 +972,8 @@ brelse(struct buf * bp)
 	 * B_INVAL, the struct buf is invalidated but the VM object is kept
 	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
 	 *
-	 * If B_ERROR or B_NOCACHE is set, pages in the VM object will be
-	 * invalidated. B_ERROR cannot be set for a failed write unless the
+	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
+	 * invalidated. BIO_ERROR cannot be set for a failed write unless the
 	 * buffer is also B_INVAL because it hits the re-dirtying code above.
 	 *
 	 * Normally we can do this whether a buffer is B_DELWRI or not. If
@@ -1034,7 +1041,7 @@ brelse(struct buf * bp)
 					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 				}
 			}
-			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
+			if ((bp->b_flags & B_NOCACHE) || (bp->b_ioflags & BIO_ERROR)) {
 				int poffset = foff & PAGE_MASK;
 				int presid = resid > (PAGE_SIZE - poffset) ?
 					(PAGE_SIZE - poffset) : resid;
@@ -1086,7 +1093,7 @@ brelse(struct buf * bp)
 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
 		bp->b_dev = NODEV;
 	/* buffers with junk contents */
-	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
+	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) || (bp->b_ioflags & BIO_ERROR)) {
 		bp->b_flags |= B_INVAL;
 		bp->b_xflags &= ~BX_BKGRDWRITE;
 		if (bp->b_xflags & BX_BKGRDINPROG)
@@ -1155,7 +1162,8 @@ brelse(struct buf * bp)
 
 	/* unlock */
 	BUF_UNLOCK(bp);
-	bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
+	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
+	bp->b_ioflags &= ~BIO_ORDERED;
 	splx(s);
 }
@@ -1187,7 +1195,7 @@ bqrelse(struct buf * bp)
 		return;
 	}
 	if (bp->b_flags & B_LOCKED) {
-		bp->b_flags &= ~B_ERROR;
+		bp->b_ioflags &= ~BIO_ERROR;
 		bp->b_qindex = QUEUE_LOCKED;
 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
 		/* buffers with stale but valid contents */
@@ -1214,7 +1222,8 @@ bqrelse(struct buf * bp)
 
 	/* unlock */
 	BUF_UNLOCK(bp);
-	bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
+	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
+	bp->b_ioflags &= ~BIO_ORDERED;
 	splx(s);
 }
@@ -1571,6 +1580,7 @@ restart:
 		allocbuf(bp, 0);
 
 		bp->b_flags = 0;
+		bp->b_ioflags = 0;
 		bp->b_xflags = 0;
 		bp->b_dev = NODEV;
 		bp->b_vp = NULL;
@@ -2037,7 +2047,7 @@ vfs_setdirty(struct buf *bp)
  * the caller should set B_CACHE ( as an optimization ), else the caller
  * should issue the I/O and biodone() will set B_CACHE if the I/O was
  * a write attempt or if it was a successfull read. If the caller
- * intends to issue a READ, the caller must clear B_INVAL and B_ERROR
+ * intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
  * prior to issuing the READ. biodone() will *not* clear B_INVAL.
 */
struct buf *
@@ -2590,7 +2600,7 @@ biowait(register struct buf * bp)
 		bp->b_flags &= ~B_EINTR;
 		return (EINTR);
 	}
-	if (bp->b_flags & B_ERROR) {
+	if (bp->b_ioflags & BIO_ERROR) {
 		return (bp->b_error ? bp->b_error : EIO);
 	} else {
 		return (0);
@@ -2695,7 +2705,8 @@ biodone(register struct buf * bp)
 		 */
 		iosize = bp->b_bcount - bp->b_resid;
 		if (bp->b_iocmd == BIO_READ &&
-		    !(bp->b_flags & (B_INVAL|B_NOCACHE|B_ERROR))) {
+		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
+		    !(bp->b_ioflags & BIO_ERROR)) {
 			bp->b_flags |= B_CACHE;
 		}
 
@@ -2776,7 +2787,7 @@ biodone(register struct buf * bp)
 	 */
 	if (bp->b_flags & B_ASYNC) {
-		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
+		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
 			brelse(bp);
 		else
 			bqrelse(bp);
 
@@ -2865,7 +2876,7 @@ vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
 * inconsistant.
 *
 * Since I/O has not been initiated yet, certain buffer flags
- * such as B_ERROR or B_INVAL may be in an inconsistant state
+ * such as BIO_ERROR or B_INVAL may be in an inconsistant state
 * and should be ignored.
 */
void
@@ -3006,7 +3017,7 @@ vfs_bio_set_validclean(struct buf *bp, int base, int size)
 *	vfs_bio_clrbuf:
 *
 *	clear a buffer. This routine essentially fakes an I/O, so we need
- *	to clear B_ERROR and B_INVAL.
+ *	to clear BIO_ERROR and B_INVAL.
 *
 *	Note that while we only theoretically need to clear through b_bcount,
 *	we go ahead and clear through b_bufsize.
@@ -3017,7 +3028,8 @@ vfs_bio_clrbuf(struct buf *bp) {
 	int i, mask = 0;
 	caddr_t sa, ea;
 	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
-		bp->b_flags &= ~(B_INVAL|B_ERROR);
+		bp->b_flags &= ~B_INVAL;
+		bp->b_ioflags &= ~BIO_ERROR;
 		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
 		    (bp->b_offset & PAGE_MASK) == 0) {
 			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index d7e6f61..44ac985 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -247,7 +247,8 @@ single_block_read:
 #endif
 		if ((bp->b_flags & B_CLUSTER) == 0)
 			vfs_busy_pages(bp, 0);
-		bp->b_flags &= ~(B_ERROR|B_INVAL);
+		bp->b_flags &= ~B_INVAL;
+		bp->b_ioflags &= ~BIO_ERROR;
 		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
 			BUF_KERNPROC(bp);
 		error = VOP_STRATEGY(vp, bp);
@@ -282,7 +283,8 @@ single_block_read:
 
 		if ((rbp->b_flags & B_CLUSTER) == 0)
 			vfs_busy_pages(rbp, 0);
-		rbp->b_flags &= ~(B_ERROR|B_INVAL);
+		rbp->b_flags &= ~B_INVAL;
+		rbp->b_ioflags &= ~BIO_ERROR;
 		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
 			BUF_KERNPROC(rbp);
 		(void) VOP_STRATEGY(vp, rbp);
@@ -468,7 +470,7 @@ cluster_callback(bp)
 	/*
 	 * Must propogate errors to all the components.
 	 */
-	if (bp->b_flags & B_ERROR)
+	if (bp->b_ioflags & BIO_ERROR)
 		error = bp->b_error;
 
 	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
@@ -480,11 +482,12 @@ cluster_callback(bp)
 	     tbp; tbp = nbp) {
 		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
 		if (error) {
-			tbp->b_flags |= B_ERROR;
+			tbp->b_ioflags |= BIO_ERROR;
 			tbp->b_error = error;
 		} else {
 			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
-			tbp->b_flags &= ~(B_ERROR|B_INVAL);
+			tbp->b_flags &= ~B_INVAL;
+			tbp->b_ioflags &= ~BIO_ERROR;
 		}
 		biodone(tbp);
 	}
@@ -837,7 +840,8 @@ cluster_wbuild(vp, size, start_lbn, len)
 			s = splbio();
 			bundirty(tbp);
-			tbp->b_flags &= ~(B_DONE | B_ERROR);
+			tbp->b_flags &= ~B_DONE;
+			tbp->b_ioflags &= ~BIO_ERROR;
 			tbp->b_flags |= B_ASYNC;
 			tbp->b_iocmd = BIO_WRITE;
 			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 35a3d09..71504a5 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -152,11 +152,11 @@ vop_panic(struct vop_generic_args *ap)
 *
 *	Strategy routine for VFS devices that have none.
 *
- *	B_ERROR and B_INVAL must be cleared prior to calling any strategy
+ *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine. Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
- *	invalid. B_ERROR should be cleared either way.
+ *	invalid. BIO_ERROR should be cleared either way.
 */
 
static int
@@ -165,7 +165,7 @@ vop_nostrategy (struct vop_strategy_args *ap)
 	printf("No strategy for buffer at %p\n", ap->a_bp);
 	vprint("", ap->a_vp);
 	vprint("", ap->a_bp->b_vp);
-	ap->a_bp->b_flags |= B_ERROR;
+	ap->a_bp->b_ioflags |= BIO_ERROR;
 	ap->a_bp->b_error = EOPNOTSUPP;
 	biodone(ap->a_bp);
 	return (EOPNOTSUPP);
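The read path shows the same split on both sides of an I/O: B_INVAL is cleared in b_flags and BIO_ERROR in b_ioflags before the request is issued, and biowait() reports the failure afterwards by checking BIO_ERROR. A rough sketch of that sequence, modelled on the bread()/biowait() hunks above — the helper itself is hypothetical and assumes bp is already a locked buffer associated with vp:

    /* Hypothetical helper illustrating pre/post-I/O flag handling after this change. */
    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/buf.h>
    #include <sys/vnode.h>

    static int
    example_issue_read(struct vnode *vp, struct buf *bp)
    {
            bp->b_iocmd = BIO_READ;
            bp->b_flags &= ~B_INVAL;        /* buffer-management state: stays in b_flags */
            bp->b_ioflags &= ~BIO_ERROR;    /* I/O state: now cleared in b_ioflags */
            vfs_busy_pages(bp, 0);
            (void) VOP_STRATEGY(vp, bp);
            return (biowait(bp));           /* returns bp->b_error (or EIO) if BIO_ERROR is set */
    }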