Diffstat (limited to 'sys/kern')

-rw-r--r--  sys/kern/kern_physio.c     8
-rw-r--r--  sys/kern/subr_devstat.c    4
-rw-r--r--  sys/kern/subr_disklabel.c 12
-rw-r--r--  sys/kern/subr_diskmbr.c    4
-rw-r--r--  sys/kern/subr_diskslice.c 12
-rw-r--r--  sys/kern/vfs_aio.c         7
-rw-r--r--  sys/kern/vfs_bio.c        56
-rw-r--r--  sys/kern/vfs_cluster.c    30
-rw-r--r--  sys/kern/vfs_default.c     8

9 files changed, 77 insertions, 64 deletions
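The same mechanical conversion is applied in every file below: the I/O direction moves out of b_flags (B_READ, B_WRITE, B_FREEBUF) into the new b_iocmd field (BIO_READ, BIO_WRITE, BIO_DELETE), and the B_CALL flag goes away because a completion handler is now indicated simply by a non-NULL b_iodone. A minimal before/after sketch of the idiom, using only the fields and flag names that appear in the diff below (the callback name mydone and the handler handle_read are hypothetical, shown for illustration only):

	/* old: direction and callback encoded as buffer flags */
	bp->b_flags = B_PHYS | B_CALL | B_READ;
	bp->b_iodone = mydone;

	/* new: b_flags keeps buffer state only, b_iocmd names the operation */
	bp->b_flags = B_PHYS;
	bp->b_iocmd = BIO_READ;		/* or BIO_WRITE, BIO_DELETE */
	bp->b_iodone = mydone;		/* non-NULL b_iodone replaces B_CALL */

	/* consumers test the command field instead of flag bits */
	if (bp->b_iocmd == BIO_READ)	/* was: (bp->b_flags & B_READ) != 0 */
		handle_read(bp);	/* hypothetical consumer */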
diff --git a/sys/kern/kern_physio.c b/sys/kern/kern_physio.c
index 128283f..1510bf1 100644
--- a/sys/kern/kern_physio.c
+++ b/sys/kern/kern_physio.c
@@ -33,7 +33,6 @@ static void
 physwakeup(struct buf *bp)
 {
 	wakeup((caddr_t) bp);
-	bp->b_flags &= ~B_CALL;
 }
 
 int
@@ -63,10 +62,11 @@ physio(dev_t dev, struct uio *uio, int ioflag)
 
 	for (i = 0; i < uio->uio_iovcnt; i++) {
 		while (uio->uio_iov[i].iov_len) {
+			bp->b_flags = B_PHYS;
 			if (uio->uio_rw == UIO_READ)
-				bp->b_flags = B_PHYS | B_CALL | B_READ;
+				bp->b_iocmd = BIO_READ;
 			else
-				bp->b_flags = B_PHYS | B_CALL | B_WRITE;
+				bp->b_iocmd = BIO_WRITE;
 			bp->b_dev = dev;
 			bp->b_iodone = physwakeup;
 			bp->b_data = uio->uio_iov[i].iov_base;
@@ -101,7 +101,7 @@ physio(dev_t dev, struct uio *uio, int ioflag)
 
 			if (uio->uio_segflg == UIO_USERSPACE) {
 				if (!useracc(bp->b_data, bp->b_bufsize,
-				    bp->b_flags & B_READ ?
+				    bp->b_iocmd == BIO_READ ?
 				    VM_PROT_WRITE : VM_PROT_READ)) {
 					error = EFAULT;
 					goto doerror;
diff --git a/sys/kern/subr_devstat.c b/sys/kern/subr_devstat.c
index b04662b..7ebc257 100644
--- a/sys/kern/subr_devstat.c
+++ b/sys/kern/subr_devstat.c
@@ -233,9 +233,9 @@ devstat_end_transaction_buf(struct devstat *ds, struct buf *bp)
 {
 	devstat_trans_flags flg;
 
-	if (bp->b_flags & B_FREEBUF)
+	if (bp->b_iocmd == BIO_DELETE)
 		flg = DEVSTAT_FREE;
-	else if (bp->b_flags & B_READ)
+	else if (bp->b_iocmd == BIO_READ)
 		flg = DEVSTAT_READ;
 	else
 		flg = DEVSTAT_WRITE;
diff --git a/sys/kern/subr_disklabel.c b/sys/kern/subr_disklabel.c
index 9184f2f..ff72629 100644
--- a/sys/kern/subr_disklabel.c
+++ b/sys/kern/subr_disklabel.c
@@ -181,7 +181,7 @@ readdisklabel(dev, lp)
 	bp->b_blkno = LABELSECTOR * ((int)lp->d_secsize/DEV_BSIZE);
 	bp->b_bcount = lp->d_secsize;
 	bp->b_flags &= ~B_INVAL;
-	bp->b_flags |= B_READ;
+	bp->b_iocmd = BIO_READ;
 	BUF_STRATEGY(bp, 1);
 	if (biowait(bp))
 		msg = "I/O error";
@@ -284,7 +284,7 @@ writedisklabel(dev, lp)
 	 * (also stupid.. how do you write the first one? by raw writes?)
 	 */
 	bp->b_flags &= ~B_INVAL;
-	bp->b_flags |= B_READ;
+	bp->b_iocmd = BIO_READ;
 	BUF_STRATEGY(bp, 1);
 	error = biowait(bp);
 	if (error)
@@ -296,8 +296,8 @@ writedisklabel(dev, lp)
 		if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC &&
 		    dkcksum(dlp) == 0) {
 			*dlp = *lp;
-			bp->b_flags &= ~(B_DONE | B_READ);
-			bp->b_flags |= B_WRITE;
+			bp->b_flags &= ~B_DONE;
+			bp->b_iocmd = BIO_WRITE;
 #ifdef __alpha__
 			alpha_fix_srm_checksum(bp);
 #endif
@@ -313,7 +313,7 @@ done:
 	dlp = (struct disklabel *)bp->b_data;
 	*dlp = *lp;
 	bp->b_flags &= ~B_INVAL;
-	bp->b_flags |= B_WRITE;
+	bp->b_iocmd = BIO_WRITE;
 	BUF_STRATEGY(bp, 1);
 	error = biowait(bp);
 #endif
@@ -375,7 +375,7 @@ diskerr(bp, what, pri, blkdone, lp)
 		pr = printf;
 	sname = dsname(bp->b_dev, unit, slice, part, partname);
 	(*pr)("%s%s: %s %sing fsbn ", sname, partname, what,
-	    bp->b_flags & B_READ ? "read" : "writ");
+	    bp->b_iocmd == BIO_READ ? "read" : "writ");
 	sn = bp->b_blkno;
 	if (bp->b_bcount <= DEV_BSIZE)
 		(*pr)("%ld", (long)sn);
diff --git a/sys/kern/subr_diskmbr.c b/sys/kern/subr_diskmbr.c
index c16f5d7..9802d9f 100644
--- a/sys/kern/subr_diskmbr.c
+++ b/sys/kern/subr_diskmbr.c
@@ -187,7 +187,7 @@ reread_mbr:
 	bp->b_dev = dkmodpart(dkmodslice(dev, WHOLE_DISK_SLICE), RAW_PART);
 	bp->b_blkno = mbr_offset;
 	bp->b_bcount = lp->d_secsize;
-	bp->b_flags |= B_READ;
+	bp->b_iocmd = BIO_READ;
 	BUF_STRATEGY(bp, 1);
 	if (biowait(bp) != 0) {
 		diskerr(bp, "reading primary partition table: error",
@@ -403,7 +403,7 @@ mbr_extended(dev, lp, ssp, ext_offset, ext_size, base_ext_offset, nsectors,
 	bp->b_dev = dev;
 	bp->b_blkno = ext_offset;
 	bp->b_bcount = lp->d_secsize;
-	bp->b_flags |= B_READ;
+	bp->b_iocmd = BIO_READ;
 	BUF_STRATEGY(bp, 1);
 	if (biowait(bp) != 0) {
 		diskerr(bp, "reading extended partition table: error",
diff --git a/sys/kern/subr_diskslice.c b/sys/kern/subr_diskslice.c
index e42488e..73f735e 100644
--- a/sys/kern/subr_diskslice.c
+++ b/sys/kern/subr_diskslice.c
@@ -208,14 +208,14 @@ if (labelsect != 0) Debugger("labelsect != 0 in dscheck()");
 #if LABELSECTOR != 0
 	    slicerel_secno + nsec > LABELSECTOR + labelsect &&
 #endif
-	    (bp->b_flags & B_READ) == 0 && sp->ds_wlabel == 0) {
+	    (bp->b_iocmd == BIO_WRITE) && sp->ds_wlabel == 0) {
 		bp->b_error = EROFS;
 		goto bad;
 	}
 
 #if defined(DOSBBSECTOR) && defined(notyet)
 	/* overwriting master boot record? */
-	if (slicerel_secno <= DOSBBSECTOR && (bp->b_flags & B_READ) == 0 &&
+	if (slicerel_secno <= DOSBBSECTOR && (bp->b_iocmd == BIO_WRITE) &&
 	    sp->ds_wlabel == 0) {
 		bp->b_error = EROFS;
 		goto bad;
@@ -259,10 +259,9 @@ if (labelsect != 0) Debugger("labelsect != 0 in dscheck()");
 			ic->ic_args[0].ia_long = (LABELSECTOR + labelsect -
 			    slicerel_secno) * ssp->dss_secsize;
 			ic->ic_args[1].ia_ptr = sp;
-			bp->b_flags |= B_CALL;
 			bp->b_iodone = dsiodone;
 			bp->b_iodone_chain = ic;
-			if (!(bp->b_flags & B_READ)) {
+			if (!(bp->b_iocmd == BIO_READ)) {
 				/*
 				 * XXX even disklabel(8) writes directly so we need
 				 * to adjust writes.  Perhaps we should drop support
@@ -537,11 +536,10 @@ dsiodone(bp)
 	char *msg;
 
 	ic = bp->b_iodone_chain;
-	bp->b_flags = (ic->ic_prev_flags & B_CALL)
-	    | (bp->b_flags & ~(B_CALL | B_DONE));
+	bp->b_flags = bp->b_flags & ~B_DONE;
 	bp->b_iodone = ic->ic_prev_iodone;
 	bp->b_iodone_chain = ic->ic_prev_iodone_chain;
-	if (!(bp->b_flags & B_READ)
+	if (!(bp->b_iocmd == BIO_READ)
 	    || (!(bp->b_flags & B_ERROR) && bp->b_error == 0)) {
 		msg = fixlabel((char *)NULL, ic->ic_args[1].ia_ptr,
 			       (struct disklabel *)
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index 9ef54a7..10b1eab 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -996,20 +996,20 @@ aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
 	bp->b_bcount = cb->aio_nbytes;
 	bp->b_bufsize = cb->aio_nbytes;
 
-	bp->b_flags = B_PHYS | B_CALL;
+	bp->b_flags = B_PHYS;
 	bp->b_iodone = aio_physwakeup;
 	bp->b_saveaddr = bp->b_data;
 	bp->b_data = (void *)cb->aio_buf;
 	bp->b_blkno = btodb(cb->aio_offset);
 
 	if (cb->aio_lio_opcode == LIO_WRITE) {
-		bp->b_flags |= B_WRITE;
+		bp->b_iocmd = BIO_WRITE;
 		if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_READ)) {
 			error = EFAULT;
 			goto doerror;
 		}
 	} else {
-		bp->b_flags |= B_READ;
+		bp->b_iocmd = BIO_READ;
 		if (!useracc(bp->b_data, bp->b_bufsize, VM_PROT_WRITE)) {
 			error = EFAULT;
 			goto doerror;
@@ -2132,7 +2132,6 @@ aio_physwakeup(struct buf *bp)
 
 	s = splbio();
 	wakeup((caddr_t)bp);
-	bp->b_flags &= ~B_CALL;
 	bp->b_flags |= B_DONE;
 
 	aiocbe = (struct aiocblist *)bp->b_spc;
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index a2e8d26..da67cf2 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -501,7 +501,7 @@ bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
 		if (curproc != NULL)
 			curproc->p_stats->p_ru.ru_inblock++;
 		KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
-		bp->b_flags |= B_READ;
+		bp->b_iocmd = BIO_READ;
 		bp->b_flags &= ~(B_ERROR | B_INVAL);
 		if (bp->b_rcred == NOCRED) {
 			if (cred != NOCRED)
@@ -536,7 +536,7 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
 	if ((bp->b_flags & B_CACHE) == 0) {
 		if (curproc != NULL)
 			curproc->p_stats->p_ru.ru_inblock++;
-		bp->b_flags |= B_READ;
+		bp->b_iocmd = BIO_READ;
 		bp->b_flags &= ~(B_ERROR | B_INVAL);
 		if (bp->b_rcred == NOCRED) {
 			if (cred != NOCRED)
@@ -556,8 +556,9 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
 		if ((rabp->b_flags & B_CACHE) == 0) {
 			if (curproc != NULL)
 				curproc->p_stats->p_ru.ru_inblock++;
-			rabp->b_flags |= B_READ | B_ASYNC;
+			rabp->b_flags |= B_ASYNC;
 			rabp->b_flags &= ~(B_ERROR | B_INVAL);
+			rabp->b_iocmd = BIO_READ;
 			if (rabp->b_rcred == NOCRED) {
 				if (cred != NOCRED)
 					crhold(cred);
@@ -630,8 +631,10 @@ bwrite(struct buf * bp)
 	 * copy so as to leave this buffer ready for further use.
 	 */
 	if ((bp->b_xflags & BX_BKGRDWRITE) && (bp->b_flags & B_ASYNC)) {
-		if (bp->b_flags & B_CALL)
+		if (bp->b_iodone != NULL) {
+			printf("bp->b_iodone = %p\n", bp->b_iodone);
 			panic("bwrite: need chained iodone");
+		}
 
 		/* get a new block */
 		newbp = geteblk(bp->b_bufsize);
@@ -643,7 +646,7 @@ bwrite(struct buf * bp)
 		newbp->b_blkno = bp->b_blkno;
 		newbp->b_offset = bp->b_offset;
 		newbp->b_iodone = vfs_backgroundwritedone;
-		newbp->b_flags |= B_ASYNC | B_CALL;
+		newbp->b_flags |= B_ASYNC;
 		newbp->b_flags &= ~B_INVAL;
 
 		/* move over the dependencies */
@@ -664,8 +667,9 @@ bwrite(struct buf * bp)
 		bp = newbp;
 	}
 
-	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
+	bp->b_flags &= ~(B_DONE | B_ERROR);
 	bp->b_flags |= B_WRITEINPROG | B_CACHE;
+	bp->b_iocmd = BIO_WRITE;
 	bp->b_vp->v_numoutput++;
 	vfs_busy_pages(bp, 1);
 
@@ -726,11 +730,12 @@ vfs_backgroundwritedone(bp)
 	}
 	/*
 	 * This buffer is marked B_NOCACHE, so when it is released
-	 * by biodone, it will be tossed. We mark it with B_READ
+	 * by biodone, it will be tossed. We mark it with BIO_READ
 	 * to avoid biodone doing a second vwakeup.
 	 */
-	bp->b_flags |= B_NOCACHE | B_READ;
-	bp->b_flags &= ~(B_CACHE | B_CALL | B_DONE);
+	bp->b_flags |= B_NOCACHE;
+	bp->b_iocmd = BIO_READ;
+	bp->b_flags &= ~(B_CACHE | B_DONE);
 	bp->b_iodone = 0;
 	biodone(bp);
 }
@@ -806,7 +811,7 @@ bdwrite(struct buf * bp)
 /*
  *	bdirty:
  *
- *	Turn buffer into delayed write request.  We must clear B_READ and
+ *	Turn buffer into delayed write request.  We must clear BIO_READ and
  *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
  *	itself to properly update it in the dirty/clean lists.  We mark it
  *	B_DONE to ensure that any asynchronization of the buffer properly
@@ -827,7 +832,8 @@ bdirty(bp)
 	struct buf *bp;
 {
 	KASSERT(bp->b_qindex == QUEUE_NONE, ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
-	bp->b_flags &= ~(B_READ|B_RELBUF);
+	bp->b_flags &= ~(B_RELBUF);
+	bp->b_iocmd = BIO_WRITE;
 
 	if ((bp->b_flags & B_DELWRI) == 0) {
 		bp->b_flags |= B_DONE | B_DELWRI;
@@ -946,7 +952,8 @@ brelse(struct buf * bp)
 	if (bp->b_flags & B_LOCKED)
 		bp->b_flags &= ~B_ERROR;
 
-	if ((bp->b_flags & (B_READ | B_ERROR | B_INVAL)) == B_ERROR) {
+	if (bp->b_iocmd == BIO_WRITE &&
+	    (bp->b_flags & (B_ERROR | B_INVAL)) == B_ERROR) {
 		/*
 		 * Failed write, redirty.  Must clear B_ERROR to prevent
 		 * pages from being scrapped.  If B_INVAL is set then
@@ -956,8 +963,8 @@ brelse(struct buf * bp)
 		 */
 		bp->b_flags &= ~B_ERROR;
 		bdirty(bp);
-	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
-	    (bp->b_bufsize <= 0)) {
+	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
+	    bp->b_iocmd == BIO_DELETE || (bp->b_bufsize <= 0)) {
 		/*
 		 * Either a failed I/O or we were asked to free or not
 		 * cache the buffer.
@@ -969,7 +976,7 @@ brelse(struct buf * bp)
 			--numdirtybuffers;
 			numdirtywakeup();
 		}
-		bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
+		bp->b_flags &= ~(B_DELWRI | B_CACHE);
 		if ((bp->b_flags & B_VMIO) == 0) {
 			if (bp->b_bufsize)
 				allocbuf(bp, 0);
@@ -2632,7 +2639,7 @@ biowait(register struct buf * bp)
 #if defined(NO_SCHEDULE_MODS)
 		tsleep(bp, PRIBIO, "biowait", 0);
 #else
-		if (bp->b_flags & B_READ)
+		if (bp->b_iocmd == BIO_READ)
 			tsleep(bp, PRIBIO, "biord", 0);
 		else
 			tsleep(bp, PRIBIO, "biowr", 0);
@@ -2673,6 +2680,7 @@ void
 biodone(register struct buf * bp)
 {
 	int s;
+	void (*b_iodone) __P((struct buf *));
 
 	s = splbio();
 
@@ -2681,20 +2689,21 @@ biodone(register struct buf * bp)
 	bp->b_flags |= B_DONE;
 
-	if (bp->b_flags & B_FREEBUF) {
+	if (bp->b_iocmd == BIO_DELETE) {
 		brelse(bp);
 		splx(s);
 		return;
 	}
 
-	if ((bp->b_flags & B_READ) == 0) {
+	if (bp->b_iocmd == BIO_WRITE) {
 		vwakeup(bp);
 	}
 
 	/* call optional completion function if requested */
-	if (bp->b_flags & B_CALL) {
-		bp->b_flags &= ~B_CALL;
-		(*bp->b_iodone) (bp);
+	if (bp->b_iodone != NULL) {
+		b_iodone = bp->b_iodone;
+		bp->b_iodone = NULL;
+		(*b_iodone) (bp);
 		splx(s);
 		return;
 	}
 
@@ -2745,7 +2754,8 @@ biodone(register struct buf * bp)
 	 * routines.
 	 */
 	iosize = bp->b_bcount - bp->b_resid;
-	if ((bp->b_flags & (B_READ|B_FREEBUF|B_INVAL|B_NOCACHE|B_ERROR)) == B_READ) {
+	if (bp->b_iocmd == BIO_READ &&
+	    !(bp->b_flags & (B_INVAL|B_NOCACHE|B_ERROR))) {
 		bp->b_flags |= B_CACHE;
 	}
 
@@ -2782,7 +2792,7 @@ biodone(register struct buf * bp)
 				 * already changed correctly ( see bdwrite() ), so we
 				 * only need to do this here in the read case.
 				 */
-				if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
+				if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
 					vfs_page_set_valid(bp, foff, i, m);
 				}
 				vm_page_flag_clear(m, PG_ZERO);
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 7a8ce7c..ac14bc4 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -200,7 +200,8 @@ single_block_read:
 		 * if it isn't in the cache, then get a chunk from
 		 * disk if sequential, otherwise just get the block.
 		 */
-		bp->b_flags |= B_READ | B_RAM;
+		bp->b_flags |= B_RAM;
+		bp->b_iocmd = BIO_READ;
 		lblkno += 1;
 	}
 }
@@ -228,7 +229,8 @@ single_block_read:
 			    blkno, size, ntoread, NULL);
 		} else {
 			rbp = getblk(vp, lblkno, size, 0, 0);
-			rbp->b_flags |= B_READ | B_ASYNC | B_RAM;
+			rbp->b_flags |= B_ASYNC | B_RAM;
+			rbp->b_iocmd = BIO_READ;
 			rbp->b_blkno = blkno;
 		}
 	}
@@ -246,7 +248,7 @@ single_block_read:
 		if ((bp->b_flags & B_CLUSTER) == 0)
 			vfs_busy_pages(bp, 0);
 		bp->b_flags &= ~(B_ERROR|B_INVAL);
-		if (bp->b_flags & (B_ASYNC|B_CALL))
+		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
 			BUF_KERNPROC(bp);
 		error = VOP_STRATEGY(vp, bp);
 		curproc->p_stats->p_ru.ru_inblock++;
@@ -257,10 +259,10 @@ single_block_read:
 	 */
 	if (rbp) {
 		if (error) {
-			rbp->b_flags &= ~(B_ASYNC | B_READ);
+			rbp->b_flags &= ~B_ASYNC;
 			brelse(rbp);
 		} else if (rbp->b_flags & B_CACHE) {
-			rbp->b_flags &= ~(B_ASYNC | B_READ);
+			rbp->b_flags &= ~B_ASYNC;
 			bqrelse(rbp);
 		} else {
 #if defined(CLUSTERDEBUG)
@@ -281,7 +283,7 @@ single_block_read:
 			if ((rbp->b_flags & B_CLUSTER) == 0)
 				vfs_busy_pages(rbp, 0);
 			rbp->b_flags &= ~(B_ERROR|B_INVAL);
-			if (rbp->b_flags & (B_ASYNC|B_CALL))
+			if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
 				BUF_KERNPROC(rbp);
 			(void) VOP_STRATEGY(vp, rbp);
 			curproc->p_stats->p_ru.ru_inblock++;
@@ -325,12 +327,13 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
 
 	if (fbp) {
 		tbp = fbp;
-		tbp->b_flags |= B_READ;
+		tbp->b_iocmd = BIO_READ;
 	} else {
 		tbp = getblk(vp, lbn, size, 0, 0);
 		if (tbp->b_flags & B_CACHE)
 			return tbp;
-		tbp->b_flags |= B_ASYNC | B_READ | B_RAM;
+		tbp->b_flags |= B_ASYNC | B_RAM;
+		tbp->b_iocmd = BIO_READ;
 	}
 
 	tbp->b_blkno = blkno;
@@ -344,7 +347,8 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
 	bp->b_data = (char *)((vm_offset_t)bp->b_data |
 	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
-	bp->b_flags = B_ASYNC | B_READ | B_CALL | B_CLUSTER | B_VMIO;
+	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
+	bp->b_iocmd = BIO_READ;
 	bp->b_iodone = cluster_callback;
 	bp->b_blkno = blkno;
 	bp->b_lblkno = lbn;
 
@@ -400,7 +404,8 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
 			if ((fbp && (i == 1)) || (i == (run - 1)))
 				tbp->b_flags |= B_RAM;
 
-			tbp->b_flags |= B_READ | B_ASYNC;
+			tbp->b_flags |= B_ASYNC;
+			tbp->b_iocmd = BIO_READ;
 			if (tbp->b_blkno == tbp->b_lblkno) {
 				tbp->b_blkno = bn;
 			} else if (tbp->b_blkno != bn) {
@@ -716,7 +721,7 @@ cluster_wbuild(vp, size, start_lbn, len)
 		bp->b_offset = tbp->b_offset;
 		bp->b_data = (char *)((vm_offset_t)bp->b_data |
 		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
-		bp->b_flags |= B_CALL | B_CLUSTER |
+		bp->b_flags |= B_CLUSTER |
 		    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
 		bp->b_iodone = cluster_callback;
 		pbgetvp(vp, bp);
@@ -811,8 +816,9 @@ cluster_wbuild(vp, size, start_lbn, len)
 
 			s = splbio();
 			bundirty(tbp);
-			tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
+			tbp->b_flags &= ~(B_DONE | B_ERROR);
 			tbp->b_flags |= B_ASYNC;
+			tbp->b_iocmd = BIO_WRITE;
 			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
 			++tbp->b_vp->v_numoutput;
 			splx(s);
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index bd7e9ca..35a3d09 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -153,10 +153,10 @@ vop_panic(struct vop_generic_args *ap)
  * Strategy routine for VFS devices that have none.
  *
  * B_ERROR and B_INVAL must be cleared prior to calling any strategy
- * routine.  Typically this is done for a B_READ strategy call.  Typically
- * B_INVAL is assumed to already be clear prior to a write and should not
- * be cleared manually unless you just made the buffer invalid.  B_ERROR
- * should be cleared either way.
+ * routine.  Typically this is done for a BIO_READ strategy call.
+ * Typically B_INVAL is assumed to already be clear prior to a write
+ * and should not be cleared manually unless you just made the buffer
+ * invalid.  B_ERROR should be cleared either way.
  */
 
 static int
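With B_CALL gone, completion dispatch keys off b_iodone itself: biodone() saves the pointer, clears it, and then calls it, and a chained handler such as dsiodone() restores the previous b_iodone from b_iodone_chain on its own. A condensed sketch of that dispatch as it appears in the vfs_bio.c biodone() hunk above (splbio/splx bracketing kept, everything else omitted):

	void (*b_iodone) __P((struct buf *));

	if (bp->b_iodone != NULL) {
		b_iodone = bp->b_iodone;
		bp->b_iodone = NULL;	/* one-shot; a chained handler re-arms it itself */
		(*b_iodone) (bp);
		splx(s);
		return;
	}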