Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/kern_exec.c     |   5
-rw-r--r-- | sys/kern/vfs_bio.c       | 179
-rw-r--r-- | sys/kern/vfs_cluster.c   |  18
-rw-r--r-- | sys/kern/vfs_default.c   |   4
-rw-r--r-- | sys/kern/vfs_export.c    |   6
-rw-r--r-- | sys/kern/vfs_extattr.c   |   8
-rw-r--r-- | sys/kern/vfs_subr.c      |   6
-rw-r--r-- | sys/kern/vfs_syscalls.c  |   8
8 files changed, 95 insertions, 139 deletions
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index fcfe4e2..64fea59 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -23,7 +23,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: kern_exec.c,v 1.78 1998/02/25 13:08:06 bde Exp $
+ * $Id: kern_exec.c,v 1.79 1998/03/02 05:47:55 peter Exp $
  */
 
 #include <sys/param.h>
@@ -389,8 +389,7 @@ exec_map_first_page(imgp)
 
 	if ((rv != VM_PAGER_OK) || (ma[0] == NULL)) {
 		vm_page_protect(ma[0], VM_PROT_NONE);
-		vm_page_deactivate(ma[0]);
-		PAGE_WAKEUP(ma[0]);
+		vm_page_free(ma[0]);
 		splx(s);
 		return EIO;
 	}
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index d6d02b6d..4c09e1d 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -11,7 +11,7 @@
  * 2. Absolutely no warranty of function or purpose is made by the author
  *		John S. Dyson.
  *
- * $Id: vfs_bio.c,v 1.152 1998/03/01 04:18:42 dyson Exp $
+ * $Id: vfs_bio.c,v 1.153 1998/03/04 03:17:30 dyson Exp $
  */
 
 /*
@@ -138,7 +138,7 @@ SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
 	&kvafreespace, 0, "");
 
 static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
-static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];
+struct bqueues bufqueues[BUFFER_QUEUES] = {0};
 
 extern int vm_swap_size;
@@ -520,7 +520,7 @@ brelse(struct buf * bp)
 		relpbuf(bp);
 		return;
 	}
-	/* anyone need a "free" block? */
+
 	s = splbio();
 
 	/* anyone need this block? */
@@ -538,10 +538,11 @@ brelse(struct buf * bp)
 		if (bp->b_flags & B_DELWRI)
 			--numdirtybuffers;
 		bp->b_flags &= ~(B_DELWRI | B_CACHE);
-		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
+		if ((bp->b_flags & B_VMIO) == 0) {
 			if (bp->b_bufsize)
 				allocbuf(bp, 0);
-			brelvp(bp);
+			if (bp->b_vp)
+				brelvp(bp);
 		}
 	}
@@ -571,78 +572,72 @@ brelse(struct buf * bp)
 	    && bp->b_validend == bp->b_bufsize))
 #endif
 	    ) {
-		vm_ooffset_t foff;
-		vm_object_t obj;
-		int i, resid;
+
+		int i, j, resid;
 		vm_page_t m;
+		off_t foff;
+		vm_pindex_t poff;
+		vm_object_t obj;
 		struct vnode *vp;
-		int iototal = bp->b_bufsize;
+		int blksize;
 
 		vp = bp->b_vp;
-#if !defined(MAX_PERF)
-		if (!vp)
-			panic("brelse: missing vp");
-#endif
+		if (vp->v_type == VBLK)
+			blksize = DEV_BSIZE;
+		else
+			blksize = vp->v_mount->mnt_stat.f_iosize;
 
-		if (bp->b_npages) {
-			vm_pindex_t poff;
-			obj = (vm_object_t) vp->v_object;
-			if (vp->v_type == VBLK)
-				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
-			else
-				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
-			poff = OFF_TO_IDX(foff);
-			for (i = 0; i < bp->b_npages; i++) {
-				m = bp->b_pages[i];
-				if (m == bogus_page) {
-					m = vm_page_lookup(obj, poff + i);
+		resid = bp->b_bufsize;
+		foff = -1LL;
+
+		for (i = 0; i < bp->b_npages; i++) {
+			m = bp->b_pages[i];
+			if (m == bogus_page) {
+
+				obj = (vm_object_t) vp->v_object;
+
+				foff = (off_t) bp->b_lblkno * blksize;
+				poff = OFF_TO_IDX(foff);
+
+				for (j = i; j < bp->b_npages; j++) {
+					m = bp->b_pages[j];
+					if (m == bogus_page) {
+						m = vm_page_lookup(obj, poff + j);
 #if !defined(MAX_PERF)
-					if (!m) {
-						panic("brelse: page missing\n");
-					}
-#endif
-					bp->b_pages[i] = m;
-					pmap_qenter(trunc_page(bp->b_data),
-						bp->b_pages, bp->b_npages);
-				}
-				resid = IDX_TO_OFF(m->pindex+1) - foff;
-				if (resid > iototal)
-					resid = iototal;
-				if (resid > 0) {
-					/*
-					 * Don't invalidate the page if the local machine has already
-					 * modified it.  This is the lesser of two evils, and should
-					 * be fixed.
-					 */
-					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
-						vm_page_test_dirty(m);
-						if (m->dirty == 0) {
-							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
-							if (m->valid == 0)
-								vm_page_protect(m, VM_PROT_NONE);
-						}
-					}
-					if (resid >= PAGE_SIZE) {
-						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
-							bp->b_flags |= B_INVAL;
-						}
-					} else {
-						if (!vm_page_is_valid(m,
-							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
-							bp->b_flags |= B_INVAL;
+						if (!m) {
+							panic("brelse: page missing\n");
 						}
+#endif
+						bp->b_pages[j] = m;
 					}
 				}
-				foff += resid;
-				iototal -= resid;
+
+				if ((bp->b_flags & B_INVAL) == 0) {
+					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
+				}
+				break;
+			}
+			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
+				if ((blksize & PAGE_MASK) == 0) {
+					vm_page_set_invalid(m, 0, resid);
+				} else {
+					if (foff == -1LL)
+						foff = (off_t) bp->b_lblkno * blksize;
+					vm_page_set_invalid(m, (vm_offset_t) foff, resid);
+				}
 			}
+			resid -= PAGE_SIZE;
 		}
+
 		if (bp->b_flags & (B_INVAL | B_RELBUF))
 			vfs_vmio_release(bp);
+
 	} else if (bp->b_flags & B_VMIO) {
+
 		if (bp->b_flags & (B_INVAL | B_RELBUF))
 			vfs_vmio_release(bp);
+
 	}
 
 #if !defined(MAX_PERF)
@@ -755,6 +750,7 @@ vfs_vmio_release(bp)
 		m = bp->b_pages[i];
 		bp->b_pages[i] = NULL;
 		vm_page_unwire(m);
+
 		/*
 		 * We don't mess with busy pages, it is
 		 * the responsibility of the process that
@@ -765,11 +761,6 @@ vfs_vmio_release(bp)
 
 		if (m->wire_count == 0) {
 
-			if (m->flags & PG_WANTED) {
-				m->flags &= ~PG_WANTED;
-				wakeup(m);
-			}
-
 			/*
 			 * If this is an async free -- we cannot place
 			 * pages onto the cache queue.  If it is an
@@ -895,33 +886,6 @@ vfs_bio_awrite(struct buf * bp)
 			return nwritten;
 		}
 	}
-#if 0
-	else if ((vp->v_flag & VOBJBUF) && (vp->v_type == VBLK) &&
-		((size = bp->b_bufsize) >= PAGE_SIZE)) {
-		maxcl = MAXPHYS / size;
-		for (i = 1; i < maxcl; i++) {
-			if ((bpa = gbincore(vp, lblkno + i)) &&
-			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
-			    (B_DELWRI | B_CLUSTEROK)) &&
-			    (bpa->b_bufsize == size)) {
-				if (bpa->b_blkno !=
-				    bp->b_blkno + ((i * size) >> DEV_BSHIFT))
-					break;
-			} else {
-				break;
-			}
-		}
-		ncl = i;
-		/*
-		 * this is a possible cluster write
-		 */
-		if (ncl != 1) {
-			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
-			splx(s);
-			return nwritten;
-		}
-	}
-#endif
 
 	bremfree(bp);
 	splx(s);
@@ -1362,7 +1326,7 @@ struct buf *
 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
 {
 	struct buf *bp;
-	int s;
+	int i, s;
 	struct bufhashhdr *bh;
 	int maxsize;
 	int generation;
@@ -1474,9 +1438,10 @@ loop1:
 		} else {
 			bp->b_flags &= ~B_VMIO;
 		}
-		splx(s);
 
 		allocbuf(bp, size);
+
+		splx(s);
 #ifdef PC98
 		/*
 		 * 1024byte/sector support
@@ -1660,6 +1625,8 @@ allocbuf(struct buf * bp, int size)
 	int pageindex, curbpnpages;
 	struct vnode *vp;
 	int bsize;
+	int orig_validoff = bp->b_validoff;
+	int orig_validend = bp->b_validend;
 
 	vp = bp->b_vp;
@@ -1676,8 +1643,9 @@ allocbuf(struct buf * bp, int size)
 		off = (vm_ooffset_t) bp->b_lblkno * bsize;
 		curbpnpages = bp->b_npages;
 doretry:
+		bp->b_validoff = orig_validoff;
+		bp->b_validend = orig_validend;
 		bp->b_flags |= B_CACHE;
-		bp->b_validoff = bp->b_validend = 0;
 		for (toff = 0; toff < newbsize; toff += tinc) {
 			int bytesinpage;
@@ -1705,14 +1673,11 @@ allocbuf(struct buf * bp, int size)
 					vm_pageout_deficit += (desiredpages - bp->b_npages);
 					goto doretry;
 				}
-				/*
-				 * Normally it is unwise to clear PG_BUSY without
-				 * PAGE_WAKEUP -- but it is okay here, as there is
-				 * no chance for blocking between here and vm_page_alloc
-				 */
-				m->flags &= ~PG_BUSY;
+				vm_page_wire(m);
+				m->flags &= ~PG_BUSY;
 				bp->b_flags &= ~B_CACHE;
+
 			} else if (m->flags & PG_BUSY) {
 				s = splvm();
 				if (m->flags & PG_BUSY) {
@@ -1935,14 +1900,13 @@ biodone(register struct buf * bp)
 #endif
 					panic("biodone: page busy < 0\n");
 				}
-				m->flags |= PG_BUSY;
-				--m->busy;
-				PAGE_WAKEUP(m);
+				PAGE_BWAKEUP(m);
 				--obj->paging_in_progress;
 				foff += resid;
 				iosize -= resid;
 			}
-			if (obj && obj->paging_in_progress == 0 &&
+			if (obj &&
+			    (obj->paging_in_progress == 0) &&
 			    (obj->flags & OBJ_PIPWNT)) {
 				obj->flags &= ~OBJ_PIPWNT;
 				wakeup(obj);
@@ -2038,9 +2002,7 @@ vfs_unbusy_pages(struct buf * bp)
 				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
 			}
 			--obj->paging_in_progress;
-			m->flags |= PG_BUSY;
-			--m->busy;
-			PAGE_WAKEUP(m);
+			PAGE_BWAKEUP(m);
 		}
 		if (obj->paging_in_progress == 0 &&
 		    (obj->flags & OBJ_PIPWNT)) {
@@ -2271,6 +2233,7 @@ tryagain:
 			goto tryagain;
 		}
 		vm_page_wire(p);
+		p->valid = VM_PAGE_BITS_ALL;
 		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
 		bp->b_pages[index] = p;
 		PAGE_WAKEUP(p);
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 220f760..7f477bf 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -33,7 +33,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
- * $Id: vfs_cluster.c,v 1.54 1998/02/04 22:32:39 eivind Exp $
+ * $Id: vfs_cluster.c,v 1.55 1998/02/06 12:13:30 eivind Exp $
  */
 
 #include "opt_debug_cluster.h"
@@ -150,17 +150,12 @@ cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
 				(i == (maxra - 1)))
 				tbp->b_flags |= B_RAM;
 
-#if 0
-			if (tbp->b_usecount == 0) {
-				/*
-				 * Make sure that the soon-to-be used readaheads
-				 * are still there. The getblk/bqrelse pair will
-				 * boost the priority of the buffer.
-				 */
-				tbp = getblk(vp, lblkno+i, size, 0, 0);
-				bqrelse(tbp);
+			if ((tbp->b_usecount < 5) &&
+				((tbp->b_flags & B_BUSY) == 0) &&
+				(tbp->b_qindex == QUEUE_LRU)) {
+				TAILQ_REMOVE(&bufqueues[QUEUE_LRU], tbp, b_freelist);
+				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], tbp, b_freelist);
 			}
-#endif
 		}
 		splx(s);
 		if (i >= maxra) {
@@ -215,7 +210,6 @@ single_block_read:
 	 * if we have been doing sequential I/O, then do some read-ahead
 	 */
 	rbp = NULL;
-	/* if (seqcount && (lblkno < (origblkno + maxra))) { */
 	if (seqcount && (lblkno < (origblkno + seqcount))) {
 		/*
 		 * we now build the read-ahead buffer if it is desirable.
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 1aae395..33a2450 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -342,7 +342,7 @@ vop_sharedlock(ap)
 			return (0);
 		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
 		    M_VNODE, M_WAITOK);
-		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
+		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
 	}
 	switch (flags & LK_TYPE_MASK) {
 	case LK_DRAIN:
@@ -410,7 +410,7 @@ vop_nolock(ap)
 			return (0);
 		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
 		    M_VNODE, M_WAITOK);
-		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
+		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
 	}
 	switch (flags & LK_TYPE_MASK) {
 	case LK_DRAIN:
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index 052d122..5d27cf5 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -36,7 +36,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.135 1998/03/01 04:18:44 dyson Exp $
+ * $Id: vfs_subr.c,v 1.136 1998/03/01 23:07:45 dyson Exp $
  */
 
 /*
@@ -181,7 +181,7 @@ vfs_busy(mp, flags, interlkp, p)
 		}
 		return (ENOENT);
 	}
-	lkflags = LK_SHARED;
+	lkflags = LK_SHARED | LK_NOPAUSE;
 	if (interlkp)
 		lkflags |= LK_INTERLOCK;
 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
@@ -224,7 +224,7 @@ vfs_rootmountalloc(fstypename, devname, mpp)
 		return (ENODEV);
 	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
 	bzero((char *)mp, (u_long)sizeof(struct mount));
-	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
+	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
 	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
 	LIST_INIT(&mp->mnt_vnodelist);
 	mp->mnt_vfc = vfsp;
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index b8fba2c..596de95 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -36,7 +36,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)vfs_syscalls.c	8.13 (Berkeley) 4/15/94
- * $Id: vfs_syscalls.c,v 1.92 1998/02/08 01:41:33 dyson Exp $
+ * $Id: vfs_syscalls.c,v 1.93 1998/02/15 04:17:09 dyson Exp $
  */
 
 /* For 4.3 integer FS ID compatibility */
@@ -244,7 +244,7 @@ mount(p, uap)
 	mp = (struct mount *)malloc((u_long)sizeof(struct mount),
 		M_MOUNT, M_WAITOK);
 	bzero((char *)mp, (u_long)sizeof(struct mount));
-	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
+	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
 	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
 	mp->mnt_op = vfsp->vfc_vfsops;
 	mp->mnt_vfc = vfsp;
@@ -2158,9 +2158,9 @@ fsync(p, uap)
 	if (error = getvnode(p->p_fd, SCARG(uap, fd), &fp))
 		return (error);
 	vp = (struct vnode *)fp->f_data;
-	if ((error = vn_lock(vp, LK_EXCLUSIVE|LK_RETRY, p)) == NULL) {
+	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p)) == NULL) {
 		if (vp->v_object) {
-			vm_object_page_clean(vp->v_object, 0, 0 ,0);
+			vm_object_page_clean(vp->v_object, 0, 0, FALSE);
 		}
 		error = VOP_FSYNC(vp, fp->f_cred,
 			(vp->v_mount && (vp->v_mount->mnt_flag & MNT_ASYNC)) ?
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 052d122..5d27cf5 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -36,7 +36,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.135 1998/03/01 04:18:44 dyson Exp $
+ * $Id: vfs_subr.c,v 1.136 1998/03/01 23:07:45 dyson Exp $
  */
 
 /*
@@ -181,7 +181,7 @@ vfs_busy(mp, flags, interlkp, p)
 		}
 		return (ENOENT);
 	}
-	lkflags = LK_SHARED;
+	lkflags = LK_SHARED | LK_NOPAUSE;
 	if (interlkp)
 		lkflags |= LK_INTERLOCK;
 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
@@ -224,7 +224,7 @@ vfs_rootmountalloc(fstypename, devname, mpp)
 		return (ENODEV);
 	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
 	bzero((char *)mp, (u_long)sizeof(struct mount));
-	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
+	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
 	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
 	LIST_INIT(&mp->mnt_vnodelist);
 	mp->mnt_vfc = vfsp;
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index b8fba2c..596de95 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -36,7 +36,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)vfs_syscalls.c	8.13 (Berkeley) 4/15/94
- * $Id: vfs_syscalls.c,v 1.92 1998/02/08 01:41:33 dyson Exp $
+ * $Id: vfs_syscalls.c,v 1.93 1998/02/15 04:17:09 dyson Exp $
  */
 
 /* For 4.3 integer FS ID compatibility */
@@ -244,7 +244,7 @@ mount(p, uap)
 	mp = (struct mount *)malloc((u_long)sizeof(struct mount),
 		M_MOUNT, M_WAITOK);
 	bzero((char *)mp, (u_long)sizeof(struct mount));
-	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
+	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
 	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
 	mp->mnt_op = vfsp->vfc_vfsops;
 	mp->mnt_vfc = vfsp;
@@ -2158,9 +2158,9 @@ fsync(p, uap)
 	if (error = getvnode(p->p_fd, SCARG(uap, fd), &fp))
 		return (error);
 	vp = (struct vnode *)fp->f_data;
-	if ((error = vn_lock(vp, LK_EXCLUSIVE|LK_RETRY, p)) == NULL) {
+	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p)) == NULL) {
 		if (vp->v_object) {
-			vm_object_page_clean(vp->v_object, 0, 0 ,0);
+			vm_object_page_clean(vp->v_object, 0, 0, FALSE);
 		}
 		error = VOP_FSYNC(vp, fp->f_cred,
 			(vp->v_mount && (vp->v_mount->mnt_flag & MNT_ASYNC)) ?
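
The lockinit() change repeated across vfs_default.c, vfs_export.c, vfs_extattr.c, vfs_subr.c, and vfs_syscalls.c above is mechanical: every call that passed a literal 0 as the final (flags) argument now passes LK_NOPAUSE, and vfs_busy() likewise ORs LK_NOPAUSE into its lockmgr() flags. A minimal sketch of the resulting initialization pattern follows; the wrapper function and its name are hypothetical, while the lockinit() call itself is taken verbatim from the hunks above:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mount.h>

	/*
	 * Hypothetical helper illustrating the pattern this commit applies.
	 * The fifth lockinit() argument is the lock flags word; the commit
	 * passes LK_NOPAUSE there where a literal 0 was passed before.
	 */
	static void
	example_mnt_lock_init(struct mount *mp)
	{
		lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
	}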