author      dillon <dillon@FreeBSD.org>    1999-01-21 08:29:12 +0000
committer   dillon <dillon@FreeBSD.org>    1999-01-21 08:29:12 +0000
commit      df24433bbe29112b4b9c9f38e80ba6cfb6988cb0 (patch)
tree        b0a91cf38166034e837b98d5edacd8177a14aba6 /sys/kern
parent      bae5debf723220e076d6a9696e417805639cdc3a (diff)
This is a rather large commit that encompasses the new swapper, changes to
the VM system to support the new swapper, VM bug fixes, several VM
optimizations, and some additional revamping of the VM code.  The specific
bug fixes will be documented with additional forced commits.  This commit
is somewhat rough with regard to code cleanup issues.

Reviewed by:    "John S. Dyson" <root@dyson.iquest.net>,
                "David Greenman" <dg@root.com>
Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/kern_malloc.c     36
-rw-r--r--   sys/kern/kern_physio.c      8
-rw-r--r--   sys/kern/subr_rlist.c      11
-rw-r--r--   sys/kern/sysv_shm.c         3
-rw-r--r--   sys/kern/uipc_syscalls.c   29
-rw-r--r--   sys/kern/uipc_usrreq.c      9
-rw-r--r--   sys/kern/vfs_aio.c         10
-rw-r--r--   sys/kern/vfs_bio.c        119
-rw-r--r--   sys/kern/vfs_cluster.c     10
-rw-r--r--   sys/kern/vfs_export.c      46
-rw-r--r--   sys/kern/vfs_subr.c        46
11 files changed, 241 insertions, 86 deletions
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index a9776a5..be9f9d3 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
- * $Id: kern_malloc.c,v 1.50 1999/01/08 17:31:09 eivind Exp $
+ * $Id: kern_malloc.c,v 1.51 1999/01/10 01:58:24 eivind Exp $
*/
#include "opt_vm.h"
@@ -101,7 +101,16 @@ struct freelist {
#endif /* INVARIANTS */
/*
- * Allocate a block of memory
+ * malloc:
+ *
+ * Allocate a block of memory.
+ *
+ * If M_NOWAIT is set, this routine will not block and return NULL if
+ * the allocation fails.
+ *
+ * If M_ASLEEP is set (M_NOWAIT must also be set), this routine
+ * will have the side effect of calling asleep() if it returns NULL,
+ * allowing the parent to await() at some future time.
*/
void *
malloc(size, type, flags)
@@ -122,13 +131,26 @@ malloc(size, type, flags)
#endif
register struct malloc_type *ksp = type;
- if (!type->ks_next)
+ /*
+ * Must be at splmem() prior to initializing segment to handle
+ * potential initialization race.
+ */
+
+ s = splmem();
+
+ if (!type->ks_next) {
malloc_init(type);
+ }
indx = BUCKETINDX(size);
kbp = &bucket[indx];
- s = splmem();
+
while (ksp->ks_memuse >= ksp->ks_limit) {
+ if (flags & M_ASLEEP) {
+ if (ksp->ks_limblocks < 65535)
+ ksp->ks_limblocks++;
+ asleep((caddr_t)ksp, PSWP+2, type->ks_shortdesc, 0);
+ }
if (flags & M_NOWAIT) {
splx(s);
return ((void *) NULL);
@@ -239,7 +261,11 @@ out:
}
/*
- * Free a block of memory allocated by malloc.
+ * free:
+ *
+ * Free a block of memory allocated by malloc.
+ *
+ * This routine may not block.
*/
void
free(addr, type)
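The M_ASLEEP comment above implies a two-step pattern on the caller's side:
a failed M_NOWAIT allocation registers the sleep, and the caller blocks
later via await().  A minimal caller sketch, assuming M_TEMP as the malloc
type and await() arguments mirroring the asleep() call in the hunk above
(both are illustrative assumptions, not code from this commit):

    static void *
    alloc_with_backoff(unsigned long size)
    {
            void *p;

            /*
             * M_ASLEEP requires M_NOWAIT.  When the allocation fails,
             * malloc() has already called asleep() against the malloc
             * type, so await() here blocks until the limit is lifted,
             * after which we retry the allocation.
             */
            while ((p = malloc(size, M_TEMP, M_NOWAIT | M_ASLEEP)) == NULL)
                    await(PSWP + 2, 0);
            return (p);
    }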
diff --git a/sys/kern/kern_physio.c b/sys/kern/kern_physio.c
index 441d95f..ad63a98 100644
--- a/sys/kern/kern_physio.c
+++ b/sys/kern/kern_physio.c
@@ -16,7 +16,7 @@
* 4. Modifications may be freely made to this file if the above conditions
* are met.
*
- * $Id: kern_physio.c,v 1.28 1998/08/19 10:50:32 sos Exp $
+ * $Id: kern_physio.c,v 1.29 1998/10/25 17:44:51 phk Exp $
*/
#include <sys/param.h>
@@ -147,7 +147,7 @@ physio(strategy, bp, dev, rw, minp, uio)
doerror:
- relpbuf(bpa);
+ relpbuf(bpa, NULL);
if (!bp_alloc) {
bp->b_flags &= ~(B_BUSY|B_PHYS);
if( bp->b_flags & B_WANTED) {
@@ -197,13 +197,13 @@ phygetvpbuf(dev_t dev, int resid)
bdsw = cdevsw[major(dev)];
if ((bdsw == NULL) || (bdsw->d_bmaj == -1))
- return getpbuf();
+ return getpbuf(NULL);
maxio = bdsw->d_maxio;
if (resid > maxio)
resid = maxio;
- return getpbuf();
+ return getpbuf(NULL);
}
static void
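getpbuf() and relpbuf() now take a pointer to a per-subsystem free count;
physio passes NULL, which opts out of that accounting.  A sketch of the
counted idiom, as the cluster code below uses it (the counter and function
names here are hypothetical):

    extern int mysub_pbuf_freecnt;          /* hypothetical counter */

    static void
    mysub_transfer(void)
    {
            struct buf *bp;

            /* Blocks until a pbuf is available under mysub's limit. */
            bp = getpbuf(&mysub_pbuf_freecnt);

            /* ... build the buffer header and issue the transfer ... */

            /* Release the pbuf and credit the subsystem's free count. */
            relpbuf(bp, &mysub_pbuf_freecnt);
    }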
diff --git a/sys/kern/subr_rlist.c b/sys/kern/subr_rlist.c
index d637ab4..810b87e 100644
--- a/sys/kern/subr_rlist.c
+++ b/sys/kern/subr_rlist.c
@@ -13,7 +13,7 @@
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This software is a component of "386BSD" developed by
- William F. Jolitz, TeleMuse.
+ * William F. Jolitz, TeleMuse.
* 4. Neither the name of the developer nor the name "386BSD"
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
@@ -54,9 +54,13 @@
* functioning of this software, nor does the author assume any responsibility
* for damages incurred with its use.
*
- * $Id: subr_rlist.c,v 1.28 1999/01/08 17:31:12 eivind Exp $
+ * --------- DEPRECATED ---------
+ *
+ * $Id: subr_rlist.c,v 1.29 1999/01/10 01:58:25 eivind Exp $
*/
+#if 0
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/rlist.h>
@@ -307,3 +311,6 @@ rlist_destroy (rlh)
rlist_mfree(lp);
}
}
+
+#endif
+
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index edc74a7..a6c2dfe 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -1,4 +1,4 @@
-/* $Id: sysv_shm.c,v 1.38 1998/08/24 08:39:38 dfr Exp $ */
+/* $Id: sysv_shm.c,v 1.39 1998/10/13 08:24:40 dg Exp $ */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*
@@ -52,6 +52,7 @@
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
+#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_inherit.h>
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 6cc487a..1634681 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
- * $Id: uipc_syscalls.c,v 1.48 1998/12/03 12:35:47 dg Exp $
+ * $Id: uipc_syscalls.c,v 1.49 1998/12/07 21:58:29 archie Exp $
*/
#include "opt_compat.h"
@@ -1543,7 +1543,13 @@ retry_lookup:
VM_WAIT;
goto retry_lookup;
}
- vm_page_flag_clear(pg, PG_BUSY);
+ /*
+ * don't just clear PG_BUSY manually -
+ * vm_page_alloc() should be considered opaque,
+ * use the VM routine provided to clear
+ * PG_BUSY.
+ */
+ vm_page_wakeup(pg);
}
/*
* Ensure that our page is still around when the I/O completes.
@@ -1583,21 +1589,12 @@ retry_lookup:
goto done;
}
} else {
- if ((pg->flags & PG_BUSY) || pg->busy) {
- s = splvm();
- if ((pg->flags & PG_BUSY) || pg->busy) {
- /*
- * Page is busy. Wait and retry.
- */
- vm_page_flag_set(pg, PG_WANTED);
- tsleep(pg, PVM, "sfpbsy", 0);
- splx(s);
- goto retry_lookup;
- }
- splx(s);
- }
+ if (vm_page_sleep_busy(pg, TRUE, "sfpbsy"))
+ goto retry_lookup;
+
/*
- * Protect from having the page ripped out from beneath us.
+ * Protect from having the page ripped out from
+ * beneath us.
*/
vm_page_wire(pg);
}
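The rewrite above collapses the open-coded splvm()/PG_WANTED/tsleep()
sequence into vm_page_sleep_busy().  The retry idiom it standardizes looks
like this in isolation (a sketch; the lookup wrapper and its name are
illustrative):

    static vm_page_t
    lookup_unbusied(vm_object_t obj, vm_pindex_t pindex)
    {
            vm_page_t m;

    retry:
            if ((m = vm_page_lookup(obj, pindex)) == NULL)
                    return (NULL);
            /*
             * vm_page_sleep_busy() returns TRUE if it had to sleep, in
             * which case the page may have been freed or reused and the
             * lookup must be redone.  TRUE as the second argument also
             * waits out m->busy, not just PG_BUSY.
             */
            if (vm_page_sleep_busy(m, TRUE, "pgwait"))
                    goto retry;
            return (m);
    }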
diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c
index 93f6164..d528f5e 100644
--- a/sys/kern/uipc_usrreq.c
+++ b/sys/kern/uipc_usrreq.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* From: @(#)uipc_usrreq.c 8.3 (Berkeley) 1/4/94
- * $Id: uipc_usrreq.c,v 1.36 1998/07/15 02:32:12 bde Exp $
+ * $Id: uipc_usrreq.c,v 1.37 1998/10/25 17:44:51 phk Exp $
*/
#include <sys/param.h>
@@ -1114,8 +1114,11 @@ unp_gc()
/*
* for each FD on our hit list, do the following two things
*/
- for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp)
- sorflush((struct socket *)(*fpp)->f_data);
+ for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) {
+ struct file *tfp = *fpp;
+ if (tfp->f_type == DTYPE_SOCKET && tfp->f_data != NULL)
+ sorflush((struct socket *)(tfp->f_data));
+ }
for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp)
closef(*fpp, (struct proc *) NULL);
free((caddr_t)extra_ref, M_FILE);
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index c7c8aa9..c1af873 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -13,7 +13,7 @@
* bad that happens because of using this software isn't the responsibility
* of the author. This software is distributed AS-IS.
*
- * $Id: vfs_aio.c,v 1.35 1998/11/27 01:14:21 tegge Exp $
+ * $Id: vfs_aio.c,v 1.36 1998/12/15 17:38:33 des Exp $
*/
/*
@@ -386,7 +386,7 @@ aio_free_entry(struct aiocblist *aiocbe)
splx(s);
if (aiocbe->bp) {
vunmapbuf(aiocbe->bp);
- relpbuf(aiocbe->bp);
+ relpbuf(aiocbe->bp, NULL);
aiocbe->bp = NULL;
}
}
@@ -1035,7 +1035,7 @@ aio_qphysio(p, aiocbe)
}
/* create and build a buffer header for a transfer */
- bp = (struct buf *)getpbuf();
+ bp = (struct buf *)getpbuf(NULL);
/*
* get a copy of the kva from the physical buffer
@@ -1122,7 +1122,7 @@ doerror:
lj->lioj_buffer_count--;
}
aiocbe->bp = NULL;
- relpbuf(bp);
+ relpbuf(bp, NULL);
return error;
}
@@ -1172,7 +1172,7 @@ aio_fphysio(p, iocb, flgwait)
error = bp->b_error;
}
- relpbuf(bp);
+ relpbuf(bp, NULL);
return (error);
}
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 30018b5..3bb204e 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
- * $Id: vfs_bio.c,v 1.192 1999/01/12 11:59:34 eivind Exp $
+ * $Id: vfs_bio.c,v 1.193 1999/01/19 08:00:51 dillon Exp $
*/
/*
@@ -562,7 +562,7 @@ brelse(struct buf * bp)
int s;
if (bp->b_flags & B_CLUSTER) {
- relpbuf(bp);
+ relpbuf(bp, NULL);
return;
}
@@ -1364,6 +1364,7 @@ vfs_setdirty(struct buf *bp) {
break;
}
}
+
boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
if (boffset < bp->b_dirtyoff) {
bp->b_dirtyoff = max(boffset, 0);
@@ -1412,7 +1413,6 @@ loop:
if ((bp = gbincore(vp, blkno))) {
if (bp->b_flags & B_BUSY) {
-
bp->b_flags |= B_WANTED;
if (bp->b_usecount < BUF_MAXUSE)
++bp->b_usecount;
@@ -1429,16 +1429,13 @@ loop:
bremfree(bp);
/*
- * check for size inconsistancies (note that they shouldn't
- * happen but do when filesystems don't handle the size changes
- * correctly.) We are conservative on metadata and don't just
- * extend the buffer but write (if needed) and re-constitute it.
+ * check for size inconsistencies for the non-VMIO case.
*/
if (bp->b_bcount != size) {
- if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
- allocbuf(bp, size);
- } else {
+ if ((bp->b_flags & B_VMIO) == 0 ||
+ (size > bp->b_kvasize)
+ ) {
if (bp->b_flags & B_DELWRI) {
bp->b_flags |= B_NOCACHE;
VOP_BWRITE(bp);
@@ -1455,15 +1452,26 @@ loop:
goto loop;
}
}
+
+ /*
+ * If the size is inconsistent in the VMIO case, we can resize
+ * the buffer. This might lead to B_CACHE getting cleared.
+ */
+
+ if (bp->b_bcount != size)
+ allocbuf(bp, size);
+
KASSERT(bp->b_offset != NOOFFSET,
("getblk: no buffer offset"));
+
/*
* Check that the constituted buffer really deserves for the
* B_CACHE bit to be set. B_VMIO type buffers might not
* contain fully valid pages. Normal (old-style) buffers
- * should be fully valid.
+ * should be fully valid. This might also lead to B_CACHE
+ * getting cleared.
*/
- if (bp->b_flags & B_VMIO) {
+ if ((bp->b_flags & (B_VMIO|B_CACHE)) == (B_VMIO|B_CACHE)) {
int checksize = bp->b_bufsize;
int poffset = bp->b_offset & PAGE_MASK;
int resid;
@@ -1479,6 +1487,19 @@ loop:
}
}
+ /*
+ * If B_DELWRI is set and B_CACHE got cleared ( or was
+ * already clear ), we have to commit the write and
+ * retry. The NFS code absolutely depends on this,
+ * and so might the FFS code. In any case, it formalizes
+ * the B_CACHE rules. See sys/buf.h.
+ */
+
+ if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
+ VOP_BWRITE(bp);
+ goto loop;
+ }
+
if (bp->b_usecount < BUF_MAXUSE)
++bp->b_usecount;
splx(s);
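The new rule matters to every getblk() consumer: once getblk() returns,
B_CACHE alone says whether the buffer contents may be used without I/O.
A bread()-style consumer sketch under that rule (error handling elided;
this mirrors what bread() does rather than quoting it):

    static struct buf *
    read_meta_block(struct vnode *vp, daddr_t blkno, int size)
    {
            struct buf *bp;

            bp = getblk(vp, blkno, size, 0, 0);
            if ((bp->b_flags & B_CACHE) == 0) {
                    /* Contents not fully valid; must go to the media. */
                    bp->b_flags |= B_READ;
                    bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
                    vfs_busy_pages(bp, 0);
                    VOP_STRATEGY(vp, bp);
                    (void) biowait(bp);
            }
            return (bp);
    }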
@@ -1572,19 +1593,18 @@ geteblk(int size)
/*
* This code constitutes the buffer memory from either anonymous system
* memory (in the case of non-VMIO operations) or from an associated
- * VM object (in the case of VMIO operations).
+ * VM object (in the case of VMIO operations). This code is able to
+ * resize a buffer up or down.
*
* Note that this code is tricky, and has many complications to resolve
- * deadlock or inconsistant data situations. Tread lightly!!!
- *
- * Modify the length of a buffer's underlying buffer storage without
- * destroying information (unless, of course the buffer is shrinking).
+ * deadlock or inconsistent data situations. Tread lightly!!!
+ * There are B_CACHE and B_DELWRI interactions that must be dealt with by
+ * the caller. Calling this code willy-nilly can result in the loss of data.
*/
+
int
-allocbuf(struct buf * bp, int size)
+allocbuf(struct buf *bp, int size)
{
-
- int s;
int newbsize, mbsize;
int i;
@@ -1705,7 +1725,8 @@ allocbuf(struct buf * bp, int size)
m = bp->b_pages[i];
KASSERT(m != bogus_page,
("allocbuf: bogus page found"));
- vm_page_sleep(m, "biodep", &m->busy);
+ while (vm_page_sleep_busy(m, TRUE, "biodep"))
+ ;
bp->b_pages[i] = NULL;
vm_page_unwire(m, 0);
@@ -1771,16 +1792,25 @@ allocbuf(struct buf * bp, int size)
}
vm_page_wire(m);
- vm_page_flag_clear(m, PG_BUSY);
+ vm_page_wakeup(m);
bp->b_flags &= ~B_CACHE;
- } else if (m->flags & PG_BUSY) {
- s = splvm();
- if (m->flags & PG_BUSY) {
- vm_page_flag_set(m, PG_WANTED);
- tsleep(m, PVM, "pgtblk", 0);
- }
- splx(s);
+ } else if (vm_page_sleep_busy(m, FALSE, "pgtblk")) {
+ /*
+ * If we had to sleep, retry.
+ *
+ * Also note that we only test
+ * PG_BUSY here, not m->busy.
+ *
+ * We cannot sleep on m->busy
+ * here because a vm_fault ->
+ * getpages -> cluster-read ->
+ * ...-> allocbuf sequence
+ * will convert PG_BUSY to
+ * m->busy so we have to let
+ * m->busy through if we do
+ * not want to deadlock.
+ */
goto doretry;
} else {
if ((curproc != pageproc) &&
@@ -2010,12 +2040,8 @@ biodone(register struct buf * bp)
foff += resid;
iosize -= resid;
}
- if (obj &&
- (obj->paging_in_progress == 0) &&
- (obj->flags & OBJ_PIPWNT)) {
- vm_object_clear_flag(obj, OBJ_PIPWNT);
- wakeup(obj);
- }
+ if (obj)
+ vm_object_pip_wakeupn(obj, 0);
}
/*
* For asynchronous completions, release the buffer now. The brelse
@@ -2096,11 +2122,7 @@ vfs_unbusy_pages(struct buf * bp)
vm_page_flag_clear(m, PG_ZERO);
vm_page_io_finish(m);
}
- if (obj->paging_in_progress == 0 &&
- (obj->flags & OBJ_PIPWNT)) {
- vm_object_clear_flag(obj, OBJ_PIPWNT);
- wakeup(obj);
- }
+ vm_object_pip_wakeupn(obj, 0);
}
}
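Both hunks replace the same open-coded pattern (count drops to zero, clear
OBJ_PIPWNT, wakeup()) with vm_object_pip_wakeupn(obj, 0).  Reconstructed
from the code being removed, the helper presumably behaves as below; the
subtraction step for i != 0 is an assumption, since only the i == 0 case
appears in this diff:

    void
    vm_object_pip_wakeupn(vm_object_t object, short i)
    {
            /* Retire i in-progress paging operations (assumed). */
            if (i)
                    object->paging_in_progress -= i;
            /* The logic this commit removes from the two sites above: */
            if (object->paging_in_progress == 0 &&
                (object->flags & OBJ_PIPWNT)) {
                    vm_object_clear_flag(object, OBJ_PIPWNT);
                    wakeup(object);
            }
    }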
@@ -2109,6 +2131,8 @@ vfs_unbusy_pages(struct buf * bp)
* of a page. If the consumer is not NFS, and the page is not
* valid for the entire range, clear the B_CACHE flag to force
* the consumer to re-read the page.
+ *
+ * B_CACHE interaction is especially tricky.
*/
static void
vfs_buf_set_valid(struct buf *bp,
@@ -2135,13 +2159,16 @@ vfs_buf_set_valid(struct buf *bp,
}
evalid = min(evalid, off + size);
/*
- * Make sure this range is contiguous with the range
- * built up from previous pages. If not, then we will
- * just use the range from the previous pages.
+ * We can only set b_validoff/end if this range is contiguous
+ * with the range built up already. If we cannot set
+ * b_validoff/end, we must clear B_CACHE to force an update
+ * to clean the bp up.
*/
if (svalid == bp->b_validend) {
bp->b_validoff = min(bp->b_validoff, svalid);
bp->b_validend = max(bp->b_validend, evalid);
+ } else {
+ bp->b_flags &= ~B_CACHE;
}
} else if (!vm_page_is_valid(m,
(vm_offset_t) ((foff + off) & PAGE_MASK),
@@ -2154,6 +2181,10 @@ vfs_buf_set_valid(struct buf *bp,
* Set the valid bits in a page, taking care of the b_validoff,
* b_validend fields which NFS uses to optimise small reads. Off is
* the offset within the file and pageno is the page index within the buf.
+ *
+ * XXX we have to set the valid & clean bits for all page fragments
+ * touched by b_validoff/validend, even if the page fragment goes somewhat
+ * beyond b_validoff/validend due to alignment.
*/
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
@@ -2208,7 +2239,7 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
retry:
for (i = 0; i < bp->b_npages; i++) {
vm_page_t m = bp->b_pages[i];
- if (vm_page_sleep(m, "vbpage", NULL))
+ if (vm_page_sleep_busy(m, FALSE, "vbpage"))
goto retry;
}
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index ce842ad..781508e 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -33,7 +33,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
- * $Id: vfs_cluster.c,v 1.76 1999/01/08 17:31:15 eivind Exp $
+ * $Id: vfs_cluster.c,v 1.77 1999/01/10 01:58:25 eivind Exp $
*/
#include "opt_debug_cluster.h"
@@ -68,6 +68,8 @@ static struct buf *
extern vm_page_t bogus_page;
+extern int cluster_pbuf_freecnt;
+
/*
* Maximum number of blocks for read-ahead.
*/
@@ -336,7 +338,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
((tbp->b_flags & B_VMIO) == 0) || (run <= 1) )
return tbp;
- bp = trypbuf();
+ bp = trypbuf(&cluster_pbuf_freecnt);
if (bp == 0)
return tbp;
@@ -475,7 +477,7 @@ cluster_callback(bp)
tbp->b_dirtyoff = tbp->b_dirtyend = 0;
biodone(tbp);
}
- relpbuf(bp);
+ relpbuf(bp, &cluster_pbuf_freecnt);
}
/*
@@ -654,7 +656,7 @@ cluster_wbuild(vp, size, start_lbn, len)
(tbp->b_bcount != tbp->b_bufsize) ||
(tbp->b_bcount != size) ||
(len == 1) ||
- ((bp = trypbuf()) == NULL)) {
+ ((bp = trypbuf(&cluster_pbuf_freecnt)) == NULL)) {
totalwritten += tbp->b_bufsize;
bawrite(tbp);
++start_lbn;
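Unlike physio, the cluster code uses the non-blocking trypbuf() against
its dedicated cluster_pbuf_freecnt pool and falls back to plain writes
when the pool is empty, as the hunk above shows.  The shape of that
fallback, isolated into a sketch (the wrapper function is illustrative):

    static int
    cluster_write_or_fallback(struct buf *tbp)
    {
            struct buf *bp;

            if ((bp = trypbuf(&cluster_pbuf_freecnt)) == NULL) {
                    /*
                     * Pool exhausted: never block in the write path;
                     * issue the buffer as a single async write instead.
                     */
                    bawrite(tbp);
                    return (0);
            }
            /* ... gather tbp and neighbouring buffers under bp ... */
            return (1);
    }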
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index 179ef78..44b1698 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.181 1999/01/08 17:31:17 eivind Exp $
+ * $Id: vfs_subr.c,v 1.182 1999/01/10 01:58:26 eivind Exp $
*/
/*
@@ -63,10 +63,13 @@
#include <machine/limits.h>
#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
+#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>
@@ -985,6 +988,10 @@ sched_sync(void)
/*
* Associate a p-buffer with a vnode.
+ *
+ * Also sets the B_PAGING flag to indicate that the vnode is not fully
+ * associated with the buffer, i.e. the bp has not been linked into the
+ * vnode or ref-counted.
*/
void
pbgetvp(vp, bp)
@@ -995,6 +1002,7 @@ pbgetvp(vp, bp)
KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
bp->b_vp = vp;
+ bp->b_flags |= B_PAGING;
if (vp->v_type == VBLK || vp->v_type == VCHR)
bp->b_dev = vp->v_rdev;
else
@@ -1011,7 +1019,34 @@ pbrelvp(bp)
KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
+#if !defined(MAX_PERF)
+ /* XXX REMOVE ME */
+ if (bp->b_vnbufs.tqe_next != NULL) {
+ panic(
+ "relpbuf(): b_vp was probably reassignbuf()d %p %x",
+ bp,
+ (int)bp->b_flags
+ );
+ }
+#endif
bp->b_vp = (struct vnode *) 0;
+ bp->b_flags &= ~B_PAGING;
+}
+
+void
+pbreassignbuf(bp, newvp)
+ struct buf *bp;
+ struct vnode *newvp;
+{
+#if !defined(MAX_PERF)
+ if ((bp->b_flags & B_PAGING) == 0) {
+ panic(
+ "pbreassignbuf() on non phys bp %p",
+ bp
+ );
+ }
+#endif
+ bp->b_vp = newvp;
}
/*
@@ -1034,6 +1069,15 @@ reassignbuf(bp, newvp)
return;
}
+#if !defined(MAX_PERF)
+ /*
+ * B_PAGING flagged buffers cannot be reassigned because their vp
+ * is not fully linked in.
+ */
+ if (bp->b_flags & B_PAGING)
+ panic("cannot reassign paging buffer");
+#endif
+
s = splbio();
/*
* Delete from old vnode list, if on one.
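Taken together, pbgetvp(), pbreassignbuf(), and pbrelvp() now define a
vnode association for paging buffers that never touches the vnode's
buffer lists or reference counts.  A lifecycle sketch under those
assumptions (the enclosing routine is hypothetical):

    static void
    pager_io(struct vnode *vp, struct vnode *swapvp)
    {
            struct buf *bp;

            bp = getpbuf(NULL);
            pbgetvp(vp, bp);        /* sets B_PAGING; no ref, no list */

            /*
             * If the I/O must be redirected (e.g. to a swap vnode),
             * only pbreassignbuf() is legal here; reassignbuf() would
             * panic on a B_PAGING buffer.
             */
            pbreassignbuf(bp, swapvp);

            pbrelvp(bp);            /* clears B_PAGING */
            relpbuf(bp, NULL);
    }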
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 179ef78..44b1698 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.181 1999/01/08 17:31:17 eivind Exp $
+ * $Id: vfs_subr.c,v 1.182 1999/01/10 01:58:26 eivind Exp $
*/
/*
@@ -63,10 +63,13 @@
#include <machine/limits.h>
#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
+#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>
@@ -985,6 +988,10 @@ sched_sync(void)
/*
* Associate a p-buffer with a vnode.
+ *
+ * Also sets the B_PAGING flag to indicate that the vnode is not fully
+ * associated with the buffer, i.e. the bp has not been linked into the
+ * vnode or ref-counted.
*/
void
pbgetvp(vp, bp)
@@ -995,6 +1002,7 @@ pbgetvp(vp, bp)
KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
bp->b_vp = vp;
+ bp->b_flags |= B_PAGING;
if (vp->v_type == VBLK || vp->v_type == VCHR)
bp->b_dev = vp->v_rdev;
else
@@ -1011,7 +1019,34 @@ pbrelvp(bp)
KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
+#if !defined(MAX_PERF)
+ /* XXX REMOVE ME */
+ if (bp->b_vnbufs.tqe_next != NULL) {
+ panic(
+ "relpbuf(): b_vp was probably reassignbuf()d %p %x",
+ bp,
+ (int)bp->b_flags
+ );
+ }
+#endif
bp->b_vp = (struct vnode *) 0;
+ bp->b_flags &= ~B_PAGING;
+}
+
+void
+pbreassignbuf(bp, newvp)
+ struct buf *bp;
+ struct vnode *newvp;
+{
+#if !defined(MAX_PERF)
+ if ((bp->b_flags & B_PAGING) == 0) {
+ panic(
+ "pbreassignbuf() on non phys bp %p",
+ bp
+ );
+ }
+#endif
+ bp->b_vp = newvp;
}
/*
@@ -1034,6 +1069,15 @@ reassignbuf(bp, newvp)
return;
}
+#if !defined(MAX_PERF)
+ /*
+ * B_PAGING flagged buffers cannot be reassigned because their vp
+ * is not fully linked in.
+ */
+ if (bp->b_flags & B_PAGING)
+ panic("cannot reassign paging buffer");
+#endif
+
s = splbio();
/*
* Delete from old vnode list, if on one.