Diffstat (limited to 'sys/kern/vfs_bio.c')
-rw-r--r--  sys/kern/vfs_bio.c | 841
1 file changed, 617 insertions(+), 224 deletions(-)
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index ec5c962..6c12d7f 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1,181 +1,260 @@
-/*-
- * Copyright (c) 1982, 1986, 1989, 1993
- * The Regents of the University of California. All rights reserved.
- * (c) UNIX System Laboratories, Inc.
- * All or some portions of this file are derived from material licensed
- * to the University of California by American Telephone and Telegraph
- * Co. or Unix System Laboratories, Inc. and are reproduced herein with
- * the permission of UNIX System Laboratories, Inc.
+/*
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice immediately at the beginning of the file, without modification,
+ * this list of conditions, and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * from: @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
+ * 3. Absolutely no warranty of function or purpose is made by the author
+ * John S. Dyson.
+ * 4. Modifications may be freely made to this file if the above conditions
+ * are met.
*/
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/kernel.h>
#include <sys/proc.h>
-#include <sys/buf.h>
#include <sys/vnode.h>
+#include <sys/buf.h>
#include <sys/mount.h>
-#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
-
+#include <vm/vm.h>
+#include <vm/vm_pageout.h>
+
+#include <miscfs/specfs/specdev.h>
+
+struct buf *buf; /* the buffer pool itself */
+int nbuf; /* number of buffer headers */
+int bufpages; /* number of memory pages in the buffer pool */
+struct buf *swbuf; /* swap I/O headers */
+int nswbuf;
+#define BUFHSZ 512
+int bufhash = BUFHSZ - 1;
+
+struct buf *getnewbuf(int,int);
+extern vm_map_t buffer_map, io_map;
+void vm_hold_free_pages(vm_offset_t from, vm_offset_t to);
+void vm_hold_load_pages(vm_offset_t from, vm_offset_t to);
/*
* Definitions for the buffer hash lists.
*/
#define BUFHASH(dvp, lbn) \
(&bufhashtbl[((int)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
-LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
-u_long bufhash;
-
-/*
- * Insq/Remq for the buffer hash lists.
- */
-#define binshash(bp, dp) LIST_INSERT_HEAD(dp, bp, b_hash)
-#define bremhash(bp) LIST_REMOVE(bp, b_hash)
/*
* Definitions for the buffer free lists.
*/
-#define BQUEUES 4 /* number of free buffer queues */
-
-#define BQ_LOCKED 0 /* super-blocks &c */
-#define BQ_LRU 1 /* lru, useful buffers */
-#define BQ_AGE 2 /* rubbish */
-#define BQ_EMPTY 3 /* buffer headers with no memory */
+#define BQUEUES 5 /* number of free buffer queues */
+LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
-int needbuffer;
+
+#define BQ_NONE 0 /* on no queue */
+#define BQ_LOCKED 1 /* locked buffers */
+#define BQ_LRU 2 /* useful buffers */
+#define BQ_AGE 3 /* less useful buffers */
+#define BQ_EMPTY 4 /* empty buffer headers */
+
+int needsbuffer;
/*
- * Insq/Remq for the buffer free lists.
+ * Internal update daemon, process 3
+ * The variable vfs_update_wakeup allows for internal syncs.
*/
-#define binsheadfree(bp, dp) TAILQ_INSERT_HEAD(dp, bp, b_freelist)
-#define binstailfree(bp, dp) TAILQ_INSERT_TAIL(dp, bp, b_freelist)
-
-void
-bremfree(bp)
- struct buf *bp;
-{
- struct bqueues *dp = NULL;
-
- /*
- * We only calculate the head of the freelist when removing
- * the last element of the list as that is the only time that
- * it is needed (e.g. to reset the tail pointer).
- *
- * NB: This makes an assumption about how tailq's are implemented.
- */
- if (bp->b_freelist.tqe_next == NULL) {
- for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
- if (dp->tqh_last == &bp->b_freelist.tqe_next)
- break;
- if (dp == &bufqueues[BQUEUES])
- panic("bremfree: lost tail");
- }
- TAILQ_REMOVE(dp, bp, b_freelist);
-}
+int vfs_update_wakeup;
/*
- * Initialize buffers and hash links for buffers.
+ * Initialize buffer headers and related structures.
*/
-void
-bufinit()
+void bufinit()
{
- register struct buf *bp;
- struct bqueues *dp;
- register int i;
- int base, residual;
-
- for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
- TAILQ_INIT(dp);
- bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
- base = bufpages / nbuf;
- residual = bufpages % nbuf;
- for (i = 0; i < nbuf; i++) {
+ struct buf *bp;
+ int i;
+
+ TAILQ_INIT(&bswlist);
+ LIST_INIT(&invalhash);
+
+ /* first, make a null hash table */
+ for(i=0;i<BUFHSZ;i++)
+ LIST_INIT(&bufhashtbl[i]);
+
+ /* next, make a null set of free lists */
+ for(i=0;i<BQUEUES;i++)
+ TAILQ_INIT(&bufqueues[i]);
+
+ /* finally, initialize each buffer header and stick on empty q */
+ for(i=0;i<nbuf;i++) {
bp = &buf[i];
- bzero((char *)bp, sizeof *bp);
+ bzero(bp, sizeof *bp);
+ bp->b_flags = B_INVAL; /* we're just an empty header */
bp->b_dev = NODEV;
+ bp->b_vp = NULL;
bp->b_rcred = NOCRED;
bp->b_wcred = NOCRED;
+ bp->b_qindex = BQ_EMPTY;
bp->b_vnbufs.le_next = NOLIST;
- bp->b_data = buffers + i * MAXBSIZE;
- if (i < residual)
- bp->b_bufsize = (base + 1) * CLBYTES;
- else
- bp->b_bufsize = base * CLBYTES;
- bp->b_flags = B_INVAL;
- dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
- binsheadfree(bp, dp);
- binshash(bp, &invalhash);
+ bp->b_data = (caddr_t)kmem_alloc_pageable(buffer_map, MAXBSIZE);
+ TAILQ_INSERT_TAIL(&bufqueues[BQ_EMPTY], bp, b_freelist);
+ LIST_INSERT_HEAD(&invalhash, bp, b_hash);
}
}
-bread(a1, a2, a3, a4, a5)
- struct vnode *a1;
- daddr_t a2;
- int a3;
- struct ucred *a4;
- struct buf **a5;
+/*
+ * remove the buffer from the appropriate free list
+ */
+void
+bremfree(struct buf *bp)
{
+ int s = splbio();
+ if( bp->b_qindex != BQ_NONE) {
+ TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
+ bp->b_qindex = BQ_NONE;
+ } else {
+ panic("bremfree: removing a buffer when not on a queue");
+ }
+ splx(s);
+}
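/*
 * Note (illustrative, not part of the change): splbio()/splx() bracket
 * every queue manipulation so that a disk interrupt running biodone()
 * -> brelse() cannot observe a half-updated free list. The pattern
 * used throughout this file is:
 *
 *	int s = splbio();	block I/O interrupts
 *	... touch bufqueues / b_qindex ...
 *	splx(s);		restore the previous level
 */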
- /*
- * Body deleted.
- */
- return (EIO);
+/*
+ * Get a buffer with the specified data. Look in the cache first.
+ */
+int
+bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
+ struct buf **bpp)
+{
+ struct buf *bp;
+
+ bp = getblk (vp, blkno, size, 0, 0);
+ *bpp = bp;
+
+ /* if not found in cache, do some I/O */
+ if ((bp->b_flags & B_CACHE) == 0) {
+ if (curproc && curproc->p_stats) /* count block I/O */
+ curproc->p_stats->p_ru.ru_inblock++;
+ bp->b_flags |= B_READ;
+ bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
+ if( bp->b_rcred == NOCRED) {
+ if (cred != NOCRED)
+ crhold(cred);
+ bp->b_rcred = cred;
+ }
+ VOP_STRATEGY(bp);
+ return( biowait (bp));
+ }
+
+ return (0);
}
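/*
 * Illustrative usage sketch, not part of this change: a filesystem
 * caller reads one logical block and releases it when done (vp,
 * lblkno, and fs_bsize stand for hypothetical caller state). On error
 * the buffer is still returned and must be brelse'd:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, fs_bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... consume bp->b_data ...
 *	brelse(bp);
 */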
-breadn(a1, a2, a3, a4, a5, a6, a7, a8)
- struct vnode *a1;
- daddr_t a2; int a3;
- daddr_t a4[]; int a5[];
- int a6;
- struct ucred *a7;
- struct buf **a8;
+/*
+ * Operates like bread, but also starts asynchronous I/O on
+ * read-ahead blocks.
+ */
+int
+breadn(struct vnode *vp, daddr_t blkno, int size,
+ daddr_t *rablkno, int *rabsize,
+ int cnt, struct ucred *cred, struct buf **bpp)
{
+ struct buf *bp, *rabp;
+ int i;
+ int rv = 0, readwait = 0;
+
+ *bpp = bp = getblk (vp, blkno, size, 0, 0);
+
+ /* if not found in cache, do some I/O */
+ if ((bp->b_flags & B_CACHE) == 0) {
+ if (curproc && curproc->p_stats) /* count block I/O */
+ curproc->p_stats->p_ru.ru_inblock++;
+ bp->b_flags |= B_READ;
+ bp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
+ if( bp->b_rcred == NOCRED) {
+ if (cred != NOCRED)
+ crhold(cred);
+ bp->b_rcred = cred;
+ }
+ VOP_STRATEGY(bp);
+ ++readwait;
+ }
+
+ for(i=0;i<cnt;i++, rablkno++, rabsize++) {
+ if( incore(vp, *rablkno)) {
+ continue;
+ }
+ rabp = getblk (vp, *rablkno, *rabsize, 0, 0);
+
+ if ((rabp->b_flags & B_CACHE) == 0) {
+ if (curproc && curproc->p_stats)
+ curproc->p_stats->p_ru.ru_inblock++;
+ rabp->b_flags |= B_READ | B_ASYNC;
+ rabp->b_flags &= ~(B_DONE|B_ERROR|B_INVAL);
+ if( rabp->b_rcred == NOCRED) {
+ if (cred != NOCRED)
+ crhold(cred);
+ rabp->b_rcred = cred;
+ }
+ VOP_STRATEGY(rabp);
+ } else {
+ brelse(rabp);
+ }
+ }
- /*
- * Body deleted.
- */
- return (EIO);
+ if( readwait) {
+ rv = biowait (bp);
+ }
+
+ return (rv);
}
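/*
 * Illustrative sketch (assumed caller, not from this change): rablkno
 * and rabsize are parallel arrays naming cnt read-ahead blocks; the
 * caller waits only for the primary block while the read-aheads
 * proceed asynchronously:
 *
 *	daddr_t ra_blk[1];
 *	int ra_size[1];
 *
 *	ra_blk[0] = lblkno + 1;
 *	ra_size[0] = fs_bsize;
 *	error = breadn(vp, lblkno, fs_bsize, ra_blk, ra_size, 1,
 *	    NOCRED, &bp);
 */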
-bwrite(a1)
- struct buf *a1;
+/*
+ * Write, release buffer on completion. (Done by iodone
+ * if async.)
+ */
+int
+bwrite(struct buf *bp)
{
+ int oldflags = bp->b_flags;
+
+ if(bp->b_flags & B_INVAL) {
+ brelse(bp);
+ return (0);
+ }
- /*
- * Body deleted.
- */
- return (EIO);
+ if(!(bp->b_flags & B_BUSY))
+ panic("bwrite: buffer is not busy???");
+
+ bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);
+ bp->b_flags |= B_WRITEINPROG;
+
+ if (oldflags & B_ASYNC) {
+ if (oldflags & B_DELWRI) {
+ reassignbuf(bp, bp->b_vp);
+ } else if( curproc) {
+ ++curproc->p_stats->p_ru.ru_oublock;
+ }
+ }
+
+ bp->b_vp->v_numoutput++;
+ VOP_STRATEGY(bp);
+
+ if( (oldflags & B_ASYNC) == 0) {
+ int rtval = biowait(bp);
+ if (oldflags & B_DELWRI) {
+ reassignbuf(bp, bp->b_vp);
+ } else if( curproc) {
+ ++curproc->p_stats->p_ru.ru_oublock;
+ }
+ brelse(bp);
+ return (rtval);
+ }
+
+ return(0);
}
int
@@ -185,155 +264,469 @@ vn_bwrite(ap)
return (bwrite(ap->a_bp));
}
-bdwrite(a1)
- struct buf *a1;
+/*
+ * Delayed write. (Buffer is marked dirty).
+ */
+void
+bdwrite(struct buf *bp)
{
- /*
- * Body deleted.
- */
+ if((bp->b_flags & B_BUSY) == 0) {
+ panic("bdwrite: buffer is not busy");
+ }
+
+ if(bp->b_flags & B_INVAL) {
+ brelse(bp);
+ return;
+ }
+
+ if(bp->b_flags & B_TAPE) {
+ bawrite(bp);
+ return;
+ }
+
+ bp->b_flags &= ~B_READ;
+ if( (bp->b_flags & B_DELWRI) == 0) {
+ if( curproc)
+ ++curproc->p_stats->p_ru.ru_oublock;
+ bp->b_flags |= B_DONE|B_DELWRI;
+ reassignbuf(bp, bp->b_vp);
+ }
+ brelse(bp);
return;
}
-bawrite(a1)
- struct buf *a1;
+/*
+ * Asynchronous write.
+ * Start output on a buffer, but do not wait for it to complete.
+ * The buffer is released when the output completes.
+ */
+void
+bawrite(struct buf *bp)
{
-
- /*
- * Body deleted.
- */
- return;
+ bp->b_flags |= B_ASYNC;
+ (void) bwrite(bp);
}
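/*
 * Recap of the three write entry points as a caller sees them
 * (behavior as implemented above; usage only, illustrative):
 *
 *	bwrite(bp);	synchronous: starts I/O, sleeps in biowait
 *	bdwrite(bp);	delayed: marks B_DELWRI and releases; the
 *			update daemon or a later flush writes it out
 *	bawrite(bp);	asynchronous: starts I/O now; the buffer is
 *			brelse'd by biodone on completion
 */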
-brelse(a1)
- struct buf *a1;
+/*
+ * Release a buffer.
+ */
+void
+brelse(struct buf *bp)
{
+ int x;
- /*
- * Body deleted.
- */
- return;
+ /* anyone need a "free" block? */
+ x=splbio();
+ if (needsbuffer) {
+ needsbuffer = 0;
+ wakeup((caddr_t)&needsbuffer);
+ }
+ /* anyone need this very block? */
+ if (bp->b_flags & B_WANTED) {
+ bp->b_flags &= ~(B_WANTED|B_AGE);
+ wakeup((caddr_t)bp);
+ }
+
+ if (bp->b_flags & B_LOCKED)
+ bp->b_flags &= ~B_ERROR;
+
+ if ((bp->b_flags & (B_NOCACHE|B_INVAL|B_ERROR)) ||
+ (bp->b_bufsize <= 0)) {
+ bp->b_flags |= B_INVAL;
+ bp->b_flags &= ~(B_DELWRI|B_CACHE);
+ if(bp->b_vp)
+ brelvp(bp);
+ }
+
+ if( bp->b_qindex != BQ_NONE)
+ panic("brelse: free buffer onto another queue???");
+
+ /* enqueue */
+ /* buffers with junk contents */
+ if(bp->b_bufsize == 0) {
+ bp->b_qindex = BQ_EMPTY;
+ TAILQ_INSERT_HEAD(&bufqueues[BQ_EMPTY], bp, b_freelist);
+ LIST_REMOVE(bp, b_hash);
+ LIST_INSERT_HEAD(&invalhash, bp, b_hash);
+ bp->b_dev = NODEV;
+ } else if(bp->b_flags & (B_ERROR|B_INVAL|B_NOCACHE)) {
+ bp->b_qindex = BQ_AGE;
+ TAILQ_INSERT_HEAD(&bufqueues[BQ_AGE], bp, b_freelist);
+ LIST_REMOVE(bp, b_hash);
+ LIST_INSERT_HEAD(&invalhash, bp, b_hash);
+ bp->b_dev = NODEV;
+ /* buffers that are locked */
+ } else if(bp->b_flags & B_LOCKED) {
+ bp->b_qindex = BQ_LOCKED;
+ TAILQ_INSERT_TAIL(&bufqueues[BQ_LOCKED], bp, b_freelist);
+ /* buffers with stale but valid contents */
+ } else if(bp->b_flags & B_AGE) {
+ bp->b_qindex = BQ_AGE;
+ TAILQ_INSERT_TAIL(&bufqueues[BQ_AGE], bp, b_freelist);
+ /* buffers with valid and quite potentially reusable contents */
+ } else {
+ bp->b_qindex = BQ_LRU;
+ TAILQ_INSERT_TAIL(&bufqueues[BQ_LRU], bp, b_freelist);
+ }
+
+ /* unlock */
+ bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_NOCACHE|B_AGE);
+ splx(x);
}
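/*
 * Queue-placement recap derived from the cases above (illustrative
 * summary only, first match wins):
 *
 *	b_bufsize == 0                 -> BQ_EMPTY head, rehash to invalhash
 *	B_ERROR | B_INVAL | B_NOCACHE  -> BQ_AGE head, rehash to invalhash
 *	B_LOCKED                       -> BQ_LOCKED tail
 *	B_AGE                          -> BQ_AGE tail
 *	otherwise                      -> BQ_LRU tail
 */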
+int freebufspace;
+int allocbufspace;
+
+/*
+ * Find a buffer header which is available for use.
+ */
struct buf *
-incore(a1, a2)
- struct vnode *a1;
- daddr_t a2;
+getnewbuf(int slpflag, int slptimeo)
{
+ struct buf *bp;
+ int x;
+ x = splbio();
+start:
+ /* can we constitute a new buffer? */
+ if (bp = bufqueues[BQ_EMPTY].tqh_first) {
+ if( bp->b_qindex != BQ_EMPTY)
+ panic("getnewbuf: inconsistent EMPTY queue");
+ bremfree(bp);
+ goto fillbuf;
+ }
- /*
- * Body deleted.
- */
- return (0);
+tryfree:
+ if (bp = bufqueues[BQ_AGE].tqh_first) {
+ if( bp->b_qindex != BQ_AGE)
+ panic("getnewbuf: inconsistent AGE queue");
+ bremfree(bp);
+ } else if (bp = bufqueues[BQ_LRU].tqh_first) {
+ if( bp->b_qindex != BQ_LRU)
+ panic("getnewbuf: inconsistent LRU queue");
+ bremfree(bp);
+ } else {
+ /* wait for a free buffer of any kind */
+ needsbuffer = 1;
+ tsleep((caddr_t)&needsbuffer, PRIBIO, "newbuf", 0);
+ splx(x);
+ return (0);
+ }
+
+
+ /* if we are a delayed write, convert to an async write */
+ if (bp->b_flags & B_DELWRI) {
+ bp->b_flags |= B_BUSY;
+ bawrite (bp);
+ goto start;
+ }
+
+ if(bp->b_vp)
+ brelvp(bp);
+
+ /* we are not free, nor do we contain interesting data */
+ if (bp->b_rcred != NOCRED)
+ crfree(bp->b_rcred);
+ if (bp->b_wcred != NOCRED)
+ crfree(bp->b_wcred);
+fillbuf:
+ bp->b_flags = B_BUSY;
+ LIST_REMOVE(bp, b_hash);
+ LIST_INSERT_HEAD(&invalhash, bp, b_hash);
+ splx(x);
+ bp->b_dev = NODEV;
+ bp->b_vp = NULL;
+ bp->b_blkno = bp->b_lblkno = 0;
+ bp->b_iodone = 0;
+ bp->b_error = 0;
+ bp->b_resid = 0;
+ bp->b_bcount = 0;
+ bp->b_wcred = bp->b_rcred = NOCRED;
+ bp->b_dirtyoff = bp->b_dirtyend = 0;
+ bp->b_validoff = bp->b_validend = 0;
+ return (bp);
}
+/*
+ * Check to see if a block is currently memory resident.
+ */
struct buf *
-getblk(a1, a2, a3, a4, a5)
- struct vnode *a1;
- daddr_t a2;
- int a3, a4, a5;
+incore(struct vnode *vp, daddr_t blkno)
{
+ struct buf *bp;
+ struct bufhashhdr *bh;
+
+ int s = splbio();
- /*
- * Body deleted.
- */
- return ((struct buf *)0);
+ bh = BUFHASH(vp, blkno);
+ bp = bh->lh_first;
+
+ /* Search hash chain */
+ while (bp) {
+ if( (bp < buf) || (bp >= buf + nbuf)) {
+ printf("incore: buf out of range: %lx, hash: %d\n",
+ bp, bh - bufhashtbl);
+ panic("incore: buf fault");
+ }
+ /* hit: drop spl before returning the buffer */
+ if (bp->b_lblkno == blkno && bp->b_vp == vp
+ && (bp->b_flags & B_INVAL) == 0) {
+ splx(s);
+ return (bp);
+ }
+ bp = bp->b_hash.le_next;
+ }
+ splx(s);
+
+ return(0);
}
+/*
+ * Get a block given a specified block and offset into a file/device.
+ */
struct buf *
-geteblk(a1)
- int a1;
+getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
-
- /*
- * Body deleted.
- */
- return ((struct buf *)0);
+ struct buf *bp;
+ int x;
+ struct bufhashhdr *bh;
+
+ x = splbio();
+loop:
+ if (bp = incore(vp, blkno)) {
+ if (bp->b_flags & B_BUSY) {
+ bp->b_flags |= B_WANTED;
+ tsleep ((caddr_t)bp, PRIBIO, "getblk", 0);
+ goto loop;
+ }
+ bp->b_flags |= B_BUSY | B_CACHE;
+ bremfree(bp);
+ /*
+ * check for size inconsistencies
+ */
+ if (bp->b_bcount != size) {
+ printf("getblk: invalid buffer size: %d\n", bp->b_bcount);
+ bp->b_flags |= B_INVAL;
+ bwrite(bp);
+ goto loop;
+ }
+ } else {
+
+ if ((bp = getnewbuf(0, 0)) == 0)
+ goto loop;
+ allocbuf(bp, size);
+ /*
+ * have to check again, because of a possible
+ * race condition.
+ */
+ if (incore( vp, blkno)) {
+ allocbuf(bp, 0);
+ bp->b_flags |= B_INVAL;
+ brelse(bp);
+ goto loop;
+ }
+ bp->b_blkno = bp->b_lblkno = blkno;
+ bgetvp(vp, bp);
+ LIST_REMOVE(bp, b_hash);
+ bh = BUFHASH(vp, blkno);
+ LIST_INSERT_HEAD(bh, bp, b_hash);
+ }
+ splx(x);
+ return (bp);
}
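/*
 * Illustrative sketch, not part of the change: getblk never does I/O
 * itself, so a caller constituting a fresh block checks B_CACHE to
 * decide whether the contents are valid (vp, lbn, fs_bsize are
 * hypothetical caller state):
 *
 *	bp = getblk(vp, lbn, fs_bsize, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0)
 *		bzero(bp->b_data, fs_bsize);	initialize a new block
 *	... modify bp->b_data ...
 *	bdwrite(bp);				mark dirty and release
 */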
-allocbuf(a1, a2)
- struct buf *a1;
- int a2;
+/*
+ * Get an empty, disassociated buffer of given size.
+ */
+struct buf *
+geteblk(int size)
{
-
- /*
- * Body deleted.
- */
- return (0);
+ struct buf *bp;
+ while ((bp = getnewbuf(0, 0)) == 0)
+ ;
+ allocbuf(bp, size);
+ bp->b_flags |= B_INVAL;
+ return (bp);
}
-struct buf *
-getnewbuf(a1, a2)
- int a1, a2;
+/*
+ * Modify the length of a buffer's underlying buffer storage without
+ * destroying information (unless, of course, the buffer is shrinking).
+ */
+void
+allocbuf(struct buf *bp, int size)
{
- /*
- * Body deleted.
- */
- return ((struct buf *)0);
+ int newbsize = round_page(size);
+
+ if( newbsize == bp->b_bufsize) {
+ bp->b_bcount = size;
+ return;
+ } else if( newbsize < bp->b_bufsize) {
+ vm_hold_free_pages(
+ (vm_offset_t) bp->b_data + newbsize,
+ (vm_offset_t) bp->b_data + bp->b_bufsize);
+ } else if( newbsize > bp->b_bufsize) {
+ vm_hold_load_pages(
+ (vm_offset_t) bp->b_data + bp->b_bufsize,
+ (vm_offset_t) bp->b_data + newbsize);
+ }
+
+ /* adjust buffer cache's idea of memory allocated to buffer contents */
+ freebufspace -= newbsize - bp->b_bufsize;
+ allocbufspace += newbsize - bp->b_bufsize;
+
+ bp->b_bufsize = newbsize;
+ bp->b_bcount = size;
}
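/*
 * Worked example (assuming 4K pages, illustrative only): growing a
 * buffer that currently maps one page so it can carry 6000 bytes:
 *
 *	allocbuf(bp, 6000);
 *	   newbsize = round_page(6000) = 8192
 *	   vm_hold_load_pages(b_data + 4096, b_data + 8192)
 *	   b_bufsize = 8192, b_bcount = 6000
 *
 * Shrinking works symmetrically through vm_hold_free_pages, so whole
 * pages are the only unit ever mapped or unmapped.
 */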
-biowait(a1)
- struct buf *a1;
+/*
+ * Wait for buffer I/O completion, returning error status.
+ */
+int
+biowait(register struct buf *bp)
{
-
- /*
- * Body deleted.
- */
- return (EIO);
+ int x;
+
+ x = splbio();
+ while ((bp->b_flags & B_DONE) == 0)
+ tsleep((caddr_t)bp, PRIBIO, "biowait", 0);
+ if((bp->b_flags & B_ERROR) || bp->b_error) {
+ if ((bp->b_flags & B_INVAL) == 0) {
+ bp->b_flags |= B_INVAL;
+ bp->b_dev = NODEV;
+ LIST_REMOVE(bp, b_hash);
+ LIST_INSERT_HEAD(&invalhash, bp, b_hash);
+ }
+ if (!bp->b_error)
+ bp->b_error = EIO;
+ else
+ bp->b_flags |= B_ERROR;
+ splx(x);
+ return (bp->b_error);
+ } else {
+ splx(x);
+ return (0);
+ }
}
+/*
+ * Finish I/O on a buffer, calling an optional function.
+ * This is usually called from interrupt level, so process blocking
+ * is not *a good idea*.
+ */
void
-biodone(a1)
- struct buf *a1;
+biodone(register struct buf *bp)
{
+ int s;
+ s = splbio();
+ bp->b_flags |= B_DONE;
- /*
- * Body deleted.
- */
- return;
+ if ((bp->b_flags & B_READ) == 0) {
+ vwakeup(bp);
+ }
+
+ /* call optional completion function if requested */
+ if (bp->b_flags & B_CALL) {
+ bp->b_flags &= ~B_CALL;
+ (*bp->b_iodone)(bp);
+ splx(s);
+ return;
+ }
+
+/*
+ * For asynchronous completions, release the buffer now. brelse()
+ * checks for B_WANTED and does the wakeup there if necessary, so
+ * no wakeup is needed here in the async case.
+ */
+
+ if (bp->b_flags & B_ASYNC) {
+ brelse(bp);
+ } else {
+ bp->b_flags &= ~B_WANTED;
+ wakeup((caddr_t) bp);
+ }
+ splx(s);
}
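/*
 * Illustrative driver side (hypothetical xxintr, dk_activebuf, and
 * hw_status; nothing in this file): a disk interrupt handler finishes
 * the transfer and hands the buffer back through biodone:
 *
 *	struct buf *bp = dk_activebuf;	the completed request
 *	if (hw_status != 0) {
 *		bp->b_flags |= B_ERROR;
 *		bp->b_error = EIO;
 *	}
 *	bp->b_resid = 0;
 *	biodone(bp);	wakes biowait, or brelse's B_ASYNC buffers
 */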
int
count_lock_queue()
{
+ int count;
+ struct buf *bp;
+
+ count = 0;
+ for(bp = bufqueues[BQ_LOCKED].tqh_first;
+ bp != NULL;
+ bp = bp->b_freelist.tqe_next)
+ count++;
+ return(count);
+}
- /*
- * Body deleted.
- */
- return (0);
+#ifndef UPDATE_INTERVAL
+int vfs_update_interval = 30;
+#else
+int vfs_update_interval = UPDATE_INTERVAL;
+#endif
+
+void
+vfs_update() {
+ (void) spl0();
+ while(1) {
+ tsleep((caddr_t)&vfs_update_wakeup, PRIBIO, "update",
+ hz * vfs_update_interval);
+ vfs_update_wakeup = 0;
+ sync(curproc, NULL, NULL);
+ }
}
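/*
 * Illustrative only: another kernel path can force an early sync by
 * waking the daemon through the channel it sleeps on:
 *
 *	vfs_update_wakeup = 1;
 *	wakeup((caddr_t)&vfs_update_wakeup);
 */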
-#ifdef DIAGNOSTIC
/*
- * Print out statistics on the current allocation of the buffer pool.
- * Can be enabled to print out on every ``sync'' by setting "syncprt"
- * in vfs_syscalls.c using sysctl.
+ * These routines are not in the correct place (yet);
+ * they also work *ONLY* for kernel_pmap!!!
*/
void
-vfs_bufstats()
-{
- int s, i, j, count;
- register struct buf *bp;
- register struct bqueues *dp;
- int counts[MAXBSIZE/CLBYTES+1];
- static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };
-
- for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
- count = 0;
- for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
- counts[j] = 0;
- s = splbio();
- for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
- counts[bp->b_bufsize/CLBYTES]++;
- count++;
+vm_hold_load_pages(vm_offset_t froma, vm_offset_t toa) {
+ vm_offset_t pg;
+ vm_page_t p;
+ vm_offset_t from = round_page(froma);
+ vm_offset_t to = round_page(toa);
+
+ for(pg = from ; pg < to ; pg += PAGE_SIZE) {
+ vm_offset_t pa;
+
+ tryagain:
+ p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS);
+ if( !p) {
+ VM_WAIT;
+ goto tryagain;
}
- splx(s);
- printf("%s: total-%d", bname[i], count);
- for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
- if (counts[j] != 0)
- printf(", %d-%d", j * CLBYTES, counts[j]);
- printf("\n");
+
+ vm_page_wire(p);
+ pmap_enter(kernel_pmap, pg, VM_PAGE_TO_PHYS(p),
+ VM_PROT_READ|VM_PROT_WRITE, 1);
}
}
-#endif /* DIAGNOSTIC */
+
+void
+vm_hold_free_pages(vm_offset_t froma, vm_offset_t toa) {
+ vm_offset_t pg;
+ vm_page_t p;
+ vm_offset_t from = round_page(froma);
+ vm_offset_t to = round_page(toa);
+
+ for(pg = from ; pg < to ; pg += PAGE_SIZE) {
+ vm_offset_t pa;
+ pa = pmap_kextract(pg);
+ if( !pa) {
+ printf("No pa for va: %x\n", pg);
+ } else {
+ p = PHYS_TO_VM_PAGE( pa);
+ pmap_remove(kernel_pmap, pg, pg + PAGE_SIZE);
+ vm_page_free(p);
+ }
+ }
+}
+
+void
+bufstats()
+{
+}
+