Diffstat (limited to 'sys/kern/vfs_bio.c')
-rw-r--r--  sys/kern/vfs_bio.c  100
1 file changed, 80 insertions, 20 deletions
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 3e6e6f9..a653395 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -18,7 +18,7 @@
* 5. Modifications may be freely made to this file if the above conditions
* are met.
*
- * $Id: vfs_bio.c,v 1.105 1996/11/17 02:10:48 dyson Exp $
+ * $Id: vfs_bio.c,v 1.106 1996/11/28 04:26:04 dyson Exp $
*/
/*
@@ -51,6 +51,8 @@
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
+#include <vm/lock.h>
+#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
@@ -92,7 +94,6 @@ int vfs_update_wakeup;
/*
* buffers base kva
*/
-caddr_t buffers_kva;
/*
* bogus page -- for I/O to/from partially complete buffers
@@ -134,7 +135,6 @@ bufinit()
for (i = 0; i < BUFFER_QUEUES; i++)
TAILQ_INIT(&bufqueues[i]);
- buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
/* finally, initialize each buffer header and stick on empty q */
for (i = 0; i < nbuf; i++) {
bp = &buf[i];
@@ -145,7 +145,6 @@ bufinit()
bp->b_wcred = NOCRED;
bp->b_qindex = QUEUE_EMPTY;
bp->b_vnbufs.le_next = NOLIST;
- bp->b_data = buffers_kva + i * MAXBSIZE;
TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
LIST_INSERT_HEAD(&invalhash, bp, b_hash);
}
@@ -177,6 +176,25 @@ bufinit()
}
/*
+ * Free the kva allocation for a buffer
+ * Must be called only at splbio or higher,
+ * as this is the only locking for buffer_map.
+ */
+static void
+bfreekva(struct buf * bp)
+{
+ if (bp->b_kvasize == 0)
+ return;
+
+ vm_map_delete(buffer_map,
+ (vm_offset_t) bp->b_kvabase,
+ (vm_offset_t) bp->b_kvabase + bp->b_kvasize);
+
+ bp->b_kvasize = 0;
+
+}
+
+/*
* remove the buffer from the appropriate free list
*/
void
@@ -562,6 +580,10 @@ brelse(struct buf * bp)
LIST_REMOVE(bp, b_hash);
LIST_INSERT_HEAD(&invalhash, bp, b_hash);
bp->b_dev = NODEV;
+ /*
+ * Get rid of the kva allocation *now*
+ */
+ bfreekva(bp);
if (needsbuffer) {
wakeup(&needsbuffer);
needsbuffer=0;
@@ -724,7 +746,7 @@ vfs_vmio_release(bp)
/*
* Check to see if a block is currently memory resident.
*/
-__inline struct buf *
+struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
struct buf *bp;
@@ -812,10 +834,11 @@ vfs_bio_awrite(struct buf * bp)
* Find a buffer header which is available for use.
*/
static struct buf *
-getnewbuf(int slpflag, int slptimeo, int doingvmio)
+getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
{
struct buf *bp;
int nbyteswritten = 0;
+ vm_offset_t addr;
start:
if (bufspace >= maxbufspace)
@@ -926,15 +949,43 @@ fillbuf:
bp->b_resid = 0;
bp->b_bcount = 0;
bp->b_npages = 0;
- bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
bp->b_dirtyoff = bp->b_dirtyend = 0;
bp->b_validoff = bp->b_validend = 0;
bp->b_usecount = 4;
- if (bufspace >= maxbufspace + nbyteswritten) {
+
+ maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;
+ bfreekva(bp);
+
+ /*
+ * See if we have buffer kva space
+ */
+ if (vm_map_findspace(buffer_map, 0, maxsize, &addr)) {
bp->b_flags |= B_INVAL;
brelse(bp);
goto trytofreespace;
}
+
+ /*
+	 * See if buffer space is already at or above our allocated maximum
+ */
+ if (bufspace >= (maxbufspace + nbyteswritten)) {
+ bp->b_flags |= B_INVAL;
+ brelse(bp);
+ goto trytofreespace;
+ }
+
+ /*
+ * create a map entry for the buffer -- in essence
+ * reserving the kva space.
+ */
+ vm_map_insert(buffer_map, NULL, 0,
+ addr, addr + maxsize,
+ VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
+
+ bp->b_data = (caddr_t) addr;
+ bp->b_kvabase = (caddr_t) addr;
+ bp->b_kvasize = maxsize;
+
return (bp);
}
@@ -1057,6 +1108,18 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
struct buf *bp;
int s;
struct bufhashhdr *bh;
+ int maxsize;
+
+ if (vp->v_mount) {
+ maxsize = vp->v_mount->mnt_stat.f_iosize;
+ /*
+ * This happens on mount points.
+ */
+ if (maxsize < size)
+ maxsize = size;
+ } else {
+ maxsize = size;
+ }
if (size > MAXBSIZE)
panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
@@ -1086,7 +1149,7 @@ loop:
*/
if (bp->b_bcount != size) {
- if (bp->b_flags & B_VMIO) {
+ if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
allocbuf(bp, size);
} else {
bp->b_flags |= B_NOCACHE;
@@ -1101,14 +1164,8 @@ loop:
return (bp);
} else {
vm_object_t obj;
- int doingvmio;
- if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
- doingvmio = 1;
- } else {
- doingvmio = 0;
- }
- if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
+ if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == 0) {
if (slpflag || slptimeo) {
splx(s);
return NULL;
@@ -1138,7 +1195,7 @@ loop:
bh = BUFHASH(vp, blkno);
LIST_INSERT_HEAD(bh, bp, b_hash);
- if (doingvmio) {
+ if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
if (vp->v_type != VREG && vp->v_type != VBLK)
@@ -1171,7 +1228,7 @@ geteblk(int size)
int s;
s = splbio();
- while ((bp = getnewbuf(0, 0, 0)) == 0);
+ while ((bp = getnewbuf(0, 0, size, MAXBSIZE)) == 0);
splx(s);
allocbuf(bp, size);
bp->b_flags |= B_INVAL;
@@ -1201,6 +1258,9 @@ allocbuf(struct buf * bp, int size)
if (!(bp->b_flags & B_BUSY))
panic("allocbuf: buffer not busy");
+ if (bp->b_kvasize < size)
+ panic("allocbuf: buffer too small");
+
if ((bp->b_flags & B_VMIO) == 0) {
caddr_t origbuf;
int origbufsize;
@@ -1227,7 +1287,7 @@ allocbuf(struct buf * bp, int size)
free(bp->b_data, M_BIOBUF);
bufspace -= bp->b_bufsize;
bufmallocspace -= bp->b_bufsize;
- bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
+ bp->b_data = bp->b_kvabase;
bp->b_bufsize = 0;
bp->b_bcount = 0;
bp->b_flags &= ~B_MALLOC;
@@ -1268,7 +1328,7 @@ allocbuf(struct buf * bp, int size)
if (bp->b_flags & B_MALLOC) {
origbuf = bp->b_data;
origbufsize = bp->b_bufsize;
- bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
+ bp->b_data = bp->b_kvabase;
bufspace -= bp->b_bufsize;
bufmallocspace -= bp->b_bufsize;
bp->b_bufsize = 0;
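
A minimal sketch of how the hunks above fit together: getnewbuf() no longer
hands every buffer header a fixed MAXBSIZE slice of a preallocated buffers_kva
region; instead it reserves just enough kva in buffer_map for the requested
size, and bfreekva() (called from brelse() and before each new reservation)
gives that range back.  The helper name bkvareserve() below is hypothetical
and the error handling is condensed; the vm_map calls, flags, and buf fields
are the ones appearing in the diff.

	/*
	 * Hypothetical helper condensing the allocation path added to
	 * getnewbuf() above.  bfreekva() is the function introduced
	 * earlier in this commit; buffer_map, MAP_NOFAULT, b_kvabase and
	 * b_kvasize are used exactly as in the diff.
	 */
	static int
	bkvareserve(struct buf *bp, int maxsize)
	{
		vm_offset_t addr;

		/* round the request up to a page boundary, as getnewbuf() does */
		maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

		/* drop any previous reservation held by this header */
		bfreekva(bp);

		/* find free space in buffer_map; on failure the caller
		 * releases the buffer and retries via trytofreespace */
		if (vm_map_findspace(buffer_map, 0, maxsize, &addr))
			return (1);

		/* reserve the range with a no-fault entry and no backing object */
		vm_map_insert(buffer_map, NULL, 0, addr, addr + maxsize,
		    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

		bp->b_data = bp->b_kvabase = (caddr_t) addr;
		bp->b_kvasize = maxsize;
		return (0);
	}

Release is the inverse: bfreekva() runs vm_map_delete() over
[b_kvabase, b_kvabase + b_kvasize) and zeroes b_kvasize, which brelse() now
does as soon as a buffer is invalidated.  Both sides rely on splbio to
serialize access to buffer_map, per the comment on bfreekva() in the diff.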