commit    81dc370a9d5a2ec97a413123079d94be91110fbe (patch)
tree      c701e7ef61340402192c9838085c1028a0a3b07c
parent    024b33e58ce38e9533965d82b09863cc5f9aefd7 (diff)
author    dg <dg@FreeBSD.org>    1994-08-07 13:10:43 +0000
committer dg <dg@FreeBSD.org>    1994-08-07 13:10:43 +0000
Provide support for the upcoming merged VM/buffer cache, and fix a few bugs
that do not appear to have manifested themselves (yet).

Submitted by:   John Dyson
-rw-r--r--  sys/amd64/amd64/pmap.c     3
-rw-r--r--  sys/i386/i386/pmap.c       3
-rw-r--r--  sys/kern/kern_physio.c     6
-rw-r--r--  sys/vm/swap_pager.c       21
-rw-r--r--  sys/vm/vm_kern.c           6
-rw-r--r--  sys/vm/vm_object.c        15
-rw-r--r--  sys/vm/vm_page.c          22
-rw-r--r--  sys/vm/vm_pager.c         10
-rw-r--r--  sys/vm/vnode_pager.c      19

9 files changed, 51 insertions, 54 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index b772c30..13b40b2 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.28 1994/08/06 09:15:15 davidg Exp $
+ * $Id: pmap.c,v 1.29 1994/08/06 10:25:36 davidg Exp $
*/
/*
@@ -695,6 +695,7 @@ pmap_alloc_pv_entry()
* let the kernel see it
*/
pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));
+ tlbflush();
entry = (pv_entry_t) pvva;
/*
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index b772c30..13b40b2 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.28 1994/08/06 09:15:15 davidg Exp $
+ * $Id: pmap.c,v 1.29 1994/08/06 10:25:36 davidg Exp $
*/
/*
@@ -695,6 +695,7 @@ pmap_alloc_pv_entry()
* let the kernel see it
*/
pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));
+ tlbflush();
entry = (pv_entry_t) pvva;
/*
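
pmap_kenter() writes the new kernel page-table entry but does not invalidate any
translation the CPU may already have cached for pvva, which is why both pmap.c
hunks follow it with tlbflush(). A minimal standalone sketch of that hazard,
using a toy direct-mapped translation cache; every name and value below is an
illustrative stand-in, not the kernel's code:

/*
 * Toy model of the stale-translation hazard: the "page table" is the
 * authoritative mapping, the "TLB" is a cache that keeps answering
 * from old data until it is flushed.  Illustrative only.
 */
#include <stdio.h>

#define NSLOTS  4

struct tlb_entry { unsigned va, pa; int valid; };

static unsigned page_table[NSLOTS];     /* authoritative va -> pa map */
static struct tlb_entry tlb[NSLOTS];    /* cached translations        */

static void
enter_mapping(unsigned va, unsigned pa) /* pmap_kenter() analogue     */
{
        page_table[va % NSLOTS] = pa;   /* updates the table only...  */
}                                       /* ...never the cached copy   */

static void
tlb_flush(void)                         /* tlbflush() analogue        */
{
        for (int i = 0; i < NSLOTS; i++)
                tlb[i].valid = 0;
}

static unsigned
translate(unsigned va)
{
        int slot = va % NSLOTS;

        if (!tlb[slot].valid || tlb[slot].va != va) {   /* miss: refill */
                tlb[slot].va = va;
                tlb[slot].pa = page_table[slot];
                tlb[slot].valid = 1;
        }
        return (tlb[slot].pa);
}

int
main(void)
{
        (void)translate(2);                     /* prime the cache (pa 0)   */
        enter_mapping(2, 0x1234);               /* install the real mapping */
        printf("without flush: 0x%x\n", translate(2));  /* stale: 0x0       */
        tlb_flush();                            /* what the new call fixes  */
        printf("with flush:    0x%x\n", translate(2));  /* fresh: 0x1234    */
        return (0);
}
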
diff --git a/sys/kern/kern_physio.c b/sys/kern/kern_physio.c
index 487e38e..0fcb0bf 100644
--- a/sys/kern/kern_physio.c
+++ b/sys/kern/kern_physio.c
@@ -16,7 +16,7 @@
* 4. Modifications may be freely made to this file if the above conditions
* are met.
*
- * $Id: kern_physio.c,v 1.3 1994/08/02 07:42:05 davidg Exp $
+ * $Id: kern_physio.c,v 1.4 1994/08/06 09:15:28 davidg Exp $
*/
#include <sys/param.h>
@@ -27,6 +27,7 @@
#include <vm/vm.h>
static void physwakeup();
+u_int minphys(struct buf *bp);
int
physio(strategy, bp, dev, rw, minp, uio)
@@ -78,6 +79,9 @@ physio(strategy, bp, dev, rw, minp, uio)
caddr_t adr;
bp->b_bcount = uio->uio_iov[i].iov_len;
+ bp->b_bcount = minp( bp);
+ if( minp != minphys)
+ bp->b_bcount = minphys( bp);
bp->b_bufsize = bp->b_bcount;
bp->b_flags = B_BUSY | B_PHYS | B_CALL | bufflags;
bp->b_iodone = physwakeup;
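
The kern_physio.c hunk clamps each transfer twice: first with the caller-supplied
minp() routine, and then with the generic minphys() whenever the caller passed its
own routine, so a driver-specific limit can never exceed the system-wide one. A
standalone sketch of that double clamp, with illustrative stand-ins for struct buf
and the size limits (none of these names are the kernel's):

/*
 * Toy model of the double clamp: the driver's own limit is applied
 * first, then the generic limit unless the driver routine *is* the
 * generic one.  Structure, limits, and names are stand-ins.
 */
#include <stdio.h>

#define GENERIC_MAX     (64 * 1024)     /* stand-in for the minphys() cap */
#define DRIVER_MAX      (128 * 1024)    /* a driver willing to do more    */

struct xfer { unsigned long bcount; };  /* stand-in for struct buf        */

static unsigned long
clamp_generic(struct xfer *x)           /* minphys() analogue             */
{
        if (x->bcount > GENERIC_MAX)
                x->bcount = GENERIC_MAX;
        return (x->bcount);
}

static unsigned long
clamp_driver(struct xfer *x)            /* a driver's minp routine        */
{
        if (x->bcount > DRIVER_MAX)
                x->bcount = DRIVER_MAX;
        return (x->bcount);
}

int
main(void)
{
        struct xfer x = { 1024 * 1024 };        /* 1MB request            */
        unsigned long (*minp)(struct xfer *) = clamp_driver;

        x.bcount = minp(&x);                    /* driver-specific limit  */
        if (minp != clamp_generic)              /* generic limit on top,  */
                x.bcount = clamp_generic(&x);   /* as in the hunk above   */

        printf("clamped transfer: %lu bytes\n", x.bcount);
        return (0);
}
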
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 189929c..ad3d3db 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.4 1994/08/02 07:55:13 davidg Exp $
+ * $Id: swap_pager.c,v 1.5 1994/08/06 09:15:36 davidg Exp $
*/
/*
@@ -1366,17 +1366,6 @@ retrygetspace:
* get a swap pager clean data structure, block until we get it
*/
if (swap_pager_free.tqh_first == NULL) {
-/*
- if (flags & B_ASYNC) {
- for(i=0;i<count;i++) {
- rtvals[i] = VM_PAGER_AGAIN;
- if( swb[i])
- --swb[i]->swb_locked;
- }
- return VM_PAGER_AGAIN;
- }
-*/
-
s = splbio();
if( curproc == pageproc)
(void) swap_pager_clean();
@@ -1442,9 +1431,11 @@ retrygetspace:
bp->b_flags = B_BUSY;
bp->b_proc = &proc0; /* XXX (but without B_PHYS set this is ok) */
bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
- crhold(bp->b_rcred);
- crhold(bp->b_wcred);
- bp->b_un.b_addr = (caddr_t) kva;
+ if( bp->b_rcred != NOCRED)
+ crhold(bp->b_rcred);
+ if( bp->b_wcred != NOCRED)
+ crhold(bp->b_wcred);
+ bp->b_data = (caddr_t) kva;
bp->b_blkno = reqaddr[0];
bgetvp( swapdev_vp, bp);
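
The swap_pager.c hunk now takes a credential reference only when b_rcred or
b_wcred points at a real credential, since crhold() must not be applied to the
NOCRED sentinel; it also switches from b_un.b_addr to the b_data name used by the
upcoming buffer-cache work. A standalone sketch of the guarded hold, with an
illustrative sentinel and refcount in place of the kernel's NOCRED and crhold():

/*
 * Toy model of the guarded reference: a hold is taken only when the
 * pointer names a real credential, never for the sentinel.  The
 * sentinel and struct here are stand-ins, not the kernel's NOCRED.
 */
#include <stdio.h>

struct cred { int refcnt; };

static struct cred nocred_sentinel;     /* stand-in for NOCRED         */
#define NOCRED_MODEL    (&nocred_sentinel)

static void
hold_cred(struct cred *cr)              /* crhold() analogue           */
{
        cr->refcnt++;
}

int
main(void)
{
        struct cred proc_cred = { 1 };          /* proc0's credential     */
        struct cred *rcred = &proc_cred;        /* real credential        */
        struct cred *wcred = NOCRED_MODEL;      /* "no credential" marker */

        if (rcred != NOCRED_MODEL)              /* hold real creds only,  */
                hold_cred(rcred);               /* as in the hunk above   */
        if (wcred != NOCRED_MODEL)
                hold_cred(wcred);

        printf("rcred refs: %d, sentinel refs: %d\n",
            proc_cred.refcnt, nocred_sentinel.refcnt);
        return (0);
}
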
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 7c8c619..202ca63 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_kern.c,v 1.3 1994/08/02 07:55:22 davidg Exp $
+ * $Id: vm_kern.c,v 1.4 1994/08/06 09:15:38 davidg Exp $
*/
/*
@@ -371,10 +371,6 @@ kmem_malloc(map, size, canwait)
vm_object_lock(kmem_object);
m = vm_page_lookup(kmem_object, offset + i);
vm_object_unlock(kmem_object);
-/*
- pmap_enter(map->pmap, addr + i, VM_PAGE_TO_PHYS(m),
- VM_PROT_DEFAULT, TRUE);
-*/
pmap_kenter( addr + i, VM_PAGE_TO_PHYS(m));
}
pmap_update();
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 8b8e606..57b66ec 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id$
+ * $Id: vm_object.c,v 1.3 1994/08/02 07:55:29 davidg Exp $
*/
/*
@@ -353,7 +353,7 @@ vm_object_terminate(object)
VM_PAGE_CHECK(p);
vm_page_lock_queues();
- s = splimp();
+ s = splhigh();
if (p->flags & PG_ACTIVE) {
TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
p->flags &= ~PG_ACTIVE;
@@ -613,7 +613,6 @@ again:
vm_page_deactivate(p);
vm_page_unlock_queues();
}
- p->flags &= ~PG_BUSY;
PAGE_WAKEUP(p);
goto again;
}
@@ -713,6 +712,7 @@ vm_object_pmap_remove(object, start, end)
register vm_offset_t end;
{
register vm_page_t p;
+ int s;
if (object == NULL)
return;
@@ -721,11 +721,14 @@ vm_object_pmap_remove(object, start, end)
again:
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
if ((start <= p->offset) && (p->offset < end)) {
+ s = splhigh();
if (p->flags & PG_BUSY) {
p->flags |= PG_WANTED;
tsleep((caddr_t) p, PVM, "vmopmr", 0);
+ splx(s);
goto again;
}
+ splx(s);
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
if ((p->flags & PG_CLEAN) == 0)
p->flags |= PG_LAUNDRY;
@@ -1456,11 +1459,14 @@ again:
for (p = object->memq.tqh_first; (p != NULL && size > 0); p = next) {
next = p->listq.tqe_next;
if ((start <= p->offset) && (p->offset < end)) {
+ s=splhigh();
if (p->flags & PG_BUSY) {
p->flags |= PG_WANTED;
tsleep((caddr_t) p, PVM, "vmopar", 0);
+ splx(s);
goto again;
}
+ splx(s);
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
vm_page_lock_queues();
vm_page_free(p);
@@ -1471,11 +1477,14 @@ again:
} else {
while (size > 0) {
while (p = vm_page_lookup(object, start)) {
+ s = splhigh();
if (p->flags & PG_BUSY) {
p->flags |= PG_WANTED;
tsleep((caddr_t) p, PVM, "vmopar", 0);
+ splx(s);
goto again;
}
+ splx(s);
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
vm_page_lock_queues();
vm_page_free(p);
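
Each of the vm_object.c hunks raises the interrupt priority level around the
PG_BUSY test so that an interrupt-time PAGE_WAKEUP() cannot slip in between the
test and the tsleep() (a lost wakeup); once woken, the code drops the spl and
rescans from the top. A standalone sketch of that check/sleep/rescan pattern,
with a mutex and condition variable standing in for splhigh()/splx() and
tsleep()/wakeup() (illustrative only, not the kernel's primitives):

/*
 * Toy model of the protected busy-wait: the BUSY test and the sleep
 * happen under the same protection so the wakeup cannot be lost, and
 * the scan restarts after waking.  Nothing here is kernel code.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int busy = 1;                    /* stand-in for PG_BUSY          */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;

static void *
io_done(void *arg)                      /* simulated I/O completion      */
{
        (void)arg;
        sleep(1);
        pthread_mutex_lock(&lock);
        busy = 0;                       /* clear the busy flag...        */
        pthread_cond_broadcast(&wake);  /* ...and deliver the wakeup     */
        pthread_mutex_unlock(&lock);
        return (NULL);
}

int
main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, io_done, NULL);
again:
        pthread_mutex_lock(&lock);              /* s = splhigh();         */
        if (busy) {
                pthread_cond_wait(&wake, &lock);/* tsleep(...) analogue   */
                pthread_mutex_unlock(&lock);    /* splx(s);               */
                goto again;                     /* rescan from the top    */
        }
        pthread_mutex_unlock(&lock);            /* splx(s);               */

        printf("page no longer busy; safe to protect/free it\n");
        pthread_join(t, NULL);
        return (0);
}
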
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 4304100..6d75618 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.2 1994/05/25 09:20:05 rgrimes Exp $
+ * $Id: vm_page.c,v 1.3 1994/08/01 11:25:44 davidg Exp $
*/
/*
@@ -387,7 +387,7 @@ void vm_page_insert(mem, object, offset)
*/
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
- s = splimp();
+ s = splhigh();
simple_lock(&bucket_lock);
TAILQ_INSERT_TAIL(bucket, mem, hashq);
simple_unlock(&bucket_lock);
@@ -434,7 +434,7 @@ void vm_page_remove(mem)
*/
bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
- s = splimp();
+ s = splhigh();
simple_lock(&bucket_lock);
TAILQ_REMOVE(bucket, mem, hashq);
simple_unlock(&bucket_lock);
@@ -479,7 +479,7 @@ vm_page_t vm_page_lookup(object, offset)
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
- s = splimp();
+ s = splhigh();
simple_lock(&bucket_lock);
for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
VM_PAGE_CHECK(mem);
@@ -534,7 +534,7 @@ vm_page_alloc(object, offset)
register vm_page_t mem;
int s;
- s = splimp();
+ s = splhigh();
simple_lock(&vm_page_queue_free_lock);
if ( object != kernel_object &&
object != kmem_object &&
@@ -596,7 +596,7 @@ void vm_page_free(mem)
register vm_page_t mem;
{
int s;
- s = splimp();
+ s = splhigh();
vm_page_remove(mem);
if (mem->flags & PG_ACTIVE) {
TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
@@ -667,7 +667,7 @@ void vm_page_wire(mem)
VM_PAGE_CHECK(mem);
if (mem->wire_count == 0) {
- s = splimp();
+ s = splhigh();
if (mem->flags & PG_ACTIVE) {
TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
cnt.v_active_count--;
@@ -698,7 +698,7 @@ void vm_page_unwire(mem)
int s;
VM_PAGE_CHECK(mem);
- s = splimp();
+ s = splhigh();
if( mem->wire_count)
mem->wire_count--;
@@ -738,7 +738,7 @@ vm_page_deactivate(m)
* Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
*/
- spl = splimp();
+ spl = splhigh();
if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
m->hold_count == 0) {
@@ -781,7 +781,7 @@ void vm_page_deactivate(m)
int s;
VM_PAGE_CHECK(m);
- s = splimp();
+ s = splhigh();
/*
* Only move active pages -- ignore locked or already
* inactive ones.
@@ -824,7 +824,7 @@ void vm_page_activate(m)
int s;
VM_PAGE_CHECK(m);
- s = splimp();
+ s = splhigh();
if (m->flags & PG_INACTIVE) {
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
cnt.v_inactive_count--;
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index af6b3ea..1d39709 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pager.c,v 1.3 1994/08/02 07:55:35 davidg Exp $
+ * $Id: vm_pager.c,v 1.4 1994/08/06 09:15:40 davidg Exp $
*/
/*
@@ -280,8 +280,8 @@ vm_pager_map_page(m)
vm_offset_t kva;
kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
- pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
- VM_PROT_DEFAULT, TRUE);
+ pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
+ pmap_update();
return(kva);
}
@@ -289,6 +289,7 @@ void
vm_pager_unmap_page(kva)
vm_offset_t kva;
{
+ pmap_kremove(kva);
kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}
@@ -298,7 +299,7 @@ vm_pager_atop(kva)
{
vm_offset_t pa;
- pa = pmap_extract(vm_map_pmap(pager_map), kva);
+ pa = pmap_kextract( kva);
if (pa == 0)
panic("vm_pager_atop");
return (PHYS_TO_VM_PAGE(pa));
@@ -353,7 +354,6 @@ getpbuf() {
s = splbio();
/* get a bp from the swap buffer header pool */
-tryagain:
while ((bp = bswlist.tqh_first) == NULL) {
bswneeded = 1;
tsleep((caddr_t)&bswneeded, PVM, "wswbuf", 0);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index aad8ebd..e6e8f88 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -37,7 +37,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.4 1994/08/06 09:15:42 davidg Exp $
+ * $Id: vnode_pager.c,v 1.5 1994/08/06 10:25:50 davidg Exp $
*/
/*
@@ -520,7 +520,6 @@ void
vnode_pager_iodone(bp)
struct buf *bp;
{
- int s = splbio();
bp->b_flags |= B_DONE;
wakeup((caddr_t) bp);
if( bp->b_flags & B_ASYNC) {
@@ -536,6 +535,10 @@ vnode_pager_iodone(bp)
bp->b_bufsize - bp->b_bcount);
npages = (bp->b_bufsize + PAGE_SIZE - 1) / PAGE_SIZE;
+/*
+ printf("bcount: %d, bufsize: %d, npages: %d\n",
+ bp->b_bcount, bp->b_bufsize, npages);
+*/
for( i = 0; i < npages; i++) {
m = PHYS_TO_VM_PAGE(pmap_kextract(paddr + i * PAGE_SIZE));
obj = m->object;
@@ -547,6 +550,7 @@ vnode_pager_iodone(bp)
panic("vnode_pager_iodone: page is gone!!!");
}
}
+ pmap_qremove( paddr, npages);
if( obj) {
--obj->paging_in_progress;
if( obj->paging_in_progress == 0)
@@ -555,11 +559,8 @@ vnode_pager_iodone(bp)
panic("vnode_pager_iodone: object is gone???");
}
HOLDRELE(bp->b_vp);
- splx(s);
relpbuf(bp);
- return;
}
- splx(s);
}
/*
@@ -575,7 +576,6 @@ vnode_pager_input_smlfs(vnp, m)
vm_offset_t paging_offset;
struct vnode *dp, *vp;
struct buf *bp;
- vm_offset_t mapsize;
vm_offset_t foff;
vm_offset_t kva;
int fileaddr;
@@ -768,7 +768,6 @@ vnode_pager_input(vnp, m, count, reqpage)
vm_object_t object;
vm_offset_t paging_offset;
struct vnode *dp, *vp;
- vm_offset_t mapsize;
int bsize;
int first, last;
@@ -797,7 +796,6 @@ vnode_pager_input(vnp, m, count, reqpage)
* originally, we did not check for an error return value -- assuming
* an fs always has a bmap entry point -- that assumption is wrong!!!
*/
- mapsize = 0;
foff = m[reqpage]->offset + paging_offset;
/*
@@ -890,7 +888,7 @@ vnode_pager_input(vnp, m, count, reqpage)
* unmap the page and free the kva
*/
pmap_qremove( kva, 1);
- kmem_free_wakeup(pager_map, kva, mapsize);
+ kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
/*
* release the buffer back to the block subsystem
@@ -1087,7 +1085,6 @@ vnode_pager_input(vnp, m, count, reqpage)
finishup:
for (i = 0; i < count; i++) {
- pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
m[i]->flags |= PG_CLEAN;
m[i]->flags &= ~PG_LAUNDRY;
if (i != reqpage) {
@@ -1189,7 +1186,6 @@ vnode_pager_output_smlfs(vnp, m)
vm_offset_t paging_offset;
struct vnode *dp, *vp;
struct buf *bp;
- vm_offset_t mapsize;
vm_offset_t foff;
vm_offset_t kva;
int fileaddr;
@@ -1286,7 +1282,6 @@ vnode_pager_output(vnp, m, count, rtvals)
vm_offset_t paging_offset;
struct vnode *dp, *vp;
struct buf *bp;
- vm_offset_t mapsize;
vm_offset_t reqaddr;
int bsize;
int s;