author     dg <dg@FreeBSD.org>    1994-03-24 23:12:48 +0000
committer  dg <dg@FreeBSD.org>    1994-03-24 23:12:48 +0000
commit     d777903222b11321f93bfb1f2767ba08437e3946 (patch)
tree       c53c28fcb5dfd361b564e4bc356d24fcfd42e934 /sys/amd64
parent     f4bbdc7da6e18ffed99f492f7865dcaf729ff629 (diff)
From John Dyson: performance improvements to the new bounce buffer
code.
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/trap.c        24
-rw-r--r--  sys/amd64/amd64/vm_machdep.c  29
-rw-r--r--  sys/amd64/include/pmap.h      17
3 files changed, 52 insertions, 18 deletions
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 090ae6a..dad751a 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
- * $Id: trap.c,v 1.18 1994/03/07 11:38:35 davidg Exp $
+ * $Id: trap.c,v 1.19 1994/03/14 21:54:03 davidg Exp $
*/
/*
@@ -88,8 +88,8 @@ extern int grow(struct proc *,int);
struct sysent sysent[];
int nsysent;
-extern short cpl;
-extern short netmask, ttymask, biomask;
+extern unsigned cpl;
+extern unsigned netmask, ttymask, biomask;
#define MAX_TRAP_MSG 27
char *trap_msg[] = {
@@ -290,6 +290,7 @@ skiptoswitch:
if (map != kernel_map) {
vm_offset_t pa;
vm_offset_t v = (vm_offset_t) vtopte(va);
+ vm_page_t ptepg;
/*
* Keep swapout from messing with us during this
@@ -318,12 +319,25 @@ skiptoswitch:
/* Fault the pte only if needed: */
*(volatile char *)v += 0;
- vm_page_hold(pmap_pte_vm_page(vm_map_pmap(map),v));
+ ptepg = (vm_page_t) pmap_pte_vm_page(vm_map_pmap(map), v);
+ vm_page_hold(ptepg);
/* Fault in the user page: */
rv = vm_fault(map, va, ftype, FALSE);
- vm_page_unhold(pmap_pte_vm_page(vm_map_pmap(map),v));
+ vm_page_unhold(ptepg);
+
+ /*
+ * page table pages don't need to be kept if they
+ * are not held
+ */
+ if( ptepg->hold_count == 0 && ptepg->wire_count == 0) {
+ pmap_page_protect( VM_PAGE_TO_PHYS(ptepg),
+ VM_PROT_NONE);
+ if( ptepg->flags & PG_CLEAN)
+ vm_page_free(ptepg);
+ }
+
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
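
The trap.c change above looks up the page-table page's vm_page once, holds it across the user-level vm_fault(), and afterwards reclaims page-table pages that end up with no holds or wirings and are still clean. A minimal user-space model of that hold/unhold/conditional-free pattern is sketched below; the struct fields echo the kernel's vm_page, but every name and value here is an illustration, not the FreeBSD VM code itself.

/* Stand-alone model of the hold-across-fault pattern; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define PG_CLEAN 0x0008            /* placeholder flag value for the sketch */

struct vm_page_model {
        int hold_count;            /* transient references (like vm_page_hold) */
        int wire_count;            /* long-term wirings */
        int flags;
};

static void page_hold(struct vm_page_model *m)   { m->hold_count++; }
static void page_unhold(struct vm_page_model *m) { m->hold_count--; }

/* Stand-in for vm_fault(): may touch other pages and sleep. */
static void fault_in_user_page(void) { /* ... */ }

/* Stand-in for pmap_page_protect(..., VM_PROT_NONE) + vm_page_free(). */
static void release_pt_page(struct vm_page_model *m)
{
        printf("page table page released\n");
        free(m);
}

int main(void)
{
        struct vm_page_model *ptepg = calloc(1, sizeof(*ptepg));
        if (ptepg == NULL)
                return 1;
        ptepg->flags = PG_CLEAN;

        page_hold(ptepg);          /* keep the PT page resident ... */
        fault_in_user_page();      /* ... while the user page is faulted in */
        page_unhold(ptepg);

        /* Only reclaim the PT page if nobody else references it. */
        if (ptepg->hold_count == 0 && ptepg->wire_count == 0) {
                /* Kernel code would pmap_page_protect() to VM_PROT_NONE here,
                 * then free the page only if it is still clean. */
                if (ptepg->flags & PG_CLEAN) {
                        release_pt_page(ptepg);
                        return 0;
                }
        }
        free(ptepg);
        return 0;
}

The hold keeps the page-table page from being reclaimed while vm_fault() may sleep; the post-fault check hands back page-table pages the process no longer needs.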
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 1667fce..fdff6e6 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -37,7 +37,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- * $Id: vm_machdep.c,v 1.13 1994/03/21 09:35:10 davidg Exp $
+ * $Id: vm_machdep.c,v 1.14 1994/03/23 09:15:06 davidg Exp $
*/
#include "npx.h"
@@ -57,10 +57,11 @@
caddr_t bouncememory;
vm_offset_t bouncepa, bouncepaend;
-int bouncepages;
+int bouncepages, bpwait;
vm_map_t bounce_map;
int bmwait, bmfreeing;
+#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
int bounceallocarraysize;
unsigned *bounceallocarray;
int bouncefree;
@@ -98,10 +99,11 @@ retry:
bounceallocarray[i] |= 1 << (bit - 1) ;
bouncefree -= count;
splx(s);
- return bouncepa + (i * 8 * sizeof(unsigned) + (bit - 1)) * NBPG;
+ return bouncepa + (i * BITS_IN_UNSIGNED + (bit - 1)) * NBPG;
}
}
}
+ bpwait = 1;
tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
goto retry;
}
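
The allocation path above keeps a bitmap of bounce pages in an array of unsigned words; the new BITS_IN_UNSIGNED macro replaces the repeated 8 * sizeof(unsigned) expressions, and a set bit at word i, position bit - 1 corresponds to physical address bouncepa + (i * BITS_IN_UNSIGNED + (bit - 1)) * NBPG. The stand-alone program below only exercises that index arithmetic; the base address and page index are invented for the example.

/* Stand-alone check of the bounce-page bitmap arithmetic; values are made up. */
#include <stdio.h>

#define NBPG             4096u                    /* i386 page size */
#define BITS_IN_UNSIGNED (8 * sizeof(unsigned))

int main(void)
{
        unsigned long bouncepa = 0x00400000ul;    /* hypothetical base PA */
        int index = 45;                           /* hypothetical page number */

        /* Split the page index into a word and a bit, as the free path does. */
        int allocindex = (int)(index / BITS_IN_UNSIGNED);
        int bit        = (int)(index % BITS_IN_UNSIGNED);

        /* Recombine them into a physical address, as the alloc path does
         * (the alloc code stores ffs()-style 1-based bit numbers, hence the
         * "bit - 1" there). */
        unsigned long pa = bouncepa +
            ((unsigned long)allocindex * BITS_IN_UNSIGNED + bit) * NBPG;

        printf("index %d -> word %d, bit %d, pa 0x%lx\n",
            index, allocindex, bit, pa);
        return 0;
}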
@@ -126,13 +128,16 @@ vm_bounce_page_free(pa, count)
if ((index < 0) || (index >= bouncepages))
panic("vm_bounce_page_free -- bad index\n");
- allocindex = index / (8 * sizeof(unsigned));
- bit = index % (8 * sizeof(unsigned));
+ allocindex = index / BITS_IN_UNSIGNED;
+ bit = index % BITS_IN_UNSIGNED;
bounceallocarray[allocindex] &= ~(1 << bit);
bouncefree += count;
- wakeup((caddr_t) &bounceallocarray);
+ if (bpwait) {
+ bpwait = 0;
+ wakeup((caddr_t) &bounceallocarray);
+ }
}
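
The free path above used to call wakeup() unconditionally; with this change an allocator first sets bpwait before it tsleep()s, and vm_bounce_page_free() only issues the wakeup when that flag is set, so the common case of freeing with no sleepers skips the wakeup scan. A rough user-space analogue of the pattern, using pthreads in place of tsleep()/wakeup(), is sketched below; the names mirror the kernel variables, but this is an illustration, not kernel code.

/* User-space analogue of the bpwait "only wake if someone waits" optimization. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;
static int bouncefree = 0;     /* pages available */
static int bpwait     = 0;     /* set only while an allocator is blocked */

static void alloc_page(void)
{
        pthread_mutex_lock(&lock);
        while (bouncefree == 0) {
                bpwait = 1;                    /* announce that a waiter exists */
                pthread_cond_wait(&cv, &lock); /* tsleep() stand-in */
        }
        bouncefree--;
        pthread_mutex_unlock(&lock);
}

static void free_page(void)
{
        pthread_mutex_lock(&lock);
        bouncefree++;
        if (bpwait) {                          /* skip the wakeup when idle */
                bpwait = 0;
                pthread_cond_signal(&cv);      /* wakeup() stand-in */
        }
        pthread_mutex_unlock(&lock);
}

static void *producer(void *arg)
{
        (void)arg;
        free_page();
        return NULL;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, producer, NULL);
        alloc_page();                          /* blocks until producer frees */
        pthread_join(t, NULL);
        printf("got a page, %d left\n", bouncefree);
        return 0;
}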
/*
@@ -189,7 +194,7 @@ vm_bounce_init()
if (bouncepages == 0)
return;
- bounceallocarraysize = (bouncepages + (8*sizeof(unsigned))-1) / (8 * sizeof(unsigned));
+ bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);
if (!bounceallocarray)
@@ -199,7 +204,7 @@ vm_bounce_init()
bounce_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, MAXBKVA * NBPG, FALSE);
- bouncepa = pmap_extract(kernel_pmap, (vm_offset_t) bouncememory);
+ bouncepa = pmap_kextract((vm_offset_t) bouncememory);
bouncepaend = bouncepa + bouncepages * NBPG;
bouncefree = bouncepages;
kvasfreecnt = 0;
@@ -238,7 +243,7 @@ vm_bounce_alloc(bp)
*/
va = vapstart;
for (i = 0; i < countvmpg; i++) {
- pa = pmap_extract(kernel_pmap, va);
+ pa = pmap_kextract(va);
if (pa >= SIXTEENMEG)
++dobounceflag;
va += NBPG;
@@ -255,7 +260,7 @@ vm_bounce_alloc(bp)
kva = vm_bounce_kva(countvmpg);
va = vapstart;
for (i = 0; i < countvmpg; i++) {
- pa = pmap_extract(kernel_pmap, va);
+ pa = pmap_kextract(va);
if (pa >= SIXTEENMEG) {
/*
* allocate a replacement page
@@ -338,7 +343,7 @@ vm_bounce_free(bp)
vm_offset_t copycount;
copycount = i386_round_page(bouncekva + 1) - bouncekva;
- mybouncepa = pmap_extract(kernel_pmap, i386_trunc_page(bouncekva));
+ mybouncepa = pmap_kextract(i386_trunc_page(bouncekva));
/*
* if this is a bounced pa, then process as one
@@ -552,7 +557,7 @@ kvtop(void *addr)
{
vm_offset_t va;
- va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
+ va = pmap_kextract((vm_offset_t)addr);
if (va == 0)
panic("kvtop: zero page frame");
return((int)va);
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index ee2823a..d015bc1 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -42,7 +42,7 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
- * $Id: pmap.h,v 1.10 1994/01/31 04:19:00 davidg Exp $
+ * $Id: pmap.h,v 1.11 1994/03/07 11:38:48 davidg Exp $
*/
#ifndef _PMAP_MACHINE_
@@ -181,6 +181,21 @@ extern int IdlePTD; /* physical address of "Idle" state directory */
#define avtophys(va) (((int) (*avtopte(va))&PG_FRAME) | ((int)(va) & PGOFSET))
/*
+ * Routine: pmap_kextract
+ * Function:
+ * Extract the physical page address associated with
+ * a kernel virtual address.
+ */
+static inline vm_offset_t
+pmap_kextract(va)
+ vm_offset_t va;
+{
+ vm_offset_t pa = *(int *)vtopte(va);
+ pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
+ return pa;
+}
+
+/*
* macros to generate page directory/table indicies
*/
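
The new pmap_kextract() inline reads the kernel PTE for a virtual address via vtopte() and combines the PTE's frame bits with the offset bits of the address: pa = (pte & PG_FRAME) | (va & ~PG_FRAME). Replacing the pmap_extract() calls in kvtop() and the bounce-buffer paths with this inline avoids a call into the pmap module for the kernel-VA case, which appears to be where much of the commit's performance gain comes from. The stand-alone program below only demonstrates the masking with the i386 constants; the PTE value and virtual address are invented for the example.

/* Stand-alone illustration of the pmap_kextract() masking; values are invented. */
#include <stdio.h>

typedef unsigned long vm_offset_t;

#define PG_FRAME 0xfffff000ul      /* i386 page frame mask */

/* Model of the inline: combine the PTE's frame with the VA's page offset. */
static vm_offset_t
kextract_model(vm_offset_t pte, vm_offset_t va)
{
        return (pte & PG_FRAME) | (va & ~PG_FRAME);
}

int main(void)
{
        vm_offset_t va  = 0xfe012345ul;   /* hypothetical kernel VA */
        vm_offset_t pte = 0x00345067ul;   /* hypothetical PTE: frame | flag bits */

        /* frame 0x00345000 plus offset 0x345 gives PA 0x00345345 */
        printf("va 0x%lx -> pa 0x%lx\n", va, kextract_model(pte, va));
        return 0;
}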