author    dg <dg@FreeBSD.org>    1994-08-06 09:15:42 +0000
committer dg <dg@FreeBSD.org>    1994-08-06 09:15:42 +0000
commit    8b20309268255f6f6fbc908e5af8c9cde9a1a1c2 (patch)
tree      67aa5e71ca8f826b24875a0de9a602849d2b2573 /sys/amd64
parent    edb74877fe59af8008b23d2137302c9ad64c15d2 (diff)
Incorporated post-1.1.5 work from John Dyson. This includes performance
improvements via the new routines pmap_qenter/pmap_qremove and
pmap_kenter/pmap_kremove. These routines allow fast mapping of pages for
those architectures that have "normal" MMUs. Also included is a fix to the
pageout daemon to properly check a queue end condition.

Submitted by: John Dyson
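A minimal caller-side sketch of the new fast path, inferred only from the
code in the pmap.c diff below; the kmem_alloc_pageable() allocation and the
wrapper function itself are illustrative assumptions, not part of this commit:

/*
 * Sketch: map one physical page at a kernel VA, use it, unmap it.
 * Per the diff, pmap_kenter() writes *vtopte(va) = pa | PG_RW | PG_V | PG_W
 * and does NOT flush the TLB itself; pmap_kremove() clears the PTE and
 * calls tlbflush() on its own.
 */
static void
kenter_sketch(vm_page_t m)
{
	vm_offset_t va, pa;

	va = kmem_alloc_pageable(kernel_map, PAGE_SIZE);	/* assumed allocator */
	pa = VM_PAGE_TO_PHYS(m);

	pmap_kenter(va, pa);	/* install wired, valid, read/write PTE */
	tlbflush();		/* required before the mapping is visible */

	/* ... access the page through va ... */

	pmap_kremove(va);	/* clear the PTE; kremove flushes the TLB itself */
}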
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/machdep.c  |  21
-rw-r--r--  sys/amd64/amd64/pmap.c     | 101
2 files changed, 45 insertions(+), 77 deletions(-)
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 8ff9462..eb64a9c 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
- * $Id: machdep.c,v 1.45 1994/08/03 02:45:26 davidg Exp $
+ * $Id: machdep.c,v 1.46 1994/08/04 06:10:27 davidg Exp $
*/
#include "npx.h"
@@ -248,8 +248,8 @@ again:
freebufspace = bufpages * NBPG;
if (nswbuf == 0) {
nswbuf = (nbuf / 2) &~ 1; /* force even */
- if (nswbuf > 256)
- nswbuf = 256; /* sanity */
+ if (nswbuf > 64)
+ nswbuf = 64; /* sanity */
}
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
@@ -284,19 +284,21 @@ again:
panic("startup: table size inconsistency");
clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
- (nbuf*MAXBSIZE) + VM_PHYS_SIZE + maxbkva + pager_map_size, TRUE);
-
- io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva, FALSE);
+ (nbuf*MAXBSIZE) + (nswbuf*MAXPHYS) +
+ maxbkva + pager_map_size, TRUE);
+ buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
+ (nbuf*MAXBSIZE), TRUE);
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
- pager_map_size, TRUE);
+ (nswbuf*MAXPHYS) + pager_map_size, TRUE);
+ io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva, FALSE);
- buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
- (nbuf * MAXBSIZE), TRUE);
+#if 0
/*
* Allocate a submap for physio
*/
phys_map = kmem_suballoc(clean_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE);
+#endif
/*
* Finally, allocate mbuf pool. Since mclrefcnt is an off-size
@@ -334,6 +336,7 @@ again:
* Set up buffers, so they can be used to read disk labels.
*/
bufinit();
+ vm_pager_bufferinit();
/*
* Configure the system.
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c3b3fd1..c84b88f 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.26 1994/05/25 08:54:35 rgrimes Exp $
+ * $Id: pmap.c,v 1.27 1994/08/03 02:45:28 davidg Exp $
*/
/*
@@ -149,11 +149,17 @@ static inline void *vm_get_pmap();
static inline void vm_put_pmap();
inline void pmap_use_pt();
inline void pmap_unuse_pt();
-inline pt_entry_t * const pmap_pte();
+inline pt_entry_t * pmap_pte();
static inline pv_entry_t get_pv_entry();
void pmap_alloc_pv_entry();
void pmap_clear_modify();
-void i386_protection_init();
+static void i386_protection_init();
+
+void pmap_kenter __P((vm_offset_t, vm_offset_t));
+void pmap_kremove __P((vm_offset_t));
+void pmap_qenter __P((vm_offset_t, vm_page_t *, int));
+void pmap_qremove __P((vm_offset_t, int));
+
extern vm_offset_t clean_sva, clean_eva;
extern int cpu_class;
@@ -693,8 +699,7 @@ pmap_alloc_pv_entry()
/*
* let the kernel see it
*/
- pmap_enter(vm_map_pmap(kernel_map), pvva,
- VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT,1);
+ pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));
entry = (pv_entry_t) pvva;
/*
@@ -1335,6 +1340,8 @@ pmap_qremove(va, count)
/*
* add a wired page to the kva
+ * note that in order for the mapping to take effect -- you
+ * should do a tlbflush after doing the pmap_kenter...
*/
void
pmap_kenter(va, pa)
@@ -1342,73 +1349,23 @@ pmap_kenter(va, pa)
register vm_offset_t pa;
{
register pt_entry_t *pte;
- register pv_entry_t pv, npv;
- vm_offset_t opa;
- int s;
-
- /*
- * Enter on the PV list if part of our managed memory
- * Note that we raise IPL while manipulating pv_table
- * since pmap_enter can be called at interrupt time.
- */
-
pte = vtopte(va);
- opa = pmap_pte_pa(pte);
- /*
- * Mapping has not changed, must be protection or wiring change.
- */
- if (opa == pa) {
- /*
- * Wiring change, just update stats.
- * We don't worry about wiring PT pages as they remain
- * resident as long as there are valid mappings in them.
- * Hence, if a user page is wired, the PT page will be also.
- */
- if (!pmap_pte_w(pte)) {
- kernel_pmap->pm_stats.wired_count++;
- }
- goto validate;
- }
-
- if (opa) {
- pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
- }
-
- pv = pa_to_pvh(pa);
- s = splhigh();
- /*
- * No entries yet, use header as the first entry
- */
- if (pv->pv_pmap == NULL) {
- pv->pv_va = va;
- pv->pv_pmap = kernel_pmap;
- pv->pv_next = NULL;
- }
- /*
- * There is at least one other VA mapping this page.
- * Place this entry after the header.
- */
- else {
- npv = get_pv_entry();
- npv->pv_va = va;
- npv->pv_pmap = kernel_pmap;
- npv->pv_next = pv->pv_next;
- pv->pv_next = npv;
- }
- splx(s);
-
- /*
- * Increment counters
- */
- kernel_pmap->pm_stats.resident_count++;
+ *pte = (pt_entry_t) ( (int) (pa | PG_RW | PG_V | PG_W));
+}
-validate:
+/*
+ * remove a page from the kernel pagetables
+ */
+void
+pmap_kremove( va)
+ vm_offset_t va;
+{
+ register pt_entry_t *pte;
+ pte = vtopte(va);
- /*
- * Now validate mapping with desired protection/wiring.
- */
- *pte = (pt_entry_t) ( (int) (pa | PG_RW | PG_V | PG_W));
+ *pte = (pt_entry_t) 0;
+ tlbflush();
}
/*
@@ -1799,6 +1756,10 @@ pmap_testbit(pa, bit)
}
}
}
+ if( !pv->pv_pmap) {
+ printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va);
+ continue;
+ }
pte = pmap_pte(pv->pv_pmap, pv->pv_va);
if ((int) *pte & bit) {
splx(s);
@@ -1846,6 +1807,10 @@ pmap_changebit(pa, bit, setem)
continue;
}
+ if( !pv->pv_pmap) {
+ printf("Null pmap (cb) at va: 0x%lx\n", va);
+ continue;
+ }
pte = pmap_pte(pv->pv_pmap, va);
if (setem)
(int) npte = (int) *pte | bit;
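
Likewise, a hedged sketch of pmap_qenter/pmap_qremove usage, inferred solely
from the prototypes added in this diff (a base VA, an array of vm_page_t, and
a count); the contiguous I/O-window scenario and the wrapper are assumptions:

static void
qenter_sketch(vm_offset_t kva, vm_page_t *pages, int npages)
{
	/* map npages pages back-to-back starting at kva */
	pmap_qenter(kva, pages, npages);

	/* ... perform I/O through the contiguous kva window ... */

	/* tear the mappings down again */
	pmap_qremove(kva, npages);
}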