-rw-r--r--   sys/amd64/amd64/pmap.c   161
-rw-r--r--   sys/i386/i386/pmap.c     161
-rw-r--r--   sys/vm/pmap.h              3
-rw-r--r--   sys/vm/vm_map.c            7
4 files changed, 256 insertions, 76 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 81f75a5..89d2cd7 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.153 1997/08/05 01:02:14 dyson Exp $
+ * $Id: pmap.c,v 1.154 1997/08/05 01:32:05 dyson Exp $
*/
/*
@@ -89,7 +89,6 @@
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
-#include <vm/vm_zone.h>
#include <sys/user.h>
@@ -119,6 +118,8 @@
#define PTPHINT
+static void init_pv_entries __P((int));
+
/*
* Get PDEs and PTEs for user/kernel address space
*/
@@ -153,9 +154,8 @@ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static vm_offset_t vm_first_phys;
-int pgeflag; /* PG_G or-in */
-int pseflag; /* PG_PS or-in */
-int pv_npg;
+static int pgeflag; /* PG_G or-in */
+static int pseflag; /* PG_PS or-in */
static int nkpt;
static vm_page_t nkpg;
@@ -163,14 +163,15 @@ vm_offset_t kernel_vm_end;
extern vm_offset_t clean_sva, clean_eva;
+#define PV_FREELIST_MIN ((PAGE_SIZE / sizeof (struct pv_entry)) / 2)
+
/*
* Data for the pv entry allocation mechanism
*/
-vm_zone_t pvzone;
-struct vm_zone pvzone_store;
-struct vm_object pvzone_obj;
-#define NPVINIT 8192
-struct pv_entry pvinit[NPVINIT];
+static int pv_freelistcnt;
+TAILQ_HEAD (,pv_entry) pv_freelist = {0};
+static vm_offset_t pvva;
+static int npvvapg;
/*
* All those kernel PT submaps that BSD is so fond of
@@ -190,6 +191,7 @@ static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
static unsigned * get_ptbase __P((pmap_t pmap));
static pv_entry_t get_pv_entry __P((void));
static void i386_protection_init __P((void));
+static void pmap_alloc_pv_entry __P((void));
static void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
static PMAP_INLINE int pmap_is_managed __P((vm_offset_t pa));
@@ -473,50 +475,43 @@ pmap_init(phys_start, phys_end)
{
vm_offset_t addr;
vm_size_t s;
- int i;
+ int i, npg;
/*
* calculate the number of pv_entries needed
*/
vm_first_phys = phys_avail[0];
for (i = 0; phys_avail[i + 1]; i += 2);
- pv_npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
+ npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
/*
* Allocate memory for random pmap data structures. Includes the
* pv_head_table.
*/
- s = (vm_size_t) (sizeof(pv_table_t) * pv_npg);
+ s = (vm_size_t) (sizeof(pv_table_t) * npg);
s = round_page(s);
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
pv_table = (pv_table_t *) addr;
- for(i = 0; i < pv_npg; i++) {
+ for(i = 0; i < npg; i++) {
vm_offset_t pa;
TAILQ_INIT(&pv_table[i].pv_list);
pv_table[i].pv_list_count = 0;
pa = vm_first_phys + i * PAGE_SIZE;
pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
}
+ TAILQ_INIT(&pv_freelist);
/*
* init the pv free list
*/
- pvzone = &pvzone_store;
- _zbootinit(pvzone, "PV entries", sizeof(pvinit[0]), pvinit, NPVINIT);
-
+ init_pv_entries(npg);
/*
* Now it is safe to enable pv_table recording.
*/
pmap_initialized = TRUE;
}
-void
-pmap_init2() {
- _zinit(pvzone, &pvzone_obj, NULL, 0,
- PMAP_SHPGPERPROC * maxproc + pv_npg, ZONE_INTERRUPT, 4);
-}
-
/*
* Used to map a range of physical addresses into kernel
* virtual address space.
@@ -665,9 +660,9 @@ pmap_extract(pmap, va)
vm_offset_t rtval;
vm_offset_t pdirindex;
pdirindex = va >> PDRSHIFT;
- if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
+ if (pmap) {
unsigned *pte;
- if ((rtval & PG_PS) != 0) {
+ if (((rtval = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
rtval &= ~(NBPDR - 1);
rtval |= va & (NBPDR - 1);
return rtval;
@@ -1389,9 +1384,7 @@ retry:
pdstack[pdstackptr] = (vm_offset_t) pmap->pm_pdir;
++pdstackptr;
} else {
- int pdstmp = pdstackptr - 1;
- kmem_free(kernel_map, pdstack[pdstmp], PAGE_SIZE);
- pdstack[pdstmp] = (vm_offset_t) pmap->pm_pdir;
+ kmem_free(kernel_map, (vm_offset_t) pmap->pm_pdir, PAGE_SIZE);
}
pmap->pm_pdir = 0;
}
@@ -1491,11 +1484,12 @@ pmap_reference(pmap)
/*
* free the pv_entry back to the free list
*/
-static inline void
+static PMAP_INLINE void
free_pv_entry(pv)
pv_entry_t pv;
{
- zfreei(pvzone, pv);
+ ++pv_freelistcnt;
+ TAILQ_INSERT_HEAD(&pv_freelist, pv, pv_list);
}
/*
@@ -1504,10 +1498,108 @@ free_pv_entry(pv)
* the memory allocation is performed bypassing the malloc code
* because of the possibility of allocations at interrupt time.
*/
-static inline pv_entry_t
-get_pv_entry(void)
+static pv_entry_t
+get_pv_entry()
+{
+ pv_entry_t tmp;
+
+ /*
+ * get more pv_entry pages if needed
+ */
+ if (pv_freelistcnt < PV_FREELIST_MIN || !TAILQ_FIRST(&pv_freelist)) {
+ pmap_alloc_pv_entry();
+ }
+ /*
+ * get a pv_entry off of the free list
+ */
+ --pv_freelistcnt;
+ tmp = TAILQ_FIRST(&pv_freelist);
+ TAILQ_REMOVE(&pv_freelist, tmp, pv_list);
+ return tmp;
+}
+
+/*
+ * This *strange* allocation routine eliminates the possibility of a malloc
+ * failure (*FATAL*) for a pv_entry_t data structure.
+ * also -- this code is MUCH MUCH faster than the malloc equiv...
+ * We really need to do the slab allocator thingie here.
+ */
+static void
+pmap_alloc_pv_entry()
+{
+ /*
+ * do we have any pre-allocated map-pages left?
+ */
+ if (npvvapg) {
+ vm_page_t m;
+
+ /*
+ * allocate a physical page out of the vm system
+ */
+ m = vm_page_alloc(kernel_object,
+ OFF_TO_IDX(pvva - vm_map_min(kernel_map)),
+ VM_ALLOC_INTERRUPT);
+ if (m) {
+ int newentries;
+ int i;
+ pv_entry_t entry;
+
+ newentries = (PAGE_SIZE / sizeof(struct pv_entry));
+ /*
+ * wire the page
+ */
+ vm_page_wire(m);
+ m->flags &= ~PG_BUSY;
+ /*
+ * let the kernel see it
+ */
+ pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));
+
+ entry = (pv_entry_t) pvva;
+ /*
+ * update the allocation pointers
+ */
+ pvva += PAGE_SIZE;
+ --npvvapg;
+
+ /*
+ * free the entries into the free list
+ */
+ for (i = 0; i < newentries; i++) {
+ free_pv_entry(entry);
+ entry++;
+ }
+ }
+ }
+ if (!TAILQ_FIRST(&pv_freelist))
+ panic("get_pv_entry: cannot get a pv_entry_t");
+}
+
+/*
+ * init the pv_entry allocation system
+ */
+void
+init_pv_entries(npg)
+ int npg;
{
- return zalloci(pvzone);
+ /*
+ * Allocate enough kvm space for one entry per page, and
+ * each process having PMAP_SHPGPERPROC pages shared with other
+ * processes. (The system can panic if this is too small, but also
+ * can fail on bootup if this is too big.)
+ * XXX The pv management mechanism needs to be fixed so that systems
+ * with lots of shared mappings amongst lots of processes will still
+ * work. The fix will likely be that once we run out of pv entries
+ * we will free other entries (and the associated mappings), with
+ * some policy yet to be determined.
+ */
+ npvvapg = ((PMAP_SHPGPERPROC * maxproc + npg) * sizeof(struct pv_entry)
+ + PAGE_SIZE - 1) / PAGE_SIZE;
+ pvva = kmem_alloc_pageable(kernel_map, npvvapg * PAGE_SIZE);
+ /*
+ * get the first batch of entries
+ */
+ pmap_alloc_pv_entry();
}
/*
@@ -2522,8 +2614,7 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
}
srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
- if ((srcmpte == NULL) ||
- (srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
+ if ((srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
continue;
if (pdnxt > end_addr)
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 81f75a5..89d2cd7 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.153 1997/08/05 01:02:14 dyson Exp $
+ * $Id: pmap.c,v 1.154 1997/08/05 01:32:05 dyson Exp $
*/
/*
@@ -89,7 +89,6 @@
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
-#include <vm/vm_zone.h>
#include <sys/user.h>
@@ -119,6 +118,8 @@
#define PTPHINT
+static void init_pv_entries __P((int));
+
/*
* Get PDEs and PTEs for user/kernel address space
*/
@@ -153,9 +154,8 @@ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static vm_offset_t vm_first_phys;
-int pgeflag; /* PG_G or-in */
-int pseflag; /* PG_PS or-in */
-int pv_npg;
+static int pgeflag; /* PG_G or-in */
+static int pseflag; /* PG_PS or-in */
static int nkpt;
static vm_page_t nkpg;
@@ -163,14 +163,15 @@ vm_offset_t kernel_vm_end;
extern vm_offset_t clean_sva, clean_eva;
+#define PV_FREELIST_MIN ((PAGE_SIZE / sizeof (struct pv_entry)) / 2)
+
/*
* Data for the pv entry allocation mechanism
*/
-vm_zone_t pvzone;
-struct vm_zone pvzone_store;
-struct vm_object pvzone_obj;
-#define NPVINIT 8192
-struct pv_entry pvinit[NPVINIT];
+static int pv_freelistcnt;
+TAILQ_HEAD (,pv_entry) pv_freelist = {0};
+static vm_offset_t pvva;
+static int npvvapg;
/*
* All those kernel PT submaps that BSD is so fond of
@@ -190,6 +191,7 @@ static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
static unsigned * get_ptbase __P((pmap_t pmap));
static pv_entry_t get_pv_entry __P((void));
static void i386_protection_init __P((void));
+static void pmap_alloc_pv_entry __P((void));
static void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
static PMAP_INLINE int pmap_is_managed __P((vm_offset_t pa));
@@ -473,50 +475,43 @@ pmap_init(phys_start, phys_end)
{
vm_offset_t addr;
vm_size_t s;
- int i;
+ int i, npg;
/*
* calculate the number of pv_entries needed
*/
vm_first_phys = phys_avail[0];
for (i = 0; phys_avail[i + 1]; i += 2);
- pv_npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
+ npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
/*
* Allocate memory for random pmap data structures. Includes the
* pv_head_table.
*/
- s = (vm_size_t) (sizeof(pv_table_t) * pv_npg);
+ s = (vm_size_t) (sizeof(pv_table_t) * npg);
s = round_page(s);
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
pv_table = (pv_table_t *) addr;
- for(i = 0; i < pv_npg; i++) {
+ for(i = 0; i < npg; i++) {
vm_offset_t pa;
TAILQ_INIT(&pv_table[i].pv_list);
pv_table[i].pv_list_count = 0;
pa = vm_first_phys + i * PAGE_SIZE;
pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
}
+ TAILQ_INIT(&pv_freelist);
/*
* init the pv free list
*/
- pvzone = &pvzone_store;
- _zbootinit(pvzone, "PV entries", sizeof(pvinit[0]), pvinit, NPVINIT);
-
+ init_pv_entries(npg);
/*
* Now it is safe to enable pv_table recording.
*/
pmap_initialized = TRUE;
}
-void
-pmap_init2() {
- _zinit(pvzone, &pvzone_obj, NULL, 0,
- PMAP_SHPGPERPROC * maxproc + pv_npg, ZONE_INTERRUPT, 4);
-}
-
/*
* Used to map a range of physical addresses into kernel
* virtual address space.
@@ -665,9 +660,9 @@ pmap_extract(pmap, va)
vm_offset_t rtval;
vm_offset_t pdirindex;
pdirindex = va >> PDRSHIFT;
- if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
+ if (pmap) {
unsigned *pte;
- if ((rtval & PG_PS) != 0) {
+ if (((rtval = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
rtval &= ~(NBPDR - 1);
rtval |= va & (NBPDR - 1);
return rtval;
@@ -1389,9 +1384,7 @@ retry:
pdstack[pdstackptr] = (vm_offset_t) pmap->pm_pdir;
++pdstackptr;
} else {
- int pdstmp = pdstackptr - 1;
- kmem_free(kernel_map, pdstack[pdstmp], PAGE_SIZE);
- pdstack[pdstmp] = (vm_offset_t) pmap->pm_pdir;
+ kmem_free(kernel_map, (vm_offset_t) pmap->pm_pdir, PAGE_SIZE);
}
pmap->pm_pdir = 0;
}
@@ -1491,11 +1484,12 @@ pmap_reference(pmap)
/*
* free the pv_entry back to the free list
*/
-static inline void
+static PMAP_INLINE void
free_pv_entry(pv)
pv_entry_t pv;
{
- zfreei(pvzone, pv);
+ ++pv_freelistcnt;
+ TAILQ_INSERT_HEAD(&pv_freelist, pv, pv_list);
}
/*
@@ -1504,10 +1498,108 @@ free_pv_entry(pv)
* the memory allocation is performed bypassing the malloc code
* because of the possibility of allocations at interrupt time.
*/
-static inline pv_entry_t
-get_pv_entry(void)
+static pv_entry_t
+get_pv_entry()
+{
+ pv_entry_t tmp;
+
+ /*
+ * get more pv_entry pages if needed
+ */
+ if (pv_freelistcnt < PV_FREELIST_MIN || !TAILQ_FIRST(&pv_freelist)) {
+ pmap_alloc_pv_entry();
+ }
+ /*
+ * get a pv_entry off of the free list
+ */
+ --pv_freelistcnt;
+ tmp = TAILQ_FIRST(&pv_freelist);
+ TAILQ_REMOVE(&pv_freelist, tmp, pv_list);
+ return tmp;
+}
+
+/*
+ * This *strange* allocation routine eliminates the possibility of a malloc
+ * failure (*FATAL*) for a pv_entry_t data structure.
+ * also -- this code is MUCH MUCH faster than the malloc equiv...
+ * We really need to do the slab allocator thingie here.
+ */
+static void
+pmap_alloc_pv_entry()
+{
+ /*
+ * do we have any pre-allocated map-pages left?
+ */
+ if (npvvapg) {
+ vm_page_t m;
+
+ /*
+ * allocate a physical page out of the vm system
+ */
+ m = vm_page_alloc(kernel_object,
+ OFF_TO_IDX(pvva - vm_map_min(kernel_map)),
+ VM_ALLOC_INTERRUPT);
+ if (m) {
+ int newentries;
+ int i;
+ pv_entry_t entry;
+
+ newentries = (PAGE_SIZE / sizeof(struct pv_entry));
+ /*
+ * wire the page
+ */
+ vm_page_wire(m);
+ m->flags &= ~PG_BUSY;
+ /*
+ * let the kernel see it
+ */
+ pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));
+
+ entry = (pv_entry_t) pvva;
+ /*
+ * update the allocation pointers
+ */
+ pvva += PAGE_SIZE;
+ --npvvapg;
+
+ /*
+ * free the entries into the free list
+ */
+ for (i = 0; i < newentries; i++) {
+ free_pv_entry(entry);
+ entry++;
+ }
+ }
+ }
+ if (!TAILQ_FIRST(&pv_freelist))
+ panic("get_pv_entry: cannot get a pv_entry_t");
+}
+
+/*
+ * init the pv_entry allocation system
+ */
+void
+init_pv_entries(npg)
+ int npg;
{
- return zalloci(pvzone);
+ /*
+ * Allocate enough kvm space for one entry per page, and
+ * each process having PMAP_SHPGPERPROC pages shared with other
+ * processes. (The system can panic if this is too small, but also
+ * can fail on bootup if this is too big.)
+ * XXX The pv management mechanism needs to be fixed so that systems
+ * with lots of shared mappings amongst lots of processes will still
+ * work. The fix will likely be that once we run out of pv entries
+ * we will free other entries (and the associated mappings), with
+ * some policy yet to be determined.
+ */
+ npvvapg = ((PMAP_SHPGPERPROC * maxproc + npg) * sizeof(struct pv_entry)
+ + PAGE_SIZE - 1) / PAGE_SIZE;
+ pvva = kmem_alloc_pageable(kernel_map, npvvapg * PAGE_SIZE);
+ /*
+ * get the first batch of entries
+ */
+ pmap_alloc_pv_entry();
}
/*
@@ -2522,8 +2614,7 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
}
srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
- if ((srcmpte == NULL) ||
- (srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
+ if ((srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
continue;
if (pdnxt > end_addr)
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index c27110c..f0eb1f1 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: pmap.h,v 1.21 1997/07/17 04:34:02 dyson Exp $
+ * $Id: pmap.h,v 1.22 1997/08/05 01:32:50 dyson Exp $
*/
/*
@@ -131,7 +131,6 @@ void pmap_swapout_proc __P((struct proc *p));
void pmap_swapin_proc __P((struct proc *p));
void pmap_activate __P((struct proc *p));
vm_offset_t pmap_addr_hint __P((vm_object_t obj, vm_offset_t addr, vm_size_t size));
-void pmap_init2 __P((void));
#endif /* KERNEL */
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 5c7a77b..3ce7173 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.80 1997/08/05 00:01:56 dyson Exp $
+ * $Id: vm_map.c,v 1.81 1997/08/05 01:32:52 dyson Exp $
*/
/*
@@ -207,7 +207,6 @@ vmspace_alloc(min, max, pageable)
void
vm_init2(void) {
- pmap_init2();
_zinit(kmapentzone, &kmapentobj,
NULL, 0, 4096, ZONE_INTERRUPT, 4);
_zinit(mapentzone, &mapentobj,
@@ -304,7 +303,7 @@ vm_map_entry_dispose(map, entry)
vm_map_t map;
vm_map_entry_t entry;
{
- zfreei((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
+ zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
}
/*
@@ -317,7 +316,7 @@ static vm_map_entry_t
vm_map_entry_create(map)
vm_map_t map;
{
- return zalloci((map->system_map || !mapentzone) ? kmapentzone : mapentzone);
+ return zalloc((map->system_map || !mapentzone) ? kmapentzone : mapentzone);
}
/*
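
A rough, standalone userland sketch (not part of the commit above) of the freelist technique this change restores for pv entries: entries are handed out from the head of a TAILQ and the list is replenished a page at a time, so allocation never has to call malloc at interrupt time. The names here (my_pv_entry, refill_freelist, MY_PAGE_SIZE, MY_FREELIST_MIN) are invented for the sketch, and malloc() merely stands in for the kernel's vm_page_alloc() + pmap_kenter() path.

/*
 * Illustrative only: hand entries out from the head of a TAILQ,
 * refilling it one page's worth of entries at a time.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

#define MY_PAGE_SIZE    4096

struct my_pv_entry {
        TAILQ_ENTRY(my_pv_entry) pv_list;       /* freelist linkage */
};

#define MY_FREELIST_MIN ((MY_PAGE_SIZE / sizeof(struct my_pv_entry)) / 2)

static TAILQ_HEAD(, my_pv_entry) freelist = TAILQ_HEAD_INITIALIZER(freelist);
static int freelistcnt;

/* Carve one page's worth of entries and push them onto the freelist. */
static void
refill_freelist(void)
{
        int i, n = MY_PAGE_SIZE / sizeof(struct my_pv_entry);
        struct my_pv_entry *e = malloc(MY_PAGE_SIZE);   /* stand-in for vm_page_alloc + pmap_kenter */

        if (e == NULL)
                return;
        for (i = 0; i < n; i++, e++) {
                TAILQ_INSERT_HEAD(&freelist, e, pv_list);
                freelistcnt++;
        }
}

/* Analogue of get_pv_entry(): refill when running low, then pop the head. */
static struct my_pv_entry *
get_entry(void)
{
        struct my_pv_entry *e;

        if (freelistcnt < (int)MY_FREELIST_MIN || TAILQ_FIRST(&freelist) == NULL)
                refill_freelist();
        e = TAILQ_FIRST(&freelist);
        if (e == NULL)
                return NULL;            /* the kernel panics here instead */
        TAILQ_REMOVE(&freelist, e, pv_list);
        freelistcnt--;
        return e;
}

int
main(void)
{
        struct my_pv_entry *e = get_entry();

        printf("got %p, %d entries left on the freelist\n", (void *)e, freelistcnt);
        return 0;
}

Freeing an entry is the mirror image, as in the patch's free_pv_entry(): bump the counter and TAILQ_INSERT_HEAD the entry back onto the list.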