author     dyson <dyson@FreeBSD.org>  1996-07-27 03:24:10 +0000
committer  dyson <dyson@FreeBSD.org>  1996-07-27 03:24:10 +0000
commit     293abd3564090ccf9196bbd4bc949163b91fce62 (patch)
tree       e65b39cf45164a5e0f930222b2d2e90148f2f518 /sys/i386
parent     5306d9c9d6bc06b09eb3c7ca700475faafbee5c1 (diff)
This commit is meant to solve a couple of VM system problems and
performance issues:

1) The pmap module has had too many inlines, and so the object file is
simply bigger than it needs to be. Some common code is also merged into
subroutines.

2) Removal of some *evil* PHYS_TO_VM_PAGE macro calls. Unfortunately, a
few have needed to be added also. The removal caused the need for more
vm_page_lookups, so lookup hints were added to minimize the need for the
page table lookup operations.

3) Removal of some bogus performance improvements that mostly made the
code more complex (tracking individual page table page updates
unnecessarily). Those improvements actually hurt performance on 386
processors (not that people who worry about performance use 386
processors anymore :-)).

4) Changed pv queue manipulations/structures to be TAILQs.

5) The pv queue code has had some performance problems since day one.
Some significant scalability issues are resolved by threading the pv
entries from the pmap AND the physical address, instead of just the
physical address. This makes certain pmap operations run much faster.
It does not affect most micro-benchmarks, but should help loaded system
performance *significantly*. DG helped and came up with most of the
solution for this one.

6) Most if not all pmap bit operations followed the pattern:
pmap_test_bit(); pmap_clear_bit(). That made for twice the necessary pv
list traversal. The pmap interface now supports only pmap_tc_bit type
operations: pmap_[test/clear]_modified and pmap_[test/clear]_referenced.
Additionally, the modified routine now takes a vm_page_t argument
instead of a physical address, eliminating a PHYS_TO_VM_PAGE operation.

7) Several routines that contained redundant code were rewritten to use
common routines, so that there is a greater likelihood of keeping the
cache footprint smaller.
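Item 5 above is the structural heart of the change: each pv_entry is now
threaded onto two lists at once, one rooted at the physical page and one
rooted at the owning pmap. A minimal sketch of the resulting layout, using
the <sys/queue.h> TAILQ macros; the field names follow the diff below,
while the surrounding types are simplified for illustration:

    #include <sys/queue.h>

    struct pv_entry;

    /* per-physical-page list head, as in the new pv_table_t */
    typedef struct {
            int pv_list_count;              /* list length, kept for heuristics */
            TAILQ_HEAD(, pv_entry) pv_list;
    } pv_table_t;

    /* per-pmap list head, as in the new struct pmap */
    struct pmap {
            pv_table_t pm_pvlist;           /* every mapping in this pmap */
    };

    /* one mapping, linked into BOTH lists */
    typedef struct pv_entry {
            struct pmap *pv_pmap;           /* pmap where mapping lies */
            unsigned long pv_va;            /* virtual address of mapping */
            TAILQ_ENTRY(pv_entry) pv_list;  /* chains mappings of one page */
            TAILQ_ENTRY(pv_entry) pv_plist; /* chains mappings of one pmap */
    } *pv_entry_t;

Operations keyed by physical page (pmap_remove_all) walk the page's
pv_list; operations keyed by address space (pmap_remove_pages at process
exit) walk pm_pvlist and never visit entries belonging to other pmaps,
which is where the scalability win comes from.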
Diffstat (limited to 'sys/i386')
-rw-r--r--  sys/i386/i386/pmap.c      941
-rw-r--r--  sys/i386/include/pmap.h    25
2 files changed, 445 insertions, 521 deletions
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 6dbffd1..5e2838b 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -99,6 +99,12 @@
#define PMAP_DIAGNOSTIC
#endif
+#if !defined(SMALL_KERNEL)
+#define PMAP_INLINE __inline
+#else
+#define PMAP_INLINE
+#endif
+
static void init_pv_entries __P((int));
/*
@@ -146,7 +152,7 @@ extern int cpu_class;
* Data for the pv entry allocation mechanism
*/
static int pv_freelistcnt;
-static pv_entry_t pv_freelist;
+TAILQ_HEAD (,pv_entry) pv_freelist;
static vm_offset_t pvva;
static int npvvapg;
@@ -155,7 +161,6 @@ static int npvvapg;
*/
pt_entry_t *CMAP1;
static pt_entry_t *CMAP2, *ptmmap;
-static pv_entry_t *pv_table;
caddr_t CADDR1, ptvmmap;
static caddr_t CADDR2;
static pt_entry_t *msgbufmap;
@@ -165,32 +170,32 @@ pt_entry_t *PMAP1;
unsigned *PADDR1;
static void free_pv_entry __P((pv_entry_t pv));
-static __inline unsigned * get_ptbase __P((pmap_t pmap));
+static unsigned * get_ptbase __P((pmap_t pmap));
static pv_entry_t get_pv_entry __P((void));
static void i386_protection_init __P((void));
static void pmap_alloc_pv_entry __P((void));
-static void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
static int pmap_is_managed __P((vm_offset_t pa));
-static void pmap_remove_all __P((vm_offset_t pa));
-static void pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
- vm_offset_t pa));
+static int pmap_remove_all __P((vm_offset_t pa));
+static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
+ vm_offset_t pa, vm_page_t mpte));
static int pmap_remove_pte __P((struct pmap *pmap, unsigned *ptq,
vm_offset_t sva));
static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
-static __inline int pmap_remove_entry __P((struct pmap *pmap, pv_entry_t *pv,
+static int pmap_remove_entry __P((struct pmap *pmap, pv_table_t *pv,
vm_offset_t va));
-static boolean_t pmap_testbit __P((vm_offset_t pa, int bit));
-static __inline void pmap_insert_entry __P((pmap_t pmap, vm_offset_t va,
+static void pmap_insert_entry __P((pmap_t pmap, vm_offset_t va,
vm_page_t mpte, vm_offset_t pa));
-static __inline vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va));
+static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va));
-static __inline int pmap_release_free_page __P((pmap_t pmap, vm_page_t p));
+static int pmap_release_free_page __P((pmap_t pmap, vm_page_t p));
static vm_page_t _pmap_allocpte __P((pmap_t pmap, int ptepindex));
+unsigned * __pure pmap_pte_quick __P((pmap_t pmap, vm_offset_t va));
+int pmap_tcbit __P((vm_offset_t pa, int bit));
+static vm_page_t pmap_page_alloc __P((vm_object_t object, vm_pindex_t pindex));
-#define VATRACK 4
-#define PDSTACKMAX 16
+#define PDSTACKMAX 6
static vm_offset_t pdstack[PDSTACKMAX];
static int pdstackptr;
@@ -240,6 +245,7 @@ pmap_bootstrap(firstaddr, loadaddr)
kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + IdlePTD);
kernel_pmap->pm_count = 1;
+ TAILQ_INIT(&kernel_pmap->pm_pvlist.pv_list);
nkpt = NKPT;
/*
@@ -306,10 +312,15 @@ pmap_init(phys_start, phys_end)
* Allocate memory for random pmap data structures. Includes the
* pv_head_table.
*/
- s = (vm_size_t) (sizeof(struct pv_entry *) * npg);
+ s = (vm_size_t) (sizeof(pv_table_t) * npg);
s = round_page(s);
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
- pv_table = (pv_entry_t *) addr;
+ pv_table = (pv_table_t *) addr;
+ for(i=0;i<npg;i++) {
+ pv_table[i].pv_list_count = 0;
+ TAILQ_INIT(&pv_table[i].pv_list);
+ }
+ TAILQ_INIT(&pv_freelist);
/*
* init the pv free list
@@ -372,7 +383,7 @@ pmap_nw_modified(pt_entry_t ptea) {
* this routine defines the region(s) of memory that should
* not be tested for the modified bit.
*/
-static __inline int
+static PMAP_INLINE int
pmap_track_modified( vm_offset_t va) {
if ((va < clean_sva) || (va >= clean_eva))
return 1;
@@ -384,7 +395,7 @@ pmap_track_modified( vm_offset_t va) {
* The below are finer grained pmap_update routines. These eliminate
* the gratuitious tlb flushes on non-i386 architectures.
*/
-static __inline void
+static PMAP_INLINE void
pmap_update_1pg( vm_offset_t va) {
#if defined(I386_CPU)
if (cpu_class == CPUCLASS_386)
@@ -394,7 +405,7 @@ pmap_update_1pg( vm_offset_t va) {
__asm __volatile(".byte 0xf,0x1,0x38": :"a" (va));
}
-static __inline void
+static PMAP_INLINE void
pmap_update_2pg( vm_offset_t va1, vm_offset_t va2) {
#if defined(I386_CPU)
if (cpu_class == CPUCLASS_386) {
@@ -432,7 +443,7 @@ get_ptbase(pmap)
* with the given map/virtual_address pair.
*/
-__inline unsigned * __pure
+unsigned * __pure
pmap_pte(pmap, va)
register pmap_t pmap;
vm_offset_t va;
@@ -448,25 +459,27 @@ pmap_pte(pmap, va)
* the pv lists. This eliminates many coarse-grained
* pmap_update calls.
*/
-__inline unsigned * __pure
+unsigned * __pure
pmap_pte_quick(pmap, va)
register pmap_t pmap;
vm_offset_t va;
{
- unsigned pde;
+ unsigned pde, newpf;
if (pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) {
unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
/* are we current address space or kernel? */
if (pmap == kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
return (unsigned *) PTmap + i386_btop(va);
}
- * (int *) PMAP1 = (pde & PG_FRAME) | PG_V | PG_RW;
- pmap_update_1pg((vm_offset_t) PADDR1);
+ newpf = pde & PG_FRAME;
+ if ( ((* (unsigned *) PMAP1) & PG_FRAME) != newpf) {
+ * (unsigned *) PMAP1 = newpf | PG_RW | PG_V;
+ pmap_update_1pg((vm_offset_t) PADDR1);
+ }
return PADDR1 + ((unsigned) i386_btop(va) & (NPTEPG - 1));
}
return (0);
}
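The rewritten pmap_pte_quick gets its speed from a cached mapping window:
PMAP1 is a single reserved PTE that maps a foreign page table page at
PADDR1, and it is now rewritten (with the matching TLB flush) only when
the requested frame differs from the one already installed. The pattern in
isolation, as a hedged sketch where window_map(), tlb_flush_page() and
WINDOW_VA are hypothetical stand-ins for the PTE store, pmap_update_1pg()
and PADDR1:

    /* sketch: a one-page mapping window with a cached tag */
    extern void window_map(unsigned frame);        /* hypothetical PTE store */
    extern void tlb_flush_page(unsigned long va);  /* hypothetical invlpg */
    #define WINDOW_VA 0xfec00000UL                 /* hypothetical fixed VA */

    static unsigned cached_frame;

    static void *
    window_lookup(unsigned frame)
    {
            if (cached_frame != frame) {    /* remap and flush only on a miss */
                    window_map(frame);
                    tlb_flush_page(WINDOW_VA);
                    cached_frame = frame;
            }
            return (void *)WINDOW_VA;       /* hit: reuse the live mapping */
    }

Loops that probe many PTEs inside the same page table page, the common
case when sweeping a VA range, then pay a compare instead of a TLB
invalidation per probe.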
-
/*
* Routine: pmap_extract
@@ -491,7 +504,7 @@ pmap_extract(pmap, va)
/*
* determine if a page is managed (memory vs. device)
*/
-static __inline __pure int
+static PMAP_INLINE __pure int
pmap_is_managed(pa)
vm_offset_t pa;
{
@@ -535,7 +548,7 @@ pmap_qenter(va, m, count)
pte = (unsigned *)vtopte(tva);
opte = *pte;
*pte = npte;
- if (opte)
+ if (opte & PG_V)
pmap_update_1pg(tva);
}
}
@@ -564,7 +577,7 @@ pmap_qremove(va, count)
* note that in order for the mapping to take effect -- you
* should do a pmap_update after doing the pmap_kenter...
*/
-__inline void
+PMAP_INLINE void
pmap_kenter(va, pa)
vm_offset_t va;
register vm_offset_t pa;
@@ -576,14 +589,14 @@ pmap_kenter(va, pa)
pte = (unsigned *)vtopte(va);
opte = *pte;
*pte = npte;
- if (opte)
+ if (opte & PG_V)
pmap_update_1pg(va);
}
/*
* remove a page from the kernel pagetables
*/
-__inline void
+PMAP_INLINE void
pmap_kremove(va)
vm_offset_t va;
{
@@ -594,80 +607,17 @@ pmap_kremove(va)
pmap_update_1pg(va);
}
-
-/***************************************************
- * Page table page management routines.....
- ***************************************************/
-
-/*
- * This routine unholds page table pages, and if the hold count
- * drops to zero, then it decrements the wire count.
- */
-static __inline int
-pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
- vm_page_unhold(m);
- if (m->hold_count == 0) {
- vm_offset_t pteva;
- /*
- * unmap the page table page
- */
- pmap->pm_pdir[m->pindex] = 0;
- --pmap->pm_stats.resident_count;
- /*
- * Do a pmap_update to make the invalidated mapping
- * take effect immediately.
- */
- pteva = UPT_MIN_ADDRESS + i386_ptob(m->pindex);
- pmap_update_1pg(pteva);
- /*
- * If the page is finally unwired, simply free it.
- */
- --m->wire_count;
- if (m->wire_count == 0) {
- vm_page_free_zero(m);
- --cnt.v_wire_count;
- }
- return 1;
- }
- return 0;
-}
-
-/*
- * After removing a page table entry, this routine is used to
- * conditionally free the page, and manage the hold/wire counts.
- */
-int
-pmap_unuse_pt(pmap, va, mpte)
- pmap_t pmap;
- vm_offset_t va;
- vm_page_t mpte;
+static vm_page_t
+pmap_page_alloc(object, pindex)
+ vm_object_t object;
+ vm_pindex_t pindex;
{
- if (va >= UPT_MIN_ADDRESS)
- return 0;
-
- if (mpte == NULL) {
- vm_offset_t ptepa;
- ptepa = ((vm_offset_t) *pmap_pde(pmap, va));
-#if defined(PMAP_DIAGNOSTIC)
- if (!ptepa)
- panic("pmap_unuse_pt: pagetable page missing, va: 0x%x", va);
-#endif
- if (!ptepa)
- return 0;
- mpte = PHYS_TO_VM_PAGE(ptepa);
- }
-
-#if defined(PMAP_DIAGNOSTIC)
- if (mpte->pindex != (va >> PDRSHIFT))
- panic("pmap_unuse_pt: pindex(0x%x) != va(0x%x)",
- mpte->pindex, (va >> PDRSHIFT));
-
- if (mpte->hold_count == 0) {
- panic("pmap_unuse_pt: hold count < 0, va: 0x%x", va);
+ vm_page_t m;
+ m = vm_page_alloc(object, pindex, VM_ALLOC_ZERO);
+ if (m == NULL) {
+ VM_WAIT;
}
-#endif
-
- return pmap_unwire_pte_hold(pmap, mpte);
+ return m;
}
/*
@@ -701,12 +651,12 @@ pmap_pinit(pmap)
* allocate the page directory page
*/
retry:
- ptdpg = vm_page_alloc( pmap->pm_pteobj, PTDPTDI, VM_ALLOC_ZERO);
- if (ptdpg == NULL) {
- VM_WAIT;
+ ptdpg = pmap_page_alloc( pmap->pm_pteobj, PTDPTDI);
+ if (ptdpg == NULL)
goto retry;
- }
- vm_page_wire(ptdpg);
+
+ ptdpg->wire_count = 1;
+ ++cnt.v_wire_count;
ptdpg->flags &= ~(PG_MAPPED|PG_BUSY); /* not mapped normally */
ptdpg->valid = VM_PAGE_BITS_ALL;
@@ -722,6 +672,8 @@ retry:
VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW;
pmap->pm_count = 1;
+ pmap->pm_ptphint = NULL;
+ TAILQ_INIT(&pmap->pm_pvlist.pv_list);
}
static int
@@ -751,31 +703,7 @@ pmap_release_free_page(pmap, p)
--pmap->pm_stats.resident_count;
if (p->hold_count) {
- int *kvap;
- int i;
-#if defined(PMAP_DIAGNOSTIC)
panic("pmap_release: freeing held page table page");
-#else
- printf("pmap_release: freeing held page table page:\n");
-#endif
- kvap = (int *)vm_pager_map_page(p);
- for(i=0;i<NPTEPG;i++) {
- if (kvap[i]) {
- printf("pte: 0x%x, index: %d\n", kvap[i],i);
- }
- }
- vm_pager_unmap_page((vm_offset_t)kvap);
-
- /*
- * HACK ALERT!!!
- * If this failure happens, we must clear the page, because
- * there is likely a mapping still valid. This condition
- * is an error, but at least this zero operation will mitigate
- * some Sig-11's or crashes, because this page is thought
- * to be zero. This is a robustness fix, and not meant to
- * be a long term work-around.
- */
- pmap_zero_page(VM_PAGE_TO_PHYS(p));
}
/*
* Page directory pages need to have the kernel
@@ -787,6 +715,9 @@ pmap_release_free_page(pmap, p)
pmap_kremove((vm_offset_t) pmap->pm_pdir);
}
+ if (pmap->pm_ptphint == p)
+ pmap->pm_ptphint = NULL;
+
vm_page_free_zero(p);
splx(s);
return 1;
@@ -801,7 +732,7 @@ _pmap_allocpte(pmap, ptepindex)
pmap_t pmap;
int ptepindex;
{
- vm_offset_t pteva, ptepa;
+ vm_offset_t ptepa;
vm_page_t m;
/*
@@ -810,11 +741,9 @@ _pmap_allocpte(pmap, ptepindex)
retry:
m = vm_page_lookup(pmap->pm_pteobj, ptepindex);
if (m == NULL) {
- m = vm_page_alloc(pmap->pm_pteobj, ptepindex, VM_ALLOC_ZERO);
- if (m == NULL) {
- VM_WAIT;
+ m = pmap_page_alloc(pmap->pm_pteobj, ptepindex);
+ if (m == NULL)
goto retry;
- }
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(VM_PAGE_TO_PHYS(m));
m->flags &= ~(PG_ZERO|PG_BUSY);
@@ -827,22 +756,16 @@ retry:
}
}
- /*
- * mark the object writeable
- */
- pmap->pm_pteobj->flags |= OBJ_WRITEABLE;
-
if (m->queue != PQ_NONE) {
int s = splvm();
- vm_page_unqueue(m);
+ vm_page_unqueue(m,1);
splx(s);
}
- if (m->hold_count == 0) {
- if (m->wire_count == 0)
- ++cnt.v_wire_count;
- ++m->wire_count;
- }
+ if (m->wire_count == 0)
+ ++cnt.v_wire_count;
+ ++m->wire_count;
+
/*
* Increment the hold count for the page table page
* (denoting a new mapping.)
@@ -859,14 +782,12 @@ retry:
ptepa = VM_PAGE_TO_PHYS(m);
pmap->pm_pdir[ptepindex] = (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V);
- pteva = UPT_MIN_ADDRESS + i386_ptob(ptepindex);
- pmap_update_1pg(pteva);
m->flags |= PG_MAPPED;
return m;
}
-static __inline vm_page_t
+PMAP_INLINE static vm_page_t
pmap_allocpte(pmap, va)
pmap_t pmap;
vm_offset_t va;
@@ -890,7 +811,13 @@ pmap_allocpte(pmap, va)
* hold count, and activate it.
*/
if (ptepa) {
- m = PHYS_TO_VM_PAGE(ptepa);
+ if (pmap->pm_ptphint &&
+ (pmap->pm_ptphint->pindex == ptepindex)) {
+ m = pmap->pm_ptphint;
+ } else {
+ m = vm_page_lookup( pmap->pm_pteobj, ptepindex);
+ pmap->pm_ptphint = m;
+ }
++m->hold_count;
return m;
}
@@ -1035,13 +962,12 @@ pmap_reference(pmap)
/*
* free the pv_entry back to the free list
*/
-static __inline void
+static PMAP_INLINE void
free_pv_entry(pv)
pv_entry_t pv;
{
++pv_freelistcnt;
- pv->pv_next = pv_freelist;
- pv_freelist = pv;
+ TAILQ_INSERT_HEAD(&pv_freelist, pv, pv_list);
}
/*
@@ -1050,7 +976,7 @@ free_pv_entry(pv)
* the memory allocation is performed bypassing the malloc code
* because of the possibility of allocations at interrupt time.
*/
-static __inline pv_entry_t
+static PMAP_INLINE pv_entry_t
get_pv_entry()
{
pv_entry_t tmp;
@@ -1058,15 +984,16 @@ get_pv_entry()
/*
* get more pv_entry pages if needed
*/
- if (pv_freelistcnt < PV_FREELIST_MIN || pv_freelist == 0) {
+ if (pv_freelistcnt < PV_FREELIST_MIN) {
pmap_alloc_pv_entry();
}
+
/*
* get a pv_entry off of the free list
*/
--pv_freelistcnt;
- tmp = pv_freelist;
- pv_freelist = tmp->pv_next;
+ tmp = TAILQ_FIRST(&pv_freelist);
+ TAILQ_REMOVE(&pv_freelist, tmp, pv_list);
return tmp;
}
@@ -1123,7 +1050,7 @@ pmap_alloc_pv_entry()
}
}
}
- if (!pv_freelist)
+ if (TAILQ_FIRST(&pv_freelist) == NULL)
panic("get_pv_entry: cannot get a pv_entry_t");
}
@@ -1150,62 +1077,115 @@ init_pv_entries(npg)
}
/*
- * If it is the first entry on the list, it is actually
- * in the header and we must copy the following entry up
- * to the header. Otherwise we must search the list for
- * the entry. In either case we free the now unused entry.
+ * This routine unholds page table pages, and if the hold count
+ * drops to zero, then it decrements the wire count.
*/
-static __inline int
-pmap_remove_entry(pmap, ppv, va)
- struct pmap *pmap;
- pv_entry_t *ppv;
- vm_offset_t va;
-{
- pv_entry_t npv;
- int s;
-
- s = splvm();
- for (npv = *ppv; npv; (ppv = &npv->pv_next, npv = *ppv)) {
- if (pmap == npv->pv_pmap && va == npv->pv_va) {
- int rtval = pmap_unuse_pt(pmap, va, npv->pv_ptem);
- *ppv = npv->pv_next;
- free_pv_entry(npv);
- splx(s);
- return rtval;
+static int
+pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
+ vm_page_unhold(m);
+ if (m->hold_count == 0) {
+ vm_offset_t pteva;
+ /*
+ * unmap the page table page
+ */
+ pmap->pm_pdir[m->pindex] = 0;
+ --pmap->pm_stats.resident_count;
+ if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
+ (((unsigned) PTDpde) & PG_FRAME)) {
+ /*
+ * Do a pmap_update to make the invalidated mapping
+ * take effect immediately.
+ */
+ pteva = UPT_MIN_ADDRESS + i386_ptob(m->pindex);
+ pmap_update_1pg(pteva);
+ }
+ /*
+ * If the page is finally unwired, simply free it.
+ */
+ --m->wire_count;
+ if (m->wire_count == 0) {
+ if (pmap->pm_ptphint == m)
+ pmap->pm_ptphint = NULL;
+ vm_page_free_zero(m);
+ --cnt.v_wire_count;
}
+ return 1;
}
- splx(s);
return 0;
}
/*
- * Create a pv entry for page at pa for
- * (pmap, va).
+ * After removing a page table entry, this routine is used to
+ * conditionally free the page, and manage the hold/wire counts.
*/
-static __inline void
-pmap_insert_entry(pmap, va, mpte, pa)
+PMAP_INLINE int
+pmap_unuse_pt(pmap, va, mpte)
pmap_t pmap;
vm_offset_t va;
vm_page_t mpte;
- vm_offset_t pa;
{
+ int ptepindex;
+ if (va >= UPT_MIN_ADDRESS)
+ return 0;
+
+ if (mpte == NULL) {
+ ptepindex = (va >> PDRSHIFT);
+ if (pmap->pm_ptphint &&
+ pmap->pm_ptphint->pindex == ptepindex) {
+ mpte = pmap->pm_ptphint;
+ } else {
+ mpte = vm_page_lookup( pmap->pm_pteobj, ptepindex);
+ pmap->pm_ptphint = mpte;
+ }
+ }
+
+ return pmap_unwire_pte_hold(pmap, mpte);
+}
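This is where the lookup hints from item 2 of the log earn their keep:
with the PHYS_TO_VM_PAGE shortcut gone, finding a page table page means a
vm_page_lookup, so pmap_unuse_pt (like pmap_allocpte above) consults a
one-entry cache, pm_ptphint, first. The recurring shape, shown as a
commented sketch of the code above:

    /* one-entry lookup cache: try the hint before paying for a
     * vm_page_lookup(), and refresh the hint on a miss */
    if (pmap->pm_ptphint != NULL &&
        pmap->pm_ptphint->pindex == ptepindex) {
            mpte = pmap->pm_ptphint;        /* hit: no object lookup */
    } else {
            mpte = vm_page_lookup(pmap->pm_pteobj, ptepindex);
            pmap->pm_ptphint = mpte;        /* miss: remember for next time */
    }

Page table pages tend to be touched in runs (remove and protect loops
sweep virtual addresses within one 4MB region at a time), so the hint
should hit far more often than it misses.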
+/*
+ * If it is the first entry on the list, it is actually
+ * in the header and we must copy the following entry up
+ * to the header. Otherwise we must search the list for
+ * the entry. In either case we free the now unused entry.
+ */
+static int
+pmap_remove_entry(pmap, ppv, va)
+ struct pmap *pmap;
+ pv_table_t *ppv;
+ vm_offset_t va;
+{
+ pv_entry_t pv;
+ int rtval;
int s;
- pv_entry_t *ppv, pv;
s = splvm();
- pv = get_pv_entry();
- pv->pv_va = va;
- pv->pv_pmap = pmap;
- pv->pv_ptem = mpte;
+ if (ppv->pv_list_count < pmap->pm_stats.resident_count) {
+ for (pv = TAILQ_FIRST(&ppv->pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
+ if (pmap == pv->pv_pmap && va == pv->pv_va)
+ break;
+ }
+ } else {
+ for (pv = TAILQ_FIRST(&pmap->pm_pvlist.pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_plist)) {
+ if (va == pv->pv_va)
+ break;
+ }
+ }
- ppv = pa_to_pvh(pa);
- if (*ppv)
- pv->pv_next = *ppv;
- else
- pv->pv_next = NULL;
- *ppv = pv;
+ rtval = 0;
+ if (pv) {
+ rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
+ TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
+ --ppv->pv_list_count;
+ TAILQ_REMOVE(&pmap->pm_pvlist.pv_list, pv, pv_plist);
+ free_pv_entry(pv);
+ }
+
splx(s);
+ return rtval;
}
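pmap_remove_entry shows the payoff of carrying both linkages plus a length
count: the entry for (pmap, va) sits on the page's pv_list and on the
pmap's pm_pvlist, so the search can walk whichever list is expected to be
shorter. Schematically, in the modern TAILQ_FOREACH spelling of the two
loops above:

    /* sketch: pick the cheaper traversal; pv_list_count and
     * resident_count are maintained at insert/remove time */
    if (ppv->pv_list_count < pmap->pm_stats.resident_count) {
            /* page has few sharers: scan the per-page list */
            TAILQ_FOREACH(pv, &ppv->pv_list, pv_list)
                    if (pv->pv_pmap == pmap && pv->pv_va == va)
                            break;
    } else {
            /* page is widely shared: scan this pmap's own list */
            TAILQ_FOREACH(pv, &pmap->pm_pvlist.pv_list, pv_plist)
                    if (pv->pv_va == va)
                            break;
    }

For a page mapped into hundreds of address spaces (shared library text,
say), removing one process's mapping no longer scans the whole shared
chain.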
/*
@@ -1218,7 +1198,6 @@ pmap_remove_pte(pmap, ptq, va)
vm_offset_t va;
{
unsigned oldpte;
- pv_entry_t *ppv;
oldpte = *ptq;
*ptq = 0;
@@ -1235,8 +1214,7 @@ pmap_remove_pte(pmap, ptq, va)
if (pmap_track_modified(va))
PHYS_TO_VM_PAGE(oldpte)->dirty = VM_PAGE_BITS_ALL;
}
- ppv = pa_to_pvh(oldpte);
- return pmap_remove_entry(pmap, ppv, va);
+ return pmap_remove_entry(pmap, pa_to_pvh(oldpte), va);
} else {
return pmap_unuse_pt(pmap, va, NULL);
}
@@ -1265,9 +1243,11 @@ pmap_remove_page(pmap, va)
* get a local va for mappings for this pmap.
*/
ptq = get_ptbase(pmap) + i386_btop(va);
- if (*ptq) {
+ if (*ptq & PG_V) {
(void) pmap_remove_pte(pmap, ptq, va);
- pmap_update_1pg(va);
+ if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) == (((unsigned) PTDpde) & PG_FRAME)) {
+ pmap_update_1pg(va);
+ }
}
return;
}
@@ -1290,7 +1270,6 @@ pmap_remove(pmap, sva, eva)
vm_offset_t sindex, eindex;
vm_page_t mpte;
int anyvalid;
- vm_offset_t vachanged[VATRACK];
if (pmap == NULL)
return;
@@ -1315,6 +1294,7 @@ pmap_remove(pmap, sva, eva)
sindex = i386_btop(sva);
eindex = i386_btop(eva);
+ mpte = NULL;
for (; sindex < eindex; sindex = pdnxt) {
@@ -1331,19 +1311,6 @@ pmap_remove(pmap, sva, eva)
if (ptpaddr == 0)
continue;
- if (sindex < i386_btop(UPT_MIN_ADDRESS)) {
- /*
- * get the vm_page_t for the page table page
- */
- mpte = PHYS_TO_VM_PAGE(ptpaddr);
-
- /*
- * if the pte isn't wired, just skip it.
- */
- if (mpte->wire_count == 0)
- continue;
- }
-
/*
* Limit our scan to either the end of the va represented
* by the current page table page, or to the end of the
@@ -1355,13 +1322,11 @@ pmap_remove(pmap, sva, eva)
for ( ;sindex != pdnxt; sindex++) {
vm_offset_t va;
- if (ptbase[sindex] == 0) {
+ if ((ptbase[sindex] & PG_V) == 0) {
continue;
}
va = i386_ptob(sindex);
- if (anyvalid < VATRACK)
- vachanged[anyvalid] = va;
anyvalid++;
if (pmap_remove_pte(pmap,
ptbase + sindex, va))
@@ -1370,15 +1335,8 @@ pmap_remove(pmap, sva, eva)
}
if (anyvalid) {
- if (anyvalid <= VATRACK) {
- int i;
- for(i=0;i<anyvalid;i++)
- pmap_update_1pg(vachanged[i]);
- } else {
- pmap_update();
- }
+ pmap_update();
}
-
}
/*
@@ -1393,15 +1351,18 @@ pmap_remove(pmap, sva, eva)
* inefficient because they iteratively called
* pmap_remove (slow...)
*/
-static void
+static int
pmap_remove_all(pa)
vm_offset_t pa;
{
- register pv_entry_t pv, *ppv, npv;
+ register pv_entry_t pv, npv;
+ pv_table_t *ppv;
register unsigned *pte, tpte;
vm_page_t m;
+ int nmodify;
int s;
+ nmodify = 0;
#if defined(PMAP_DIAGNOSTIC)
/*
* XXX this makes pmap_page_protect(NONE) illegal for non-managed
@@ -1415,9 +1376,11 @@ pmap_remove_all(pa)
s = splvm();
m = NULL;
ppv = pa_to_pvh(pa);
- for (pv = *ppv; pv; pv=pv->pv_next) {
+ for (pv = TAILQ_FIRST(&ppv->pv_list);
+ pv;
+ pv = npv) {
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
- if (tpte = *pte) {
+ if ((tpte = *pte) & PG_V) {
pv->pv_pmap->pm_stats.resident_count--;
*pte = 0;
if (tpte & PG_W)
@@ -1425,28 +1388,27 @@ pmap_remove_all(pa)
/*
* Update the vm_page_t clean and reference bits.
*/
- if (tpte & PG_M) {
+ if ((tpte & (PG_M|PG_MANAGED)) == (PG_M|PG_MANAGED)) {
#if defined(PMAP_DIAGNOSTIC)
if (pmap_nw_modified((pt_entry_t) tpte)) {
printf("pmap_remove_all: modified page not writable: va: 0x%lx, pte: 0x%lx\n", pv->pv_va, tpte);
}
#endif
- if (pmap_track_modified(pv->pv_va)) {
- if (m == NULL)
- m = PHYS_TO_VM_PAGE(pa);
- m->dirty = VM_PAGE_BITS_ALL;
- }
+ if (pmap_track_modified(pv->pv_va))
+ nmodify += 1;
}
}
- }
+ TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist.pv_list, pv, pv_plist);
- for (pv = *ppv; pv; pv = npv) {
- npv = pv->pv_next;
+ npv = TAILQ_NEXT(pv, pv_list);
+ TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
+ --ppv->pv_list_count;
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
free_pv_entry(pv);
}
- *ppv = NULL;
+
splx(s);
+ return nmodify;
}
/*
@@ -1484,6 +1446,7 @@ pmap_protect(pmap, sva, eva, prot)
sindex = i386_btop(sva);
eindex = i386_btop(eva);
+ mpte = NULL;
for (; sindex < eindex; sindex = pdnxt) {
pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
@@ -1496,18 +1459,6 @@ pmap_protect(pmap, sva, eva, prot)
if (ptpaddr == 0)
continue;
- /*
- * Skip page ranges, where the page table page isn't wired.
- * If the page table page is not wired, there are no page mappings
- * there.
- */
- if (sindex < i386_btop(UPT_MIN_ADDRESS)) {
- mpte = PHYS_TO_VM_PAGE(ptpaddr);
-
- if (mpte->wire_count == 0)
- continue;
- }
-
if (pdnxt > eindex) {
pdnxt = eindex;
}
@@ -1516,8 +1467,8 @@ pmap_protect(pmap, sva, eva, prot)
unsigned pbits = ptbase[sindex];
- if (pbits & PG_RW) {
- if (pbits & PG_M) {
+ if ((pbits & (PG_RW|PG_V)) == (PG_RW|PG_V)) {
+ if ((pbits & (PG_M|PG_MANAGED)) == (PG_M|PG_MANAGED)) {
vm_offset_t sva = i386_ptob(sindex);
if (pmap_track_modified(sva)) {
vm_page_t m = PHYS_TO_VM_PAGE(pbits);
@@ -1534,6 +1485,37 @@ pmap_protect(pmap, sva, eva, prot)
}
/*
+ * Create a pv entry for page at pa for
+ * (pmap, va).
+ */
+static void
+pmap_insert_entry(pmap, va, mpte, pa)
+ pmap_t pmap;
+ vm_offset_t va;
+ vm_page_t mpte;
+ vm_offset_t pa;
+{
+
+ int s;
+ pv_entry_t pv;
+ pv_table_t *ppv;
+
+ s = splvm();
+ pv = get_pv_entry();
+ pv->pv_va = va;
+ pv->pv_pmap = pmap;
+ pv->pv_ptem = mpte;
+
+ TAILQ_INSERT_TAIL(&pmap->pm_pvlist.pv_list, pv, pv_plist);
+
+ ppv = pa_to_pvh(pa);
+ TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
+ ++ppv->pv_list_count;
+
+ splx(s);
+}
+
+/*
* Insert the given physical page (p) at
* the specified virtual address (v) in the
* target physical map with the protection requested.
@@ -1577,7 +1559,7 @@ pmap_enter(pmap, va, pa, prot, wired)
if (va < UPT_MIN_ADDRESS)
mpte = pmap_allocpte(pmap, va);
- pte = pmap_pte_quick(pmap, va);
+ pte = pmap_pte(pmap, va);
/*
* Page Directory table entry not valid, we need a new PT page
*/
@@ -1593,7 +1575,7 @@ pmap_enter(pmap, va, pa, prot, wired)
/*
* Mapping has not changed, must be protection or wiring change.
*/
- if (opa == pa) {
+ if ((origpte & PG_V) && (opa == pa)) {
/*
* Wiring change, just update stats. We don't worry about
* wiring PT pages as they remain resident as long as there
@@ -1616,12 +1598,10 @@ pmap_enter(pmap, va, pa, prot, wired)
* so we go ahead and sense modify status.
*/
if (origpte & PG_MANAGED) {
- vm_page_t m;
- if (origpte & PG_M) {
- if (pmap_track_modified(va)) {
- m = PHYS_TO_VM_PAGE(pa);
- m->dirty = VM_PAGE_BITS_ALL;
- }
+ if ((origpte & PG_M) && pmap_track_modified(va)) {
+ vm_page_t m;
+ m = PHYS_TO_VM_PAGE(pa);
+ m->dirty = VM_PAGE_BITS_ALL;
}
pa |= PG_MANAGED;
}
@@ -1635,7 +1615,7 @@ pmap_enter(pmap, va, pa, prot, wired)
* Mapping has changed, invalidate old range and fall through to
* handle validating new mapping.
*/
- if (opa) {
+ if (origpte & PG_V) {
int err;
err = pmap_remove_pte(pmap, pte, va);
if (err)
@@ -1692,22 +1672,55 @@ validate:
* but is *MUCH* faster than pmap_enter...
*/
-static void
-pmap_enter_quick(pmap, va, pa)
+static vm_page_t
+pmap_enter_quick(pmap, va, pa, mpte)
register pmap_t pmap;
vm_offset_t va;
register vm_offset_t pa;
+ vm_page_t mpte;
{
register unsigned *pte;
- vm_page_t mpte;
- mpte = NULL;
/*
* In the case that a page table page is not
* resident, we are creating it here.
*/
- if (va < UPT_MIN_ADDRESS)
- mpte = pmap_allocpte(pmap, va);
+ if (va < UPT_MIN_ADDRESS) {
+ int ptepindex;
+ vm_offset_t ptepa;
+
+ /*
+ * Calculate pagetable page index
+ */
+ ptepindex = va >> PDRSHIFT;
+ if (mpte && (mpte->pindex == ptepindex)) {
+ ++mpte->hold_count;
+ } else {
+ /*
+ * Get the page directory entry
+ */
+ ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
+
+ /*
+ * If the page table page is mapped, we just increment
+ * the hold count, and activate it.
+ */
+ if (ptepa) {
+ if (pmap->pm_ptphint &&
+ pmap->pm_ptphint->pindex == ptepindex) {
+ mpte = pmap->pm_ptphint;
+ } else {
+ mpte = vm_page_lookup( pmap->pm_pteobj, ptepindex);
+ pmap->pm_ptphint = mpte;
+ }
+ ++mpte->hold_count;
+ } else {
+ mpte = _pmap_allocpte(pmap, ptepindex);
+ }
+ }
+ } else {
+ mpte = NULL;
+ }
/*
* This call to vtopte makes the assumption that we are
@@ -1716,10 +1729,10 @@ pmap_enter_quick(pmap, va, pa)
* But that isn't as quick as vtopte.
*/
pte = (unsigned *)vtopte(va);
- if (*pte) {
+ if (*pte & PG_V) {
if (mpte)
pmap_unwire_pte_hold(pmap, mpte);
- return;
+ return NULL;
}
/*
@@ -1739,7 +1752,7 @@ pmap_enter_quick(pmap, va, pa)
*/
*pte = pa | PG_V | PG_U | PG_MANAGED;
- return;
+ return mpte;
}
#define MAX_INIT_PT (96)
@@ -1759,7 +1772,7 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
{
vm_offset_t tmpidx;
int psize;
- vm_page_t p;
+ vm_page_t p, mpte;
int objpgs;
psize = i386_btop(size);
@@ -1773,6 +1786,7 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
if (psize + pindex > object->size)
psize = object->size - pindex;
+ mpte = NULL;
/*
* if we are processing a major portion of the object, then scan the
* entire thing.
@@ -1798,9 +1812,9 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
if (p->queue == PQ_CACHE)
vm_page_deactivate(p);
p->flags |= PG_BUSY;
- pmap_enter_quick(pmap,
+ mpte = pmap_enter_quick(pmap,
addr + i386_ptob(tmpidx),
- VM_PAGE_TO_PHYS(p));
+ VM_PAGE_TO_PHYS(p), mpte);
p->flags |= PG_MAPPED;
PAGE_WAKEUP(p);
}
@@ -1819,9 +1833,9 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
if (p->queue == PQ_CACHE)
vm_page_deactivate(p);
p->flags |= PG_BUSY;
- pmap_enter_quick(pmap,
+ mpte = pmap_enter_quick(pmap,
addr + i386_ptob(tmpidx),
- VM_PAGE_TO_PHYS(p));
+ VM_PAGE_TO_PHYS(p), mpte);
p->flags |= PG_MAPPED;
PAGE_WAKEUP(p);
}
@@ -1855,7 +1869,7 @@ pmap_prefault(pmap, addra, entry, object)
vm_offset_t starta;
vm_offset_t addr;
vm_pindex_t pindex;
- vm_page_t m;
+ vm_page_t m, mpte;
if (entry->object.vm_object != object)
return;
@@ -1870,6 +1884,7 @@ pmap_prefault(pmap, addra, entry, object)
starta = 0;
}
+ mpte = NULL;
for (i = 0; i < PAGEORDER_SIZE; i++) {
vm_object_t lobject;
unsigned *pte;
@@ -1910,7 +1925,8 @@ pmap_prefault(pmap, addra, entry, object)
vm_page_deactivate(m);
}
m->flags |= PG_BUSY;
- pmap_enter_quick(pmap, addr, VM_PAGE_TO_PHYS(m));
+ mpte = pmap_enter_quick(pmap, addr,
+ VM_PAGE_TO_PHYS(m), mpte);
m->flags |= PG_MAPPED;
PAGE_WAKEUP(m);
}
@@ -1949,8 +1965,6 @@ pmap_change_wiring(pmap, va, wired)
pmap_pte_set_w(pte, wired);
}
-
-
/*
* Copy the range specified by src_addr/len
* from the source map to the range dst_addr/len
@@ -1987,16 +2001,19 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
unsigned *src_pte, *dst_pte;
vm_page_t dstmpte, srcmpte;
vm_offset_t srcptepaddr;
+ int ptepindex;
if (addr >= UPT_MIN_ADDRESS)
panic("pmap_copy: invalid to pmap_copy page tables\n");
+
pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
- srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[addr >> PDRSHIFT];
- if (srcptepaddr == 0) {
+ ptepindex = addr >> PDRSHIFT;
+
+ srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
+ if (srcptepaddr == 0)
continue;
- }
- srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
+ srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
if (srcmpte->hold_count == 0)
continue;
@@ -2020,12 +2037,14 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
dstmpte = pmap_allocpte(dst_pmap, addr);
if ((*dst_pte == 0) && (ptetemp = *src_pte)) {
/*
- * Simply clear the modified and accessed (referenced)
- * bits.
+ * Clear the modified and
+ * accessed (referenced) bits
+ * during the copy.
*/
*dst_pte = ptetemp & ~(PG_M|PG_A);
dst_pmap->pm_stats.resident_count++;
- pmap_insert_entry(dst_pmap, addr, dstmpte,
+ pmap_insert_entry(dst_pmap, addr,
+ dstmpte,
(ptetemp & PG_FRAME));
} else {
pmap_unwire_pte_hold(dst_pmap, dstmpte);
@@ -2126,7 +2145,8 @@ pmap_page_exists(pmap, pa)
pmap_t pmap;
vm_offset_t pa;
{
- register pv_entry_t *ppv, pv;
+ register pv_entry_t pv;
+ pv_table_t *ppv;
int s;
if (!pmap_is_managed(pa))
@@ -2138,7 +2158,9 @@ pmap_page_exists(pmap, pa)
/*
* Not found, check current mappings returning immediately if found.
*/
- for (pv = *ppv; pv; pv = pv->pv_next) {
+ for (pv = TAILQ_FIRST(&ppv->pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
if (pv->pv_pmap == pmap) {
splx(s);
return TRUE;
@@ -2148,149 +2170,73 @@ pmap_page_exists(pmap, pa)
return (FALSE);
}
+#define PMAP_REMOVE_PAGES_CURPROC_ONLY
/*
- * pmap_testbit tests bits in pte's
- * note that the testbit/changebit routines are inline,
- * and a lot of things compile-time evaluate.
+ * Remove all pages from specified address space
+ * this aids process exit speeds. Also, this code
+ * is special cased for current process only.
*/
-static __inline boolean_t
-pmap_testbit(pa, bit)
- register vm_offset_t pa;
- int bit;
+void
+pmap_remove_pages(pmap, sva, eva)
+ pmap_t pmap;
+ vm_offset_t sva, eva;
{
- register pv_entry_t *ppv, pv;
- unsigned *pte;
+ unsigned *pte, tpte;
+ pv_table_t *ppv;
+ pv_entry_t pv, npv;
int s;
- if (!pmap_is_managed(pa))
- return FALSE;
-
- ppv = pa_to_pvh(pa);
- if (*ppv == NULL)
- return FALSE;
+#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
+ if (pmap != &curproc->p_vmspace->vm_pmap) {
+ printf("warning: pmap_remove_pages called with non-current pmap\n");
+ return;
+ }
+#endif
s = splvm();
- /*
- * Not found, check current mappings returning immediately if found.
- */
- for (pv = *ppv ;pv; pv = pv->pv_next) {
- /*
- * if the bit being tested is the modified bit, then
- * mark clean_map and ptes as never
- * modified.
- */
- if (bit & (PG_A|PG_M)) {
- if (!pmap_track_modified(pv->pv_va))
- continue;
- }
+ for(pv = TAILQ_FIRST(&pmap->pm_pvlist.pv_list);
+ pv;
+ pv = npv) {
- if (!pv->pv_pmap) {
-#if defined(PMAP_DIAGNOSTIC)
- printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va);
-#endif
+ if (pv->pv_va >= eva || pv->pv_va < sva) {
+ npv = TAILQ_NEXT(pv, pv_plist);
continue;
}
+
+#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
+ pte = (unsigned *)vtopte(pv->pv_va);
+#else
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
- if (pte == NULL)
- continue;
- if (*pte & bit) {
- splx(s);
- return TRUE;
- }
- }
- splx(s);
- return (FALSE);
-}
+#endif
+ tpte = *pte;
+ *pte = 0;
-/*
- * this routine is used to modify bits in ptes
- */
-static __inline void
-pmap_changebit(pa, bit, setem)
- vm_offset_t pa;
- int bit;
- boolean_t setem;
-{
- register pv_entry_t pv, *ppv;
- register unsigned *pte;
- vm_offset_t va;
- int changed;
- int s;
+ TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist.pv_list, pv, pv_plist);
- if (!pmap_is_managed(pa))
- return;
+ if (tpte & PG_V) {
+ pv->pv_pmap->pm_stats.resident_count--;
+ if (tpte & PG_W)
+ pv->pv_pmap->pm_stats.wired_count--;
+ /*
+ * Update the vm_page_t clean and reference bits.
+ */
+ if (tpte & PG_M) {
+ PHYS_TO_VM_PAGE(tpte)->dirty = VM_PAGE_BITS_ALL;
+ }
+ }
- s = splvm();
- changed = 0;
- ppv = pa_to_pvh(pa);
- /*
- * Loop over all current mappings setting/clearing as appropos If
- * setting RO do we need to clear the VAC?
- */
- for ( pv = *ppv; pv; pv = pv->pv_next) {
- va = pv->pv_va;
+ npv = TAILQ_NEXT(pv, pv_plist);
- /*
- * don't write protect pager mappings
- */
- if (!setem && (bit == PG_RW)) {
- if (va >= clean_sva && va < clean_eva)
- continue;
- }
- if (!pv->pv_pmap) {
-#if defined(PMAP_DIAGNOSTIC)
- printf("Null pmap (cb) at va: 0x%lx\n", va);
-#endif
- continue;
- }
+ ppv = pa_to_pvh(tpte);
+ TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
+ --ppv->pv_list_count;
- pte = pmap_pte_quick(pv->pv_pmap, va);
- if (pte == NULL)
- continue;
- if (setem) {
- *(int *)pte |= bit;
- changed = 1;
- } else {
- vm_offset_t pbits = *(vm_offset_t *)pte;
- if (pbits & bit)
- changed = 1;
- if (bit == PG_RW) {
- if (pbits & PG_M) {
- vm_page_t m;
- vm_offset_t pa = pbits & PG_FRAME;
- m = PHYS_TO_VM_PAGE(pa);
- m->dirty = VM_PAGE_BITS_ALL;
- }
- *(int *)pte = pbits & ~(PG_M|PG_RW);
- } else {
- *(int *)pte = pbits & ~bit;
- }
- }
+ pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
+ free_pv_entry(pv);
}
+ pmap_update();
splx(s);
- if (changed)
- pmap_update();
-}
-
-/*
- * pmap_page_protect:
- *
- * Lower the permission for all mappings to a given page.
- */
-void
-pmap_page_protect(phys, prot)
- vm_offset_t phys;
- vm_prot_t prot;
-{
- if ((prot & VM_PROT_WRITE) == 0) {
- if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
- pmap_changebit(phys, PG_RW, FALSE);
- } else {
- pmap_remove_all(phys);
- pmap_update();
- }
- }
}
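PMAP_REMOVE_PAGES_CURPROC_ONLY buys one more shortcut: when the pmap being
torn down is the current address space, its page tables are already
visible through the recursive page-directory slot, so locating a PTE is
pure arithmetic and the PMAP1 window is not needed at all. Roughly what
vtopte() amounts to, as a simplified restatement rather than the kernel's
exact macro:

    /* sketch: in the current address space the page tables appear as one
     * linear array at PTmap, indexed by virtual page number */
    extern unsigned *PTmap;                 /* recursive mapping window */
    #define SKETCH_VTOPTE(va) (PTmap + ((unsigned)(va) >> 12))

Process teardown always runs in the exiting process's own context, so
pmap_remove_pages can take this path, and the printf above warns if a
caller ever violates that assumption.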
vm_offset_t
@@ -2301,150 +2247,111 @@ pmap_phys_address(ppn)
}
/*
- * pmap_is_referenced:
+ * pmap_tcbit:
*
- * Return whether or not the specified physical page was referenced
- * by any physical maps.
- */
-boolean_t
-pmap_is_referenced(vm_offset_t pa)
-{
- register pv_entry_t *ppv, pv, lpv;
- unsigned *pte;
- int s;
-
- if (!pmap_is_managed(pa))
- return FALSE;
-
- ppv = pa_to_pvh(pa);
-
- s = splvm();
- /*
- * Not found, check current mappings returning immediately if found.
- */
- for (lpv = NULL, pv = *ppv ;pv; lpv = pv, pv = pv->pv_next) {
- /*
- * if the bit being tested is the modified bit, then
- * mark clean_map and ptes as never
- * modified.
- */
- if (!pmap_track_modified(pv->pv_va))
- continue;
- if (!pv->pv_pmap) {
- continue;
- }
- pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
- if (pte == NULL)
- continue;
- if ((int) *pte & PG_A) {
- if (lpv) {
- lpv->pv_next = pv->pv_next;
- pv->pv_next = *ppv;
- *ppv = pv;
- }
- splx(s);
- return TRUE;
- }
- }
- splx(s);
- return (FALSE);
-}
-
-/*
- * pmap_ts_referenced:
- *
- * Return the count of reference bits for a page, clearing all of them.
+ * Return the count of bits for a page, clearing all of them.
*
*/
int
-pmap_ts_referenced(vm_offset_t pa)
+pmap_tcbit(vm_offset_t pa, int bit)
{
- register pv_entry_t *ppv, pv;
+ register pv_entry_t pv, npv;
+ pv_table_t *ppv;
unsigned *pte;
int s;
int rtval = 0;
- vm_offset_t vachanged[VATRACK];
-
- if (!pmap_is_managed(pa))
- return FALSE;
s = splvm();
ppv = pa_to_pvh(pa);
-
- if (*ppv == NULL) {
- splx(s);
- return 0;
- }
-
/*
* Not found, check current mappings returning immediately if found.
*/
- for (pv = *ppv ;pv; pv = pv->pv_next) {
+ for (pv = TAILQ_FIRST(&ppv->pv_list);
+ pv;
+ pv = npv) {
+ npv = TAILQ_NEXT(pv, pv_list);
/*
* if the bit being tested is the modified bit, then
* mark clean_map and ptes as never
* modified.
*/
- if (!pmap_track_modified(pv->pv_va))
+ if (((bit & PG_M) != 0)
+ && !pmap_track_modified(pv->pv_va))
continue;
- if (!pv->pv_pmap) {
- continue;
- }
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (pte == NULL)
continue;
- if (*pte & PG_A) {
- if (rtval < VATRACK)
- vachanged[rtval] = pv->pv_va;
- rtval++;
- *pte &= ~PG_A;
+
+ if ((rtval == 0) && (*pte & bit)) {
+ rtval = 1;
+ *pte &= ~bit;
+ } else {
+ *pte &= ~bit;
}
}
splx(s);
if (rtval) {
- if (rtval <= VATRACK) {
- int i;
- for(i=0;i<rtval;i++)
- pmap_update_1pg(vachanged[i]);
- } else {
+ if (curproc != pageproc)
pmap_update();
- }
}
return (rtval);
}
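This routine is item 6 of the log in code form: the old
pmap_testbit/pmap_changebit pair traversed the pv list twice per
test-then-clear cycle, while pmap_tcbit does both jobs in a single pass.
Stripped to its essence, with pte_of() as a hypothetical stand-in for
pmap_pte_quick and the modified-tracking filter omitted:

    /* sketch: one traversal both reports and clears; rtval records whether
     * any mapping had the bit, and every mapping ends with it cleared */
    rtval = 0;
    TAILQ_FOREACH(pv, &ppv->pv_list, pv_list) {
            unsigned *pte = pte_of(pv->pv_pmap, pv->pv_va);  /* hypothetical */
            if (*pte & bit)
                    rtval = 1;
            *pte &= ~bit;
    }

pmap_tc_modified and pmap_tc_referenced below are thin wrappers over this
walk, and the vm_page_t argument to the modified variant saves the
PHYS_TO_VM_PAGE call the log mentions.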
/*
- * pmap_is_modified:
+ * pmap_tc_modified:
*
- * Return whether or not the specified physical page was modified
- * in any physical maps.
+ * Return the count of modified bits for a page, clearing all of them.
+ *
*/
-boolean_t
-pmap_is_modified(vm_offset_t pa)
+int
+pmap_tc_modified(vm_page_t m)
{
- return pmap_testbit((pa), PG_M);
+ int rtval;
+
+ rtval = pmap_tcbit(VM_PAGE_TO_PHYS(m), PG_M);
+ if (rtval)
+ m->dirty = VM_PAGE_BITS_ALL;
+
+ return rtval;
}
/*
- * Clear the modify bits on the specified physical page.
+ * pmap_tc_referenced:
+ *
+ * Return the count of referenced bits for a page, clearing all of them.
+ *
*/
-void
-pmap_clear_modify(vm_offset_t pa)
+int
+pmap_tc_referenced(vm_offset_t pa)
{
- pmap_changebit((pa), PG_M, FALSE);
+ if (!pmap_is_managed(pa))
+ return 0;
+ return pmap_tcbit(pa, PG_A);
}
/*
- * pmap_clear_reference:
+ * pmap_page_protect:
*
- * Clear the reference bit on the specified physical page.
+ * Lower the permission for all mappings to a given page.
*/
void
-pmap_clear_reference(vm_offset_t pa)
+pmap_page_protect(m, prot)
+ vm_page_t m;
+ vm_prot_t prot;
{
- pmap_changebit((pa), PG_A, FALSE);
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
+ if ((m->flags & PG_FICTITIOUS) == 0)
+ pmap_tcbit(VM_PAGE_TO_PHYS(m), PG_RW);
+ } else {
+ if (pmap_remove_all(VM_PAGE_TO_PHYS(m))) {
+ m->dirty = VM_PAGE_BITS_ALL;
+ }
+ pmap_update();
+ }
+ }
}
/*
@@ -2525,7 +2432,7 @@ pmap_mincore(pmap, addr)
unsigned *ptep, pte;
int val = 0;
- ptep = pmap_pte(pmap, addr);
+ ptep = pmap_pte_quick(pmap, addr);
if (ptep == 0) {
return 0;
}
@@ -2544,8 +2451,10 @@ pmap_mincore(pmap, addr)
* Modified by someone
*/
else if (PHYS_TO_VM_PAGE(pa)->dirty ||
- pmap_is_modified(pa))
+ pmap_tcbit(pa, PG_M)) {
val |= MINCORE_MODIFIED_OTHER;
+ PHYS_TO_VM_PAGE(pa)->dirty = VM_PAGE_BITS_ALL;
+ }
/*
* Referenced by us
*/
@@ -2556,7 +2465,7 @@ pmap_mincore(pmap, addr)
* Referenced by someone
*/
else if ((PHYS_TO_VM_PAGE(pa)->flags & PG_REFERENCED) ||
- pmap_is_referenced(pa))
+ pmap_tcbit(pa, PG_A))
val |= MINCORE_REFERENCED_OTHER;
}
return val;
@@ -2592,7 +2501,7 @@ pmap_pid_dump(int pid) {
}
return npte;
}
- pte = pmap_pte( pmap, va);
+ pte = pmap_pte_quick( pmap, va);
if (pte && pmap_pte_v(pte)) {
vm_offset_t pa;
vm_page_t m;
@@ -2641,7 +2550,7 @@ pads(pm)
continue;
if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
continue;
- ptep = pmap_pte(pm, va);
+ ptep = pmap_pte_quick(pm, va);
if (pmap_pte_v(ptep))
printf("%x:%x ", va, *(int *) ptep);
};
@@ -2655,7 +2564,9 @@ pmap_pvdump(pa)
register pv_entry_t pv;
printf("pa %x", pa);
- for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
+ for (pv = TAILQ_FIRST(&pa_to_pvh(pa)->pv_list);
+ pv;
+ pv = TAILQ_NEXT(pv, pv_list)) {
#ifdef used_to_be
printf(" -> pmap %x, va %x, flags %x",
pv->pv_pmap, pv->pv_va, pv->pv_flags);
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index dbadf6f..b53db62 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -42,12 +42,13 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
- * $Id: pmap.h,v 1.39 1996/05/18 03:36:38 dyson Exp $
+ * $Id: pmap.h,v 1.40 1996/06/08 11:21:19 bde Exp $
*/
#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_
+
/*
* Page-directory and page-table entires follow this format, with a few
* of the fields not present here and there, depending on a lot of things.
@@ -113,6 +114,9 @@
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)
#ifndef LOCORE
+
+#include <sys/queue.h>
+
typedef unsigned int *pd_entry_t;
typedef unsigned int *pt_entry_t;
@@ -158,17 +162,24 @@ pmap_kextract(vm_offset_t va)
}
#endif
+struct vm_page;
+
/*
* Pmap stuff
*/
+struct pv_entry;
+typedef struct {
+ int pv_list_count;
+ TAILQ_HEAD(,pv_entry) pv_list;
+} pv_table_t;
struct pmap {
pd_entry_t *pm_pdir; /* KVA of page directory */
vm_object_t pm_pteobj; /* Container for pte's */
- short pm_dref; /* page directory ref count */
- short pm_count; /* pmap reference count */
+ pv_table_t pm_pvlist; /* list of mappings in pmap */
+ int pm_count; /* reference count */
struct pmap_statistics pm_stats; /* pmap statistics */
- struct vm_map *pm_map; /* map that owns this pmap */
+ struct vm_page *pm_ptphint; /* pmap ptp hint */
};
typedef struct pmap *pmap_t;
@@ -177,14 +188,16 @@ typedef struct pmap *pmap_t;
extern pmap_t kernel_pmap;
#endif
+
/*
* For each vm_page_t, there is a list of all currently valid virtual
* mappings of that page. An entry is a pv_entry_t, the list is pv_table.
*/
typedef struct pv_entry {
- struct pv_entry *pv_next; /* next pv_entry */
pmap_t pv_pmap; /* pmap where mapping lies */
vm_offset_t pv_va; /* virtual address for mapping */
+ TAILQ_ENTRY(pv_entry) pv_list;
+ TAILQ_ENTRY(pv_entry) pv_plist;
vm_page_t pv_ptem; /* VM page for pte */
} *pv_entry_t;
@@ -200,7 +213,7 @@ extern pt_entry_t *CMAP1;
extern vm_offset_t avail_end;
extern vm_offset_t avail_start;
extern vm_offset_t phys_avail[];
-extern pv_entry_t *pv_table; /* array of entries, one per page */
+pv_table_t *pv_table;
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;