-rw-r--r--   sys/amd64/amd64/pmap.c   44
-rw-r--r--   sys/i386/i386/pmap.c     35
-rw-r--r--   sys/i386/xen/pmap.c      35
-rw-r--r--   sys/mips/mips/pmap.c     40
4 files changed, 76 insertions, 78 deletions
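
The commit renames pmap_unwire_pte_hold() to pmap_unwire_ptp() in the amd64, i386, i386/xen, and mips pmaps and reworks the return handling: the inline fast path now returns boolean_t (TRUE when the page table page's wire count reached zero and the page was unmapped), while the out-of-line slow path _pmap_unwire_ptp() becomes void. What follows is a minimal stand-alone sketch of that pattern, not the kernel code itself; the type ptp and the names ptp_unwire() and ptp_free_slowpath() are simplified stand-ins used only for illustration.

#include <stdbool.h>
#include <stdio.h>

struct ptp {
	unsigned wire_count;		/* number of valid PTEs in this page */
};

/* Slow path: unmap and reclaim the page table page (stubbed out here). */
static void
ptp_free_slowpath(struct ptp *m)
{

	printf("page table page %p unmapped\n", (void *)m);
}

/*
 * Fast path: drop one reference.  Returns true when the wire count hit
 * zero and the page was handed to the slow path, false otherwise.
 */
static inline bool
ptp_unwire(struct ptp *m)
{

	if (--m->wire_count == 0) {
		ptp_free_slowpath(m);
		return (true);
	}
	return (false);
}

int
main(void)
{
	struct ptp m = { .wire_count = 2 };

	printf("%d\n", ptp_unwire(&m));	/* 0: valid entries still remain */
	printf("%d\n", ptp_unwire(&m));	/* 1: last entry gone, page freed */
	return (0);
}

Keeping only the common decrement-and-test in the inline wrapper, and pushing the unmap/free work into a void helper, keeps the inlined code small and drops a return value the wrapper already knows.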
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 1114a6b..03d9ae7 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -323,8 +323,8 @@ static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va,
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
struct rwlock **lockp);
-static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
- vm_page_t* free);
+static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_page_t *free);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, vm_page_t *);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
@@ -1557,23 +1557,25 @@ pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
}
/*
- * This routine unholds page table pages, and if the hold count
- * drops to zero, then it decrements the wire count.
+ * Decrements a page table page's wire count, which is used to record the
+ * number of valid page table entries within the page. If the wire count
+ * drops to zero, then the page table page is unmapped. Returns TRUE if the
+ * page table page was unmapped and FALSE otherwise.
*/
-static __inline int
-pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
+static inline boolean_t
+pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
{
--m->wire_count;
- if (m->wire_count == 0)
- return (_pmap_unwire_pte_hold(pmap, va, m, free));
- else
- return (0);
+ if (m->wire_count == 0) {
+ _pmap_unwire_ptp(pmap, va, m, free);
+ return (TRUE);
+ } else
+ return (FALSE);
}
-static int
-_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
- vm_page_t *free)
+static void
+_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
{
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1602,14 +1604,14 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_page_t pdpg;
pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
- pmap_unwire_pte_hold(pmap, va, pdpg, free);
+ pmap_unwire_ptp(pmap, va, pdpg, free);
}
if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
/* We just released a PD, unhold the matching PDP */
vm_page_t pdppg;
pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
- pmap_unwire_pte_hold(pmap, va, pdppg, free);
+ pmap_unwire_ptp(pmap, va, pdppg, free);
}
/*
@@ -1624,8 +1626,6 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
* *ALL* TLB shootdown is done
*/
pmap_add_delayed_free_list(m, free, TRUE);
-
- return (1);
}
/*
@@ -1641,7 +1641,7 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, vm_page_t *free)
return (0);
KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
- return (pmap_unwire_pte_hold(pmap, va, mpte, free));
+ return (pmap_unwire_ptp(pmap, va, mpte, free));
}
void
@@ -3666,7 +3666,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m),
lockp)) {
free = NULL;
- if (pmap_unwire_pte_hold(pmap, va, mpde, &free)) {
+ if (pmap_unwire_ptp(pmap, va, mpde, &free)) {
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(free);
}
@@ -3842,7 +3842,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
!pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
if (mpte != NULL) {
free = NULL;
- if (pmap_unwire_pte_hold(pmap, va, mpte, &free)) {
+ if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(free);
}
@@ -4149,8 +4149,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
pmap_resident_count_inc(dst_pmap, 1);
} else {
free = NULL;
- if (pmap_unwire_pte_hold(dst_pmap,
- addr, dstmpte, &free)) {
+ if (pmap_unwire_ptp(dst_pmap, addr,
+ dstmpte, &free)) {
pmap_invalidate_page(dst_pmap,
addr);
pmap_free_zero_pages(free);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 55f6c4a..9e81117 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -344,7 +344,7 @@ static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
-static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
+static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
@@ -1672,22 +1672,25 @@ pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
}
/*
- * This routine unholds page table pages, and if the hold count
- * drops to zero, then it decrements the wire count.
+ * Decrements a page table page's wire count, which is used to record the
+ * number of valid page table entries within the page. If the wire count
+ * drops to zero, then the page table page is unmapped. Returns TRUE if the
+ * page table page was unmapped and FALSE otherwise.
*/
-static __inline int
-pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
+static inline boolean_t
+pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
{
--m->wire_count;
- if (m->wire_count == 0)
- return (_pmap_unwire_pte_hold(pmap, m, free));
- else
- return (0);
+ if (m->wire_count == 0) {
+ _pmap_unwire_ptp(pmap, m, free);
+ return (TRUE);
+ } else
+ return (FALSE);
}
-static int
-_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
+static void
+_pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
{
vm_offset_t pteva;
@@ -1716,8 +1719,6 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
* *ALL* TLB shootdown is done
*/
pmap_add_delayed_free_list(m, free, TRUE);
-
- return (1);
}
/*
@@ -1734,7 +1735,7 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
return (0);
ptepde = *pmap_pde(pmap, va);
mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
- return (pmap_unwire_pte_hold(pmap, mpte, free));
+ return (pmap_unwire_ptp(pmap, mpte, free));
}
/*
@@ -3819,7 +3820,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
!pmap_try_insert_pv_entry(pmap, va, m)) {
if (mpte != NULL) {
free = NULL;
- if (pmap_unwire_pte_hold(pmap, mpte, &free)) {
+ if (pmap_unwire_ptp(pmap, mpte, &free)) {
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(free);
}
@@ -4088,8 +4089,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
dst_pmap->pm_stats.resident_count++;
} else {
free = NULL;
- if (pmap_unwire_pte_hold(dst_pmap,
- dstmpte, &free)) {
+ if (pmap_unwire_ptp(dst_pmap, dstmpte,
+ &free)) {
pmap_invalidate_page(dst_pmap,
addr);
pmap_free_zero_pages(free);
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 61bc35f..b0e1624 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -300,7 +300,7 @@ static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
-static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
+static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
@@ -1333,22 +1333,25 @@ pmap_free_zero_pages(vm_page_t free)
}
/*
- * This routine unholds page table pages, and if the hold count
- * drops to zero, then it decrements the wire count.
+ * Decrements a page table page's wire count, which is used to record the
+ * number of valid page table entries within the page. If the wire count
+ * drops to zero, then the page table page is unmapped. Returns TRUE if the
+ * page table page was unmapped and FALSE otherwise.
*/
-static __inline int
-pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
+static inline boolean_t
+pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
{
--m->wire_count;
- if (m->wire_count == 0)
- return (_pmap_unwire_pte_hold(pmap, m, free));
- else
- return (0);
+ if (m->wire_count == 0) {
+ _pmap_unwire_ptp(pmap, m, free);
+ return (TRUE);
+ } else
+ return (FALSE);
}
-static int
-_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
+static void
+_pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
{
vm_offset_t pteva;
@@ -1384,8 +1387,6 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
*/
m->right = *free;
*free = m;
-
- return (1);
}
/*
@@ -1402,7 +1403,7 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
return (0);
ptepde = PT_GET(pmap_pde(pmap, va));
mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
- return (pmap_unwire_pte_hold(pmap, mpte, free));
+ return (pmap_unwire_ptp(pmap, mpte, free));
}
/*
@@ -3015,7 +3016,7 @@ pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_o
!pmap_try_insert_pv_entry(pmap, va, m)) {
if (mpte != NULL) {
free = NULL;
- if (pmap_unwire_pte_hold(pmap, mpte, &free)) {
+ if (pmap_unwire_ptp(pmap, mpte, &free)) {
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(free);
}
@@ -3294,8 +3295,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
dst_pmap->pm_stats.resident_count++;
} else {
free = NULL;
- if (pmap_unwire_pte_hold(dst_pmap,
- dstmpte, &free)) {
+ if (pmap_unwire_ptp(dst_pmap, dstmpte,
+ &free)) {
pmap_invalidate_page(dst_pmap,
addr);
pmap_free_zero_pages(free);
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 75a511b..4c12788 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -190,7 +190,7 @@ static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
static void pmap_invalidate_all(pmap_t pmap);
static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
-static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);
+static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
@@ -928,29 +928,26 @@ pmap_qremove(vm_offset_t va, int count)
* Page table page management routines.....
***************************************************/
-/* Revision 1.507
- *
- * Simplify the reference counting of page table pages. Specifically, use
- * the page table page's wired count rather than its hold count to contain
- * the reference count.
- */
-
/*
- * This routine unholds page table pages, and if the hold count
- * drops to zero, then it decrements the wire count.
+ * Decrements a page table page's wire count, which is used to record the
+ * number of valid page table entries within the page. If the wire count
+ * drops to zero, then the page table page is unmapped. Returns TRUE if the
+ * page table page was unmapped and FALSE otherwise.
*/
-static PMAP_INLINE int
-pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
+static PMAP_INLINE boolean_t
+pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
+
--m->wire_count;
- if (m->wire_count == 0)
- return (_pmap_unwire_pte_hold(pmap, va, m));
- else
- return (0);
+ if (m->wire_count == 0) {
+ _pmap_unwire_ptp(pmap, va, m);
+ return (TRUE);
+ } else
+ return (FALSE);
}
-static int
-_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
+static void
+_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
pd_entry_t *pde;
@@ -979,7 +976,7 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
*/
pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
- pmap_unwire_pte_hold(pmap, va, pdpg);
+ pmap_unwire_ptp(pmap, va, pdpg);
}
#endif
@@ -988,7 +985,6 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
*/
vm_page_free_zero(m);
atomic_subtract_int(&cnt.v_wire_count, 1);
- return (1);
}
/*
@@ -1004,7 +1000,7 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
return (0);
KASSERT(pde != 0, ("pmap_unuse_pt: pde != 0"));
mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pde));
- return (pmap_unwire_pte_hold(pmap, va, mpte));
+ return (pmap_unwire_ptp(pmap, va, mpte));
}
void
@@ -2227,7 +2223,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if ((m->oflags & VPO_UNMANAGED) == 0 &&
!pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
if (mpte != NULL) {
- pmap_unwire_pte_hold(pmap, va, mpte);
+ pmap_unwire_ptp(pmap, va, mpte);
mpte = NULL;
}
return (mpte);