summary | refs | log | tree | commit | diff | stats
path: root/sys
diff options
context:
space:
mode:
author: alc <alc@FreeBSD.org> 2004-08-04 18:04:44 +0000
committer: alc <alc@FreeBSD.org> 2004-08-04 18:04:44 +0000
commit: 97dc3be9d1d100ff9b272bd53d1b13d35c4d5af7 (patch)
tree: 092cd668c594d498a9caeabf9ade42ddad301af8 /sys
parent: 1031eb4832030de197f4e44477c2c5a8423e0927 (diff)
download: FreeBSD-src-97dc3be9d1d100ff9b272bd53d1b13d35c4d5af7.zip
download: FreeBSD-src-97dc3be9d1d100ff9b272bd53d1b13d35c4d5af7.tar.gz
Post-locking clean up/simplification, particularly, the elimination of
vm_page_sleep_if_busy() and the page table page's busy flag as a synchronization mechanism on page table pages. Also, relocate the inline pmap_unwire_pte_hold() so that it can be used to shorten _pmap_unwire_pte_hold() on alpha and amd64. This places pmap_unwire_pte_hold() next to a comment that more accurately describes it than _pmap_unwire_pte_hold().
Diffstat (limited to 'sys')
-rw-r--r--sys/alpha/alpha/pmap.c106
-rw-r--r--sys/amd64/amd64/pmap.c132
-rw-r--r--sys/i386/i386/pmap.c77
3 files changed, 146 insertions(+), 169 deletions(-)
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 2078fbd..0766e6d 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -336,6 +336,7 @@ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va,
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex);
+static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
#ifdef SMP
static void pmap_invalidate_page_action(void *arg);
@@ -894,74 +895,65 @@ pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
* This routine unholds page table pages, and if the hold count
* drops to zero, then it decrements the wire count.
*/
-static int
-_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
+static PMAP_INLINE int
+pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
- while (vm_page_sleep_if_busy(m, FALSE, "pmuwpt"))
- vm_page_lock_queues();
+ vm_page_unhold(m);
+ if (m->hold_count == 0)
+ return _pmap_unwire_pte_hold(pmap, va, m);
+ else
+ return 0;
+}
- if (m->hold_count == 0) {
- vm_offset_t pteva;
- pt_entry_t* pte;
+static int
+_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
+{
+ vm_offset_t pteva;
+ pt_entry_t* pte;
- /*
- * unmap the page table page
- */
- if (m->pindex >= NUSERLEV3MAPS) {
- /* Level 2 page table */
- pte = pmap_lev1pte(pmap, va);
- pteva = (vm_offset_t) PTlev2 + alpha_ptob(m->pindex - NUSERLEV3MAPS);
- } else {
- /* Level 3 page table */
- pte = pmap_lev2pte(pmap, va);
- pteva = (vm_offset_t) PTmap + alpha_ptob(m->pindex);
- }
+ /*
+ * unmap the page table page
+ */
+ if (m->pindex >= NUSERLEV3MAPS) {
+ /* Level 2 page table */
+ pte = pmap_lev1pte(pmap, va);
+ pteva = (vm_offset_t) PTlev2 + alpha_ptob(m->pindex - NUSERLEV3MAPS);
+ } else {
+ /* Level 3 page table */
+ pte = pmap_lev2pte(pmap, va);
+ pteva = (vm_offset_t) PTmap + alpha_ptob(m->pindex);
+ }
- *pte = 0;
+ *pte = 0;
- if (m->pindex < NUSERLEV3MAPS) {
- /* unhold the level 2 page table */
- vm_page_t lev2pg;
+ if (m->pindex < NUSERLEV3MAPS) {
+ /* unhold the level 2 page table */
+ vm_page_t lev2pg;
- lev2pg = PHYS_TO_VM_PAGE(pmap_pte_pa(pmap_lev1pte(pmap, va)));
- vm_page_unhold(lev2pg);
- if (lev2pg->hold_count == 0)
- _pmap_unwire_pte_hold(pmap, va, lev2pg);
- }
+ lev2pg = PHYS_TO_VM_PAGE(pmap_pte_pa(pmap_lev1pte(pmap, va)));
+ pmap_unwire_pte_hold(pmap, va, lev2pg);
+ }
- --pmap->pm_stats.resident_count;
- /*
- * Do a invltlb to make the invalidated mapping
- * take effect immediately.
- */
- pmap_invalidate_page(pmap, pteva);
+ --pmap->pm_stats.resident_count;
+ /*
+ * Do a invltlb to make the invalidated mapping
+ * take effect immediately.
+ */
+ pmap_invalidate_page(pmap, pteva);
- if (pmap->pm_ptphint == m)
- pmap->pm_ptphint = NULL;
+ if (pmap->pm_ptphint == m)
+ pmap->pm_ptphint = NULL;
- /*
- * If the page is finally unwired, simply free it.
- */
- --m->wire_count;
- if (m->wire_count == 0) {
- vm_page_busy(m);
- vm_page_free_zero(m);
- atomic_subtract_int(&cnt.v_wire_count, 1);
- }
- return 1;
+ /*
+ * If the page is finally unwired, simply free it.
+ */
+ --m->wire_count;
+ if (m->wire_count == 0) {
+ vm_page_free_zero(m);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
}
- return 0;
-}
-
-static PMAP_INLINE int
-pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
-{
- vm_page_unhold(m);
- if (m->hold_count == 0)
- return _pmap_unwire_pte_hold(pmap, va, m);
- else
- return 0;
+ return 1;
}
/*
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index b8c6a9d..f2f367c 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -215,6 +215,7 @@ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va);
static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex);
+static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
@@ -964,87 +965,76 @@ pmap_qremove(vm_offset_t sva, int count)
* This routine unholds page table pages, and if the hold count
* drops to zero, then it decrements the wire count.
*/
+static PMAP_INLINE int
+pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
+{
+
+ vm_page_unhold(m);
+ if (m->hold_count == 0)
+ return _pmap_unwire_pte_hold(pmap, va, m);
+ else
+ return 0;
+}
+
static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
+ vm_offset_t pteva;
- while (vm_page_sleep_if_busy(m, FALSE, "pmuwpt"))
- vm_page_lock_queues();
-
- if (m->hold_count == 0) {
- vm_offset_t pteva;
+ /*
+ * unmap the page table page
+ */
+ if (m->pindex >= (NUPDE + NUPDPE)) {
+ /* PDP page */
+ pml4_entry_t *pml4;
+ pml4 = pmap_pml4e(pmap, va);
+ pteva = (vm_offset_t) PDPmap + amd64_ptob(m->pindex - (NUPDE + NUPDPE));
+ *pml4 = 0;
+ } else if (m->pindex >= NUPDE) {
+ /* PD page */
+ pdp_entry_t *pdp;
+ pdp = pmap_pdpe(pmap, va);
+ pteva = (vm_offset_t) PDmap + amd64_ptob(m->pindex - NUPDE);
+ *pdp = 0;
+ } else {
+ /* PTE page */
+ pd_entry_t *pd;
+ pd = pmap_pde(pmap, va);
+ pteva = (vm_offset_t) PTmap + amd64_ptob(m->pindex);
+ *pd = 0;
+ }
+ --pmap->pm_stats.resident_count;
+ if (m->pindex < NUPDE) {
+ /* We just released a PT, unhold the matching PD */
+ vm_page_t pdpg;
- /*
- * unmap the page table page
- */
- if (m->pindex >= (NUPDE + NUPDPE)) {
- /* PDP page */
- pml4_entry_t *pml4;
- pml4 = pmap_pml4e(pmap, va);
- pteva = (vm_offset_t) PDPmap + amd64_ptob(m->pindex - (NUPDE + NUPDPE));
- *pml4 = 0;
- } else if (m->pindex >= NUPDE) {
- /* PD page */
- pdp_entry_t *pdp;
- pdp = pmap_pdpe(pmap, va);
- pteva = (vm_offset_t) PDmap + amd64_ptob(m->pindex - NUPDE);
- *pdp = 0;
- } else {
- /* PTE page */
- pd_entry_t *pd;
- pd = pmap_pde(pmap, va);
- pteva = (vm_offset_t) PTmap + amd64_ptob(m->pindex);
- *pd = 0;
- }
- --pmap->pm_stats.resident_count;
- if (m->pindex < NUPDE) {
- /* We just released a PT, unhold the matching PD */
- vm_page_t pdpg;
-
- pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
- vm_page_unhold(pdpg);
- if (pdpg->hold_count == 0)
- _pmap_unwire_pte_hold(pmap, va, pdpg);
- }
- if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
- /* We just released a PD, unhold the matching PDP */
- vm_page_t pdppg;
-
- pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
- vm_page_unhold(pdppg);
- if (pdppg->hold_count == 0)
- _pmap_unwire_pte_hold(pmap, va, pdppg);
- }
- if (pmap_is_current(pmap)) {
- /*
- * Do an invltlb to make the invalidated mapping
- * take effect immediately.
- */
- pmap_invalidate_page(pmap, pteva);
- }
+ pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
+ pmap_unwire_pte_hold(pmap, va, pdpg);
+ }
+ if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
+ /* We just released a PD, unhold the matching PDP */
+ vm_page_t pdppg;
+ pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
+ pmap_unwire_pte_hold(pmap, va, pdppg);
+ }
+ if (pmap_is_current(pmap)) {
/*
- * If the page is finally unwired, simply free it.
+ * Do an invltlb to make the invalidated mapping
+ * take effect immediately.
*/
- --m->wire_count;
- if (m->wire_count == 0) {
- vm_page_busy(m);
- vm_page_free_zero(m);
- atomic_subtract_int(&cnt.v_wire_count, 1);
- }
- return 1;
+ pmap_invalidate_page(pmap, pteva);
}
- return 0;
-}
-static PMAP_INLINE int
-pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
-{
- vm_page_unhold(m);
- if (m->hold_count == 0)
- return _pmap_unwire_pte_hold(pmap, va, m);
- else
- return 0;
+ /*
+ * If the page is finally unwired, simply free it.
+ */
+ --m->wire_count;
+ if (m->wire_count == 0) {
+ vm_page_free_zero(m);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
+ }
+ return 1;
}
/*
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 69fdae9..8457022 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -261,6 +261,7 @@ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex);
+static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static int pmap_unuse_pt(pmap_t, vm_offset_t);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
@@ -1000,50 +1001,10 @@ pmap_qremove(vm_offset_t sva, int count)
* This routine unholds page table pages, and if the hold count
* drops to zero, then it decrements the wire count.
*/
-static int
-_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
-{
-
- while (vm_page_sleep_if_busy(m, FALSE, "pmuwpt"))
- vm_page_lock_queues();
-
- if (m->hold_count == 0) {
- vm_offset_t pteva;
- /*
- * unmap the page table page
- */
- pmap->pm_pdir[m->pindex] = 0;
- --pmap->pm_stats.resident_count;
- /*
- * We never unwire a kernel page table page, making a
- * check for the kernel_pmap unnecessary.
- */
- if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)) {
- /*
- * Do an invltlb to make the invalidated mapping
- * take effect immediately.
- */
- pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
- pmap_invalidate_page(pmap, pteva);
- }
-
- /*
- * If the page is finally unwired, simply free it.
- */
- --m->wire_count;
- if (m->wire_count == 0) {
- vm_page_busy(m);
- vm_page_free_zero(m);
- atomic_subtract_int(&cnt.v_wire_count, 1);
- }
- return 1;
- }
- return 0;
-}
-
static PMAP_INLINE int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
{
+
vm_page_unhold(m);
if (m->hold_count == 0)
return _pmap_unwire_pte_hold(pmap, m);
@@ -1051,6 +1012,40 @@ pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
return 0;
}
+static int
+_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
+{
+ vm_offset_t pteva;
+
+ /*
+ * unmap the page table page
+ */
+ pmap->pm_pdir[m->pindex] = 0;
+ --pmap->pm_stats.resident_count;
+ /*
+ * We never unwire a kernel page table page, making a
+ * check for the kernel_pmap unnecessary.
+ */
+ if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)) {
+ /*
+ * Do an invltlb to make the invalidated mapping
+ * take effect immediately.
+ */
+ pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
+ pmap_invalidate_page(pmap, pteva);
+ }
+
+ /*
+ * If the page is finally unwired, simply free it.
+ */
+ --m->wire_count;
+ if (m->wire_count == 0) {
+ vm_page_free_zero(m);
+ atomic_subtract_int(&cnt.v_wire_count, 1);
+ }
+ return 1;
+}
+
/*
* After removing a page table entry, this routine is used to
* conditionally free the page, and manage the hold/wire counts.
OpenPOWER on IntegriCloud