author     alc <alc@FreeBSD.org>  2004-09-19 21:20:01 +0000
committer  alc <alc@FreeBSD.org>  2004-09-19 21:20:01 +0000
commit     a58cafd11e24a1ef73abb296f763f5aab599e03e (patch)
tree       27d568eee15846ba7a12103d18c803f7a277cd35 /sys/alpha
parent     09baad31f132c62e94eec636c0627192f0006ea1 (diff)
Simplify the reference counting of page table pages.  Specifically, use
the page table page's wired count rather than its hold count to contain
the reference count.  My rationale for this change is based on several
factors:

1. The machine-independent and pmap layers used the same hold count
   field in subtly different ways.  The machine-independent layer uses
   the hold count to implement a form of ephemeral wiring that is used
   by pipes, physio, etc.  In other words, subsystems where we wish to
   temporarily block a page from being swapped out while it is mapped
   into the kernel's address space.  Such pages are never removed from
   the page queues.  Instead, the page daemon recognizes a non-zero
   hold count to mean "hands off this page."  In contrast, page table
   pages are never in the page queues; they are wired from birth to
   death.  The hold count was being used as a kind of reference count,
   specifically, the number of valid page table entries within the
   page.  Not surprisingly, these two different uses imply different
   synchronization rules: in the machine-independent layer access to
   the hold count requires the page queues lock, whereas in the pmap
   layer the pmap lock is required.  Thus, continued use by the pmap
   layer of vm_page_unhold(), which asserts that the page queues lock
   is held, made no sense.

2. _pmap_unwire_pte_hold() was too forgiving in its handling of the
   wired count.  An unexpected wired count on a page table page was
   ignored and the underlying page leaked.

3. In a word, micro-optimization.  Using the wired count exclusively,
   rather than a combination of the wired and hold counts, makes the
   code slightly smaller and faster.

Reviewed by:	tegge@
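For readers following the change: the scheme this commit adopts keeps a
page table page's wire_count equal to its reference count, i.e. the
number of valid page table entries within it (the page is allocated
wired, and that initial count of one stands for the first mapping about
to be installed through it), and the page is freed exactly when the
count drops to zero.  The toy program below is a minimal, self-contained
sketch of that invariant, not the kernel code: struct ptp, ptp_alloc(),
ptp_ref(), ptp_unref(), and v_wire_count are hypothetical stand-ins for
vm_page, its wired-count manipulation, and cnt.v_wire_count, and all
locking is elided.

#include <assert.h>

struct ptp {
	int wire_count;		/* stand-in for vm_page's wired count */
};

static int v_wire_count;	/* stand-in for cnt.v_wire_count */

/*
 * Allocate a page table page wired; the initial reference stands
 * for the mapping about to be installed through it.
 */
static void
ptp_alloc(struct ptp *m)
{
	m->wire_count = 1;
	v_wire_count++;
}

/* Each additional valid PTE in the page is another reference. */
static void
ptp_ref(struct ptp *m)
{
	m->wire_count++;
}

/*
 * Removing a PTE drops a reference; the page is freed exactly when
 * the count reaches zero, so an unexpected residual count can no
 * longer be silently ignored and leaked (point 2 above).
 */
static int
ptp_unref(struct ptp *m)
{
	if (--m->wire_count == 0) {
		v_wire_count--;	/* the page leaves the wired pool */
		return (1);	/* caller frees the page */
	}
	return (0);
}

int
main(void)
{
	struct ptp m;

	ptp_alloc(&m);			/* first PTE reference */
	ptp_ref(&m);			/* second PTE installed */
	assert(ptp_unref(&m) == 0);	/* one PTE gone; page survives */
	assert(ptp_unref(&m) == 1);	/* last PTE gone; page is freed */
	assert(v_wire_count == 0);
	return (0);
}

Because there is now a single counter with a single owner, the pmap
lock, the locking mismatch described in point 1 disappears along with
this path's need for vm_page_unhold() and the page queues lock.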
Diffstat (limited to 'sys/alpha')
-rw-r--r--  sys/alpha/alpha/pmap.c | 32
1 file changed, 10 insertions(+), 22 deletions(-)
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 6a094df..2f449e3 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -895,8 +895,8 @@ static PMAP_INLINE int
 pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
 {
-	vm_page_unhold(m);
-	if (m->hold_count == 0)
+	--m->wire_count;
+	if (m->wire_count == 0)
 		return _pmap_unwire_pte_hold(pmap, va, m);
 	else
 		return 0;
 }
@@ -941,14 +941,8 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
 	if (pmap->pm_ptphint == m)
 		pmap->pm_ptphint = NULL;
 
-	/*
-	 * If the page is finally unwired, simply free it.
-	 */
-	--m->wire_count;
-	if (m->wire_count == 0) {
-		vm_page_free_zero(m);
-		atomic_subtract_int(&cnt.v_wire_count, 1);
-	}
+	vm_page_free_zero(m);
+	atomic_subtract_int(&cnt.v_wire_count, 1);
 
 	return 1;
 }
@@ -1074,12 +1068,6 @@ _pmap_allocpte(pmap, ptepindex)
 	pmap_zero_page(m);
 
 	/*
-	 * Increment the hold count for the page table page
-	 * (denoting a new mapping.)
-	 */
-	m->hold_count++;
-
-	/*
 	 * Map the pagetable page into the process address space, if
 	 * it isn't already there.
 	 */
@@ -1096,7 +1084,7 @@ _pmap_allocpte(pmap, ptepindex)
 		pt_entry_t* l2map;
 		if (!pmap_pte_v(l1pte)) {
 			if (_pmap_allocpte(pmap, NUSERLEV3MAPS + l1index) == NULL) {
-				vm_page_unhold(m);
+				--m->wire_count;
 				vm_page_free(m);
 				return (NULL);
 			}
@@ -1104,7 +1092,7 @@ _pmap_allocpte(pmap, ptepindex)
 			vm_page_t l2page;
 
 			l2page = PHYS_TO_VM_PAGE(pmap_pte_pa(l1pte));
-			l2page->hold_count++;
+			l2page->wire_count++;
 		}
 		l2map = (pt_entry_t*) ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
 		pte = &l2map[ptepindex & ((1 << ALPHA_PTSHIFT) - 1)];
@@ -1153,7 +1141,7 @@ retry:
 			m = PHYS_TO_VM_PAGE(pmap_pte_pa(lev2pte));
 			pmap->pm_ptphint = m;
 		}
-		m->hold_count++;
+		m->wire_count++;
 	} else {
 		/*
 		 * Here if the pte page isn't mapped, or if it has been
@@ -1721,7 +1709,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	 * Remove extra pte reference
 	 */
 	if (mpte)
-		mpte->hold_count--;
+		mpte->wire_count--;
 
 	/*
 	 * We might be turning off write access to the page,
@@ -1836,7 +1824,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 	 */
 	ptepindex = va >> ALPHA_L2SHIFT;
 	if (mpte && (mpte->pindex == ptepindex)) {
-		mpte->hold_count++;
+		mpte->wire_count++;
 	} else {
 retry:
 		/*
@@ -1856,7 +1844,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 			mpte = PHYS_TO_VM_PAGE(pmap_pte_pa(l2pte));
 			pmap->pm_ptphint = mpte;
 		}
-		mpte->hold_count++;
+		mpte->wire_count++;
 	} else {
 		mpte = _pmap_allocpte(pmap, ptepindex);
 		if (mpte == NULL)