path: root/sys/i386/xen/pmap.c
author      alc <alc@FreeBSD.org>  2012-06-04 03:51:08 +0000
committer   alc <alc@FreeBSD.org>  2012-06-04 03:51:08 +0000
commit      93b7cabc813f6facf850e8518fc26d0be69aa5c8 (patch)
tree        3269eec0e84545e089e9d81f45ceaa52ebd80a79 /sys/i386/xen/pmap.c
parent      f32938acd98289bf2026819c4ce62e42fb71c699 (diff)
Various small changes to PV entry management:
Constify pc_freemask[].

pmap_pv_reclaim()
	Eliminate "freemask" because it was a pessimization.  Add a comment
	about the resident count adjustment.

free_pv_entry() [i386 only]
	Merge an optimization from amd64 (r233954).

get_pv_entry()
	Eliminate the move to tail of the pv_chunk on the global pv_chunks
	list.  (The right strategy needs more thought.  Moreover, there were
	unintended differences between the amd64 and i386 implementation.)

pmap_remove_pages()
	Eliminate unnecessary ()'s.
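All of these changes revolve around the per-chunk free bitmap pc_map[], whose
fully-free value per 32-bit word is given by pc_freemask[].  The following is a
simplified, self-contained C sketch of that bookkeeping, for orientation only;
the word count, the ffs()-based scan, and the helper names are illustrative
assumptions and not the actual pmap code.

/*
 * Sketch (not FreeBSD code): a chunk's free bitmap, one bit per pv_entry
 * slot, bit set = slot free.  Mirrors the pc_freemask[] / pc_map[] idea.
 */
#include <stdint.h>
#include <stdio.h>
#include <strings.h>			/* ffs() */

#define NPCM	11			/* bitmap words (illustrative) */

/* Fully-free values: words 0..9 are all ones, word 10 only partially used. */
static const uint32_t freemask[NPCM] = {
	0xfffffffful, 0xfffffffful, 0xfffffffful, 0xfffffffful,
	0xfffffffful, 0xfffffffful, 0xfffffffful, 0xfffffffful,
	0xfffffffful, 0xfffffffful, 0x0000fffful
};

static uint32_t pc_map[NPCM];		/* per-chunk bitmap of free slots */

/* Allocate the lowest-numbered free slot, or -1 if the chunk is full. */
static int
alloc_slot(void)
{
	for (int field = 0; field < NPCM; field++) {
		if (pc_map[field] != 0) {
			int bit = ffs(pc_map[field]) - 1;   /* like bsfl() */
			pc_map[field] &= ~(1UL << bit);     /* mark in use */
			return (field * 32 + bit);
		}
	}
	return (-1);
}

/* Free a slot by setting its bit directly, as the patched reclaim code does. */
static void
free_slot(int idx)
{
	pc_map[idx / 32] |= 1UL << (idx % 32);
}

int
main(void)
{
	int fully_free = 1;

	for (int i = 0; i < NPCM; i++)
		pc_map[i] = freemask[i];	/* chunk starts fully free */

	int a = alloc_slot(), b = alloc_slot();
	printf("allocated slots %d and %d\n", a, b);

	free_slot(a);
	free_slot(b);

	/* free_pv_entry()-style check: every word back to its free value? */
	for (int i = 0; i < NPCM; i++)
		if (pc_map[i] != freemask[i])
			fully_free = 0;
	printf("chunk completely free again: %s\n", fully_free ? "yes" : "no");
	return (0);
}

With that picture in mind, the diff below sets the freed bit in pc_map[]
immediately instead of accumulating a local "freemask", and free_pv_entry()
only touches the pm_pvchunk list when the chunk is not already at its head.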
Diffstat (limited to 'sys/i386/xen/pmap.c')
-rw-r--r--   sys/i386/xen/pmap.c   29
1 file changed, 16 insertions, 13 deletions
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index b5a0f2d..4f123b0 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -1929,7 +1929,7 @@ pv_to_chunk(pv_entry_t pv)
#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */
#define PC_FREE10 0x0000fffful /* Free values for index 10 */
-static uint32_t pc_freemask[_NPCM] = {
+static const uint32_t pc_freemask[_NPCM] = {
PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
@@ -1977,7 +1977,7 @@ pmap_pv_reclaim(pmap_t locked_pmap)
pv_entry_t pv;
vm_offset_t va;
vm_page_t free, m, m_pc;
- uint32_t inuse, freemask;
+ uint32_t inuse;
int bit, field, freed;
PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
@@ -2010,7 +2010,6 @@ pmap_pv_reclaim(pmap_t locked_pmap)
*/
freed = 0;
for (field = 0; field < _NPCM; field++) {
- freemask = 0;
for (inuse = ~pc->pc_map[field] & pc_freemask[field];
inuse != 0; inuse &= ~(1UL << bit)) {
bit = bsfl(inuse);
@@ -2030,16 +2029,16 @@ pmap_pv_reclaim(pmap_t locked_pmap)
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
if (TAILQ_EMPTY(&m->md.pv_list))
vm_page_aflag_clear(m, PGA_WRITEABLE);
+ pc->pc_map[field] |= 1UL << bit;
pmap_unuse_pt(pmap, va, &free);
- freemask |= 1UL << bit;
freed++;
}
- pc->pc_map[field] |= freemask;
}
if (freed == 0) {
TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
continue;
}
+ /* Every freed mapping is for a 4 KB page. */
pmap->pm_stats.resident_count -= freed;
PV_STAT(pv_entry_frees += freed);
PV_STAT(pv_entry_spare += freed);
@@ -2108,13 +2107,21 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv)
field = idx / 32;
bit = idx % 32;
pc->pc_map[field] |= 1ul << bit;
- /* move to head of list */
- TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
for (idx = 0; idx < _NPCM; idx++)
if (pc->pc_map[idx] != pc_freemask[idx]) {
- TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ /*
+ * 98% of the time, pc is already at the head of the
+ * list. If it isn't already, move it to the head.
+ */
+ if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
+ pc)) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
+ pc_list);
+ }
return;
}
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
free_pv_chunk(pc);
}
@@ -2178,10 +2185,6 @@ retry:
}
TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
- if (pc != TAILQ_LAST(&pv_chunks, pch)) {
- TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
- TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
- }
PV_STAT(pv_entry_spare--);
return (pv);
}
@@ -3538,7 +3541,7 @@ pmap_remove_pages(pmap_t pmap)
TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
allfree = 1;
for (field = 0; field < _NPCM; field++) {
- inuse = (~(pc->pc_map[field])) & pc_freemask[field];
+ inuse = ~pc->pc_map[field] & pc_freemask[field];
while (inuse != 0) {
bit = bsfl(inuse);
bitmask = 1UL << bit;