summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
-rw-r--r--  sys/amd64/amd64/pmap.c  | 15
-rw-r--r--  sys/i386/i386/pmap.c    | 29
-rw-r--r--  sys/i386/xen/pmap.c     | 29
3 files changed, 37 insertions, 36 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index a1d2cf5..35cb131 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2021,7 +2021,7 @@ pv_to_chunk(pv_entry_t pv)
#define PC_FREE1 0xfffffffffffffffful
#define PC_FREE2 0x000000fffffffffful
-static uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
+static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
"Current number of pv entries");
@@ -2070,7 +2070,7 @@ pmap_pv_reclaim(pmap_t locked_pmap)
pv_entry_t pv;
vm_offset_t va;
vm_page_t free, m, m_pc;
- uint64_t inuse, freemask;
+ uint64_t inuse;
int bit, field, freed;
rw_assert(&pvh_global_lock, RA_WLOCKED);
@@ -2102,7 +2102,6 @@ pmap_pv_reclaim(pmap_t locked_pmap)
*/
freed = 0;
for (field = 0; field < _NPCM; field++) {
- freemask = 0;
for (inuse = ~pc->pc_map[field] & pc_freemask[field];
inuse != 0; inuse &= ~(1UL << bit)) {
bit = bsfq(inuse);
@@ -2131,16 +2130,16 @@ pmap_pv_reclaim(pmap_t locked_pmap)
PGA_WRITEABLE);
}
}
+ pc->pc_map[field] |= 1UL << bit;
pmap_unuse_pt(pmap, va, *pde, &free);
- freemask |= 1UL << bit;
freed++;
}
- pc->pc_map[field] |= freemask;
}
if (freed == 0) {
TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
continue;
}
+ /* Every freed mapping is for a 4 KB page. */
pmap_resident_count_dec(pmap, freed);
PV_STAT(pv_entry_frees += freed);
PV_STAT(pv_entry_spare += freed);
@@ -2261,10 +2260,6 @@ retry:
TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
pc_list);
}
- if (pc != TAILQ_LAST(&pv_chunks, pch)) {
- TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
- TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
- }
pv_entry_count++;
PV_STAT(pv_entry_spare--);
return (pv);
@@ -4137,7 +4132,7 @@ pmap_remove_pages(pmap_t pmap)
TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
allfree = 1;
for (field = 0; field < _NPCM; field++) {
- inuse = (~(pc->pc_map[field])) & pc_freemask[field];
+ inuse = ~pc->pc_map[field] & pc_freemask[field];
while (inuse != 0) {
bit = bsfq(inuse);
bitmask = 1UL << bit;
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index c28efbc..d02f00e 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2177,7 +2177,7 @@ pv_to_chunk(pv_entry_t pv)
#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */
#define PC_FREE10 0x0000fffful /* Free values for index 10 */
-static uint32_t pc_freemask[_NPCM] = {
+static const uint32_t pc_freemask[_NPCM] = {
PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
@@ -2227,7 +2227,7 @@ pmap_pv_reclaim(pmap_t locked_pmap)
pv_entry_t pv;
vm_offset_t va;
vm_page_t free, m, m_pc;
- uint32_t inuse, freemask;
+ uint32_t inuse;
int bit, field, freed;
PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
@@ -2260,7 +2260,6 @@ pmap_pv_reclaim(pmap_t locked_pmap)
*/
freed = 0;
for (field = 0; field < _NPCM; field++) {
- freemask = 0;
for (inuse = ~pc->pc_map[field] & pc_freemask[field];
inuse != 0; inuse &= ~(1UL << bit)) {
bit = bsfl(inuse);
@@ -2289,16 +2288,16 @@ pmap_pv_reclaim(pmap_t locked_pmap)
PGA_WRITEABLE);
}
}
+ pc->pc_map[field] |= 1UL << bit;
pmap_unuse_pt(pmap, va, &free);
- freemask |= 1UL << bit;
freed++;
}
- pc->pc_map[field] |= freemask;
}
if (freed == 0) {
TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
continue;
}
+ /* Every freed mapping is for a 4 KB page. */
pmap->pm_stats.resident_count -= freed;
PV_STAT(pv_entry_frees += freed);
PV_STAT(pv_entry_spare += freed);
@@ -2367,13 +2366,21 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv)
field = idx / 32;
bit = idx % 32;
pc->pc_map[field] |= 1ul << bit;
- /* move to head of list */
- TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
for (idx = 0; idx < _NPCM; idx++)
if (pc->pc_map[idx] != pc_freemask[idx]) {
- TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ /*
+ * 98% of the time, pc is already at the head of the
+ * list. If it isn't already, move it to the head.
+ */
+ if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
+ pc)) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
+ pc_list);
+ }
return;
}
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
free_pv_chunk(pc);
}
@@ -2437,10 +2444,6 @@ retry:
}
TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
- if (pc != TAILQ_LAST(&pv_chunks, pch)) {
- TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
- TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
- }
PV_STAT(pv_entry_spare--);
return (pv);
}
@@ -4371,7 +4374,7 @@ pmap_remove_pages(pmap_t pmap)
TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
allfree = 1;
for (field = 0; field < _NPCM; field++) {
- inuse = (~(pc->pc_map[field])) & pc_freemask[field];
+ inuse = ~pc->pc_map[field] & pc_freemask[field];
while (inuse != 0) {
bit = bsfl(inuse);
bitmask = 1UL << bit;
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index b5a0f2d..4f123b0 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -1929,7 +1929,7 @@ pv_to_chunk(pv_entry_t pv)
#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */
#define PC_FREE10 0x0000fffful /* Free values for index 10 */
-static uint32_t pc_freemask[_NPCM] = {
+static const uint32_t pc_freemask[_NPCM] = {
PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
@@ -1977,7 +1977,7 @@ pmap_pv_reclaim(pmap_t locked_pmap)
pv_entry_t pv;
vm_offset_t va;
vm_page_t free, m, m_pc;
- uint32_t inuse, freemask;
+ uint32_t inuse;
int bit, field, freed;
PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
@@ -2010,7 +2010,6 @@ pmap_pv_reclaim(pmap_t locked_pmap)
*/
freed = 0;
for (field = 0; field < _NPCM; field++) {
- freemask = 0;
for (inuse = ~pc->pc_map[field] & pc_freemask[field];
inuse != 0; inuse &= ~(1UL << bit)) {
bit = bsfl(inuse);
@@ -2030,16 +2029,16 @@ pmap_pv_reclaim(pmap_t locked_pmap)
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
if (TAILQ_EMPTY(&m->md.pv_list))
vm_page_aflag_clear(m, PGA_WRITEABLE);
+ pc->pc_map[field] |= 1UL << bit;
pmap_unuse_pt(pmap, va, &free);
- freemask |= 1UL << bit;
freed++;
}
- pc->pc_map[field] |= freemask;
}
if (freed == 0) {
TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
continue;
}
+ /* Every freed mapping is for a 4 KB page. */
pmap->pm_stats.resident_count -= freed;
PV_STAT(pv_entry_frees += freed);
PV_STAT(pv_entry_spare += freed);
@@ -2108,13 +2107,21 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv)
field = idx / 32;
bit = idx % 32;
pc->pc_map[field] |= 1ul << bit;
- /* move to head of list */
- TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
for (idx = 0; idx < _NPCM; idx++)
if (pc->pc_map[idx] != pc_freemask[idx]) {
- TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ /*
+ * 98% of the time, pc is already at the head of the
+ * list. If it isn't already, move it to the head.
+ */
+ if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
+ pc)) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
+ pc_list);
+ }
return;
}
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
free_pv_chunk(pc);
}
@@ -2178,10 +2185,6 @@ retry:
}
TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
- if (pc != TAILQ_LAST(&pv_chunks, pch)) {
- TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
- TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
- }
PV_STAT(pv_entry_spare--);
return (pv);
}
@@ -3538,7 +3541,7 @@ pmap_remove_pages(pmap_t pmap)
TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
allfree = 1;
for (field = 0; field < _NPCM; field++) {
- inuse = (~(pc->pc_map[field])) & pc_freemask[field];
+ inuse = ~pc->pc_map[field] & pc_freemask[field];
while (inuse != 0) {
bit = bsfl(inuse);
bitmask = 1UL << bit;
OpenPOWER on IntegriCloud