author     alc <alc@FreeBSD.org>    2007-11-30 07:14:42 +0000
committer  alc <alc@FreeBSD.org>    2007-11-30 07:14:42 +0000
commit     7e64e9843c86bf55f1675397d4e3fa03eb978896 (patch)
tree       9ac3c7756e7a5658ca1bbbb5c4a247761de93fd2 /sys/i386
parent     39911d1f8271132636fdc616e40573bad0577e82 (diff)
Improve get_pv_entry()'s handling of low-memory conditions. After page
allocation fails and pv entries are reclaimed, there may be an unused pv
entry in a pv chunk that survived the reclamation. However, previously,
after reclamation, get_pv_entry() did not look for an unused pv entry in
a surviving pv chunk; it simply retried the page allocation. Now, it does
look for an unused pv entry before retrying the page allocation.

Note: This only applies to RELENG_7. Earlier branches use a different pv
entry allocator.

MFC after: 6 weeks
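The control flow this change introduces is: look for an unused pv entry in
the existing pv chunks first, and only then fall back to allocating a page
for a new chunk, escalating reclamation from the inactive queue to the
active queue (and finally to a panic) between attempts. Below is a minimal
user-space C sketch of that retry pattern, not the pmap.c code itself;
every name in it (get_entry, scan_chunks_for_free_entry, try_page_alloc,
reclaim_entries) and the stubbed counters are hypothetical stand-ins.

    #include <stdio.h>
    #include <stdlib.h>

    enum reclaim_level { RECLAIM_NONE, RECLAIM_INACTIVE, RECLAIM_ACTIVE };

    /* Stub state standing in for the real chunk lists and page allocator. */
    static int chunk_free_slots;    /* unused entries in surviving chunks */
    static int pages_available;     /* pages the allocator can hand out */

    /* Pretend scan of existing chunks; non-NULL if a free entry was found. */
    static void *
    scan_chunks_for_free_entry(void)
    {
            if (chunk_free_slots > 0) {
                    chunk_free_slots--;
                    return (&chunk_free_slots);     /* any non-NULL token */
            }
            return (NULL);
    }

    /* Pretend page allocation; the flag mirrors VM_ALLOC_SYSTEM vs. NORMAL. */
    static void *
    try_page_alloc(int system_priority)
    {
            (void)system_priority;
            if (pages_available > 0) {
                    pages_available--;
                    return (&pages_available);
            }
            return (NULL);
    }

    /* Pretend reclamation pass: frees one entry back into some chunk. */
    static void
    reclaim_entries(enum reclaim_level lvl)
    {
            (void)lvl;
            chunk_free_slots++;
    }

    static void *
    get_entry(void)
    {
            enum reclaim_level lvl = RECLAIM_NONE;
            void *p;

    retry:
            /*
             * Scan surviving chunks first.  Before this commit, the
             * post-reclamation path skipped this scan and went straight
             * back to the page allocation.
             */
            if ((p = scan_chunks_for_free_entry()) != NULL)
                    return (p);
            /* Raise the allocation priority once the active queue is reached. */
            p = try_page_alloc(lvl == RECLAIM_ACTIVE);
            if (p == NULL) {
                    if (lvl == RECLAIM_NONE)
                            lvl = RECLAIM_INACTIVE;
                    else if (lvl == RECLAIM_INACTIVE)
                            lvl = RECLAIM_ACTIVE;
                    else
                            abort();        /* the kernel panics here instead */
                    reclaim_entries(lvl);
                    goto retry;
            }
            return (p);
    }

    int
    main(void)
    {
            pages_available = 0;    /* simulate allocation failure */
            printf("got entry: %p\n", get_entry());
            return (0);
    }

Folding the chunk scan into the retry path is what lets an entry freed by
reclamation be reused without allocating a new page, which is exactly the
case the old code missed.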
Diffstat (limited to 'sys/i386')
-rw-r--r--  sys/i386/i386/pmap.c | 52
1 file changed, 21 insertions(+), 31 deletions(-)
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index f3c2dc4..9f7d4a1 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1813,6 +1813,7 @@ get_pv_entry(pmap_t pmap, int try)
static const struct timeval printinterval = { 60, 0 };
static struct timeval lastprint;
static vm_pindex_t colour;
+ struct vpgqueues *pq;
int bit, field;
pv_entry_t pv;
struct pv_chunk *pc;
@@ -1827,6 +1828,8 @@ get_pv_entry(pmap_t pmap, int try)
printf("Approaching the limit on PV entries, consider "
"increasing either the vm.pmap.shpgperproc or the "
"vm.pmap.pv_entry_max tunable.\n");
+ pq = NULL;
+retry:
pc = TAILQ_FIRST(&pmap->pm_pvchunk);
if (pc != NULL) {
for (field = 0; field < _NPCM; field++) {
@@ -1850,21 +1853,17 @@ get_pv_entry(pmap_t pmap, int try)
return (pv);
}
}
- pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
- m = vm_page_alloc(NULL, colour, VM_ALLOC_NORMAL |
- VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
- if (m == NULL || pc == NULL) {
+ /*
+ * Access to the ptelist "pv_vafree" is synchronized by the page
+ * queues lock. If "pv_vafree" is currently non-empty, it will
+ * remain non-empty until pmap_ptelist_alloc() completes.
+ */
+ if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
+ &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
+ VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
if (try) {
pv_entry_count--;
PV_STAT(pc_chunk_tryfail++);
- if (m) {
- vm_page_lock_queues();
- vm_page_unwire(m, 0);
- vm_page_free(m);
- vm_page_unlock_queues();
- }
- if (pc)
- pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
return (NULL);
}
/*
@@ -1872,30 +1871,21 @@ get_pv_entry(pmap_t pmap, int try)
* inactive pages. After that, if a pv chunk entry
* is still needed, destroy mappings to active pages.
*/
- PV_STAT(pmap_collect_inactive++);
- pmap_collect(pmap, &vm_page_queues[PQ_INACTIVE]);
- if (m == NULL)
- m = vm_page_alloc(NULL, colour, VM_ALLOC_NORMAL |
- VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
- if (pc == NULL)
- pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
- if (m == NULL || pc == NULL) {
+ if (pq == NULL) {
+ PV_STAT(pmap_collect_inactive++);
+ pq = &vm_page_queues[PQ_INACTIVE];
+ } else if (pq == &vm_page_queues[PQ_INACTIVE]) {
PV_STAT(pmap_collect_active++);
- pmap_collect(pmap, &vm_page_queues[PQ_ACTIVE]);
- if (m == NULL)
- m = vm_page_alloc(NULL, colour,
- VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
- VM_ALLOC_WIRED);
- if (pc == NULL)
- pc = (struct pv_chunk *)
- pmap_ptelist_alloc(&pv_vafree);
- if (m == NULL || pc == NULL)
- panic("get_pv_entry: increase vm.pmap.shpgperproc");
- }
+ pq = &vm_page_queues[PQ_ACTIVE];
+ } else
+ panic("get_pv_entry: increase vm.pmap.shpgperproc");
+ pmap_collect(pmap, pq);
+ goto retry;
}
PV_STAT(pc_chunk_count++);
PV_STAT(pc_chunk_allocs++);
colour++;
+ pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
pmap_qenter((vm_offset_t)pc, &m, 1);
pc->pc_pmap = pmap;
pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */
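For context, the chunk scan that the retry now revisits (the field/bit loop
at the top of the first hunk) is a bitmap search: each pv chunk carries an
array of mask words (pc_map) in which a set bit marks a free entry. Here is
a standalone sketch of that search; the names (chunk, chunk_alloc_entry)
and the word count NPCM are made up for illustration, and 32-bit words are
assumed.

    #include <stdio.h>
    #include <strings.h>    /* ffs(), POSIX */

    #define NPCM    3       /* illustrative; the real _NPCM lives in pmap.c */

    struct chunk {
            unsigned int map[NPCM]; /* set bit == free entry */
    };

    /* Return the index of a free entry and mark it used, or -1 if full. */
    static int
    chunk_alloc_entry(struct chunk *c)
    {
            int field, bit;

            for (field = 0; field < NPCM; field++) {
                    if (c->map[field] != 0) {
                            bit = ffs((int)c->map[field]) - 1;
                            c->map[field] &= ~(1u << bit);
                            return (field * 32 + bit);
                    }
            }
            return (-1);
    }

    int
    main(void)
    {
            struct chunk c = { .map = { 0x5u, 0, 0 } };  /* entries 0, 2 free */

            printf("%d\n", chunk_alloc_entry(&c));  /* prints 0 */
            printf("%d\n", chunk_alloc_entry(&c));  /* prints 2 */
            printf("%d\n", chunk_alloc_entry(&c));  /* prints -1 */
            return (0);
    }

Finding the lowest set bit with ffs() keeps the scan proportional to the
number of mask words rather than the number of entries; the kernel uses a
similar lowest-set-bit primitive.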