author:    green <green@FreeBSD.org>  2004-06-15 01:02:00 +0000
committer: green <green@FreeBSD.org>  2004-06-15 01:02:00 +0000
commit:    52f66d087920043fa401e32440c2257bdc0fe0d9 (patch)
tree:      a4d2d03381eb804a361b55862f784074bfd0af38 /sys/vm
parent:    dc22af89a43f0e7cdd55830145b1958eda2d3217 (diff)
Make contigmalloc() more reliable:
1. Remove a race whereby contigmalloc() would deadlock against the
   running processes in the system if they kept reinstantiating the
   memory on the active and inactive page queues that it was trying to
   flush out.  The process doing the contigmalloc() would sit in "swwrt"
   forever and the swap pager would be going at full force, but never
   get anywhere.  Instead of doing it until the queues are empty,
   launder for as many iterations as there are pages in the queue.

2. Do all laundering to swap synchronously; previously, the vnode
   laundering was synchronous and the swap laundering not.

3. Increase the number of launder-or-allocate passes to three, from
   two, while failing without bothering to do all the laundering on the
   third pass if allocation was not possible.  This effectively gives
   exactly two chances to launder enough contiguous memory, helpful
   with high memory churn where a lot of memory from one pass to the
   next (and during a single laundering loop) becomes dirtied again.

I can now reliably hot-plug hardware requiring a 256KB contigmalloc()
without having the kldload/cbb ithread sit around failing to make
progress, while running a busy X session.  Previously, it took killing
X to get contigmalloc() to get further (that is, quiescing the system),
and even then contigmalloc() returned failure.
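The bounded laundering loop in item 1 is the anti-livelock change.  What
follows is a minimal, self-contained sketch of that pattern in plain C,
not the kernel code itself: launder_queue(), queue_len(), INACTIVE, and
ACTIVE are hypothetical stand-ins for vm_contig_launder(), the
vm_page_queues[].lcnt counters, PQ_INACTIVE, and PQ_ACTIVE, and the
redirtying is simulated rather than real VM behavior.

    /*
     * Sketch of capping launder iterations at the queue's initial
     * length, so pages redirtied by other processes cannot keep the
     * loop running forever.  All names here are illustrative.
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum queue { INACTIVE, ACTIVE };

    static int pages[2] = { 5, 3 };     /* simulated queue lengths */

    /* Pretend to clean one page; simulate another process redirtying
     * a page, which is what made the unbounded loop spin forever. */
    static bool
    launder_queue(enum queue q)
    {
            if (pages[q] == 0)
                    return (false);
            pages[q]--;
            if (q == INACTIVE && pages[q] == 2)
                    pages[q]++;     /* redirtied: queue never drains */
            return (true);
    }

    static int
    queue_len(enum queue q)
    {
            return (pages[q]);
    }

    int
    main(void)
    {
            int inactl, actl, inactmax, actmax;

            /* Snapshot the queue lengths up front; launder at most
             * that many times per queue instead of "until empty". */
            inactl = actl = 0;
            inactmax = queue_len(INACTIVE);
            actmax = queue_len(ACTIVE);
    again:
            if (inactl < inactmax && launder_queue(INACTIVE)) {
                    inactl++;
                    goto again;
            }
            if (actl < actmax && launder_queue(ACTIVE)) {
                    actl++;
                    goto again;
            }
            printf("laundered %d inactive, %d active pages\n",
                inactl, actl);
            return (0);
    }

Without the inactl/actl caps, the simulated redirtying keeps
launder_queue(INACTIVE) succeeding indefinitely, which is exactly the
livelock described above; with the caps, each queue is laundered at most
its initial length and the loop always terminates.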
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_contig.c | 31 +++++++++++++++++++++++++++++------
 1 file changed, 25 insertions(+), 6 deletions(-)
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index 6a4befe..7300aee 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -119,7 +119,7 @@ vm_contig_launder(int queue)
 		} else if (object->type == OBJT_SWAP ||
 			   object->type == OBJT_DEFAULT) {
 			m_tmp = m;
-			vm_pageout_flush(&m_tmp, 1, 0);
+			vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC);
 			VM_OBJECT_UNLOCK(object);
 			return (TRUE);
 		}
@@ -152,6 +152,7 @@ contigmalloc1(
 	vm_object_t object;
 	vm_offset_t addr, tmp_addr;
 	int pass, pqtype;
+	int inactl, actl, inactmax, actmax;
 	vm_page_t pga = vm_page_array;
 
 	size = round_page(size);
@@ -163,7 +164,7 @@
 		panic("contigmalloc1: boundary must be a power of 2");
 
 	start = 0;
-	for (pass = 0; pass <= 1; pass++) {
+	for (pass = 2; pass >= 0; pass--) {
 		vm_page_lock_queues();
 again0:
 		mtx_lock_spin(&vm_page_queue_free_mtx);
@@ -188,11 +189,29 @@ again:
 		if ((i == cnt.v_page_count) ||
 		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
 			mtx_unlock_spin(&vm_page_queue_free_mtx);
+			/*
+			 * Instead of racing to empty the inactive/active
+			 * queues, give up, even with more left to free,
+			 * if we try more than the initial amount of pages.
+			 *
+			 * There's no point attempting this on the last pass.
+			 */
+			if (pass > 0) {
+				inactl = actl = 0;
+				inactmax = vm_page_queues[PQ_INACTIVE].lcnt;
+				actmax = vm_page_queues[PQ_ACTIVE].lcnt;
 again1:
-			if (vm_contig_launder(PQ_INACTIVE))
-				goto again1;
-			if (vm_contig_launder(PQ_ACTIVE))
-				goto again1;
+				if (inactl < inactmax &&
+				    vm_contig_launder(PQ_INACTIVE)) {
+					inactl++;
+					goto again1;
+				}
+				if (actl < actmax &&
+				    vm_contig_launder(PQ_ACTIVE)) {
+					actl++;
+					goto again1;
+				}
+			}
 			vm_page_unlock_queues();
 			continue;
 		}
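The reversed pass loop in the diff above pairs with item 3 of the commit
message: pass counts down from 2, and the laundering block is guarded by
pass > 0, so there are exactly two launder attempts before the final,
launder-free allocation attempt.  A hedged sketch of just that control
flow, with try_alloc() and launder_once() as hypothetical stand-ins for
the real allocation scan and the laundering loop:

    #include <stdbool.h>
    #include <stdio.h>

    static bool try_alloc(void)    { return (false); } /* always fails here */
    static void launder_once(void) { puts("laundering"); }

    int
    main(void)
    {
            int pass;

            for (pass = 2; pass >= 0; pass--) {
                    if (try_alloc()) {
                            puts("allocated");
                            return (0);
                    }
                    if (pass > 0)   /* no point laundering on the last pass */
                            launder_once();
            }
            puts("failed");
            return (1);
    }

With try_alloc() always failing, this prints "laundering" twice and then
"failed": three allocation attempts, two chances to launder.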