author     alc <alc@FreeBSD.org>  2007-02-07 06:37:30 +0000
committer  alc <alc@FreeBSD.org>  2007-02-07 06:37:30 +0000
commit     2eb15b506b7d7b69a41d0fcdf2d7a806d723e4c7 (patch)
tree       7d9cd04185bace746ccf4dda9dee1d367ab089d6 /sys/vm
parent     c1270b41ec4a4e36c238fef98b15dd551be5f9d6 (diff)
Change the pagedaemon, vm_wait(), and vm_waitpfault() to sleep on the
vm page queue free mutex instead of the vm page queue mutex.
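
The commit message describes a sleep/wakeup handshake: threads waiting for free
pages and the pagedaemon that replenishes them now synchronize on
vm_page_queue_free_mtx, the mutex that actually protects the free-page count,
instead of the general page queue mutex. A condensed sketch of that handshake,
stitched together from the hunks below (not compilable on its own; it assumes
the kernel globals named in the diff):

	/* Sleeper side, as in vm_wait() below: */
	mtx_lock(&vm_page_queue_free_mtx);
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);	/* poke the pagedaemon */
	}
	/*
	 * PDROP: msleep() puts the thread on the sleep queue and releases
	 * the mutex atomically, so the wakeup below cannot fall into the
	 * window between the check above and going to sleep.
	 */
	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
	    "vmwait", 0);

	/*
	 * Waker side, as in vm_page_free_wakeup() below: the same mutex
	 * must be held when the free count rises and the wakeup is sent.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	wakeup(&cnt.v_free_count);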
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_page.c    | 16
-rw-r--r--  sys/vm/vm_pageout.c | 20
2 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 4234635..4e96e2f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -984,17 +984,17 @@ void
vm_wait(void)
{
- vm_page_lock_queues();
+ mtx_lock(&vm_page_queue_free_mtx);
if (curproc == pageproc) {
vm_pageout_pages_needed = 1;
- msleep(&vm_pageout_pages_needed, &vm_page_queue_mtx,
+ msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
PDROP | PSWP, "VMWait", 0);
} else {
if (!vm_pages_needed) {
vm_pages_needed = 1;
wakeup(&vm_pages_needed);
}
- msleep(&cnt.v_free_count, &vm_page_queue_mtx, PDROP | PVM,
+ msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
"vmwait", 0);
}
}
@@ -1013,12 +1013,12 @@ void
vm_waitpfault(void)
{
- vm_page_lock_queues();
+ mtx_lock(&vm_page_queue_free_mtx);
if (!vm_pages_needed) {
vm_pages_needed = 1;
wakeup(&vm_pages_needed);
}
- msleep(&cnt.v_free_count, &vm_page_queue_mtx, PDROP | PUSER,
+ msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
"pfault", 0);
}
@@ -1066,7 +1066,7 @@ static inline void
vm_page_free_wakeup(void)
{
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
/*
* if pageout daemon needs pages, then tell it that there are
* some free.
@@ -1165,8 +1165,8 @@ vm_page_free_toq(vm_page_t m)
} else {
TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
}
- mtx_unlock(&vm_page_queue_free_mtx);
vm_page_free_wakeup();
+ mtx_unlock(&vm_page_queue_free_mtx);
}
/*
@@ -1404,7 +1404,9 @@ vm_page_cache(vm_page_t m)
}
vm_pageq_remove_nowakeup(m);
vm_pageq_enqueue(PQ_CACHE + m->pc, m);
+ mtx_lock(&vm_page_queue_free_mtx);
vm_page_free_wakeup();
+ mtx_unlock(&vm_page_queue_free_mtx);
}
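
In both hunks above, vm_page_free_wakeup() is now called with
vm_page_queue_free_mtx held: in vm_page_free_toq() the call moves before the
unlock, and in vm_page_cache() it gains a lock/unlock pair. This follows from
the new mtx_assert() in vm_page_free_wakeup() itself, since the flags it
examines (vm_pages_needed, vm_pageout_pages_needed) are now protected by the
free queue mutex. A minimal sketch of the required calling pattern
(hypothetical caller; identifiers are those from the diff):

	mtx_lock(&vm_page_queue_free_mtx);
	/* ... page is placed on a free or cache queue, count rises ... */
	vm_page_free_wakeup();	/* reads/clears the flags, then wakeup() */
	mtx_unlock(&vm_page_queue_free_mtx);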
/*
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 9ef9cfa..c0611ba 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1479,13 +1479,13 @@ vm_pageout()
* The pageout daemon is never done, so loop forever.
*/
while (TRUE) {
- vm_page_lock_queues();
/*
* If we have enough free memory, wakeup waiters. Do
* not clear vm_pages_needed until we reach our target,
* otherwise we may be woken up over and over again and
* waste a lot of cpu.
*/
+ mtx_lock(&vm_page_queue_free_mtx);
if (vm_pages_needed && !vm_page_count_min()) {
if (!vm_paging_needed())
vm_pages_needed = 0;
@@ -1499,8 +1499,9 @@ vm_pageout()
*/
++pass;
if (pass > 1)
- msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
- "psleep", hz/2);
+ msleep(&vm_pages_needed,
+ &vm_page_queue_free_mtx, PVM, "psleep",
+ hz / 2);
} else {
/*
* Good enough, sleep & handle stats. Prime the pass
@@ -1510,10 +1511,13 @@ vm_pageout()
pass = 1;
else
pass = 0;
- error = msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
- "psleep", vm_pageout_stats_interval * hz);
+ error = msleep(&vm_pages_needed,
+ &vm_page_queue_free_mtx, PVM, "psleep",
+ vm_pageout_stats_interval * hz);
if (error && !vm_pages_needed) {
+ mtx_unlock(&vm_page_queue_free_mtx);
pass = 0;
+ vm_page_lock_queues();
vm_pageout_page_stats();
vm_page_unlock_queues();
continue;
@@ -1521,16 +1525,16 @@ vm_pageout()
}
if (vm_pages_needed)
cnt.v_pdwakeups++;
- vm_page_unlock_queues();
+ mtx_unlock(&vm_page_queue_free_mtx);
vm_pageout_scan(pass);
}
}
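
Read together, the vm_pageout() hunks above make the daemon's main loop block
on &vm_pages_needed while holding vm_page_queue_free_mtx, so the
"vm_pages_needed = 1; wakeup(&vm_pages_needed);" sequence that vm_wait() and
vm_waitpfault() perform under the same mutex cannot be lost. A condensed,
approximate rendering of the resulting loop (pass bookkeeping and the stats
path are abbreviated; lines not shown in the hunks are reconstructed):

	for (;;) {
		mtx_lock(&vm_page_queue_free_mtx);
		if (vm_pages_needed && !vm_page_count_min()) {
			/* Target reached: release vm_wait() sleepers. */
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed)
			/* Still short: nap briefly between scan passes. */
			msleep(&vm_pages_needed, &vm_page_queue_free_mtx,
			    PVM, "psleep", hz / 2);
		else
			/* Idle: wait for a waiter to set vm_pages_needed. */
			msleep(&vm_pages_needed, &vm_page_queue_free_mtx,
			    PVM, "psleep", vm_pageout_stats_interval * hz);
		mtx_unlock(&vm_page_queue_free_mtx);
		vm_pageout_scan(pass);
	}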
/*
- * Unless the page queue lock is held by the caller, this function
+ * Unless the free page queue lock is held by the caller, this function
* should be regarded as advisory. Specifically, the caller should
* not msleep() on &cnt.v_free_count following this function unless
- * the page queue lock is held until the msleep() is performed.
+ * the free page queue lock is held until the msleep() is performed.
*/
void
pagedaemon_wakeup()
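
The reworded comment pins down a lost-wakeup hazard: pagedaemon_wakeup() is
only advisory because, once it returns, the pagedaemon may replenish memory
and issue its wakeup on &cnt.v_free_count before the caller gets to sleep,
leaving the caller waiting for a wakeup that has already fired. A sketch of
the safe sequence the comment prescribes (hypothetical caller;
vm_page_count_min() is assumed from vm_page.h):

	mtx_lock(&vm_page_queue_free_mtx);
	if (vm_page_count_min()) {
		pagedaemon_wakeup();
		/*
		 * Holding the free queue mutex until msleep() makes the
		 * check-then-sleep atomic with the daemon's wakeup().
		 */
		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx,
		    PDROP | PVM, "vmwait", 0);
	} else
		mtx_unlock(&vm_page_queue_free_mtx);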