author	alc <alc@FreeBSD.org>	2012-07-16 18:13:43 +0000
committer	alc <alc@FreeBSD.org>	2012-07-16 18:13:43 +0000
commit	8af6bec3e35a5f83c6fe4955a3235cde9b77a070 (patch)
tree	e2389227dc088f1bb3c3c7f406a2ed913f5d3542 /sys/vm/vm_contig.c
parent	3abf0120170ad6928c403bd90bacdb74b3db3570 (diff)
Various improvements to vm_contig_grow_cache().  Most notably, even when
it can't sleep, it can still move clean pages from the inactive queue to
the cache.  Also, when a page is cached, there is no need to restart the
scan.  The "next" page pointer held by vm_contig_launder() is still valid.
Finally, add a comment summarizing what vm_contig_grow_cache() does based
upon the value of "tries".

MFC after:	3 weeks
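For context, the following sketch (not part of the commit) shows the kind of retry loop a contiguous-allocation path can build around vm_contig_grow_cache() once it behaves as described above. Only the vm_contig_grow_cache() signature and the meaning of "tries" come from this change; contig_alloc_attempt() and the surrounding function are hypothetical placeholders.

#include <sys/param.h>
#include <sys/malloc.h>		/* M_NOWAIT */

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>

/* Hypothetical routine that attempts the actual contiguous allocation. */
vm_page_t	contig_alloc_attempt(u_long npages, vm_paddr_t low,
		    vm_paddr_t high);

/* Prototype matching vm_contig.c, repeated so the sketch is self-contained. */
void	vm_contig_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high);

/*
 * Sketch only: retry a contiguous allocation, asking vm_contig_grow_cache()
 * to reclaim more aggressively after each failed attempt.
 */
static vm_page_t
contig_alloc_sketch(u_long npages, vm_paddr_t low, vm_paddr_t high, int flags)
{
	vm_page_t pages;
	int tries;

	tries = 0;
retry:
	pages = contig_alloc_attempt(npages, low, high);
	if (pages == NULL && tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
		/*
		 * tries == 0: cache clean, inactive pages only; never
		 * sleeps, so it is usable by M_NOWAIT callers.
		 * tries == 1: also run the vm_lowmem handlers and launder
		 * dirty inactive pages; may sleep.
		 * tries == 2: additionally scan the active queue; may sleep.
		 */
		vm_contig_grow_cache(tries, low, high);
		tries++;
		goto retry;
	}
	return (pages);
}

An M_NOWAIT caller stops after the single non-sleeping pass (tries == 0), while a caller that may sleep can escalate through all three behaviors before giving up.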
Diffstat (limited to 'sys/vm/vm_contig.c')
-rw-r--r--	sys/vm/vm_contig.c	| 53
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index 79abd9c..5f2b24c 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -83,7 +83,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_pager.h>
 
 static int
-vm_contig_launder_page(vm_page_t m, vm_page_t *next)
+vm_contig_launder_page(vm_page_t m, vm_page_t *next, int tries)
 {
 	vm_object_t object;
 	vm_page_t m_tmp;
@@ -92,7 +92,10 @@ vm_contig_launder_page(vm_page_t m, vm_page_t *next)
 	int vfslocked;
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	vm_page_lock_assert(m, MA_OWNED);
+	if (!vm_pageout_page_lock(m, next) || m->hold_count != 0) {
+		vm_page_unlock(m);
+		return (EAGAIN);
+	}
 	object = m->object;
 	if (!VM_OBJECT_TRYLOCK(object) &&
 	    (!vm_pageout_fallback_object_lock(m, next) || m->hold_count != 0)) {
@@ -100,7 +103,13 @@ vm_contig_launder_page(vm_page_t m, vm_page_t *next)
 		VM_OBJECT_UNLOCK(object);
 		return (EAGAIN);
 	}
-	if (vm_page_sleep_if_busy(m, TRUE, "vpctw0")) {
+	if ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) {
+		if (tries == 0) {
+			vm_page_unlock(m);
+			VM_OBJECT_UNLOCK(object);
+			return (EAGAIN);
+		}
+		vm_page_sleep(m, "vpctw0");
 		VM_OBJECT_UNLOCK(object);
 		vm_page_lock_queues();
 		return (EBUSY);
@@ -110,7 +119,7 @@ vm_contig_launder_page(vm_page_t m, vm_page_t *next)
 		pmap_remove_all(m);
 	if (m->dirty != 0) {
 		vm_page_unlock(m);
-		if ((object->flags & OBJ_DEAD) != 0) {
+		if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
 			VM_OBJECT_UNLOCK(object);
 			return (EAGAIN);
 		}
@@ -146,34 +155,25 @@ vm_contig_launder_page(vm_page_t m, vm_page_t *next)
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(object);
-	return (0);
+	return (EAGAIN);
 }
 
 static int
-vm_contig_launder(int queue, vm_paddr_t low, vm_paddr_t high)
+vm_contig_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
 {
 	vm_page_t m, next;
 	vm_paddr_t pa;
 	int error;
 
 	TAILQ_FOREACH_SAFE(m, &vm_page_queues[queue].pl, pageq, next) {
-
-		/* Skip marker pages */
+		KASSERT(m->queue == queue,
+		    ("vm_contig_launder: page %p's queue is not %d", m, queue));
 		if ((m->flags & PG_MARKER) != 0)
 			continue;
-
 		pa = VM_PAGE_TO_PHYS(m);
 		if (pa < low || pa + PAGE_SIZE > high)
 			continue;
-
-		if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
-			vm_page_unlock(m);
-			continue;
-		}
-		KASSERT(m->queue == queue,
-		    ("vm_contig_launder: page %p's queue is not %d", m, queue));
-		error = vm_contig_launder_page(m, &next);
-		vm_page_lock_assert(m, MA_NOTOWNED);
+		error = vm_contig_launder_page(m, &next, tries);
 		if (error == 0)
 			return (TRUE);
 		if (error == EBUSY)
@@ -183,7 +183,15 @@ vm_contig_launder(int queue, vm_paddr_t low, vm_paddr_t high)
 }
 
 /*
- * Increase the number of cached pages.
+ * Increase the number of cached pages.  The specified value, "tries",
+ * determines which categories of pages are cached:
+ *
+ *  0: All clean, inactive pages within the specified physical address range
+ *     are cached.  Will not sleep.
+ *  1: The vm_lowmem handlers are called.  All inactive pages within
+ *     the specified physical address range are cached.  May sleep.
+ *  2: The vm_lowmem handlers are called.  All inactive and active pages
+ *     within the specified physical address range are cached.  May sleep.
 */
 void
 vm_contig_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
@@ -206,15 +214,16 @@ vm_contig_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
 	}
 	vm_page_lock_queues();
 	inactl = 0;
-	inactmax = tries < 1 ? 0 : cnt.v_inactive_count;
+	inactmax = cnt.v_inactive_count;
 	actl = 0;
 	actmax = tries < 2 ? 0 : cnt.v_active_count;
 again:
-	if (inactl < inactmax && vm_contig_launder(PQ_INACTIVE, low, high)) {
+	if (inactl < inactmax && vm_contig_launder(PQ_INACTIVE, tries, low,
+	    high)) {
 		inactl++;
 		goto again;
 	}
-	if (actl < actmax && vm_contig_launder(PQ_ACTIVE, low, high)) {
+	if (actl < actmax && vm_contig_launder(PQ_ACTIVE, tries, low, high)) {
 		actl++;
 		goto again;
 	}
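The comment added above notes that the vm_lowmem handlers run for tries >= 1 before any pages are laundered. As a purely illustrative sketch, a kernel module could subscribe to that event as follows; the module and handler names are invented, while the vm_lowmem event and EVENTHANDLER_DEFINE() are existing kernel interfaces.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>

/*
 * Hypothetical vm_lowmem subscriber.  vm_contig_grow_cache() invokes the
 * vm_lowmem handlers (for tries >= 1) before laundering pages, which gives
 * code like this a chance to release memory first.
 */
static void
example_lowmem(void *arg __unused, int flags __unused)
{

	printf("vm_lowmem: example module releasing its caches\n");
}
EVENTHANDLER_DEFINE(vm_lowmem, example_lowmem, NULL, EVENTHANDLER_PRI_ANY);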