summary | refs | log | tree | commit | diff | stats
path: root/sys/vm/vm_pageout.c
diff options
context:
space:
mode:
author: jeff <jeff@FreeBSD.org>  2013-07-26 23:22:05 +0000
committer: jeff <jeff@FreeBSD.org>  2013-07-26 23:22:05 +0000
commit: 8076cabebbb9f2fb450279bf634fe1b3048b7554 (patch)
tree: 1117f1676610b21ed430541178c1c1dbdb23d9c3 /sys/vm/vm_pageout.c
parent: 447a0fbacacfd09ab106fdc840ac39a6fbe01397 (diff)
download: FreeBSD-src-8076cabebbb9f2fb450279bf634fe1b3048b7554.zip
download: FreeBSD-src-8076cabebbb9f2fb450279bf634fe1b3048b7554.tar.gz
Improve page LRU quality and simplify the logic.
 - Don't short-circuit aging tests for unmapped objects.  This biases
   against unmapped file pages and transient mappings.
 - Always honor PGA_REFERENCED.  We can now use this after soft busying
   to lazily restart the LRU.
 - Don't transition directly from active to cached bypassing the inactive
   queue.  This frees recently used data much too early.
 - Rename actcount to act_delta to be more consistent with use and meaning.

Reviewed by:	kib, alc
Sponsored by:	EMC / Isilon Storage Division
Diffstat (limited to 'sys/vm/vm_pageout.c')
-rw-r--r--sys/vm/vm_pageout.c128
1 files changed, 57 insertions, 71 deletions
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 36f3140..841820b 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -708,7 +708,7 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
{
vm_object_t backing_object, object;
vm_page_t p;
- int actcount, remove_mode;
+ int act_delta, remove_mode;
VM_OBJECT_ASSERT_LOCKED(first_object);
if ((first_object->flags & OBJ_FICTITIOUS) != 0)
@@ -739,17 +739,17 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
vm_page_unlock(p);
continue;
}
- actcount = pmap_ts_referenced(p);
+ act_delta = pmap_ts_referenced(p);
if ((p->aflags & PGA_REFERENCED) != 0) {
- if (actcount == 0)
- actcount = 1;
+ if (act_delta == 0)
+ act_delta = 1;
vm_page_aflag_clear(p, PGA_REFERENCED);
}
- if (p->queue != PQ_ACTIVE && actcount != 0) {
+ if (p->queue != PQ_ACTIVE && act_delta != 0) {
vm_page_activate(p);
- p->act_count += actcount;
+ p->act_count += act_delta;
} else if (p->queue == PQ_ACTIVE) {
- if (actcount == 0) {
+ if (act_delta == 0) {
p->act_count -= min(p->act_count,
ACT_DECLINE);
if (!remove_mode && p->act_count == 0) {
@@ -869,7 +869,7 @@ vm_pageout_scan(int pass)
int page_shortage, maxscan, pcount;
int addl_page_shortage;
vm_object_t object;
- int actcount;
+ int act_delta;
int vnodes_skipped = 0;
int maxlaunder;
boolean_t queues_locked;
@@ -989,44 +989,40 @@ vm_pageout_scan(int pass)
queues_locked = FALSE;
/*
- * If the object is not being used, we ignore previous
+ * We bump the activation count if the page has been
+ * referenced while in the inactive queue. This makes
+ * it less likely that the page will be added back to the
+ * inactive queue prematurely again. Here we check the
+ * page tables (or emulated bits, if any), given the upper
+ * level VM system not knowing anything about existing
* references.
*/
- if (object->ref_count == 0) {
+ act_delta = 0;
+ if ((m->aflags & PGA_REFERENCED) != 0) {
vm_page_aflag_clear(m, PGA_REFERENCED);
+ act_delta = 1;
+ }
+ if (object->ref_count != 0) {
+ act_delta += pmap_ts_referenced(m);
+ } else {
KASSERT(!pmap_page_is_mapped(m),
("vm_pageout_scan: page %p is mapped", m));
-
- /*
- * Otherwise, if the page has been referenced while in the
- * inactive queue, we bump the "activation count" upwards,
- * making it less likely that the page will be added back to
- * the inactive queue prematurely again. Here we check the
- * page tables (or emulated bits, if any), given the upper
- * level VM system not knowing anything about existing
- * references.
- */
- } else if ((m->aflags & PGA_REFERENCED) == 0 &&
- (actcount = pmap_ts_referenced(m)) != 0) {
- vm_page_activate(m);
- VM_OBJECT_WUNLOCK(object);
- m->act_count += actcount + ACT_ADVANCE;
- vm_page_unlock(m);
- goto relock_queues;
}
/*
* If the upper level VM system knows about any page
- * references, we activate the page. We also set the
- * "activation count" higher than normal so that we will less
- * likely place pages back onto the inactive queue again.
+ * references, we reactivate the page or requeue it.
*/
- if ((m->aflags & PGA_REFERENCED) != 0) {
- vm_page_aflag_clear(m, PGA_REFERENCED);
- actcount = pmap_ts_referenced(m);
- vm_page_activate(m);
+ if (act_delta != 0) {
+ if (object->ref_count) {
+ vm_page_activate(m);
+ m->act_count += act_delta + ACT_ADVANCE;
+ } else {
+ vm_pagequeue_lock(pq);
+ queues_locked = TRUE;
+ vm_page_requeue_locked(m);
+ }
VM_OBJECT_WUNLOCK(object);
- m->act_count += actcount + ACT_ADVANCE + 1;
vm_page_unlock(m);
goto relock_queues;
}
@@ -1324,50 +1320,40 @@ relock_queues:
/*
* Check to see "how much" the page has been used.
*/
- actcount = 0;
- if (object->ref_count != 0) {
- if (m->aflags & PGA_REFERENCED) {
- actcount += 1;
- }
- actcount += pmap_ts_referenced(m);
- if (actcount) {
- m->act_count += ACT_ADVANCE + actcount;
- if (m->act_count > ACT_MAX)
- m->act_count = ACT_MAX;
- }
+ act_delta = 0;
+ if (m->aflags & PGA_REFERENCED) {
+ vm_page_aflag_clear(m, PGA_REFERENCED);
+ act_delta += 1;
}
+ if (object->ref_count != 0)
+ act_delta += pmap_ts_referenced(m);
/*
- * Since we have "tested" this bit, we need to clear it now.
+ * Advance or decay the act_count based on recent usage.
*/
- vm_page_aflag_clear(m, PGA_REFERENCED);
+ if (act_delta) {
+ m->act_count += ACT_ADVANCE + act_delta;
+ if (m->act_count > ACT_MAX)
+ m->act_count = ACT_MAX;
+ } else {
+ m->act_count -= min(m->act_count, ACT_DECLINE);
+ act_delta = m->act_count;
+ }
/*
- * Only if an object is currently being used, do we use the
- * page activation count stats.
+ * Move this page to the tail of the active or inactive
+ * queue depending on usage.
*/
- if (actcount != 0 && object->ref_count != 0)
+ if (act_delta == 0) {
+ KASSERT(object->ref_count != 0 ||
+ !pmap_page_is_mapped(m),
+ ("vm_pageout_scan: page %p is mapped", m));
+ /* Dequeue to avoid later lock recursion. */
+ vm_page_dequeue_locked(m);
+ vm_page_deactivate(m);
+ page_shortage--;
+ } else
vm_page_requeue_locked(m);
- else {
- m->act_count -= min(m->act_count, ACT_DECLINE);
- if (object->ref_count == 0 ||
- m->act_count == 0) {
- page_shortage--;
- /* Dequeue to avoid later lock recursion. */
- vm_page_dequeue_locked(m);
- if (object->ref_count == 0) {
- KASSERT(!pmap_page_is_mapped(m),
- ("vm_pageout_scan: page %p is mapped", m));
- if (m->dirty == 0)
- vm_page_cache(m);
- else
- vm_page_deactivate(m);
- } else {
- vm_page_deactivate(m);
- }
- } else
- vm_page_requeue_locked(m);
- }
vm_page_unlock(m);
VM_OBJECT_WUNLOCK(object);
m = next;
OpenPOWER on IntegriCloud