author     alc <alc@FreeBSD.org>  2015-09-27 04:36:09 +0000
committer  alc <alc@FreeBSD.org>  2015-09-27 04:36:09 +0000
commit     40b680d8ca087b9703e5fa3db21fdbc342f4ad36 (patch)
tree       c8e75e7055bedb82b9ef69dc060ffa900691ecb9 /sys/vm/vm_pageout.c
parent     ecb8678c0ac92b2653c1a120858f827cbec9b8f6 (diff)
MFC r285282
The intention of r254304 was to scan the active queue continuously. However, I've observed the active queue scan stopping when there are frequent free page shortages and the inactive queue is steadily refilled by other mechanisms, such as the sequential access heuristic in vm_fault() or madvise(2).

To remedy this problem, record the time of the last active queue scan, and always scan a number of pages proportional to the time since the last scan, regardless of whether that last scan was a timeout-triggered ("pass == 0") or free-page-shortage-triggered ("pass > 0") scan.

Also, on a timeout-triggered scan, allow a full scan of the active queue when the system is short of inactive pages.
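In outline, the fix computes a minimum scan target proportional to the time elapsed since the last scan. The sketch below is illustrative only; the committed calculation appears in vm_pageout_scan() in the diff that follows, and min_scan_target() and its parameter names here are hypothetical stand-ins for pq->pq_cnt, vmd->vmd_last_active_scan, hz, and vm_pageout_update_period:

/*
 * Hypothetical standalone sketch of the proportional scan target
 * described above; the real code lives in vm_pageout_scan().
 */
static long
min_scan_target(long active_count, int now_tick, int last_scan_tick,
    int update_period, int hz)
{
	long min_scan;

	if (update_period == 0)
		return (0);
	/* Widen to long before multiplying to avoid int overflow. */
	min_scan = active_count;
	/* Scale by the fraction of a full sweep that has elapsed:   */
	/* (ticks since last scan) / (ticks per update_period sweep) */
	min_scan *= now_tick - last_scan_tick;
	min_scan /= (long)hz * update_period;
	return (min_scan);
}

Dividing last keeps the intermediate product exact, which is why the patch declares min_scan as long rather than int.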
Diffstat (limited to 'sys/vm/vm_pageout.c')
-rw-r--r--  sys/vm/vm_pageout.c | 34
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 3e019fb..37900d5 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -926,9 +926,10 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 	vm_page_t m, next;
 	struct vm_pagequeue *pq;
 	vm_object_t object;
+	long min_scan;
 	int act_delta, addl_page_shortage, deficit, maxscan, page_shortage;
 	int vnodes_skipped = 0;
-	int maxlaunder;
+	int maxlaunder, scan_tick, scanned;
 	int lockmode;
 	boolean_t queues_locked;
 
@@ -1359,34 +1360,37 @@ relock_queues:
 	 * If we're just idle polling attempt to visit every
 	 * active page within 'update_period' seconds.
 	 */
-	if (pass == 0 && vm_pageout_update_period != 0) {
-		maxscan /= vm_pageout_update_period;
-		page_shortage = maxscan;
-	}
+	scan_tick = ticks;
+	if (vm_pageout_update_period != 0) {
+		min_scan = pq->pq_cnt;
+		min_scan *= scan_tick - vmd->vmd_last_active_scan;
+		min_scan /= hz * vm_pageout_update_period;
+	} else
+		min_scan = 0;
+	if (min_scan > 0 || (page_shortage > 0 && maxscan > 0))
+		vmd->vmd_last_active_scan = scan_tick;
 
 	/*
-	 * Scan the active queue for things we can deactivate. We nominally
-	 * track the per-page activity counter and use it to locate
-	 * deactivation candidates.
+	 * Scan the active queue for pages that can be deactivated. Update
+	 * the per-page activity counter and use it to identify deactivation
+	 * candidates.
 	 */
-	m = TAILQ_FIRST(&pq->pq_pl);
-	while (m != NULL && maxscan-- > 0 && page_shortage > 0) {
+	for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned <
+	    min_scan || (page_shortage > 0 && scanned < maxscan)); m = next,
+	    scanned++) {
 		KASSERT(m->queue == PQ_ACTIVE,
 		    ("vm_pageout_scan: page %p isn't active", m));
 
 		next = TAILQ_NEXT(m, plinks.q);
-		if ((m->flags & PG_MARKER) != 0) {
-			m = next;
+		if ((m->flags & PG_MARKER) != 0)
 			continue;
-		}
 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
 		    ("Fictitious page %p cannot be in active queue", m));
 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 		    ("Unmanaged page %p cannot be in active queue", m));
 		if (!vm_pageout_page_lock(m, &next)) {
 			vm_page_unlock(m);
-			m = next;
 			continue;
 		}
@@ -1439,7 +1443,6 @@ relock_queues:
 		} else
 			vm_page_requeue_locked(m);
 		vm_page_unlock(m);
-		m = next;
 	}
 	vm_pagequeue_unlock(pq);
 #if !defined(NO_SWAPPING)
@@ -1627,6 +1630,7 @@ vm_pageout_worker(void *arg)
 	 */
 	KASSERT(domain->vmd_segs != 0, ("domain without segments"));
 
+	domain->vmd_last_active_scan = ticks;
 	vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE);
 
 	/*
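For a sense of scale, assuming hz = 1000 and the default vm_pageout_update_period of 600 seconds: a scan that runs 5 seconds after the previous one against an active queue of 120,000 pages (an arbitrary example size) yields min_scan = 120000 * 5000 / (1000 * 600) = 1000 pages, i.e. the same 1/120 of the queue that 5 seconds represents of the 600-second sweep interval. Seeding vmd_last_active_scan with the current ticks value in vm_pageout_worker() ensures the first scan's elapsed-time delta starts from the worker's startup rather than from an uninitialized value.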