author	alc <alc@FreeBSD.org>	2014-05-13 05:21:54 +0000
committer	alc <alc@FreeBSD.org>	2014-05-13 05:21:54 +0000
commit	498371bb43d94d13fdf5c8bb2c7014840ec696ab (patch)
tree	82b45223efd8c9ff8db0dd4b5e8fe6de1a7bbe90 /sys/vm
parent	2a67370d6908eb7a4340689dfc266e7e591d301f (diff)
MFC r260567
Correctly update the count of stuck pages, "addl_page_shortage", in vm_pageout_scan().  There were missing increments in two less common cases.

Don't conflate the count of stuck pages and the pageout deficit provided by vm_page_alloc{,_contig}().

Handle held pages consistently in the inactive queue scan.  In the more common case, we did not move the page to the tail of the queue.  Whereas, in the less common case, we did.  There's no particular reason to move the page in the less common case, so remove it.

Perform the calculation of the page shortage for the active queue scan a little earlier, before the active queue lock is acquired.  The correctness of this calculation doesn't depend on the active queue lock being held.

Eliminate a redundant variable, "pcount".  Use the more descriptive variable, "maxscan", in its place.

Apply a few nearby style fixes, e.g., eliminate stray whitespace and excess parentheses.
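To make the bookkeeping described above concrete, here is a small, self-contained C sketch of the accounting scheme.  It is an illustration only, not the kernel code: the struct, the helper names, and the sample numbers are hypothetical stand-ins for the vm_page/vm_pagequeue machinery and for vm_paging_target().

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a vm_page; only the fields the sketch needs. */
struct page {
	bool	busied;		/* models vm_page_busied(m) */
	int	hold_count;	/* models m->hold_count */
};

/*
 * Inactive queue pass: pages that are busied or held are "stuck" and are
 * counted in addl_page_shortage rather than being requeued, mirroring the
 * two cases fixed by this change.
 */
static int
count_stuck_pages(const struct page *pages, int npages)
{
	int addl_page_shortage = 0;

	for (int i = 0; i < npages; i++)
		if (pages[i].busied || pages[i].hold_count != 0)
			addl_page_shortage++;
	return (addl_page_shortage);
}

/*
 * Active queue scan target, computed before any queue lock would be taken:
 * the pageout deficit and the stuck-page count are kept separate and only
 * summed here.  All inputs are plain integers in this sketch.
 */
static int
active_scan_target(int inactive_target, int inactive_count,
    int paging_target, int deficit, int addl_page_shortage)
{
	return (inactive_target - inactive_count + paging_target + deficit +
	    addl_page_shortage);
}

int
main(void)
{
	struct page pages[] = {
		{ .busied = true,  .hold_count = 0 },	/* stuck: busied */
		{ .busied = false, .hold_count = 2 },	/* stuck: held */
		{ .busied = false, .hold_count = 0 },	/* reclaimable */
	};
	int stuck = count_stuck_pages(pages, 3);

	printf("stuck pages: %d, active scan target: %d\n", stuck,
	    active_scan_target(1000, 600, 64, 32, stuck));
	return (0);
}

The sketch only demonstrates keeping the pageout deficit separate from the stuck-page count; in the real vm_pageout_scan() those two quantities feed the inactive and active queue scans shown in the diff below.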
Diffstat (limited to 'sys/vm')
-rw-r--r--	sys/vm/vm_pageout.c	32
1 file changed, 17 insertions, 15 deletions
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 60d1476..d2ccfa0 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -909,10 +909,8 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 {
 	vm_page_t m, next;
 	struct vm_pagequeue *pq;
-	int page_shortage, maxscan, pcount;
-	int addl_page_shortage;
 	vm_object_t object;
-	int act_delta;
+	int act_delta, addl_page_shortage, deficit, maxscan, page_shortage;
 	int vnodes_skipped = 0;
 	int maxlaunder;
 	int lockmode;
@@ -942,13 +940,15 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 	 * number of pages from the inactive count that should be
 	 * discounted in setting the target for the active queue scan.
 	 */
-	addl_page_shortage = atomic_readandclear_int(&vm_pageout_deficit);
+	addl_page_shortage = 0;
+
+	deficit = atomic_readandclear_int(&vm_pageout_deficit);
 
 	/*
 	 * Calculate the number of pages we want to either free or move
 	 * to the cache.
 	 */
-	page_shortage = vm_paging_target() + addl_page_shortage;
+	page_shortage = vm_paging_target() + deficit;
 
 	/*
 	 * maxlaunder limits the number of dirty pages we flush per scan.
@@ -1245,6 +1245,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 				 */
 				if (vm_page_busied(m)) {
 					vm_page_unlock(m);
+					addl_page_shortage++;
 					goto unlock_and_continue;
 				}
 
@@ -1252,9 +1253,9 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 				 * If the page has become held it might
 				 * be undergoing I/O, so skip it
 				 */
-				if (m->hold_count) {
+				if (m->hold_count != 0) {
 					vm_page_unlock(m);
-					vm_page_requeue_locked(m);
+					addl_page_shortage++;
 					if (object->flags & OBJ_MIGHTBEDIRTY)
 						vnodes_skipped++;
 					goto unlock_and_continue;
@@ -1309,19 +1310,20 @@ relock_queues:
 	 * Compute the number of pages we want to try to move from the
 	 * active queue to the inactive queue.
 	 */
+	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count +
+	    vm_paging_target() + deficit + addl_page_shortage;
+
 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
 	vm_pagequeue_lock(pq);
-	pcount = pq->pq_cnt;
-	page_shortage = vm_paging_target() +
-	    cnt.v_inactive_target - cnt.v_inactive_count;
-	page_shortage += addl_page_shortage;
+	maxscan = pq->pq_cnt;
+
 	/*
 	 * If we're just idle polling attempt to visit every
 	 * active page within 'update_period' seconds.
 	 */
-	if (pass == 0 && vm_pageout_update_period != 0) {
-		pcount /= vm_pageout_update_period;
-		page_shortage = pcount;
+	if (pass == 0 && vm_pageout_update_period != 0) {
+		maxscan /= vm_pageout_update_period;
+		page_shortage = maxscan;
 	}
 
 	/*
@@ -1330,7 +1332,7 @@ relock_queues:
 	 * deactivation candidates.
 	 */
 	m = TAILQ_FIRST(&pq->pq_pl);
-	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
+	while (m != NULL && maxscan-- > 0 && page_shortage > 0) {
 		KASSERT(m->queue == PQ_ACTIVE,
 		    ("vm_pageout_scan: page %p isn't active", m));