summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  sys/vm/vm_pageout.c  32
1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 60d1476..d2ccfa0 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -909,10 +909,8 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
{
vm_page_t m, next;
struct vm_pagequeue *pq;
- int page_shortage, maxscan, pcount;
- int addl_page_shortage;
vm_object_t object;
- int act_delta;
+ int act_delta, addl_page_shortage, deficit, maxscan, page_shortage;
int vnodes_skipped = 0;
int maxlaunder;
int lockmode;
@@ -942,13 +940,15 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
* number of pages from the inactive count that should be
* discounted in setting the target for the active queue scan.
*/
- addl_page_shortage = atomic_readandclear_int(&vm_pageout_deficit);
+ addl_page_shortage = 0;
+
+ deficit = atomic_readandclear_int(&vm_pageout_deficit);
/*
* Calculate the number of pages we want to either free or move
* to the cache.
*/
- page_shortage = vm_paging_target() + addl_page_shortage;
+ page_shortage = vm_paging_target() + deficit;
/*
* maxlaunder limits the number of dirty pages we flush per scan.
@@ -1245,6 +1245,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
*/
if (vm_page_busied(m)) {
vm_page_unlock(m);
+ addl_page_shortage++;
goto unlock_and_continue;
}
@@ -1252,9 +1253,9 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
* If the page has become held it might
* be undergoing I/O, so skip it
*/
- if (m->hold_count) {
+ if (m->hold_count != 0) {
vm_page_unlock(m);
- vm_page_requeue_locked(m);
+ addl_page_shortage++;
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
goto unlock_and_continue;
@@ -1309,19 +1310,20 @@ relock_queues:
* Compute the number of pages we want to try to move from the
* active queue to the inactive queue.
*/
+ page_shortage = cnt.v_inactive_target - cnt.v_inactive_count +
+ vm_paging_target() + deficit + addl_page_shortage;
+
pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
vm_pagequeue_lock(pq);
- pcount = pq->pq_cnt;
- page_shortage = vm_paging_target() +
- cnt.v_inactive_target - cnt.v_inactive_count;
- page_shortage += addl_page_shortage;
+ maxscan = pq->pq_cnt;
+
/*
* If we're just idle polling attempt to visit every
* active page within 'update_period' seconds.
*/
- if (pass == 0 && vm_pageout_update_period != 0) {
- pcount /= vm_pageout_update_period;
- page_shortage = pcount;
+ if (pass == 0 && vm_pageout_update_period != 0) {
+ maxscan /= vm_pageout_update_period;
+ page_shortage = maxscan;
}
/*
@@ -1330,7 +1332,7 @@ relock_queues:
* deactivation candidates.
*/
m = TAILQ_FIRST(&pq->pq_pl);
- while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
+ while (m != NULL && maxscan-- > 0 && page_shortage > 0) {
KASSERT(m->queue == PQ_ACTIVE,
("vm_pageout_scan: page %p isn't active", m));
OpenPOWER on IntegriCloud