author     attilio <attilio@FreeBSD.org>  2007-06-04 21:45:18 +0000
committer  attilio <attilio@FreeBSD.org>  2007-06-04 21:45:18 +0000
commit     9bd4fdf7ce811d83f0305cacc5990ec339df9f13 (patch)
tree       9c942b3b9815da8b9fd9ecd5f775fdc264e8e2dc /sys/vm/vm_pageout.c
parent     e333d0ff0eb23a5f94f36fd95b4bbcfda3ccbc8f (diff)
Do proper "locking" for the missing part of the vmmeter work.
We no longer assume sched_lock protection for some of the counters; instead,
use the distributed-load method for vmmeter (the updates are distributed
across the CPUs).

Reviewed by:	alc, bde
Approved by:	jeff (mentor)
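The "distributed loads" method the log message refers to gives every CPU its
own copy of the statistic: each CPU increments only its own copy, so the
update no longer needs a global lock such as sched_lock, and whoever reads
the statistic sums the per-CPU values when a total is wanted. In the patch
below this shows up as the mechanical replacement of cnt.v_foo++ with
PCPU_INC(cnt.v_foo) on the page daemon's frequently run paths. The userland
sketch that follows only illustrates the idea; NSLOTS, slot[] and worker()
are invented names, not the kernel's actual PCPU_INC()/vmmeter machinery.

/*
 * Per-"CPU" counter sketch: each worker thread stands in for a CPU and
 * bumps only its own slot; the reader sums the slots afterwards.
 */
#include <pthread.h>
#include <stdio.h>

#define NSLOTS	4			/* stand-in for the number of CPUs */

static struct {
	unsigned long v_pdpages;	/* the per-CPU piece of the counter */
	char pad[64 - sizeof(unsigned long)];	/* avoid false sharing */
} slot[NSLOTS];

static void *
worker(void *arg)
{
	int self = *(int *)arg;		/* stand-in for curcpu */
	int i;

	for (i = 0; i < 1000000; i++)
		slot[self].v_pdpages++;	/* the PCPU_INC()-like fast path */
	return (NULL);
}

int
main(void)
{
	pthread_t tid[NSLOTS];
	int id[NSLOTS];
	unsigned long total;
	int i;

	for (i = 0; i < NSLOTS; i++) {
		id[i] = i;
		pthread_create(&tid[i], NULL, worker, &id[i]);
	}
	for (i = 0; i < NSLOTS; i++)
		pthread_join(tid[i], NULL);

	/* Readers, not writers, pay the cost: sum the per-CPU slots. */
	total = 0;
	for (i = 0; i < NSLOTS; i++)
		total += slot[i].v_pdpages;
	printf("v_pdpages total: %lu\n", total);
	return (0);
}

The trade-off is that a reader sees a value that is only approximately
current while updates are in flight, which is acceptable for statistics such
as v_pdpages, v_dfree and v_pdwakeups.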
Diffstat (limited to 'sys/vm/vm_pageout.c')
-rw-r--r--  sys/vm/vm_pageout.c  |  17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index c0611ba..dcf69ef 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -538,7 +538,7 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired)
goto unlock_return;
}
next = TAILQ_NEXT(p, listq);
- cnt.v_pdpages++;
+ PCPU_INC(cnt.v_pdpages);
if (p->wire_count != 0 ||
p->hold_count != 0 ||
p->busy != 0 ||
@@ -745,7 +745,7 @@ rescan0:
m != NULL && maxscan-- > 0 && page_shortage > 0;
m = next) {
- cnt.v_pdpages++;
+ PCPU_INC(cnt.v_pdpages);
if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
goto rescan0;
@@ -856,7 +856,7 @@ rescan0:
* Invalid pages can be easily freed
*/
vm_page_free(m);
- cnt.v_dfree++;
+ PCPU_INC(cnt.v_dfree);
--page_shortage;
} else if (m->dirty == 0) {
/*
@@ -1089,7 +1089,7 @@ unlock_and_continue:
* The count for pagedaemon pages is done after checking the
* page for eligibility...
*/
- cnt.v_pdpages++;
+ PCPU_INC(cnt.v_pdpages);
/*
* Check to see "how much" the page has been used.
@@ -1168,7 +1168,7 @@ unlock_and_continue:
m));
vm_page_free(m);
VM_OBJECT_UNLOCK(object);
- cnt.v_dfree++;
+ PCPU_INC(cnt.v_dfree);
cache_last_free = cache_cur;
cache_first_failure = -1;
break;
@@ -1427,6 +1427,11 @@ vm_pageout()
cnt.v_free_reserved = vm_pageout_page_count +
cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_NUMCOLORS;
cnt.v_free_severe = cnt.v_free_min / 2;
+
+ /*
+ * Here adds don't need to be atomic since we are only initializing
+ * v_free_min and v_free_severe.
+ */
cnt.v_free_min += cnt.v_free_reserved;
cnt.v_free_severe += cnt.v_free_reserved;
@@ -1524,7 +1529,7 @@ vm_pageout()
}
}
if (vm_pages_needed)
- cnt.v_pdwakeups++;
+ PCPU_INC(cnt.v_pdwakeups);
mtx_unlock(&vm_page_queue_free_mtx);
vm_pageout_scan(pass);
}
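The comment added in vm_pageout()'s setup code makes the complementary
point: the adjustments to v_free_min and v_free_severe are one-time
initialization performed before the daemon's main loop, so plain additions
suffice there, and only the statistics bumped repeatedly at run time
(v_pdpages, v_dfree and v_pdwakeups above) are converted to PCPU_INC(). A
minimal sketch of that split, using invented names rather than the kernel's:

#include <stdio.h>

/* Invented stand-ins for the vmmeter thresholds being initialized. */
struct thresholds {
	unsigned int free_reserved;
	unsigned int free_min;
	unsigned int free_severe;
};

/*
 * Runs exactly once, before any concurrent updaters exist, so ordinary
 * arithmetic is enough; no atomics or per-CPU machinery is needed here.
 */
static void
init_thresholds(struct thresholds *t)
{
	t->free_min += t->free_reserved;
	t->free_severe += t->free_reserved;
}

int
main(void)
{
	/* Purely illustrative numbers, not real tuning values. */
	struct thresholds t = {
		.free_reserved = 171, .free_min = 857, .free_severe = 428
	};

	init_thresholds(&t);
	printf("free_min %u, free_severe %u\n", t.free_min, t.free_severe);
	return (0);
}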