diff options
author | attilio <attilio@FreeBSD.org> | 2007-06-10 21:59:14 +0000 |
---|---|---|
committer | attilio <attilio@FreeBSD.org> | 2007-06-10 21:59:14 +0000 |
commit | e9fc4edc4441a4cdc380539f97a90207de0cd092 (patch) | |
tree | 5076d8c3c653c4deb666bc72dcf20bf604e5fb6e /sys/vm/vm_pageout.c | |
parent | 445024c7ff4986844b8675e79a722921503413dc (diff) | |
download | FreeBSD-src-e9fc4edc4441a4cdc380539f97a90207de0cd092.zip FreeBSD-src-e9fc4edc4441a4cdc380539f97a90207de0cd092.tar.gz |
Optimize vmmeter locking.
In particular:
- Add an explanatory table for locking of struct vmmeter members
- Apply new rules for some of those members
- Remove some unnecessary comments
Heavily reviewed by: alc, bde, jeff
Approved by: jeff (mentor)
Diffstat (limited to 'sys/vm/vm_pageout.c')
-rw-r--r-- | sys/vm/vm_pageout.c | 17 |
1 file changed, 6 insertions, 11 deletions
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c index 99630ce..e0d7c5a 100644 --- a/sys/vm/vm_pageout.c +++ b/sys/vm/vm_pageout.c @@ -538,7 +538,7 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired) goto unlock_return; } next = TAILQ_NEXT(p, listq); - PCPU_INC(cnt.v_pdpages); + cnt.v_pdpages++; if (p->wire_count != 0 || p->hold_count != 0 || p->busy != 0 || @@ -745,7 +745,7 @@ rescan0: m != NULL && maxscan-- > 0 && page_shortage > 0; m = next) { - PCPU_INC(cnt.v_pdpages); + cnt.v_pdpages++; if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) { goto rescan0; @@ -856,7 +856,7 @@ rescan0: * Invalid pages can be easily freed */ vm_page_free(m); - PCPU_INC(cnt.v_dfree); + cnt.v_dfree++; --page_shortage; } else if (m->dirty == 0) { /* @@ -1089,7 +1089,7 @@ unlock_and_continue: * The count for pagedaemon pages is done after checking the * page for eligibility... */ - PCPU_INC(cnt.v_pdpages); + cnt.v_pdpages++; /* * Check to see "how much" the page has been used. @@ -1168,7 +1168,7 @@ unlock_and_continue: m)); vm_page_free(m); VM_OBJECT_UNLOCK(object); - PCPU_INC(cnt.v_dfree); + cnt.v_dfree++; cache_last_free = cache_cur; cache_first_failure = -1; break; @@ -1429,11 +1429,6 @@ vm_pageout() cnt.v_free_reserved = vm_pageout_page_count + cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_NUMCOLORS; cnt.v_free_severe = cnt.v_free_min / 2; - - /* - * Here adds don't need to be atomic since we are only initializing - * v_free_min and v_free_severe. - */ cnt.v_free_min += cnt.v_free_reserved; cnt.v_free_severe += cnt.v_free_reserved; @@ -1531,7 +1526,7 @@ vm_pageout() } } if (vm_pages_needed) - PCPU_INC(cnt.v_pdwakeups); + cnt.v_pdwakeups++; mtx_unlock(&vm_page_queue_free_mtx); vm_pageout_scan(pass); } |