path: root/sys/vm/vm_page.c
author     dyson <dyson@FreeBSD.org>  1996-07-27 03:24:10 +0000
committer  dyson <dyson@FreeBSD.org>  1996-07-27 03:24:10 +0000
commit     293abd3564090ccf9196bbd4bc949163b91fce62 (patch)
tree       e65b39cf45164a5e0f930222b2d2e90148f2f518  /sys/vm/vm_page.c
parent     5306d9c9d6bc06b09eb3c7ca700475faafbee5c1 (diff)
This commit is meant to solve several VM system problems and performance issues:

1) The pmap module has had too many inlines, and so the object file is simply bigger than it needs to be. Some common code is also merged into subroutines.

2) Removal of some *evil* PHYS_TO_VM_PAGE macro calls. Unfortunately, a few have needed to be added as well. The removal caused the need for more vm_page_lookups, so lookup hints were added to minimize the need for the page table lookup operations.

3) Removal of some bogus performance improvements that mostly made the code more complex (tracking individual page table page updates unnecessarily). Those improvements actually hurt performance on 386 processors (not that people who worry about performance use 386 processors anymore :-)).

4) Changed pv queue manipulations/structures to be TAILQs.

5) The pv queue code has had some performance problems since day one. Some significant scalability issues are resolved by threading the pv entries from the pmap AND the physical address instead of just the physical address. This makes certain pmap operations run much faster. It does not affect most micro-benchmarks, but should help loaded system performance *significantly*. DG helped and came up with most of the solution for this one.

6) Most if not all pmap bit operations followed the pattern pmap_test_bit(); pmap_clear_bit();, which made for twice the necessary pv list traversal. The pmap interface now supports only combined pmap_tc_bit-style operations: pmap_[test/clear]_modified and pmap_[test/clear]_referenced. Additionally, the modified routine now takes a vm_page_t argument instead of a physical address, which eliminates a PHYS_TO_VM_PAGE operation (a sketch of the combined pattern follows this list).

7) Several routines that contained redundant code were rewritten to use common routines, so that there is a greater likelihood of keeping the cache footprint smaller.
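
Item 6 is the interface change visible at the bottom of the diff below, where the pmap_clear_modify(VM_PAGE_TO_PHYS(m)) and pmap_is_modified(VM_PAGE_TO_PHYS(m)) calls become single pmap_tc_modified(m) calls. The following is only a minimal sketch of what such a combined test-and-clear routine can look like; the pv-entry layout and names used here (m->pv_list, pv_plist, pv_pmap, pv_va, PG_M) are illustrative assumptions, not the pmap code from this commit:

/*
 * Sketch only -- the structure layout is assumed for illustration.
 * One walk of the page's pv list both tests and clears the hardware
 * modified (dirty) bit, replacing the old two-pass
 * pmap_is_modified(); pmap_clear_modify(); sequence.
 */
int
pmap_tc_modified(m)
	vm_page_t m;
{
	register pv_entry_t pv;
	register pt_entry_t *pte;
	int rv = 0;
	int s;

	s = splvm();
	for (pv = TAILQ_FIRST(&m->pv_list); pv != NULL;
	    pv = TAILQ_NEXT(pv, pv_plist)) {
		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
		if (*pte & PG_M) {
			rv = 1;
			*pte &= ~PG_M;	/* clear it in the same pass */
			/* a real pmap would also invalidate the TLB entry */
		}
	}
	if (rv)
		m->dirty = VM_PAGE_BITS_ALL;	/* assumed: record dirtiness so
						   callers such as
						   vm_page_test_dirty() need not
						   check the return value */
	splx(s);
	return (rv);
}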
Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r--  sys/vm/vm_page.c  49
1 files changed, 16 insertions, 33 deletions
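
In the vm_page.c hunks that follow, the separate vm_page_unqueue_nowakeup() entry point is folded into vm_page_unqueue(), which now takes an explicit wakeup flag. The callers in this file translate mechanically:

	vm_page_unqueue(m);           ->  vm_page_unqueue(m, 1);  /* may wake the pagedaemon */
	vm_page_unqueue_nowakeup(m);  ->  vm_page_unqueue(m, 0);  /* never wakes it */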
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 4a95e6e..79dd930 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.59 1996/06/21 05:39:22 dyson Exp $
+ * $Id: vm_page.c,v 1.60 1996/06/26 05:39:25 dyson Exp $
*/
/*
@@ -385,7 +385,7 @@ vm_page_hash(object, pindex)
* The object and page must be locked, and must be splhigh.
*/
-__inline void
+void
vm_page_insert(m, object, pindex)
register vm_page_t m;
register vm_object_t object;
@@ -434,7 +434,7 @@ vm_page_insert(m, object, pindex)
* The object and page must be locked, and at splhigh.
*/
-__inline void
+void
vm_page_remove(m)
register vm_page_t m;
{
@@ -523,34 +523,19 @@ vm_page_rename(m, new_object, new_pindex)
}
/*
- * vm_page_unqueue without any wakeup
- */
-__inline void
-vm_page_unqueue_nowakeup(m)
- vm_page_t m;
-{
- int queue = m->queue;
- if (queue != PQ_NONE) {
- m->queue = PQ_NONE;
- TAILQ_REMOVE(vm_page_queues[queue].pl, m, pageq);
- --(*vm_page_queues[queue].cnt);
- }
-}
-
-
-/*
* vm_page_unqueue must be called at splhigh();
*/
__inline void
-vm_page_unqueue(m)
+vm_page_unqueue(m, wakeup)
vm_page_t m;
+ int wakeup;
{
int queue = m->queue;
if (queue != PQ_NONE) {
m->queue = PQ_NONE;
TAILQ_REMOVE(vm_page_queues[queue].pl, m, pageq);
--(*vm_page_queues[queue].cnt);
- if (queue == PQ_CACHE) {
+ if ((queue == PQ_CACHE) && wakeup) {
if ((cnt.v_cache_count + cnt.v_free_count) <
(cnt.v_free_reserved + cnt.v_cache_min))
pagedaemon_wakeup();
@@ -736,7 +721,7 @@ vm_page_activate(m)
if (m->queue == PQ_CACHE)
cnt.v_reactivated++;
- vm_page_unqueue(m);
+ vm_page_unqueue(m, 1);
if (m->wire_count == 0) {
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
@@ -751,7 +736,7 @@ vm_page_activate(m)
/*
* helper routine for vm_page_free and vm_page_free_zero
*/
-static int
+__inline static int
vm_page_freechk_and_unqueue(m)
vm_page_t m;
{
@@ -769,7 +754,7 @@ vm_page_freechk_and_unqueue(m)
}
vm_page_remove(m);
- vm_page_unqueue_nowakeup(m);
+ vm_page_unqueue(m,0);
if ((m->flags & PG_FICTITIOUS) != 0) {
return 0;
}
@@ -788,7 +773,7 @@ vm_page_freechk_and_unqueue(m)
/*
* helper routine for vm_page_free and vm_page_free_zero
*/
-static __inline void
+__inline static void
vm_page_free_wakeup()
{
@@ -895,7 +880,7 @@ vm_page_wire(m)
if (m->wire_count == 0) {
s = splvm();
- vm_page_unqueue(m);
+ vm_page_unqueue(m,1);
splx(s);
cnt.v_wire_count++;
}
@@ -961,7 +946,7 @@ vm_page_deactivate(m)
if (m->wire_count == 0 && m->hold_count == 0) {
if (m->queue == PQ_CACHE)
cnt.v_reactivated++;
- vm_page_unqueue(m);
+ vm_page_unqueue(m,1);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
m->queue = PQ_INACTIVE;
cnt.v_inactive_count++;
@@ -992,7 +977,7 @@ vm_page_cache(m)
panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
}
s = splvm();
- vm_page_unqueue_nowakeup(m);
+ vm_page_unqueue(m,0);
TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
m->queue = PQ_CACHE;
cnt.v_cache_count++;
@@ -1031,7 +1016,7 @@ vm_page_set_validclean(m, base, size)
m->valid |= pagebits;
m->dirty &= ~pagebits;
if( base == 0 && size == PAGE_SIZE)
- pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ pmap_tc_modified(m);
}
/*
@@ -1071,10 +1056,8 @@ void
vm_page_test_dirty(m)
vm_page_t m;
{
- if ((m->dirty != VM_PAGE_BITS_ALL) &&
- pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
- m->dirty = VM_PAGE_BITS_ALL;
- }
+ if (m->dirty != VM_PAGE_BITS_ALL)
+ pmap_tc_modified(m);
}
/*