summaryrefslogtreecommitdiffstats
path: root/sys/powerpc
diff options
context:
space:
mode:
authorsilby <silby@FreeBSD.org>2002-02-27 18:03:02 +0000
committersilby <silby@FreeBSD.org>2002-02-27 18:03:02 +0000
commit230f96f3ce0496343800b107e3e952de9d7747de (patch)
treec901dac940b687287b5e926cf9d3fee071a8a002 /sys/powerpc
parent62c8af563fb2047ffe16514abfeef77eea1691b8 (diff)
downloadFreeBSD-src-230f96f3ce0496343800b107e3e952de9d7747de.zip
FreeBSD-src-230f96f3ce0496343800b107e3e952de9d7747de.tar.gz
Fix a horribly suboptimal algorithm in the vm_daemon.
In order to determine what to page out, the vm_daemon checks reference bits on all pages belonging to all processes. Unfortunately, the algorithm used interacted badly with shared pages; each shared page would be checked once per process sharing it; this caused an O(N^2) growth of TLB invalidations. The algorithm has been changed so that each page will be checked only 16 times. Prior to this change, a fork/sleepbomb of 1300 processes could cause the vm_daemon to take over 60 seconds to complete, effectively freezing the system for that time period. With this change in place, the vm_daemon completes in less than a second. Any system with hundreds of processes sharing pages should benefit from this change. Note that the vm_daemon is only run when the system is under extreme memory pressure. It is likely that many people with loaded systems saw no symptoms of this problem until they reached the point where swapping began. Special thanks go to dillon, peter, and Chuck Cranor, who helped me get up to speed with vm internals. PR: 33542, 20393 Reviewed by: dillon MFC after: 1 week
Diffstat (limited to 'sys/powerpc')
-rw-r--r--sys/powerpc/aim/mmu_oea.c22
-rw-r--r--sys/powerpc/powerpc/mmu_oea.c22
-rw-r--r--sys/powerpc/powerpc/pmap.c22
3 files changed, 63 insertions, 3 deletions
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 0ba2131..2bd4633 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -936,6 +936,19 @@ pmap_clear_reference(vm_page_t m)
TODO;
}
+/*
+ * pmap_ts_referenced:
+ *
+ * Return a count of reference bits for a page, clearing those bits.
+ * It is not necessary for every reference bit to be cleared, but it
+ * is necessary that 0 only be returned when there are truly no
+ * reference bits set.
+ *
+ * XXX: The exact number of bits to check and clear is a matter that
+ * should be tested and standardized at some point in the future for
+ * optimal aging of shared pages.
+ */
+
int
pmap_ts_referenced(vm_page_t m)
{
@@ -1160,8 +1173,15 @@ pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
{
}
+/*
+ * Returns true if the pmap's pv is one of the first
+ * 16 pvs linked to from this page. This count may
+ * be changed upwards or downwards in the future; it
+ * is only necessary that true be returned for a small
+ * subset of pmaps for proper page aging.
+ */
boolean_t
-pmap_page_exists(pmap_t pmap, vm_page_t m)
+pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
TODO;
return (0);
diff --git a/sys/powerpc/powerpc/mmu_oea.c b/sys/powerpc/powerpc/mmu_oea.c
index 0ba2131..2bd4633 100644
--- a/sys/powerpc/powerpc/mmu_oea.c
+++ b/sys/powerpc/powerpc/mmu_oea.c
@@ -936,6 +936,19 @@ pmap_clear_reference(vm_page_t m)
TODO;
}
+/*
+ * pmap_ts_referenced:
+ *
+ * Return a count of reference bits for a page, clearing those bits.
+ * It is not necessary for every reference bit to be cleared, but it
+ * is necessary that 0 only be returned when there are truly no
+ * reference bits set.
+ *
+ * XXX: The exact number of bits to check and clear is a matter that
+ * should be tested and standardized at some point in the future for
+ * optimal aging of shared pages.
+ */
+
int
pmap_ts_referenced(vm_page_t m)
{
@@ -1160,8 +1173,15 @@ pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
{
}
+/*
+ * Returns true if the pmap's pv is one of the first
+ * 16 pvs linked to from this page. This count may
+ * be changed upwards or downwards in the future; it
+ * is only necessary that true be returned for a small
+ * subset of pmaps for proper page aging.
+ */
boolean_t
-pmap_page_exists(pmap_t pmap, vm_page_t m)
+pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
TODO;
return (0);
diff --git a/sys/powerpc/powerpc/pmap.c b/sys/powerpc/powerpc/pmap.c
index 0ba2131..2bd4633 100644
--- a/sys/powerpc/powerpc/pmap.c
+++ b/sys/powerpc/powerpc/pmap.c
@@ -936,6 +936,19 @@ pmap_clear_reference(vm_page_t m)
TODO;
}
+/*
+ * pmap_ts_referenced:
+ *
+ * Return a count of reference bits for a page, clearing those bits.
+ * It is not necessary for every reference bit to be cleared, but it
+ * is necessary that 0 only be returned when there are truly no
+ * reference bits set.
+ *
+ * XXX: The exact number of bits to check and clear is a matter that
+ * should be tested and standardized at some point in the future for
+ * optimal aging of shared pages.
+ */
+
int
pmap_ts_referenced(vm_page_t m)
{
@@ -1160,8 +1173,15 @@ pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
{
}
+/*
+ * Returns true if the pmap's pv is one of the first
+ * 16 pvs linked to from this page. This count may
+ * be changed upwards or downwards in the future; it
+ * is only necessary that true be returned for a small
+ * subset of pmaps for proper page aging.
+ */
boolean_t
-pmap_page_exists(pmap_t pmap, vm_page_t m)
+pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
TODO;
return (0);
OpenPOWER on IntegriCloud