path: root/sys/amd64/amd64/vm_machdep.c
author     dillon <dillon@FreeBSD.org>    1999-02-08 00:37:36 +0000
committer  dillon <dillon@FreeBSD.org>    1999-02-08 00:37:36 +0000
commit     b7a0b99c31ad20c9d83cd00f353e622a25f80182 (patch)
tree       287ab6f615e494be7a3958316918da47ac087e12 /sys/amd64/amd64/vm_machdep.c
parent     3e732af5eab8cbd6d41b59b62aa54411e81cd91c (diff)
Rip out the PQ_ZERO queue.  PQ_ZERO functionality is now combined with
PQ_FREE.  There is little operational difference other than the kernel
being a few kilobytes smaller and the code being more readable.

* vm_page_select_free() has been *greatly* simplified.
* The PQ_ZERO page queue and supporting structures have been removed.
* vm_page_zero_idle() has been revamped (see below).

PG_ZERO setting and clearing has been migrated from vm_page_alloc() to
vm_page_free[_zero]() and will eventually be guaranteed to remain
tracked throughout a page's life (if it isn't already).

When a page is freed, PG_ZERO pages are appended to the appropriate
tailq in the PQ_FREE queue, while non-PG_ZERO pages are prepended.
When locating a new free page, PG_ZERO selection operates from within
vm_page_list_find() (it takes the page from the end of the queue
instead of the beginning) and occurs only in the nominal critical-path
case.  If the nominal case misses, both normal and zero-page
allocation devolve into the same _vm_page_list_find() selection code,
without any zero-page-specific optimizations.

Additionally, vm_page_zero_idle() has been revamped.  Hysteresis has
been added, and zero-page tracking has been adjusted to conform with
the other changes.  The hysteresis is currently set at 1/3 (lo) and
1/2 (hi) of the number of free pages; we may wish to increase both
parameters as time permits.  The hysteresis is designed to avoid
silly zeroing in borderline allocation/free situations.
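The append/prepend policy is easy to picture in isolation.  What follows
is a minimal sketch, not the actual vm_page code: free_page(),
alloc_page(), the single flat pglist, and the simplified struct page are
illustrative stand-ins (the real PQ_FREE is a set of per-color tailqs),
built on the same sys/queue.h TAILQ macros the kernel uses.

	#include <stddef.h>
	#include <sys/queue.h>

	#define PG_ZERO 0x0040		/* stand-in for the real vm_page flag */

	struct page {
		int flags;
		TAILQ_ENTRY(page) pageq;
	};
	TAILQ_HEAD(pglist, page);

	/*
	 * Free a page: prezeroed pages go to the tail of the free queue,
	 * dirty pages to the head, so the two classes stay segregated
	 * without a separate PQ_ZERO queue.
	 */
	static void
	free_page(struct pglist *q, struct page *m)
	{
		if (m->flags & PG_ZERO)
			TAILQ_INSERT_TAIL(q, m, pageq);
		else
			TAILQ_INSERT_HEAD(q, m, pageq);
	}

	/*
	 * Allocate a page: callers that want a prezeroed page take from
	 * the tail; everyone else takes from the head, leaving the
	 * zeroed pages undisturbed.
	 */
	static struct page *
	alloc_page(struct pglist *q, int prefer_zero)
	{
		struct page *m;

		m = prefer_zero ? TAILQ_LAST(q, pglist) : TAILQ_FIRST(q);
		if (m != NULL)
			TAILQ_REMOVE(q, m, pageq);
		return (m);
	}

Consistent with this, the hunk below calls vm_page_list_find(PQ_FREE,
free_rover, FALSE): the idle zeroer wants a page that has not yet been
zeroed, and it additionally skips any candidate that already has
PG_ZERO set.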
Diffstat (limited to 'sys/amd64/amd64/vm_machdep.c')
-rw-r--r--  sys/amd64/amd64/vm_machdep.c | 42
1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 297a10f..2e9e6aa 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- * $Id: vm_machdep.c,v 1.115 1999/01/06 23:05:37 julian Exp $
+ * $Id: vm_machdep.c,v 1.116 1999/01/29 08:36:42 dillon Exp $
*/
#include "npx.h"
@@ -590,31 +590,32 @@ int
vm_page_zero_idle()
{
static int free_rover;
+ static int zero_state;
vm_page_t m;
int s;
/*
- * XXX
- * We stop zeroing pages when there are sufficent prezeroed pages.
- * This threshold isn't really needed, except we want to
- * bypass unneeded calls to vm_page_list_find, and the
- * associated cache flush and latency. The pre-zero will
- * still be called when there are significantly more
- * non-prezeroed pages than zeroed pages. The threshold
- * of half the number of reserved pages is arbitrary, but
- * approximately the right amount. Eventually, we should
- * perhaps interrupt the zero operation when a process
- * is found to be ready to run.
+ * Attempt to maintain approximately 1/2 of our free pages in a
+ * PG_ZERO'd state. Add some hysteresis to (attempt to) avoid
+ * generally zeroing a page when the system is near steady-state.
+ * Otherwise we might get 'flutter' during disk I/O / IPC or
+ * fast sleeps. We also do not want to be continuously zeroing
+ * pages because doing so may flush our L1 and L2 caches too much.
*/
- if (cnt.v_free_count - vm_page_zero_count <= cnt.v_free_reserved / 2)
- return (0);
+
+ if (zero_state && vm_page_zero_count >= cnt.v_free_count / 3)
+ return(0);
+ if (vm_page_zero_count >= cnt.v_free_count / 2)
+ return(0);
+
#ifdef SMP
if (try_mplock()) {
#endif
s = splvm();
__asm __volatile("sti" : : : "memory");
- m = vm_page_list_find(PQ_FREE, free_rover);
- if (m != NULL) {
+ zero_state = 0;
+ m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
+ if (m != NULL && (m->flags & PG_ZERO) == 0) {
--(*vm_page_queues[m->queue].lcnt);
TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
m->queue = PQ_NONE;
@@ -627,14 +628,17 @@ vm_page_zero_idle()
get_mplock();
#endif
(void)splvm();
- m->queue = PQ_ZERO + m->pc;
+ vm_page_flag_set(m, PG_ZERO);
+ m->queue = PQ_FREE + m->pc;
++(*vm_page_queues[m->queue].lcnt);
- TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m,
+ TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m,
pageq);
- free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
++vm_page_zero_count;
++cnt_prezero;
+ if (vm_page_zero_count >= cnt.v_free_count / 2)
+ zero_state = 1;
}
+ free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
splx(s);
__asm __volatile("cli" : : : "memory");
#ifdef SMP
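
The hysteresis band from the log message can be isolated from the hunk
above as a standalone sketch; should_zero_page() and note_page_zeroed()
are illustrative names, not the kernel's, and the counters are passed in
rather than read from the global cnt structure.

	static int zero_state;	/* latched once the 1/2 high-water mark is hit */

	/*
	 * Decide whether the idle loop should zero another free page.
	 * Zeroing stops once prezeroed pages reach 1/2 of the free count
	 * and does not resume until they fall below 1/3, so with e.g.
	 * 1200 free pages the zeroer idles between 400 and 600 prezeroed
	 * pages instead of toggling on every allocation or free.
	 */
	static int
	should_zero_page(int zeroed, int free)
	{
		if (zero_state && zeroed >= free / 3)
			return (0);	/* coasting down toward the low mark */
		if (zeroed >= free / 2)
			return (0);	/* high mark reached: go idle */
		zero_state = 0;		/* actively zeroing again */
		return (1);
	}

	/* Called after a page is zeroed; latch idle state at the high mark. */
	static void
	note_page_zeroed(int zeroed, int free)
	{
		if (zeroed >= free / 2)
			zero_state = 1;
	}

The asymmetric band (stop at 1/2, resume below 1/3) keeps the zeroer
from toggling on and off while the prezeroed count hovers near a single
threshold, which is exactly the 'flutter' the new comment warns about.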