author    | dillon <dillon@FreeBSD.org> | 2001-07-05 01:32:42 +0000
committer | dillon <dillon@FreeBSD.org> | 2001-07-05 01:32:42 +0000
commit    | 1cf218e40f33f1f84ab5aea35ab812cfa1b46302 (patch)
tree      | 4f13877024500ba8eed1f8aa537da3e416268450 /sys
parent    | 93369f554a43c46419d56436721efe61e4b858c7 (diff)
download  | FreeBSD-src-1cf218e40f33f1f84ab5aea35ab812cfa1b46302.zip
          | FreeBSD-src-1cf218e40f33f1f84ab5aea35ab812cfa1b46302.tar.gz
Move vm_page_zero_idle() from machine-dependent sections to a
machine-independent source file, vm/vm_zeroidle.c. It was exactly the
same for all platforms and updating them all was getting annoying.
Diffstat (limited to 'sys')

-rw-r--r-- | sys/alpha/alpha/vm_machdep.c     |  64 |
-rw-r--r-- | sys/amd64/amd64/vm_machdep.c     |  61 |
-rw-r--r-- | sys/conf/files                   |   1 |
-rw-r--r-- | sys/i386/i386/vm_machdep.c       |  61 |
-rw-r--r-- | sys/ia64/ia64/vm_machdep.c       |  65 |
-rw-r--r-- | sys/powerpc/aim/vm_machdep.c     |  71 |
-rw-r--r-- | sys/powerpc/powerpc/vm_machdep.c |  71 |
-rw-r--r-- | sys/vm/vm_zeroidle.c             | 117 |

8 files changed, 118 insertions(+), 393 deletions(-)
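
The routine being consolidated is a two-threshold hysteresis: the idle loop keeps pre-zeroing free pages until the count of zeroed pages reaches 4/5 of the free page count (ZIDLE_HI), then latches off until allocations drain it below 2/3 (ZIDLE_LO). That keeps the system from 'fluttering' around a single threshold and from flushing the caches with continuous zeroing. Before the diff itself, a minimal stand-alone sketch of just that decision logic; the harness is illustrative and not part of the commit, with the kernel globals modeled as plain ints:

```c
#include <stdio.h>

/* Thresholds as in the diff: resume below 2/3, stop at 4/5. */
#define ZIDLE_LO(v)     ((v) * 2 / 3)
#define ZIDLE_HI(v)     ((v) * 4 / 5)

static int zero_state;          /* latched once ZIDLE_HI is reached */
static int vm_page_zero_count;  /* stand-in for the kernel counter */
static int v_free_count = 1000; /* stand-in for cnt.v_free_count */

/* Returns 1 if one page was "zeroed", 0 if the idle loop should rest. */
static int
zero_idle_sketch(void)
{
        if (zero_state && vm_page_zero_count >= ZIDLE_LO(v_free_count))
                return (0);     /* latched: wait until drained below LO */
        if (vm_page_zero_count >= ZIDLE_HI(v_free_count))
                return (0);     /* at the ceiling even when not latched */
        zero_state = 0;
        ++vm_page_zero_count;   /* stands in for zeroing one free page */
        if (vm_page_zero_count >= ZIDLE_HI(v_free_count))
                zero_state = 1; /* latch off until LO is crossed again */
        return (1);
}

int
main(void)
{
        int i, zeroed = 0;

        for (i = 0; i < 2000; i++)      /* 2000 idle "ticks" */
                zeroed += zero_idle_sketch();
        printf("zeroed %d pages, latched=%d\n", zeroed, zero_state);
        return (0);
}
```

With 1000 free pages the sketch does 800 units of work and then latches; in the kernel it is page consumption, which removes PG_ZERO pages and drops vm_page_zero_count, that eventually drags the count back below ZIDLE_LO and re-enables zeroing.
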
diff --git a/sys/alpha/alpha/vm_machdep.c b/sys/alpha/alpha/vm_machdep.c
index 77d73cf..195a934 100644
--- a/sys/alpha/alpha/vm_machdep.c
+++ b/sys/alpha/alpha/vm_machdep.c
@@ -406,70 +406,6 @@ grow_stack(p, sp)
 }
 
-static int cnt_prezero;
-
-SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-
-/*
- * Implement the pre-zeroed page mechanism.
- * This routine is called from the idle loop.
- */
-
-#define ZIDLE_LO(v)     ((v) * 2 / 3)
-#define ZIDLE_HI(v)     ((v) * 4 / 5)
-
-int
-vm_page_zero_idle()
-{
-        static int free_rover;
-        static int zero_state;
-        vm_page_t m;
-        int s;
-
-        /*
-         * Attempt to maintain approximately 1/2 of our free pages in a
-         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
-         * generally zeroing a page when the system is near steady-state.
-         * Otherwise we might get 'flutter' during disk I/O / IPC or
-         * fast sleeps.  We also do not want to be continuously zeroing
-         * pages because doing so may flush our L1 and L2 caches too much.
-         */
-
-        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
-                return(0);
-        }
-        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
-                return(0);
-        }
-        if (mtx_trylock(&Giant)) {
-                s = splvm();
-                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
-                zero_state = 0;
-                if (m != NULL && (m->flags & PG_ZERO) == 0) {
-                        vm_page_queues[m->queue].lcnt--;
-                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
-                        m->queue = PQ_NONE;
-                        splx(s);
-                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
-                        (void)splvm();
-                        vm_page_flag_set(m, PG_ZERO);
-                        m->queue = PQ_FREE + m->pc;
-                        vm_page_queues[m->queue].lcnt++;
-                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
-                            pageq);
-                        ++vm_page_zero_count;
-                        ++cnt_prezero;
-                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                                zero_state = 1;
-                }
-                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-                splx(s);
-                mtx_unlock(&Giant);
-                return (1);
-        }
-        return(0);
-}
-
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 51072e7..e5e5ea6 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -550,67 +550,6 @@ grow_stack(p, sp)
         return (1);
 }
 
-SYSCTL_DECL(_vm_stats_misc);
-
-static int cnt_prezero;
-
-SYSCTL_INT(_vm_stats_misc, OID_AUTO,
-        cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-
-/*
- * Implement the pre-zeroed page mechanism.
- * This routine is called from the idle loop.
- */
-
-#define ZIDLE_LO(v)     ((v) * 2 / 3)
-#define ZIDLE_HI(v)     ((v) * 4 / 5)
-
-int
-vm_page_zero_idle()
-{
-        static int free_rover;
-        static int zero_state;
-        vm_page_t m;
-
-        /*
-         * Attempt to maintain approximately 1/2 of our free pages in a
-         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
-         * generally zeroing a page when the system is near steady-state.
-         * Otherwise we might get 'flutter' during disk I/O / IPC or
-         * fast sleeps.  We also do not want to be continuously zeroing
-         * pages because doing so may flush our L1 and L2 caches too much.
-         */
-
-        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
-                return(0);
-        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                return(0);
-
-        if (mtx_trylock(&Giant)) {
-                zero_state = 0;
-                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
-                if (m != NULL && (m->flags & PG_ZERO) == 0) {
-                        vm_page_queues[m->queue].lcnt--;
-                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
-                        m->queue = PQ_NONE;
-                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
-                        vm_page_flag_set(m, PG_ZERO);
-                        m->queue = PQ_FREE + m->pc;
-                        vm_page_queues[m->queue].lcnt++;
-                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
-                            pageq);
-                        ++vm_page_zero_count;
-                        ++cnt_prezero;
-                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                                zero_state = 1;
-                }
-                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-                mtx_unlock(&Giant);
-                return (1);
-        }
-        return(0);
-}
-
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/conf/files b/sys/conf/files
index 06f46e2..adb512c 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1254,6 +1254,7 @@ vm/vm_object.c standard
 vm/vm_page.c            standard
 vm/vm_pageq.c           standard
 vm/vm_contig.c          standard
+vm/vm_zeroidle.c        standard
 vm/vm_pageout.c         standard
 vm/vm_pager.c           standard
 vm/vm_swap.c            standard
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 51072e7..e5e5ea6 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -550,67 +550,6 @@ grow_stack(p, sp)
         return (1);
 }
 
-SYSCTL_DECL(_vm_stats_misc);
-
-static int cnt_prezero;
-
-SYSCTL_INT(_vm_stats_misc, OID_AUTO,
-        cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-
-/*
- * Implement the pre-zeroed page mechanism.
- * This routine is called from the idle loop.
- */
-
-#define ZIDLE_LO(v)     ((v) * 2 / 3)
-#define ZIDLE_HI(v)     ((v) * 4 / 5)
-
-int
-vm_page_zero_idle()
-{
-        static int free_rover;
-        static int zero_state;
-        vm_page_t m;
-
-        /*
-         * Attempt to maintain approximately 1/2 of our free pages in a
-         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
-         * generally zeroing a page when the system is near steady-state.
-         * Otherwise we might get 'flutter' during disk I/O / IPC or
-         * fast sleeps.  We also do not want to be continuously zeroing
-         * pages because doing so may flush our L1 and L2 caches too much.
-         */
-
-        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
-                return(0);
-        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                return(0);
-
-        if (mtx_trylock(&Giant)) {
-                zero_state = 0;
-                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
-                if (m != NULL && (m->flags & PG_ZERO) == 0) {
-                        vm_page_queues[m->queue].lcnt--;
-                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
-                        m->queue = PQ_NONE;
-                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
-                        vm_page_flag_set(m, PG_ZERO);
-                        m->queue = PQ_FREE + m->pc;
-                        vm_page_queues[m->queue].lcnt++;
-                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
-                            pageq);
-                        ++vm_page_zero_count;
-                        ++cnt_prezero;
-                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                                zero_state = 1;
-                }
-                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-                mtx_unlock(&Giant);
-                return (1);
-        }
-        return(0);
-}
-
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/ia64/ia64/vm_machdep.c b/sys/ia64/ia64/vm_machdep.c
index c38f983..35a45b9 100644
--- a/sys/ia64/ia64/vm_machdep.c
+++ b/sys/ia64/ia64/vm_machdep.c
@@ -444,71 +444,6 @@ grow_stack(p, sp)
         return (1);
 }
 
-
-static int cnt_prezero;
-
-SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-
-/*
- * Implement the pre-zeroed page mechanism.
- * This routine is called from the idle loop.
- */
-
-#define ZIDLE_LO(v)     ((v) * 2 / 3)
-#define ZIDLE_HI(v)     ((v) * 4 / 5)
-
-int
-vm_page_zero_idle()
-{
-        static int free_rover;
-        static int zero_state;
-        vm_page_t m;
-        int s;
-
-        /*
-         * Attempt to maintain approximately 1/2 of our free pages in a
-         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
-         * generally zeroing a page when the system is near steady-state.
-         * Otherwise we might get 'flutter' during disk I/O / IPC or
-         * fast sleeps.  We also do not want to be continuously zeroing
-         * pages because doing so may flush our L1 and L2 caches too much.
-         */
-
-        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
-                return(0);
-        }
-        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
-                return(0);
-        }
-        if (mtx_trylock(&Giant)) {
-                s = splvm();
-                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
-                zero_state = 0;
-                if (m != NULL && (m->flags & PG_ZERO) == 0) {
-                        vm_page_queues[m->queue].lcnt--;
-                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
-                        m->queue = PQ_NONE;
-                        splx(s);
-                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
-                        (void)splvm();
-                        vm_page_flag_set(m, PG_ZERO);
-                        m->queue = PQ_FREE + m->pc;
-                        vm_page_queues[m->queue].lcnt++;
-                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
-                            pageq);
-                        ++vm_page_zero_count;
-                        ++cnt_prezero;
-                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                                zero_state = 1;
-                }
-                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-                splx(s);
-                mtx_unlock(&Giant);
-                return (1);
-        }
-        return(0);
-}
-
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index 98bbd27..6d50e27 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -316,77 +316,6 @@ grow_stack(p, sp)
         return (1);
 }
 
-
-static int cnt_prezero;
-
-SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-
-/*
- * Implement the pre-zeroed page mechanism.
- * This routine is called from the idle loop.
- */
-
-#define ZIDLE_LO(v)     ((v) * 2 / 3)
-#define ZIDLE_HI(v)     ((v) * 4 / 5)
-
-int
-vm_page_zero_idle()
-{
-        static int free_rover;
-        static int zero_state;
-        vm_page_t m;
-        int s;
-
-        /*
-         * Attempt to maintain approximately 1/2 of our free pages in a
-         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
-         * generally zeroing a page when the system is near steady-state.
-         * Otherwise we might get 'flutter' during disk I/O / IPC or
-         * fast sleeps.  We also do not want to be continuously zeroing
-         * pages because doing so may flush our L1 and L2 caches too much.
-         */
-
-        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
-                return(0);
-        }
-        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
-                return(0);
-        }
-        if (mtx_trylock(&Giant)) {
-                s = splvm();
-                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
-                zero_state = 0;
-                if (m != NULL && (m->flags & PG_ZERO) == 0) {
-                        vm_page_queues[m->queue].lcnt--;
-                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
-                        m->queue = PQ_NONE;
-                        splx(s);
-#if 0
-                        rel_mplock();
-#endif
-                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
-#if 0
-                        get_mplock();
-#endif
-                        (void)splvm();
-                        vm_page_flag_set(m, PG_ZERO);
-                        m->queue = PQ_FREE + m->pc;
-                        vm_page_queues[m->queue].lcnt++;
-                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
-                            pageq);
-                        ++vm_page_zero_count;
-                        ++cnt_prezero;
-                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                                zero_state = 1;
-                }
-                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-                splx(s);
-                mtx_unlock(&Giant);
-                return (1);
-        }
-        return(0);
-}
-
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index 98bbd27..6d50e27 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -316,77 +316,6 @@ grow_stack(p, sp)
         return (1);
 }
 
-
-static int cnt_prezero;
-
-SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
-
-/*
- * Implement the pre-zeroed page mechanism.
- * This routine is called from the idle loop.
- */
-
-#define ZIDLE_LO(v)     ((v) * 2 / 3)
-#define ZIDLE_HI(v)     ((v) * 4 / 5)
-
-int
-vm_page_zero_idle()
-{
-        static int free_rover;
-        static int zero_state;
-        vm_page_t m;
-        int s;
-
-        /*
-         * Attempt to maintain approximately 1/2 of our free pages in a
-         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
-         * generally zeroing a page when the system is near steady-state.
-         * Otherwise we might get 'flutter' during disk I/O / IPC or
-         * fast sleeps.  We also do not want to be continuously zeroing
-         * pages because doing so may flush our L1 and L2 caches too much.
-         */
-
-        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
-                return(0);
-        }
-        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
-                return(0);
-        }
-        if (mtx_trylock(&Giant)) {
-                s = splvm();
-                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
-                zero_state = 0;
-                if (m != NULL && (m->flags & PG_ZERO) == 0) {
-                        vm_page_queues[m->queue].lcnt--;
-                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
-                        m->queue = PQ_NONE;
-                        splx(s);
-#if 0
-                        rel_mplock();
-#endif
-                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
-#if 0
-                        get_mplock();
-#endif
-                        (void)splvm();
-                        vm_page_flag_set(m, PG_ZERO);
-                        m->queue = PQ_FREE + m->pc;
-                        vm_page_queues[m->queue].lcnt++;
-                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
-                            pageq);
-                        ++vm_page_zero_count;
-                        ++cnt_prezero;
-                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
-                                zero_state = 1;
-                }
-                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
-                splx(s);
-                mtx_unlock(&Giant);
-                return (1);
-        }
-        return(0);
-}
-
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
new file mode 100644
index 0000000..c1361de
--- /dev/null
+++ b/sys/vm/vm_zeroidle.c
@@ -0,0 +1,117 @@
+/*-
+ * Copyright (c) 1994 John Dyson
+ * Copyright (c) 2001 Matt Dillon
+ *
+ * All rights reserved.  Terms for use and redistribution
+ * are covered by the BSD Copyright as found in /usr/src/COPYRIGHT.
+ *
+ * from: @(#)vm_machdep.c  7.3 (Berkeley) 5/13/91
+ *      Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
+ * $FreeBSD$
+ */
+
+#include "opt_npx.h"
+#ifdef PC98
+#include "opt_pc98.h"
+#endif
+#include "opt_reset.h"
+#include "opt_isa.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/vnode.h>
+#include <sys/vmmeter.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/unistd.h>
+
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+#include <machine/pcb_ext.h>
+#include <machine/vm86.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <sys/lock.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+
+#include <sys/user.h>
+
+#ifdef PC98
+#include <pc98/pc98/pc98.h>
+#else
+#include <i386/isa/isa.h>
+#endif
+
+SYSCTL_DECL(_vm_stats_misc);
+
+static int cnt_prezero;
+
+SYSCTL_INT(_vm_stats_misc, OID_AUTO,
+        cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
+
+/*
+ * Implement the pre-zeroed page mechanism.
+ * This routine is called from the idle loop.
+ */
+
+#define ZIDLE_LO(v)     ((v) * 2 / 3)
+#define ZIDLE_HI(v)     ((v) * 4 / 5)
+
+int
+vm_page_zero_idle(void)
+{
+        static int free_rover;
+        static int zero_state;
+        vm_page_t m;
+
+        /*
+         * Attempt to maintain approximately 1/2 of our free pages in a
+         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
+         * generally zeroing a page when the system is near steady-state.
+         * Otherwise we might get 'flutter' during disk I/O / IPC or
+         * fast sleeps.  We also do not want to be continuously zeroing
+         * pages because doing so may flush our L1 and L2 caches too much.
+         */
+
+        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
+                return(0);
+        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+                return(0);
+
+        if (mtx_trylock(&Giant)) {
+                zero_state = 0;
+                m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
+                if (m != NULL && (m->flags & PG_ZERO) == 0) {
+                        vm_page_queues[m->queue].lcnt--;
+                        TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
+                        m->queue = PQ_NONE;
+                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
+                        vm_page_flag_set(m, PG_ZERO);
+                        m->queue = PQ_FREE + m->pc;
+                        vm_page_queues[m->queue].lcnt++;
+                        TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
+                            pageq);
+                        ++vm_page_zero_count;
+                        ++cnt_prezero;
+                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+                                zero_state = 1;
+                }
+                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
+                mtx_unlock(&Giant);
+                return (1);
+        }
+        return(0);
+}
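
A side note on the moved code: the rover update free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK walks the free-queue color indices with an odd (prime) stride. Since the number of queues is a power of two, an odd stride is coprime with it, so the rover touches every index exactly once per cycle and zeroing work is spread evenly across page colors. A small stand-alone check of that property, using illustrative constants (the kernel's real PQ_PRIME2 and PQ_L2_MASK depend on cache configuration):

```c
#include <stdio.h>

#define PQ_L2_SIZE      64                /* illustrative queue count */
#define PQ_L2_MASK      (PQ_L2_SIZE - 1)
#define PQ_PRIME2       17                /* odd, hence coprime with 64 */

int
main(void)
{
        int seen[PQ_L2_SIZE] = { 0 };
        int rover = 0, i, distinct = 0;

        for (i = 0; i < PQ_L2_SIZE; i++) {
                if (!seen[rover])
                        distinct++;
                seen[rover] = 1;
                rover = (rover + PQ_PRIME2) & PQ_L2_MASK;
        }
        /* Prints "visited 64 of 64": the stride covers every color. */
        printf("visited %d of %d indices\n", distinct, PQ_L2_SIZE);
        return (0);
}
```

Changing PQ_PRIME2 to any even value makes the sketch report fewer than 64 indices visited, which is why an odd prime stride is used for the rover.
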