summary | refs | log | tree | commit | diff | stats
path: root/sys
diff options
context:
space:
mode:
author: bde <bde@FreeBSD.org> 1997-03-29 04:35:26 +0000
committer: bde <bde@FreeBSD.org> 1997-03-29 04:35:26 +0000
commit 423123bb4c624b9fbf14f1f0b407ce17a10a8bd1 (patch)
tree e59eb6901758c50dd56e2e4c2126f676cb817f98 /sys
parent 141381e1cbdbd8001c13e9e8c0adbfc81d1b3cd0 (diff)
download: FreeBSD-src-423123bb4c624b9fbf14f1f0b407ce17a10a8bd1.zip
download: FreeBSD-src-423123bb4c624b9fbf14f1f0b407ce17a10a8bd1.tar.gz
Don't keep cpu interrupts enabled during the lookup in vm_page_zero_idle().
Lookup isn't done every time the system goes idle now, but it can still take > 1800 instructions in the worst case, so if cpu interrupts are kept disabled then it might lose 20 characters of sio input at 115200 bps. Fixed style in vm_page_zero_idle().
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/vm_machdep.c  60
-rw-r--r--  sys/i386/i386/vm_machdep.c    60
2 files changed, 70 insertions, 50 deletions
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 376af2a..0cf7d31 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- * $Id: vm_machdep.c,v 1.75 1997/02/22 09:33:01 peter Exp $
+ * $Id: vm_machdep.c,v 1.76 1997/03/22 04:28:16 dyson Exp $
*/
#include "npx.h"
@@ -847,41 +847,51 @@ grow(p, sp)
}
/*
- * prototype routine to implement the pre-zeroed page mechanism
- * this routine is called from the idle loop.
+ * Implement the pre-zeroed page mechanism.
+ * This routine is called from the idle loop.
*/
int
-vm_page_zero_idle() {
+vm_page_zero_idle()
+{
+ static int free_rover;
vm_page_t m;
- static int free_rover = 0;
-/* XXX
- * We stop zeroing pages when there are sufficent prezeroed pages.
- * This threshold isn't really needed, except we want to
- * bypass unneeded calls to vm_page_list_find, and the
- * associated cache flush and latency. The pre-zero will
- * still be called when there are significantly more
- * non-prezeroed pages than zeroed pages. The threshold
- * of half the number of reserved pages is arbitrary, but
- * approximately the right amount. Eventually, we should
- * perhaps interrupt the zero operation when a process
- * is found to be ready to run.
- */
- if (((cnt.v_free_count - vm_page_zero_count) > (cnt.v_free_reserved / 2)) &&
-#ifdef NOT_NEEDED
- (cnt.v_free_count > cnt.v_interrupt_free_min) &&
+ int s;
+
+#ifdef WRONG
+ if (cnt.v_free_count <= cnt.v_interrupt_free_min)
+ return (0);
#endif
- (m = vm_page_list_find(PQ_FREE, free_rover))) {
+ /*
+ * XXX
+ * We stop zeroing pages when there are sufficent prezeroed pages.
+ * This threshold isn't really needed, except we want to
+ * bypass unneeded calls to vm_page_list_find, and the
+ * associated cache flush and latency. The pre-zero will
+ * still be called when there are significantly more
+ * non-prezeroed pages than zeroed pages. The threshold
+ * of half the number of reserved pages is arbitrary, but
+ * approximately the right amount. Eventually, we should
+ * perhaps interrupt the zero operation when a process
+ * is found to be ready to run.
+ */
+ if (cnt.v_free_count - vm_page_zero_count <= cnt.v_free_reserved / 2)
+ return (0);
+ s = splvm();
+ enable_intr();
+ m = vm_page_list_find(PQ_FREE, free_rover);
+ if (m != NULL) {
--(*vm_page_queues[m->queue].lcnt);
TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
- enable_intr();
+ splx(s);
pmap_zero_page(VM_PAGE_TO_PHYS(m));
- disable_intr();
+ (void)splvm();
m->queue = PQ_ZERO + m->pc;
++(*vm_page_queues[m->queue].lcnt);
TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m, pageq);
free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
++vm_page_zero_count;
- return 1;
}
- return 0;
+ splx(s);
+ disable_intr();
+ return (1);
}
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 376af2a..0cf7d31 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- * $Id: vm_machdep.c,v 1.75 1997/02/22 09:33:01 peter Exp $
+ * $Id: vm_machdep.c,v 1.76 1997/03/22 04:28:16 dyson Exp $
*/
#include "npx.h"
@@ -847,41 +847,51 @@ grow(p, sp)
}
/*
- * prototype routine to implement the pre-zeroed page mechanism
- * this routine is called from the idle loop.
+ * Implement the pre-zeroed page mechanism.
+ * This routine is called from the idle loop.
*/
int
-vm_page_zero_idle() {
+vm_page_zero_idle()
+{
+ static int free_rover;
vm_page_t m;
- static int free_rover = 0;
-/* XXX
- * We stop zeroing pages when there are sufficent prezeroed pages.
- * This threshold isn't really needed, except we want to
- * bypass unneeded calls to vm_page_list_find, and the
- * associated cache flush and latency. The pre-zero will
- * still be called when there are significantly more
- * non-prezeroed pages than zeroed pages. The threshold
- * of half the number of reserved pages is arbitrary, but
- * approximately the right amount. Eventually, we should
- * perhaps interrupt the zero operation when a process
- * is found to be ready to run.
- */
- if (((cnt.v_free_count - vm_page_zero_count) > (cnt.v_free_reserved / 2)) &&
-#ifdef NOT_NEEDED
- (cnt.v_free_count > cnt.v_interrupt_free_min) &&
+ int s;
+
+#ifdef WRONG
+ if (cnt.v_free_count <= cnt.v_interrupt_free_min)
+ return (0);
#endif
- (m = vm_page_list_find(PQ_FREE, free_rover))) {
+ /*
+ * XXX
+ * We stop zeroing pages when there are sufficent prezeroed pages.
+ * This threshold isn't really needed, except we want to
+ * bypass unneeded calls to vm_page_list_find, and the
+ * associated cache flush and latency. The pre-zero will
+ * still be called when there are significantly more
+ * non-prezeroed pages than zeroed pages. The threshold
+ * of half the number of reserved pages is arbitrary, but
+ * approximately the right amount. Eventually, we should
+ * perhaps interrupt the zero operation when a process
+ * is found to be ready to run.
+ */
+ if (cnt.v_free_count - vm_page_zero_count <= cnt.v_free_reserved / 2)
+ return (0);
+ s = splvm();
+ enable_intr();
+ m = vm_page_list_find(PQ_FREE, free_rover);
+ if (m != NULL) {
--(*vm_page_queues[m->queue].lcnt);
TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
- enable_intr();
+ splx(s);
pmap_zero_page(VM_PAGE_TO_PHYS(m));
- disable_intr();
+ (void)splvm();
m->queue = PQ_ZERO + m->pc;
++(*vm_page_queues[m->queue].lcnt);
TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m, pageq);
free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
++vm_page_zero_count;
- return 1;
}
- return 0;
+ splx(s);
+ disable_intr();
+ return (1);
}
OpenPOWER on IntegriCloud