author      dyson <dyson@FreeBSD.org>   1995-09-03 20:40:43 +0000
committer   dyson <dyson@FreeBSD.org>   1995-09-03 20:40:43 +0000
commit      19cfe6b04c65e8642786c0ed37f1699b1461c945 (patch)
tree        5f4ed8f7995c003619ed9cd8360e2533d535e372 /sys/vm
parent      94c170a64751b11530d3a419d295d6fd6f38c938 (diff)
Machine independent changes to support pre-zeroed free pages. This
significantly improves demand-zero performance.
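In outline: pages cleared ahead of time sit on their own queue, vm_page_alloc() prefers that queue when the caller passes VM_ALLOC_ZERO, and the caller skips its zero-fill whenever the returned page has PG_ZERO set. The following is a minimal user-space sketch of that queue discipline only, not the kernel code in the diff below; struct page, page_alloc(), page_prezero_one(), TOY_PAGE_SIZE and the flag values are made up for illustration, and it assumes a libc that provides the BSD <sys/queue.h> TAILQ macros.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <sys/queue.h>

#define TOY_PAGE_SIZE   4096
#define PG_BUSY         0x01    /* page handed out to a caller */
#define PG_ZERO         0x02    /* page came off the pre-zeroed queue */

struct page {
        TAILQ_ENTRY(page) pageq;
        int               flags;
        char              data[TOY_PAGE_SIZE];
};

TAILQ_HEAD(pglist, page);

static struct pglist free_queue = TAILQ_HEAD_INITIALIZER(free_queue);
static struct pglist zero_queue = TAILQ_HEAD_INITIALIZER(zero_queue);

/*
 * Hand out a page.  Callers that want a cleared page are served from
 * the zero queue first; everyone else drains the ordinary free queue
 * first, so pre-zeroed pages are not wasted on callers that will
 * overwrite them anyway.
 */
static struct page *
page_alloc(bool want_zero)
{
        struct page *m;

        if (want_zero && (m = TAILQ_FIRST(&zero_queue)) != NULL) {
                TAILQ_REMOVE(&zero_queue, m, pageq);
                m->flags = PG_BUSY | PG_ZERO;
        } else if ((m = TAILQ_FIRST(&free_queue)) != NULL) {
                TAILQ_REMOVE(&free_queue, m, pageq);
                m->flags = PG_BUSY;
        } else if ((m = TAILQ_FIRST(&zero_queue)) != NULL) {
                /* Fall back to the zero queue even for plain requests. */
                TAILQ_REMOVE(&zero_queue, m, pageq);
                m->flags = PG_BUSY | PG_ZERO;
        } else {
                m = NULL;       /* nothing left on either queue */
        }
        return (m);
}

/* Idle-time helper: clear one free page and park it on the zero queue. */
static void
page_prezero_one(void)
{
        struct page *m = TAILQ_FIRST(&free_queue);

        if (m != NULL) {
                TAILQ_REMOVE(&free_queue, m, pageq);
                memset(m->data, 0, TOY_PAGE_SIZE);
                TAILQ_INSERT_TAIL(&zero_queue, m, pageq);
        }
}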
Diffstat (limited to 'sys/vm')
-rw-r--r--   sys/vm/vm_fault.c   11
-rw-r--r--   sys/vm/vm_kern.c    14
-rw-r--r--   sys/vm/vm_page.c    71
3 files changed, 72 insertions, 24 deletions
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index debb3c9..1fa59dc 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.25 1995/05/30 08:15:59 rgrimes Exp $
+ * $Id: vm_fault.c,v 1.26 1995/07/13 08:48:20 davidg Exp $
*/
/*
@@ -310,8 +310,8 @@ RetryFault:;
/*
* Allocate a new page for this object/offset pair.
*/
-
- m = vm_page_alloc(object, offset, VM_ALLOC_NORMAL);
+ m = vm_page_alloc(object, offset,
+ vp?VM_ALLOC_NORMAL:(VM_ALLOC_NORMAL|VM_ALLOC_ZERO));
if (m == NULL) {
UNLOCK_AND_DEALLOCATE;
@@ -439,7 +439,8 @@ readrest:
}
first_m = NULL;
- vm_page_zero_fill(m);
+ if ((m->flags & PG_ZERO) == 0)
+ vm_page_zero_fill(m);
m->valid = VM_PAGE_BITS_ALL;
cnt.v_zfod++;
break;
@@ -637,7 +638,7 @@ readrest:
}
}
- m->flags |= PG_MAPPED;
+ m->flags |= PG_MAPPED|PG_REFERENCED;
pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
#if 0
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 1f21ec5..e4ab774 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_kern.c,v 1.14 1995/07/13 08:48:25 davidg Exp $
+ * $Id: vm_kern.c,v 1.15 1995/07/29 11:44:20 bde Exp $
*/
/*
@@ -178,11 +178,12 @@ kmem_alloc(map, size)
for (i = 0; i < size; i += PAGE_SIZE) {
vm_page_t mem;
- while ((mem = vm_page_alloc(kernel_object, offset + i, VM_ALLOC_NORMAL)) == NULL) {
+ while ((mem = vm_page_alloc(kernel_object, offset + i, (VM_ALLOC_NORMAL|VM_ALLOC_ZERO))) == NULL) {
VM_WAIT;
}
- vm_page_zero_fill(mem);
- mem->flags &= ~PG_BUSY;
+ if ((mem->flags & PG_ZERO) == 0)
+ vm_page_zero_fill(mem);
+ mem->flags &= ~(PG_BUSY|PG_ZERO);
mem->valid = VM_PAGE_BITS_ALL;
}
@@ -346,10 +347,7 @@ kmem_malloc(map, size, waitflag)
vm_map_unlock(map);
return (0);
}
-#if 0
- vm_page_zero_fill(m);
-#endif
- m->flags &= ~PG_BUSY;
+ m->flags &= ~(PG_BUSY|PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
}
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index e0e834c..4eaf273 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.34 1995/07/20 05:28:07 davidg Exp $
+ * $Id: vm_page.c,v 1.35 1995/09/03 19:57:25 dyson Exp $
*/
/*
@@ -88,6 +88,7 @@ int vm_page_bucket_count; /* How big is array? */
int vm_page_hash_mask; /* Mask for hash function */
struct pglist vm_page_queue_free;
+struct pglist vm_page_queue_zero;
struct pglist vm_page_queue_active;
struct pglist vm_page_queue_inactive;
struct pglist vm_page_queue_cache;
@@ -199,6 +200,7 @@ vm_page_startup(starta, enda, vaddr)
*/
TAILQ_INIT(&vm_page_queue_free);
+ TAILQ_INIT(&vm_page_queue_zero);
TAILQ_INIT(&vm_page_queue_active);
TAILQ_INIT(&vm_page_queue_inactive);
TAILQ_INIT(&vm_page_queue_cache);
@@ -536,6 +538,8 @@ vm_page_unqueue(vm_page_t mem)
* VM_ALLOC_NORMAL normal process request
* VM_ALLOC_SYSTEM system *really* needs a page
* VM_ALLOC_INTERRUPT interrupt time request
+ * or in:
+ * VM_ALLOC_ZERO zero page
*
* Object must be locked.
*/
@@ -565,18 +569,37 @@ vm_page_alloc(object, offset, page_req)
s = splhigh();
- mem = vm_page_queue_free.tqh_first;
-
- switch (page_req) {
+ switch ((page_req & ~(VM_ALLOC_ZERO))) {
case VM_ALLOC_NORMAL:
if (cnt.v_free_count >= cnt.v_free_reserved) {
- TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
+ if (page_req & VM_ALLOC_ZERO) {
+ mem = vm_page_queue_zero.tqh_first;
+ if (mem) {
+ TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
+ mem->flags = PG_BUSY|PG_ZERO;
+ } else {
+ mem = vm_page_queue_free.tqh_first;
+ TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
+ mem->flags = PG_BUSY;
+ }
+ } else {
+ mem = vm_page_queue_free.tqh_first;
+ if (mem) {
+ TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
+ mem->flags = PG_BUSY;
+ } else {
+ mem = vm_page_queue_zero.tqh_first;
+ TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
+ mem->flags = PG_BUSY|PG_ZERO;
+ }
+ }
cnt.v_free_count--;
} else {
mem = vm_page_queue_cache.tqh_first;
if (mem != NULL) {
TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
vm_page_remove(mem);
+ mem->flags = PG_BUSY;
cnt.v_cache_count--;
} else {
splx(s);
@@ -590,13 +613,34 @@ vm_page_alloc(object, offset, page_req)
if ((cnt.v_free_count >= cnt.v_free_reserved) ||
((cnt.v_cache_count == 0) &&
(cnt.v_free_count >= cnt.v_interrupt_free_min))) {
- TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
+ if (page_req & VM_ALLOC_ZERO) {
+ mem = vm_page_queue_zero.tqh_first;
+ if (mem) {
+ TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
+ mem->flags = PG_BUSY|PG_ZERO;
+ } else {
+ mem = vm_page_queue_free.tqh_first;
+ TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
+ mem->flags = PG_BUSY;
+ }
+ } else {
+ mem = vm_page_queue_free.tqh_first;
+ if (mem) {
+ TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
+ mem->flags = PG_BUSY;
+ } else {
+ mem = vm_page_queue_zero.tqh_first;
+ TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
+ mem->flags = PG_BUSY|PG_ZERO;
+ }
+ }
cnt.v_free_count--;
} else {
mem = vm_page_queue_cache.tqh_first;
if (mem != NULL) {
TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
vm_page_remove(mem);
+ mem->flags = PG_BUSY;
cnt.v_cache_count--;
} else {
splx(s);
@@ -607,8 +651,16 @@ vm_page_alloc(object, offset, page_req)
break;
case VM_ALLOC_INTERRUPT:
- if (mem != NULL) {
- TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
+ if (cnt.v_free_count > 0) {
+ mem = vm_page_queue_free.tqh_first;
+ if (mem) {
+ TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
+ mem->flags = PG_BUSY;
+ } else {
+ mem = vm_page_queue_zero.tqh_first;
+ TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
+ mem->flags = PG_BUSY|PG_ZERO;
+ }
cnt.v_free_count--;
} else {
splx(s);
@@ -621,7 +673,6 @@ vm_page_alloc(object, offset, page_req)
panic("vm_page_alloc: invalid allocation class");
}
- mem->flags = PG_BUSY;
mem->wire_count = 0;
mem->hold_count = 0;
mem->act_count = 0;
@@ -904,7 +955,6 @@ vm_page_deactivate(m)
spl = splhigh();
if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
m->hold_count == 0) {
- pmap_clear_reference(VM_PAGE_TO_PHYS(m));
if (m->flags & PG_CACHE)
cnt.v_reactivated++;
vm_page_unqueue(m);
@@ -962,7 +1012,6 @@ vm_page_zero_fill(m)
vm_page_t m;
{
pmap_zero_page(VM_PAGE_TO_PHYS(m));
- m->valid = VM_PAGE_BITS_ALL;
return (TRUE);
}
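On the caller side, the vm_fault.c and vm_kern.c hunks above share one shape: request the page with VM_ALLOC_ZERO, zero-fill only when PG_ZERO came back clear, and drop PG_ZERO together with PG_BUSY when the page is passed on. Below is a sketch of that pattern written against the toy allocator shown under the commit message (page_alloc(), page_prezero_one(), struct page, free_queue and TOY_PAGE_SIZE come from that sketch, not from the kernel sources), with a small main() that exercises it.

#include <assert.h>
#include <stdio.h>

/*
 * Demand-zero style consumer: prefer a pre-zeroed page and pay for a
 * memset() (the stand-in for vm_page_zero_fill()) only when the zero
 * queue could not cover the request.
 */
static struct page *
page_alloc_zeroed(void)
{
        struct page *m = page_alloc(true);

        if (m != NULL && (m->flags & PG_ZERO) == 0)
                memset(m->data, 0, TOY_PAGE_SIZE);
        return (m);
}

int
main(void)
{
        static struct page pool[4];

        /* Seed the toy free queue with dirty pages, then pre-zero two
         * of them "at idle time". */
        for (int i = 0; i < 4; i++) {
                memset(pool[i].data, 0xa5, TOY_PAGE_SIZE);
                TAILQ_INSERT_TAIL(&free_queue, &pool[i], pageq);
        }
        page_prezero_one();
        page_prezero_one();

        for (int i = 0; i < 4; i++) {
                struct page *m = page_alloc_zeroed();

                assert(m != NULL && m->data[0] == 0);
                printf("page %d: %s\n", i,
                    (m->flags & PG_ZERO) ? "came pre-zeroed"
                                         : "zero-filled on demand");
                /* As kmem_alloc() does above, clear PG_ZERO along with
                 * PG_BUSY so a stale flag never leaks to the next user. */
                m->flags &= ~(PG_BUSY | PG_ZERO);
        }
        return (0);
}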