path: root/sys/vm/vm_page.c
Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r--  sys/vm/vm_page.c | 748
1 file changed, 494 insertions(+), 254 deletions(-)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 757d4fd..dc19ea0 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
*
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.11 1994/10/18 14:59:19 davidg Exp $
+ * $Id: vm_page.c,v 1.12 1994/10/23 06:15:04 davidg Exp $
*/
/*
@@ -42,17 +42,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -82,27 +82,39 @@
* page structure.
*/
-struct pglist *vm_page_buckets; /* Array of buckets */
-int vm_page_bucket_count = 0; /* How big is array? */
-int vm_page_hash_mask; /* Mask for hash function */
-simple_lock_data_t bucket_lock; /* lock for all buckets XXX */
+struct pglist *vm_page_buckets; /* Array of buckets */
+int vm_page_bucket_count = 0; /* How big is array? */
+int vm_page_hash_mask; /* Mask for hash function */
+simple_lock_data_t bucket_lock; /* lock for all buckets XXX */
-struct pglist vm_page_queue_free;
-struct pglist vm_page_queue_active;
-struct pglist vm_page_queue_inactive;
-simple_lock_data_t vm_page_queue_lock;
-simple_lock_data_t vm_page_queue_free_lock;
+struct pglist vm_page_queue_free;
+struct pglist vm_page_queue_active;
+struct pglist vm_page_queue_inactive;
+struct pglist vm_page_queue_cache;
+simple_lock_data_t vm_page_queue_lock;
+simple_lock_data_t vm_page_queue_free_lock;
/* has physical page allocation been initialized? */
boolean_t vm_page_startup_initialized;
-vm_page_t vm_page_array;
-long first_page;
-long last_page;
-vm_offset_t first_phys_addr;
-vm_offset_t last_phys_addr;
-vm_size_t page_mask;
-int page_shift;
+vm_page_t vm_page_array;
+int vm_page_array_size;
+long first_page;
+long last_page;
+vm_offset_t first_phys_addr;
+vm_offset_t last_phys_addr;
+vm_size_t page_mask;
+int page_shift;
+
+/*
+ * map of contiguous valid DEV_BSIZE chunks in a page
+ * (this list is valid for page sizes upto 16*DEV_BSIZE)
+ */
+static u_short vm_page_dev_bsize_chunks[] = {
+ 0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
+ 0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
+
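Note: the table above has a simple closed form; entry n is a mask with the n
low bits set, one bit per DEV_BSIZE chunk. An equivalent function, shown for
illustration only (not part of this commit):

/*
 * Closed-form equivalent of vm_page_dev_bsize_chunks[]: a mask
 * with the n low chunk bits set. Illustrative sketch only.
 */
static u_short
vm_page_chunk_mask(int n)
{
	return (u_short) ((1 << n) - 1);
}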
/*
* vm_set_page_size:
@@ -113,7 +125,8 @@ int page_shift;
*
* Sets page_shift and page_mask from cnt.v_page_size.
*/
-void vm_set_page_size()
+void
+vm_set_page_size()
{
if (cnt.v_page_size == 0)
@@ -121,7 +134,7 @@ void vm_set_page_size()
page_mask = cnt.v_page_size - 1;
if ((page_mask & cnt.v_page_size) != 0)
panic("vm_set_page_size: page size not a power of two");
- for (page_shift = 0; ; page_shift++)
+ for (page_shift = 0;; page_shift++)
if ((1 << page_shift) == cnt.v_page_size)
break;
}
@@ -138,24 +151,25 @@ void vm_set_page_size()
vm_offset_t
vm_page_startup(starta, enda, vaddr)
- register vm_offset_t starta;
- vm_offset_t enda;
- register vm_offset_t vaddr;
+ register vm_offset_t starta;
+ vm_offset_t enda;
+ register vm_offset_t vaddr;
{
- register vm_offset_t mapped;
- register vm_page_t m;
+ register vm_offset_t mapped;
+ register vm_page_t m;
register struct pglist *bucket;
- vm_size_t npages, page_range;
- register vm_offset_t new_start;
- int i;
- vm_offset_t pa;
+ vm_size_t npages, page_range;
+ register vm_offset_t new_start;
+ int i;
+ vm_offset_t pa;
int nblocks;
- vm_offset_t first_managed_page;
+ vm_offset_t first_managed_page;
- extern vm_offset_t kentry_data;
- extern vm_size_t kentry_data_size;
+ extern vm_offset_t kentry_data;
+ extern vm_size_t kentry_data_size;
extern vm_offset_t phys_avail[];
-/* the biggest memory array is the second group of pages */
+
+ /* the biggest memory array is the second group of pages */
vm_offset_t start;
vm_offset_t biggestone, biggestsize;
@@ -169,11 +183,12 @@ vm_page_startup(starta, enda, vaddr)
for (i = 0; phys_avail[i + 1]; i += 2) {
phys_avail[i] = round_page(phys_avail[i]);
- phys_avail[i+1] = trunc_page(phys_avail[i+1]);
+ phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
}
-
+
for (i = 0; phys_avail[i + 1]; i += 2) {
- int size = phys_avail[i+1] - phys_avail[i];
+ int size = phys_avail[i + 1] - phys_avail[i];
+
if (size > biggestsize) {
biggestone = i;
biggestsize = size;
@@ -186,56 +201,54 @@ vm_page_startup(starta, enda, vaddr)
/*
- * Initialize the locks
+ * Initialize the locks
*/
simple_lock_init(&vm_page_queue_free_lock);
simple_lock_init(&vm_page_queue_lock);
/*
- * Initialize the queue headers for the free queue,
- * the active queue and the inactive queue.
+ * Initialize the queue headers for the free queue, the active queue
+ * and the inactive queue.
*/
TAILQ_INIT(&vm_page_queue_free);
TAILQ_INIT(&vm_page_queue_active);
TAILQ_INIT(&vm_page_queue_inactive);
+ TAILQ_INIT(&vm_page_queue_cache);
/*
- * Allocate (and initialize) the hash table buckets.
- *
- * The number of buckets MUST BE a power of 2, and
- * the actual value is the next power of 2 greater
- * than the number of physical pages in the system.
- *
- * Note:
- * This computation can be tweaked if desired.
+ * Allocate (and initialize) the hash table buckets.
+ *
+ * The number of buckets MUST BE a power of 2, and the actual value is
+ * the next power of 2 greater than the number of physical pages in
+ * the system.
+ *
+ * Note: This computation can be tweaked if desired.
*/
- vm_page_buckets = (struct pglist *)vaddr;
+ vm_page_buckets = (struct pglist *) vaddr;
bucket = vm_page_buckets;
if (vm_page_bucket_count == 0) {
vm_page_bucket_count = 1;
while (vm_page_bucket_count < atop(total))
vm_page_bucket_count <<= 1;
}
-
-
vm_page_hash_mask = vm_page_bucket_count - 1;
/*
- * Validate these addresses.
+ * Validate these addresses.
*/
new_start = start + vm_page_bucket_count * sizeof(struct pglist);
new_start = round_page(new_start);
mapped = vaddr;
vaddr = pmap_map(mapped, start, new_start,
- VM_PROT_READ|VM_PROT_WRITE);
+ VM_PROT_READ | VM_PROT_WRITE);
start = new_start;
bzero((caddr_t) mapped, vaddr - mapped);
mapped = vaddr;
- for (i = 0; i< vm_page_bucket_count; i++) {
+ for (i = 0; i < vm_page_bucket_count; i++) {
TAILQ_INIT(bucket);
bucket++;
}
@@ -243,49 +256,54 @@ vm_page_startup(starta, enda, vaddr)
simple_lock_init(&bucket_lock);
/*
- * round (or truncate) the addresses to our page size.
+ * round (or truncate) the addresses to our page size.
*/
/*
- * Pre-allocate maps and map entries that cannot be dynamically
- * allocated via malloc(). The maps include the kernel_map and
- * kmem_map which must be initialized before malloc() will
- * work (obviously). Also could include pager maps which would
- * be allocated before kmeminit.
- *
- * Allow some kernel map entries... this should be plenty
- * since people shouldn't be cluttering up the kernel
- * map (they should use their own maps).
+ * Pre-allocate maps and map entries that cannot be dynamically
+ * allocated via malloc(). The maps include the kernel_map and
+ * kmem_map which must be initialized before malloc() will work
+ * (obviously). Also could include pager maps which would be
+ * allocated before kmeminit.
+ *
+ * Allow some kernel map entries... this should be plenty since people
+ * shouldn't be cluttering up the kernel map (they should use their
+ * own maps).
*/
kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
- MAX_KMAPENT * sizeof(struct vm_map_entry);
+ MAX_KMAPENT * sizeof(struct vm_map_entry);
kentry_data_size = round_page(kentry_data_size);
kentry_data = (vm_offset_t) vaddr;
vaddr += kentry_data_size;
/*
- * Validate these zone addresses.
+ * Validate these zone addresses.
*/
new_start = start + (vaddr - mapped);
- pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
bzero((caddr_t) mapped, (vaddr - mapped));
start = round_page(new_start);
/*
- * Compute the number of pages of memory that will be
- * available for use (taking into account the overhead
- * of a page structure per page).
+ * Compute the number of pages of memory that will be available for
+ * use (taking into account the overhead of a page structure per
+ * page).
*/
- npages = (total - (start - phys_avail[biggestone])) / (PAGE_SIZE + sizeof(struct vm_page));
first_page = phys_avail[0] / PAGE_SIZE;
- page_range = (phys_avail[(nblocks-1)*2 + 1] - phys_avail[0]) / PAGE_SIZE;
+ /* for VM_PAGE_CHECK() */
+ last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
+
+ page_range = last_page - (phys_avail[0] / PAGE_SIZE);
+ npages = (total - (page_range * sizeof(struct vm_page)) -
+ (start - phys_avail[biggestone])) / PAGE_SIZE;
+
/*
- * Initialize the mem entry structures now, and
- * put them in the free queue.
+ * Initialize the mem entry structures now, and put them in the free
+ * queue.
*/
vm_page_array = (vm_page_t) vaddr;
@@ -293,25 +311,26 @@ vm_page_startup(starta, enda, vaddr)
/*
- * Validate these addresses.
+ * Validate these addresses.
*/
- new_start = round_page(start + page_range * sizeof (struct vm_page));
+ new_start = round_page(start + page_range * sizeof(struct vm_page));
mapped = pmap_map(mapped, start, new_start,
- VM_PROT_READ|VM_PROT_WRITE);
+ VM_PROT_READ | VM_PROT_WRITE);
start = new_start;
first_managed_page = start / PAGE_SIZE;
/*
- * Clear all of the page structures
+ * Clear all of the page structures
*/
- bzero((caddr_t)vm_page_array, page_range * sizeof(struct vm_page));
+ bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
+ vm_page_array_size = page_range;
cnt.v_page_count = 0;
- cnt.v_free_count= 0;
+ cnt.v_free_count = 0;
for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
- if (i == biggestone)
+ if (i == biggestone)
pa = ptoa(first_managed_page);
else
pa = phys_avail[i];
@@ -319,7 +338,8 @@ vm_page_startup(starta, enda, vaddr)
++cnt.v_page_count;
++cnt.v_free_count;
m = PHYS_TO_VM_PAGE(pa);
- m->flags = PG_CLEAN | PG_FREE;
+ m->flags = PG_FREE;
+ vm_page_set_clean(m, 0, PAGE_SIZE);
m->object = 0;
m->phys_addr = pa;
m->hold_count = 0;
@@ -329,12 +349,12 @@ vm_page_startup(starta, enda, vaddr)
}
/*
- * Initialize vm_pages_needed lock here - don't wait for pageout
- * daemon XXX
+ * Initialize vm_pages_needed lock here - don't wait for pageout
+ * daemon XXX
*/
simple_lock_init(&vm_pages_needed_lock);
- return(mapped);
+ return (mapped);
}
/*
@@ -349,7 +369,7 @@ vm_page_hash(object, offset)
vm_object_t object;
vm_offset_t offset;
{
- return ((unsigned)object + offset/NBPG) & vm_page_hash_mask;
+ return ((unsigned) object + offset / NBPG) & vm_page_hash_mask;
}
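Because vm_page_bucket_count is forced to a power of two at startup, masking
with vm_page_hash_mask is equivalent to a modulo by the bucket count. A worked
example with assumed values (NBPG == 4096 and 1024 buckets, so
vm_page_hash_mask == 0x3ff; neither value is fixed by this diff):

/*
 * object == (vm_object_t) 0xf0123400, offset == 0x2000:
 *
 *	(0xf0123400 + 0x2000 / 4096) & 0x3ff
 *	    == 0xf0123402 & 0x3ff == 0x002
 *
 * so this object/offset pair lands in bucket 2.
 */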
/*
@@ -361,13 +381,14 @@ vm_page_hash(object, offset)
* The object and page must be locked.
*/
-void vm_page_insert(mem, object, offset)
- register vm_page_t mem;
- register vm_object_t object;
- register vm_offset_t offset;
+void
+vm_page_insert(mem, object, offset)
+ register vm_page_t mem;
+ register vm_object_t object;
+ register vm_offset_t offset;
{
- register struct pglist *bucket;
- int s;
+ register struct pglist *bucket;
+ int s;
VM_PAGE_CHECK(mem);
@@ -375,14 +396,14 @@ void vm_page_insert(mem, object, offset)
panic("vm_page_insert: already inserted");
/*
- * Record the object/offset pair in this page
+ * Record the object/offset pair in this page
*/
mem->object = object;
mem->offset = offset;
/*
- * Insert it into the object_object/offset hash table
+ * Insert it into the object_object/offset hash table
*/
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
@@ -393,15 +414,14 @@ void vm_page_insert(mem, object, offset)
(void) splx(s);
/*
- * Now link into the object's list of backed pages.
+ * Now link into the object's list of backed pages.
*/
TAILQ_INSERT_TAIL(&object->memq, mem, listq);
mem->flags |= PG_TABLED;
/*
- * And show that the object has one more resident
- * page.
+ * And show that the object has one more resident page.
*/
object->resident_page_count++;
@@ -417,19 +437,21 @@ void vm_page_insert(mem, object, offset)
* The object and page must be locked.
*/
-void vm_page_remove(mem)
- register vm_page_t mem;
+void
+vm_page_remove(mem)
+ register vm_page_t mem;
{
- register struct pglist *bucket;
- int s;
+ register struct pglist *bucket;
+ int s;
VM_PAGE_CHECK(mem);
+
if (!(mem->flags & PG_TABLED))
return;
/*
- * Remove from the object_object/offset hash table
+ * Remove from the object_object/offset hash table
*/
bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
@@ -440,14 +462,13 @@ void vm_page_remove(mem)
(void) splx(s);
/*
- * Now remove from the object's list of backed pages.
+ * Now remove from the object's list of backed pages.
*/
TAILQ_REMOVE(&mem->object->memq, mem, listq);
/*
- * And show that the object has one fewer resident
- * page.
+ * And show that the object has one fewer resident page.
*/
mem->object->resident_page_count--;
@@ -464,16 +485,17 @@ void vm_page_remove(mem)
* The object must be locked. No side effects.
*/
-vm_page_t vm_page_lookup(object, offset)
- register vm_object_t object;
- register vm_offset_t offset;
+vm_page_t
+vm_page_lookup(object, offset)
+ register vm_object_t object;
+ register vm_offset_t offset;
{
- register vm_page_t mem;
- register struct pglist *bucket;
- int s;
+ register vm_page_t mem;
+ register struct pglist *bucket;
+ int s;
/*
- * Search the hash table for this object/offset pair
+ * Search the hash table for this object/offset pair
*/
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
@@ -485,13 +507,13 @@ vm_page_t vm_page_lookup(object, offset)
if ((mem->object == object) && (mem->offset == offset)) {
simple_unlock(&bucket_lock);
splx(s);
- return(mem);
+ return (mem);
}
}
simple_unlock(&bucket_lock);
splx(s);
- return(NULL);
+ return (NULL);
}
/*
@@ -502,21 +524,77 @@ vm_page_t vm_page_lookup(object, offset)
*
* The object must be locked.
*/
-void vm_page_rename(mem, new_object, new_offset)
- register vm_page_t mem;
- register vm_object_t new_object;
- vm_offset_t new_offset;
+void
+vm_page_rename(mem, new_object, new_offset)
+ register vm_page_t mem;
+ register vm_object_t new_object;
+ vm_offset_t new_offset;
{
+ int s;
+
if (mem->object == new_object)
return;
- vm_page_lock_queues(); /* keep page from moving out from
- under pageout daemon */
- vm_page_remove(mem);
+ vm_page_lock_queues(); /* keep page from moving out from under pageout daemon */
+ s = splhigh();
+ vm_page_remove(mem);
vm_page_insert(mem, new_object, new_offset);
+ splx(s);
vm_page_unlock_queues();
}
+int
+vm_page_unqueue(vm_page_t mem)
+{
+ int s, origflags;
+
+ s = splhigh();
+ origflags = mem->flags;
+ if (mem->flags & PG_ACTIVE) {
+ TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
+ cnt.v_active_count--;
+ mem->flags &= ~PG_ACTIVE;
+ } else if (mem->flags & PG_INACTIVE) {
+ TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
+ cnt.v_inactive_count--;
+ mem->flags &= ~PG_INACTIVE;
+ } else if (mem->flags & PG_CACHE) {
+ TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
+ cnt.v_cache_count--;
+ mem->flags &= ~PG_CACHE;
+ if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
+ wakeup((caddr_t) & vm_pages_needed);
+ }
+ splx(s);
+ return origflags;
+}
+
+void
+vm_page_requeue(vm_page_t mem, int flags)
+{
+ int s;
+
+ if (mem->wire_count)
+ return;
+ s = splhigh();
+ if (flags & PG_CACHE) {
+ TAILQ_INSERT_TAIL(&vm_page_queue_cache, mem, pageq);
+ mem->flags |= PG_CACHE;
+ cnt.v_cache_count++;
+ } else if (flags & PG_ACTIVE) {
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
+ mem->flags |= PG_ACTIVE;
+ cnt.v_active_count++;
+ } else if (flags & PG_INACTIVE) {
+ TAILQ_INSERT_TAIL(&vm_page_queue_inactive, mem, pageq);
+ mem->flags |= PG_INACTIVE;
+ cnt.v_inactive_count++;
+ }
+ TAILQ_REMOVE(&mem->object->memq, mem, listq);
+ TAILQ_INSERT_TAIL(&mem->object->memq, mem, listq);
+ splx(s);
+}
+
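The two new routines are designed to pair up: vm_page_unqueue() reports which
paging queue the page was on, so a caller can pull a page off its queue,
operate on it, and then restore its placement. A hypothetical caller sketch
(the variable m is an assumed vm_page_t; this pairing is illustrative, not a
caller from this commit):

	int oldflags;

	oldflags = vm_page_unqueue(m);
	/* ... manipulate the page while it is on no paging queue ... */
	vm_page_requeue(m, oldflags & (PG_ACTIVE | PG_INACTIVE | PG_CACHE));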
/*
* vm_page_alloc:
*
@@ -526,47 +604,61 @@ void vm_page_rename(mem, new_object, new_offset)
* Object must be locked.
*/
vm_page_t
-vm_page_alloc(object, offset)
- vm_object_t object;
- vm_offset_t offset;
+vm_page_alloc(object, offset, inttime)
+ vm_object_t object;
+ vm_offset_t offset;
+ int inttime;
{
- register vm_page_t mem;
- int s;
+ register vm_page_t mem;
+ int s;
- s = splhigh();
simple_lock(&vm_page_queue_free_lock);
- if ( object != kernel_object &&
- object != kmem_object &&
- curproc != pageproc && curproc != &proc0 &&
- cnt.v_free_count < cnt.v_free_reserved) {
+ s = splhigh();
+
+ if (object != kernel_object &&
+ object != kmem_object &&
+ curproc != pageproc &&
+ curproc != &proc0 &&
+ (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) {
simple_unlock(&vm_page_queue_free_lock);
splx(s);
- /*
- * this wakeup seems unnecessary, but there is code that
- * might just check to see if there are free pages, and
- * punt if there aren't. VM_WAIT does this too, but
- * redundant wakeups aren't that bad...
- */
- if (curproc != pageproc)
- wakeup((caddr_t) &vm_pages_needed);
- return(NULL);
+ return (NULL);
}
- if (( mem = vm_page_queue_free.tqh_first) == 0) {
- simple_unlock(&vm_page_queue_free_lock);
- printf("No pages???\n");
- splx(s);
- /*
- * comment above re: wakeups applies here too...
- */
- if (curproc != pageproc)
- wakeup((caddr_t) &vm_pages_needed);
- return(NULL);
+ if (inttime) {
+ if ((mem = vm_page_queue_free.tqh_first) == 0) {
+ for (mem = vm_page_queue_cache.tqh_first; mem; mem = mem->pageq.tqe_next) {
+ if ((mem->object->flags & OBJ_ILOCKED) == 0) {
+ TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
+ vm_page_remove(mem);
+ cnt.v_cache_count--;
+ goto gotpage;
+ }
+ }
+ splx(s);
+ return NULL;
+ }
+ } else {
+ if ((cnt.v_free_count < 3) ||
+ (mem = vm_page_queue_free.tqh_first) == 0) {
+ mem = vm_page_queue_cache.tqh_first;
+ if (mem) {
+ TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
+ vm_page_remove(mem);
+ cnt.v_cache_count--;
+ goto gotpage;
+ }
+ simple_unlock(&vm_page_queue_free_lock);
+ splx(s);
+ /* wakeup((caddr_t) &vm_pages_needed); */
+ return (NULL);
+ }
}
TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
-
cnt.v_free_count--;
+
+gotpage:
simple_unlock(&vm_page_queue_free_lock);
VM_PAGE_INIT(mem, object, offset);
@@ -577,18 +669,18 @@ vm_page_alloc(object, offset)
* we would be nearly out of memory.
*/
if (curproc != pageproc &&
- (cnt.v_free_count < cnt.v_free_reserved))
- wakeup((caddr_t) &vm_pages_needed);
+ ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min))
+ wakeup((caddr_t) & vm_pages_needed);
- return(mem);
+ return (mem);
}
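With the new third argument, an interrupt-time caller cannot block on an
object lock, so it may reclaim a cache page only when the page's object is not
interlocked (OBJ_ILOCKED), and otherwise must fail. A hedged call sketch, with
the surrounding caller context assumed:

	/* interrupt context: never block; fail if nothing is safe to take */
	m = vm_page_alloc(object, offset, 1);
	if (m == NULL)
		return;		/* let top-level code retry later */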
vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
- vm_offset_t size;
- vm_offset_t low;
- vm_offset_t high;
- vm_offset_t alignment;
+ vm_offset_t size;
+ vm_offset_t low;
+ vm_offset_t high;
+ vm_offset_t alignment;
{
int i, s, start;
vm_offset_t addr, phys, tmp_addr;
@@ -619,7 +711,6 @@ again:
splx(s);
return (NULL);
}
-
start = i;
/*
@@ -627,7 +718,7 @@ again:
*/
for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
if ((VM_PAGE_TO_PHYS(&pga[i]) !=
- (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
+ (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
((pga[i].flags & PG_FREE) != PG_FREE)) {
start++;
goto again;
@@ -636,8 +727,8 @@ again:
/*
* We've found a contiguous chunk that meets are requirements.
- * Allocate kernel VM, unfree and assign the physical pages to it
- * and return kernel VM pointer.
+ * Allocate kernel VM, unfree and assign the physical pages to it and
+ * return kernel VM pointer.
*/
tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
@@ -645,11 +736,11 @@ again:
TAILQ_REMOVE(&vm_page_queue_free, &pga[i], pageq);
cnt.v_free_count--;
vm_page_wire(&pga[i]);
- pga[i].flags = PG_CLEAN; /* shut off PG_FREE and any other flags */
+ vm_page_set_clean(&pga[i], 0, PAGE_SIZE);
pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(&pga[i]));
tmp_addr += PAGE_SIZE;
}
-
+
splx(s);
return (addr);
}
@@ -662,22 +753,20 @@ again:
*
* Object and page must be locked prior to entry.
*/
-void vm_page_free(mem)
- register vm_page_t mem;
+void
+vm_page_free(mem)
+ register vm_page_t mem;
{
int s;
+
s = splhigh();
vm_page_remove(mem);
- if (mem->flags & PG_ACTIVE) {
- TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
- mem->flags &= ~PG_ACTIVE;
- cnt.v_active_count--;
- }
+ vm_page_unqueue(mem);
- if (mem->flags & PG_INACTIVE) {
- TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
- mem->flags &= ~PG_INACTIVE;
- cnt.v_inactive_count--;
+ if (mem->bmapped || mem->busy || mem->flags & PG_BUSY) {
+ printf("vm_page_free: offset(%d), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
+ mem->offset, mem->bmapped, mem->busy, (mem->flags & PG_BUSY) ? 1 : 0);
+ panic("vm_page_free: freeing busy page\n");
}
if (mem->flags & PG_FREE)
panic("vm_page_free: freeing free page");
@@ -693,7 +782,6 @@ void vm_page_free(mem)
cnt.v_wire_count--;
mem->wire_count = 0;
}
-
mem->flags |= PG_FREE;
TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
@@ -701,31 +789,25 @@ void vm_page_free(mem)
simple_unlock(&vm_page_queue_free_lock);
splx(s);
/*
- * if pageout daemon needs pages, then tell it that there
- * are some free.
+ * if pageout daemon needs pages, then tell it that there are
+ * some free.
*/
if (vm_pageout_pages_needed)
- wakeup((caddr_t)&vm_pageout_pages_needed);
+ wakeup((caddr_t) & vm_pageout_pages_needed);
/*
- * wakeup processes that are waiting on memory if we
- * hit a high water mark.
+ * wakeup processes that are waiting on memory if we hit a
+ * high water mark. And wakeup scheduler process if we have
+ * lots of memory. this process will swapin processes.
*/
- if (cnt.v_free_count == cnt.v_free_min) {
- wakeup((caddr_t)&cnt.v_free_count);
- }
-
- /*
- * wakeup scheduler process if we have lots of memory.
- * this process will swapin processes.
- */
- if (cnt.v_free_count == cnt.v_free_target) {
- wakeup((caddr_t)&proc0);
+ if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
+ wakeup((caddr_t) & cnt.v_free_count);
+ wakeup((caddr_t) & proc0);
}
} else {
splx(s);
}
- if( mem->flags & PG_WANTED)
+ if (mem->flags & PG_WANTED)
wakeup((caddr_t) mem);
cnt.v_tfree++;
}
@@ -740,25 +822,16 @@ void vm_page_free(mem)
*
* The page queues must be locked.
*/
-void vm_page_wire(mem)
- register vm_page_t mem;
+void
+vm_page_wire(mem)
+ register vm_page_t mem;
{
int s;
+
VM_PAGE_CHECK(mem);
if (mem->wire_count == 0) {
- s = splhigh();
- if (mem->flags & PG_ACTIVE) {
- TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
- cnt.v_active_count--;
- mem->flags &= ~PG_ACTIVE;
- }
- if (mem->flags & PG_INACTIVE) {
- TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
- cnt.v_inactive_count--;
- mem->flags &= ~PG_INACTIVE;
- }
- splx(s);
+ vm_page_unqueue(mem);
cnt.v_wire_count++;
}
mem->wire_count++;
@@ -772,15 +845,17 @@ void vm_page_wire(mem)
*
* The page queues must be locked.
*/
-void vm_page_unwire(mem)
- register vm_page_t mem;
+void
+vm_page_unwire(mem)
+ register vm_page_t mem;
{
int s;
+
VM_PAGE_CHECK(mem);
s = splhigh();
- if( mem->wire_count)
+ if (mem->wire_count)
mem->wire_count--;
if (mem->wire_count == 0) {
TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
@@ -802,47 +877,67 @@ void vm_page_unwire(mem)
*/
void
vm_page_deactivate(m)
- register vm_page_t m;
+ register vm_page_t m;
{
int spl;
+
VM_PAGE_CHECK(m);
/*
- * Only move active pages -- ignore locked or already
- * inactive ones.
- *
- * XXX: sometimes we get pages which aren't wired down
- * or on any queue - we need to put them on the inactive
- * queue also, otherwise we lose track of them.
- * Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
+ * Only move active pages -- ignore locked or already inactive ones.
+ *
+ * XXX: sometimes we get pages which aren't wired down or on any queue -
+ * we need to put them on the inactive queue also, otherwise we lose
+ * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
*/
spl = splhigh();
if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
- m->hold_count == 0) {
-
+ m->hold_count == 0) {
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
- if (m->flags & PG_ACTIVE) {
- TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
- m->flags &= ~PG_ACTIVE;
- cnt.v_active_count--;
- }
+ vm_page_unqueue(m);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
m->flags |= PG_INACTIVE;
cnt.v_inactive_count++;
-#define NOT_DEACTIVATE_PROTECTS
-#ifndef NOT_DEACTIVATE_PROTECTS
- pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
-#else
- if ((m->flags & PG_CLEAN) &&
- pmap_is_modified(VM_PAGE_TO_PHYS(m)))
- m->flags &= ~PG_CLEAN;
-#endif
- if ((m->flags & PG_CLEAN) == 0)
- m->flags |= PG_LAUNDRY;
- }
+ m->act_count = 0;
+ }
splx(spl);
}
+
+/*
+ * vm_page_cache
+ *
+ * Put the specified page onto the page cache queue (if appropriate).
+ */
+
+void
+vm_page_cache(m)
+ register vm_page_t m;
+{
+ int s;
+
+ VM_PAGE_CHECK(m);
+ if ((m->flags & (PG_CACHE | PG_BUSY)) || m->busy || m->wire_count ||
+ m->bmapped)
+ return;
+
+ s = splhigh();
+ vm_page_unqueue(m);
+ pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
+
+ TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
+ m->flags |= PG_CACHE;
+ cnt.v_cache_count++;
+ if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
+ wakeup((caddr_t) & cnt.v_free_count);
+ wakeup((caddr_t) & proc0);
+ }
+ if (vm_pageout_pages_needed)
+ wakeup((caddr_t) & vm_pageout_pages_needed);
+
+ splx(s);
+}
+
/*
* vm_page_activate:
*
@@ -851,27 +946,29 @@ vm_page_deactivate(m)
* The page queues must be locked.
*/
-void vm_page_activate(m)
- register vm_page_t m;
+void
+vm_page_activate(m)
+ register vm_page_t m;
{
int s;
+
VM_PAGE_CHECK(m);
s = splhigh();
- if (m->flags & PG_INACTIVE) {
- TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
- cnt.v_inactive_count--;
- m->flags &= ~PG_INACTIVE;
- }
- if (m->wire_count == 0) {
- if (m->flags & PG_ACTIVE)
- panic("vm_page_activate: already active");
+ if (m->flags & PG_ACTIVE)
+ panic("vm_page_activate: already active");
+
+ vm_page_unqueue(m);
+ if (m->wire_count == 0) {
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->flags |= PG_ACTIVE;
TAILQ_REMOVE(&m->object->memq, m, listq);
TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
- m->act_count = 1;
+ if (m->act_count < 5)
+ m->act_count = 5;
+ else
+ m->act_count += 1;
cnt.v_active_count++;
}
splx(s);
@@ -887,12 +984,13 @@ void vm_page_activate(m)
boolean_t
vm_page_zero_fill(m)
- vm_page_t m;
+ vm_page_t m;
{
VM_PAGE_CHECK(m);
pmap_zero_page(VM_PAGE_TO_PHYS(m));
- return(TRUE);
+ m->valid = VM_PAGE_BITS_ALL;
+ return (TRUE);
}
/*
@@ -902,11 +1000,153 @@ vm_page_zero_fill(m)
*/
void
vm_page_copy(src_m, dest_m)
- vm_page_t src_m;
- vm_page_t dest_m;
+ vm_page_t src_m;
+ vm_page_t dest_m;
{
VM_PAGE_CHECK(src_m);
VM_PAGE_CHECK(dest_m);
pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
+ dest_m->valid = VM_PAGE_BITS_ALL;
+}
+
+
+/*
+ * mapping function for valid bits or for dirty bits in
+ * a page
+ */
+inline int
+vm_page_bits(int base, int size)
+{
+ u_short chunk;
+
+ size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
+ base = (base % PAGE_SIZE) / DEV_BSIZE;
+ chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
+ return (chunk << base) & VM_PAGE_BITS_ALL;
+}
+
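A worked example of the mapping, assuming DEV_BSIZE == 512 and
PAGE_SIZE == 4096 (typical values for the era, not fixed by this diff):

/*
 * vm_page_bits(1024, 1536):
 *	size  -> (1536 + 511) & ~511 == 1536	(3 chunks)
 *	base  -> (1024 % 4096) / 512 == 2	(start at chunk 2)
 *	chunk -> vm_page_dev_bsize_chunks[3] == 0x7
 *	result: (0x7 << 2) & VM_PAGE_BITS_ALL == 0x1c	(chunks 2-4)
 */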
+/*
+ * set a page (partially) valid
+ */
+void
+vm_page_set_valid(m, base, size)
+ vm_page_t m;
+ int base;
+ int size;
+{
+ m->valid |= vm_page_bits(base, size);
+}
+
+/*
+ * set a page (partially) invalid
+ */
+void
+vm_page_set_invalid(m, base, size)
+ vm_page_t m;
+ int base;
+ int size;
+{
+ int bits;
+
+ m->valid &= ~(bits = vm_page_bits(base, size));
+ if (m->valid == 0)
+ m->dirty &= ~bits;
+}
+
+/*
+ * is (partial) page valid?
+ */
+int
+vm_page_is_valid(m, base, size)
+ vm_page_t m;
+ int base;
+ int size;
+{
+ int bits;
+
+ if (m->valid && ((m->valid & (bits = vm_page_bits(base, size))) == bits))
+ return 1;
+ else
+ return 0;
+}
+
+
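Under the same assumed DEV_BSIZE == 512, the valid-bit helpers compose as
follows (sketch only; m is an assumed vm_page_t with valid initially 0):

	vm_page_set_valid(m, 0, 1024);		/* chunks 0-1: valid == 0x003 */
	(void) vm_page_is_valid(m, 0, 512);	/* 1: needs 0x001, present */
	(void) vm_page_is_valid(m, 0, 2048);	/* 0: needs 0x00f, only 0x003 */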
+/*
+ * set a page (partially) dirty
+ */
+void
+vm_page_set_dirty(m, base, size)
+ vm_page_t m;
+ int base;
+ int size;
+{
+ if ((base != 0) || (size != PAGE_SIZE)) {
+ if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
+ m->dirty = VM_PAGE_BITS_ALL;
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ return;
+ }
+ m->dirty |= vm_page_bits(base, size);
+ } else {
+ m->dirty = VM_PAGE_BITS_ALL;
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ }
+}
+
+void
+vm_page_test_dirty(m)
+ vm_page_t m;
+{
+ if ((!m->dirty || (m->dirty != vm_page_bits(0, PAGE_SIZE))) &&
+ pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ m->dirty = VM_PAGE_BITS_ALL;
+ }
+}
+
+/*
+ * set a page (partially) clean
+ */
+void
+vm_page_set_clean(m, base, size)
+ vm_page_t m;
+ int base;
+ int size;
+{
+ m->dirty &= ~vm_page_bits(base, size);
+}
+
+/*
+ * is (partial) page clean
+ */
+int
+vm_page_is_clean(m, base, size)
+ vm_page_t m;
+ int base;
+ int size;
+{
+ if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
+ m->dirty = VM_PAGE_BITS_ALL;
+ pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ }
+ if ((m->dirty & m->valid & vm_page_bits(base, size)) == 0)
+ return 1;
+ else
+ return 0;
+}
+
+void
+print_page_info()
+{
+ printf("cnt.v_free_count: %d\n", cnt.v_free_count);
+ printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
+ printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
+ printf("cnt.v_active_count: %d\n", cnt.v_active_count);
+ printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
+ printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
+ printf("cnt.v_free_min: %d\n", cnt.v_free_min);
+ printf("cnt.v_free_target: %d\n", cnt.v_free_target);
+ printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
+ printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}