Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r--  sys/vm/vm_page.c |  391
1 file changed, 287 insertions(+), 104 deletions(-)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 0cd9d87..38d320f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1991, 1993
- * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
@@ -33,9 +33,11 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
- *
- *
+ * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
+ * $Id: vm_page.c,v 1.17 1994/04/20 07:07:14 davidg Exp $
+ */
+
+/*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
*
@@ -68,6 +70,7 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -123,7 +126,6 @@ void vm_set_page_size()
break;
}
-
/*
* vm_page_startup:
*
@@ -133,17 +135,55 @@ void vm_set_page_size()
* for the object/offset-to-page hash table headers.
* Each page cell is initialized and placed on the free list.
*/
-void vm_page_startup(start, end)
- vm_offset_t *start;
- vm_offset_t *end;
+
+vm_offset_t
+vm_page_startup(starta, enda, vaddr)
+ register vm_offset_t starta;
+ vm_offset_t enda;
+ register vm_offset_t vaddr;
{
+ register vm_offset_t mapped;
register vm_page_t m;
- register struct pglist *bucket;
- vm_size_t npages;
+ register struct pglist *bucket;
+ vm_size_t npages, page_range;
+ register vm_offset_t new_start;
int i;
vm_offset_t pa;
+ int nblocks;
+ vm_offset_t first_managed_page;
+ int size;
+
extern vm_offset_t kentry_data;
extern vm_size_t kentry_data_size;
+ extern vm_offset_t phys_avail[];
+/* the biggest memory array is the second group of pages */
+ vm_offset_t start;
+ vm_offset_t biggestone, biggestsize;
+
+ vm_offset_t total;
+
+ total = 0;
+ biggestsize = 0;
+ biggestone = 0;
+ nblocks = 0;
+ vaddr = round_page(vaddr);
+
+ for (i = 0; phys_avail[i + 1]; i += 2) {
+ phys_avail[i] = round_page(phys_avail[i]);
+ phys_avail[i+1] = trunc_page(phys_avail[i+1]);
+ }
+
+ for (i = 0; phys_avail[i + 1]; i += 2) {
+ int size = phys_avail[i+1] - phys_avail[i];
+ if (size > biggestsize) {
+ biggestone = i;
+ biggestsize = size;
+ }
+ ++nblocks;
+ total += size;
+ }
+
+ start = phys_avail[biggestone];
/*
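
The rewritten startup code above no longer takes a single [start, end) range; it walks a phys_avail[] table of {start, end} address pairs (terminated by a zero entry), page-aligns each range, totals the available memory, and remembers the largest block for the boot-time allocations that follow. Below is a minimal user-space sketch of that scan, with a made-up two-range memory map; the types and values here are illustrative only, not the kernel's.

/*
 * Standalone sketch of the phys_avail[] scan above -- not part of
 * the diff.  phys_avail[] holds {start, end} pairs terminated by a
 * zero entry; the loop totals memory and remembers the largest block.
 */
#include <stdio.h>

typedef unsigned long vm_offset_t;

int
main(void)
{
	/* hypothetical memory map: two usable ranges */
	vm_offset_t phys_avail[] = {
		0x1000, 0x9f000, 0x100000, 0xa00000, 0, 0
	};
	vm_offset_t total = 0, biggestsize = 0;
	int i, biggestone = 0, nblocks = 0;

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_offset_t size = phys_avail[i + 1] - phys_avail[i];
		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}
	printf("%d blocks, %lu bytes total, biggest at index %d\n",
	    nblocks, total, biggestone);
	return (0);
}

In the real routine the ranges are first aligned with round_page()/trunc_page(), and the largest block then supplies the memory for the hash buckets, kernel map entries, and the vm_page array.
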
@@ -163,7 +203,7 @@ void vm_page_startup(start, end)
TAILQ_INIT(&vm_page_queue_inactive);
/*
- * Calculate the number of hash table buckets.
+ * Allocate (and initialize) the hash table buckets.
*
* The number of buckets MUST BE a power of 2, and
* the actual value is the next power of 2 greater
@@ -172,23 +212,31 @@ void vm_page_startup(start, end)
* Note:
* This computation can be tweaked if desired.
*/
-
+ vm_page_buckets = (struct pglist *)vaddr;
+ bucket = vm_page_buckets;
if (vm_page_bucket_count == 0) {
vm_page_bucket_count = 1;
- while (vm_page_bucket_count < atop(*end - *start))
+ while (vm_page_bucket_count < atop(total))
vm_page_bucket_count <<= 1;
}
+
vm_page_hash_mask = vm_page_bucket_count - 1;
/*
- * Allocate (and initialize) the hash table buckets.
+ * Validate these addresses.
*/
- vm_page_buckets = (struct pglist *)
- pmap_bootstrap_alloc(vm_page_bucket_count * sizeof(struct pglist));
- bucket = vm_page_buckets;
- for (i = vm_page_bucket_count; i--;) {
+ new_start = start + vm_page_bucket_count * sizeof(struct pglist);
+ new_start = round_page(new_start);
+ mapped = vaddr;
+ vaddr = pmap_map(mapped, start, new_start,
+ VM_PROT_READ|VM_PROT_WRITE);
+ start = new_start;
+ bzero((caddr_t) mapped, vaddr - mapped);
+ mapped = vaddr;
+
+ for (i = 0; i< vm_page_bucket_count; i++) {
TAILQ_INIT(bucket);
bucket++;
}
@@ -196,11 +244,9 @@ void vm_page_startup(start, end)
simple_lock_init(&bucket_lock);
/*
- * Truncate the remainder of physical memory to our page size.
+ * round (or truncate) the addresses to our page size.
*/
- *end = trunc_page(*end);
-
/*
* Pre-allocate maps and map entries that cannot be dynamically
* allocated via malloc(). The maps include the kernel_map and
@@ -213,9 +259,20 @@ void vm_page_startup(start, end)
* map (they should use their own maps).
*/
- kentry_data_size = round_page(MAX_KMAP*sizeof(struct vm_map) +
- MAX_KMAPENT*sizeof(struct vm_map_entry));
- kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);
+ kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
+ MAX_KMAPENT * sizeof(struct vm_map_entry);
+ kentry_data_size = round_page(kentry_data_size);
+ kentry_data = (vm_offset_t) vaddr;
+ vaddr += kentry_data_size;
+
+ /*
+ * Validate these zone addresses.
+ */
+
+ new_start = start + (vaddr - mapped);
+ pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
+ bzero((caddr_t) mapped, (vaddr - mapped));
+ start = round_page(new_start);
/*
* Compute the number of pages of memory that will be
@@ -223,53 +280,53 @@ void vm_page_startup(start, end)
* of a page structure per page).
*/
- cnt.v_free_count = npages = (*end - *start + sizeof(struct vm_page))
- / (PAGE_SIZE + sizeof(struct vm_page));
+ npages = (total - (start - phys_avail[biggestone])) / (PAGE_SIZE + sizeof(struct vm_page));
+ first_page = phys_avail[0] / PAGE_SIZE;
+ page_range = (phys_avail[(nblocks-1)*2 + 1] - phys_avail[0]) / PAGE_SIZE;
/*
- * Record the extent of physical memory that the
- * virtual memory system manages.
+ * Initialize the mem entry structures now, and
+ * put them in the free queue.
*/
- first_page = *start;
- first_page += npages*sizeof(struct vm_page);
- first_page = atop(round_page(first_page));
- last_page = first_page + npages - 1;
-
- first_phys_addr = ptoa(first_page);
- last_phys_addr = ptoa(last_page) + PAGE_MASK;
+ vm_page_array = (vm_page_t) vaddr;
+ mapped = vaddr;
/*
- * Allocate and clear the mem entry structures.
+ * Validate these addresses.
*/
- m = vm_page_array = (vm_page_t)
- pmap_bootstrap_alloc(npages * sizeof(struct vm_page));
+ new_start = round_page(start + page_range * sizeof (struct vm_page));
+ mapped = pmap_map(mapped, start, new_start,
+ VM_PROT_READ|VM_PROT_WRITE);
+ start = new_start;
+
+ first_managed_page = start / PAGE_SIZE;
/*
- * Initialize the mem entry structures now, and
- * put them in the free queue.
+ * Clear all of the page structures
*/
+ bzero((caddr_t)vm_page_array, page_range * sizeof(struct vm_page));
- pa = first_phys_addr;
- while (npages--) {
- m->flags = 0;
- m->object = NULL;
- m->phys_addr = pa;
-#ifdef i386
- if (pmap_isvalidphys(m->phys_addr)) {
+ cnt.v_page_count = 0;
+ cnt.v_free_count= 0;
+ for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
+ if (i == biggestone)
+ pa = ptoa(first_managed_page);
+ else
+ pa = phys_avail[i];
+ while (pa < phys_avail[i + 1] && npages-- > 0) {
+ ++cnt.v_page_count;
+ ++cnt.v_free_count;
+ m = PHYS_TO_VM_PAGE(pa);
+ m->flags = 0;
+ m->object = 0;
+ m->phys_addr = pa;
+ m->hold_count = 0;
TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
- } else {
- /* perhaps iomem needs it's own type, or dev pager? */
- m->flags |= PG_FICTITIOUS | PG_BUSY;
- cnt.v_free_count--;
+ pa += PAGE_SIZE;
}
-#else /* i386 */
- TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
-#endif /* i386 */
- m++;
- pa += PAGE_SIZE;
}
/*
@@ -278,8 +335,7 @@ void vm_page_startup(start, end)
*/
simple_lock_init(&vm_pages_needed_lock);
- /* from now on, pmap_bootstrap_alloc can't be used */
- vm_page_startup_initialized = TRUE;
+ return(mapped);
}
/*
@@ -289,8 +345,13 @@ void vm_page_startup(start, end)
*
* NOTE: This macro depends on vm_page_bucket_count being a power of 2.
*/
-#define vm_page_hash(object, offset) \
- (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
+inline const int
+vm_page_hash(object, offset)
+ vm_object_t object;
+ vm_offset_t offset;
+{
+ return ((unsigned)object + offset/NBPG) & vm_page_hash_mask;
+}
/*
* vm_page_insert: [ internal use only ]
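
The hunk above turns the vm_page_hash() macro into an inline function. Its masking with vm_page_hash_mask behaves like a modulo only because vm_page_bucket_count is forced to a power of two during startup, as the note says. Here is a standalone sketch of the same trick, assuming 4096-byte pages and an arbitrary bucket count; none of these constants are the kernel's.

/*
 * Power-of-two hash trick: because NBUCKETS is a power of 2,
 * "& (NBUCKETS - 1)" is equivalent to "% NBUCKETS" but cheaper.
 */
#include <assert.h>

#define NBPG		4096		/* assumed page size */
#define NBUCKETS	256		/* must be a power of 2 */
#define HASH_MASK	(NBUCKETS - 1)

static inline unsigned
page_hash(unsigned long object, unsigned long offset)
{
	return ((unsigned)(object + offset / NBPG)) & HASH_MASK;
}

int
main(void)
{
	/* same result as the slower modulo form */
	assert(page_hash(0x1234, 0x8000) ==
	    (unsigned)(0x1234 + 0x8000 / NBPG) % NBUCKETS);
	return (0);
}
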
@@ -307,7 +368,7 @@ void vm_page_insert(mem, object, offset)
register vm_offset_t offset;
{
register struct pglist *bucket;
- int spl;
+ int s;
VM_PAGE_CHECK(mem);
@@ -326,11 +387,11 @@ void vm_page_insert(mem, object, offset)
*/
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
- spl = splimp();
+ s = splimp();
simple_lock(&bucket_lock);
TAILQ_INSERT_TAIL(bucket, mem, hashq);
simple_unlock(&bucket_lock);
- (void) splx(spl);
+ (void) splx(s);
/*
* Now link into the object's list of backed pages.
@@ -361,7 +422,7 @@ void vm_page_remove(mem)
register vm_page_t mem;
{
register struct pglist *bucket;
- int spl;
+ int s;
VM_PAGE_CHECK(mem);
@@ -373,11 +434,11 @@ void vm_page_remove(mem)
*/
bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
- spl = splimp();
+ s = splimp();
simple_lock(&bucket_lock);
TAILQ_REMOVE(bucket, mem, hashq);
simple_unlock(&bucket_lock);
- (void) splx(spl);
+ (void) splx(s);
/*
* Now remove from the object's list of backed pages.
@@ -410,7 +471,7 @@ vm_page_t vm_page_lookup(object, offset)
{
register vm_page_t mem;
register struct pglist *bucket;
- int spl;
+ int s;
/*
* Search the hash table for this object/offset pair
@@ -418,19 +479,19 @@ vm_page_t vm_page_lookup(object, offset)
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
- spl = splimp();
+ s = splimp();
simple_lock(&bucket_lock);
for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
VM_PAGE_CHECK(mem);
if ((mem->object == object) && (mem->offset == offset)) {
simple_unlock(&bucket_lock);
- splx(spl);
+ splx(s);
return(mem);
}
}
simple_unlock(&bucket_lock);
- splx(spl);
+ splx(s);
return(NULL);
}
@@ -465,46 +526,62 @@ void vm_page_rename(mem, new_object, new_offset)
*
* Object must be locked.
*/
-vm_page_t vm_page_alloc(object, offset)
+vm_page_t
+vm_page_alloc(object, offset)
vm_object_t object;
vm_offset_t offset;
{
register vm_page_t mem;
- int spl;
+ int s;
- spl = splimp(); /* XXX */
+ s = splimp();
simple_lock(&vm_page_queue_free_lock);
- if (vm_page_queue_free.tqh_first == NULL) {
+ if ( object != kernel_object &&
+ object != kmem_object &&
+ curproc != pageproc && curproc != &proc0 &&
+ cnt.v_free_count < cnt.v_free_reserved) {
+
simple_unlock(&vm_page_queue_free_lock);
- splx(spl);
+ splx(s);
+ /*
+ * this wakeup seems unnecessary, but there is code that
+ * might just check to see if there are free pages, and
+ * punt if there aren't. VM_WAIT does this too, but
+ * redundant wakeups aren't that bad...
+ */
+ if (curproc != pageproc)
+ wakeup((caddr_t) &vm_pages_needed);
+ return(NULL);
+ }
+ if (( mem = vm_page_queue_free.tqh_first) == 0) {
+ simple_unlock(&vm_page_queue_free_lock);
+ printf("No pages???\n");
+ splx(s);
+ /*
+ * comment above re: wakeups applies here too...
+ */
+ if (curproc != pageproc)
+ wakeup((caddr_t) &vm_pages_needed);
return(NULL);
}
- mem = vm_page_queue_free.tqh_first;
TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
cnt.v_free_count--;
simple_unlock(&vm_page_queue_free_lock);
- splx(spl);
VM_PAGE_INIT(mem, object, offset);
+ splx(s);
- /*
- * Decide if we should poke the pageout daemon.
- * We do this if the free count is less than the low
- * water mark, or if the free count is less than the high
- * water mark (but above the low water mark) and the inactive
- * count is less than its target.
- *
- * We don't have the counts locked ... if they change a little,
- * it doesn't really matter.
- */
+/*
+ * don't wakeup too often, so we wakeup the pageout daemon when
+ * we would be nearly out of memory.
+ */
+ if (curproc != pageproc &&
+ (cnt.v_free_count < cnt.v_free_reserved))
+ wakeup((caddr_t) &vm_pages_needed);
- if (cnt.v_free_count < cnt.v_free_min ||
- (cnt.v_free_count < cnt.v_free_target &&
- cnt.v_inactive_count < cnt.v_inactive_target))
- thread_wakeup((int)&vm_pages_needed);
- return (mem);
+ return(mem);
}
/*
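
vm_page_alloc() above now enforces a free-page reserve: ordinary processes are refused once cnt.v_free_count drops below cnt.v_free_reserved, so that the kernel objects, proc0, and the pageout daemon always have pages left to make progress. A minimal sketch of that gating logic follows, with illustrative names standing in for the kernel's globals.

/*
 * Sketch of the low-watermark gate added to vm_page_alloc() above;
 * free_count, free_reserved, and is_privileged are illustrative.
 */
#include <stdio.h>

static int free_count = 12;
static const int free_reserved = 16;

static int
alloc_page(int is_privileged)
{
	/*
	 * Ordinary callers are refused once the reserve is reached,
	 * so the pageout daemon can still allocate and free memory.
	 */
	if (!is_privileged && free_count < free_reserved)
		return (-1);	/* caller should sleep and retry */
	--free_count;
	return (0);
}

int
main(void)
{
	printf("user alloc: %d\n", alloc_page(0));	/* refused: -1 */
	printf("pageout alloc: %d\n", alloc_page(1));	/* succeeds: 0 */
	return (0);
}
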
@@ -518,6 +595,8 @@ vm_page_t vm_page_alloc(object, offset)
void vm_page_free(mem)
register vm_page_t mem;
{
+ int s;
+ s = splimp();
vm_page_remove(mem);
if (mem->flags & PG_ACTIVE) {
TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
@@ -532,18 +611,46 @@ void vm_page_free(mem)
}
if (!(mem->flags & PG_FICTITIOUS)) {
- int spl;
- spl = splimp();
simple_lock(&vm_page_queue_free_lock);
+ if (mem->wire_count) {
+ cnt.v_wire_count--;
+ mem->wire_count = 0;
+ }
TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
cnt.v_free_count++;
simple_unlock(&vm_page_queue_free_lock);
- splx(spl);
+ splx(s);
+ /*
+ * if pageout daemon needs pages, then tell it that there
+ * are some free.
+ */
+ if (vm_pageout_pages_needed)
+ wakeup((caddr_t)&vm_pageout_pages_needed);
+
+ /*
+ * wakeup processes that are waiting on memory if we
+ * hit a high water mark.
+ */
+ if (cnt.v_free_count == cnt.v_free_min) {
+ wakeup((caddr_t)&cnt.v_free_count);
+ }
+
+ /*
+ * wakeup scheduler process if we have lots of memory.
+ * this process will swapin processes.
+ */
+ if (cnt.v_free_count == cnt.v_free_target) {
+ wakeup((caddr_t)&proc0);
+ }
+ } else {
+ splx(s);
}
+ wakeup((caddr_t) mem);
}
+
/*
* vm_page_wire:
*
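
The vm_page_free() changes above add edge-triggered wakeups: sleepers are notified only when the free count crosses a watermark exactly (==, not >=), so a burst of frees does not flood waiters with redundant wakeups. A toy sketch of the idea, with hypothetical watermark values:

/*
 * Edge-triggered watermark wakeups, modeled on vm_page_free() above.
 * The printf calls stand in for wakeup(); values are made up.
 */
#include <stdio.h>

static int free_count;
static const int free_min = 4;		/* waiters sleep below this */
static const int free_target = 8;	/* scheduler swaps in above this */

static void
free_page(void)
{
	++free_count;
	if (free_count == free_min)
		printf("wakeup: processes waiting on memory\n");
	if (free_count == free_target)
		printf("wakeup: scheduler (swap processes back in)\n");
}

int
main(void)
{
	for (int i = 0; i < 10; i++)
		free_page();	/* each watermark fires exactly once */
	return (0);
}
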
@@ -556,9 +663,11 @@ void vm_page_free(mem)
void vm_page_wire(mem)
register vm_page_t mem;
{
+ int s;
VM_PAGE_CHECK(mem);
if (mem->wire_count == 0) {
+ s = splimp();
if (mem->flags & PG_ACTIVE) {
TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
cnt.v_active_count--;
@@ -569,6 +678,7 @@ void vm_page_wire(mem)
cnt.v_inactive_count--;
mem->flags &= ~PG_INACTIVE;
}
+ splx(s);
cnt.v_wire_count++;
}
mem->wire_count++;
@@ -585,17 +695,77 @@ void vm_page_wire(mem)
void vm_page_unwire(mem)
register vm_page_t mem;
{
+ int s;
VM_PAGE_CHECK(mem);
- mem->wire_count--;
+ s = splimp();
+
+ if( mem->wire_count)
+ mem->wire_count--;
if (mem->wire_count == 0) {
TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
cnt.v_active_count++;
mem->flags |= PG_ACTIVE;
cnt.v_wire_count--;
}
+ splx(s);
}
+#if 0
+/*
+ * vm_page_deactivate:
+ *
+ * Returns the given page to the inactive list,
+ * indicating that no physical maps have access
+ * to this page. [Used by the physical mapping system.]
+ *
+ * The page queues must be locked.
+ */
+void
+vm_page_deactivate(m)
+ register vm_page_t m;
+{
+ int spl;
+ VM_PAGE_CHECK(m);
+
+ /*
+ * Only move active pages -- ignore locked or already
+ * inactive ones.
+ *
+ * XXX: sometimes we get pages which aren't wired down
+ * or on any queue - we need to put them on the inactive
+ * queue also, otherwise we lose track of them.
+ * Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
+ */
+
+ spl = splimp();
+ if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
+ m->hold_count == 0) {
+
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+ if (m->flags & PG_ACTIVE) {
+ TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
+ m->flags &= ~PG_ACTIVE;
+ cnt.v_active_count--;
+ }
+ TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
+ m->flags |= PG_INACTIVE;
+ cnt.v_inactive_count++;
+#define NOT_DEACTIVATE_PROTECTS
+#ifndef NOT_DEACTIVATE_PROTECTS
+ pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
+#else
+ if ((m->flags & PG_CLEAN) &&
+ pmap_is_modified(VM_PAGE_TO_PHYS(m)))
+ m->flags &= ~PG_CLEAN;
+#endif
+ if ((m->flags & PG_CLEAN) == 0)
+ m->flags |= PG_LAUNDRY;
+ }
+ splx(spl);
+}
+#endif
+#if 1
/*
* vm_page_deactivate:
*
@@ -608,14 +778,16 @@ void vm_page_unwire(mem)
void vm_page_deactivate(m)
register vm_page_t m;
{
+ int s;
VM_PAGE_CHECK(m);
+ s = splimp();
/*
* Only move active pages -- ignore locked or already
* inactive ones.
*/
- if (m->flags & PG_ACTIVE) {
+ if ((m->flags & PG_ACTIVE) && (m->hold_count == 0)) {
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
@@ -623,15 +795,21 @@ void vm_page_deactivate(m)
m->flags |= PG_INACTIVE;
cnt.v_active_count--;
cnt.v_inactive_count++;
+#define NOT_DEACTIVATE_PROTECTS
+#ifndef NOT_DEACTIVATE_PROTECTS
+ pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
+#else
if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
m->flags &= ~PG_CLEAN;
+#endif
if (m->flags & PG_CLEAN)
m->flags &= ~PG_LAUNDRY;
else
m->flags |= PG_LAUNDRY;
}
+ splx(s);
}
-
+#endif
/*
* vm_page_activate:
*
@@ -643,8 +821,10 @@ void vm_page_deactivate(m)
void vm_page_activate(m)
register vm_page_t m;
{
+ int s;
VM_PAGE_CHECK(m);
+ s = splimp();
if (m->flags & PG_INACTIVE) {
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
cnt.v_inactive_count--;
@@ -656,8 +836,12 @@ void vm_page_activate(m)
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->flags |= PG_ACTIVE;
+ TAILQ_REMOVE(&m->object->memq, m, listq);
+ TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
+ m->act_count = 10;
cnt.v_active_count++;
}
+ splx(s);
}
/*
@@ -668,12 +852,12 @@ void vm_page_activate(m)
* be used by the zero-fill object.
*/
-boolean_t vm_page_zero_fill(m)
+boolean_t
+vm_page_zero_fill(m)
vm_page_t m;
{
VM_PAGE_CHECK(m);
- m->flags &= ~PG_CLEAN;
pmap_zero_page(VM_PAGE_TO_PHYS(m));
return(TRUE);
}
@@ -683,14 +867,13 @@ boolean_t vm_page_zero_fill(m)
*
* Copy one page to another
*/
-
-void vm_page_copy(src_m, dest_m)
+void
+vm_page_copy(src_m, dest_m)
vm_page_t src_m;
vm_page_t dest_m;
{
VM_PAGE_CHECK(src_m);
VM_PAGE_CHECK(dest_m);
- dest_m->flags &= ~PG_CLEAN;
pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}