author     dg <dg@FreeBSD.org>  1994-09-27 18:00:29 +0000
committer  dg <dg@FreeBSD.org>  1994-09-27 18:00:29 +0000
commit     cefcba07fb8a5d3f9d988c797f6772ad539471f4 (patch)
tree       e2b689f039a82a1cb8e4bc276362e5d02e0def4b /sys
parent     54a697ba7cc3a705c4d27d169175ce7ac64dfc24 (diff)
1) New "vm_page_alloc_contig" routine by me.
2) Created a new vm_page flag "PG_FREE" to help track free pages.
3) Use PG_FREE flag to detect inconsistencies in a few places.
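
For context, a minimal sketch of how a driver might call the new routine. This is a hypothetical caller, not part of this commit: the buffer size, bounds, and alignment below are illustrative, chosen to resemble an ISA DMA constraint (contiguous, below 16MB, 64KB-aligned).

    vm_offset_t buf;

    buf = vm_page_alloc_contig((vm_offset_t)(64 * 1024),         /* size, in bytes        */
                               (vm_offset_t)0,                   /* low physical bound    */
                               (vm_offset_t)(16 * 1024 * 1024),  /* high physical bound   */
                               (vm_offset_t)(64 * 1024));        /* alignment, power of 2 */
    if (buf == 0)                                                /* routine returns NULL on failure */
            printf("contiguous allocation failed\n");

The returned value is a kernel virtual address backed by wired, physically contiguous pages; alignment must be a power of two or the routine panics.
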
Diffstat (limited to 'sys')
-rw-r--r--  sys/vm/vm_extern.h   3
-rw-r--r--  sys/vm/vm_page.c    81
-rw-r--r--  sys/vm/vm_page.h     4
3 files changed, 83 insertions, 5 deletions
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index d9e807d..eb16dae 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
- * $Id: vm_extern.h,v 1.4 1994/08/21 07:19:44 paul Exp $
+ * $Id: vm_extern.h,v 1.5 1994/09/02 15:06:50 davidg Exp $
*/
#ifndef _VM_VM_EXTERN_H_
@@ -123,6 +123,7 @@ void vm_init_limits __P((struct proc *));
void vm_mem_init __P((void));
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t,
vm_prot_t, vm_prot_t, int, caddr_t, vm_offset_t));
+vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t));
int vm_protect __P((vm_map_t,
vm_offset_t, vm_size_t, boolean_t, vm_prot_t));
void vm_set_page_size __P((void));
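
For readers unfamiliar with the __P() wrapper used in the new prototype: it is the usual BSD <sys/cdefs.h> device for keeping headers usable with pre-ANSI compilers, roughly:

    #if defined(__STDC__) || defined(__cplusplus)
    #define __P(protos)     protos          /* ANSI compiler: keep the full prototype  */
    #else
    #define __P(protos)     ()              /* pre-ANSI (K&R) compiler: drop arguments */
    #endif

so on a K&R compiler the declaration above collapses to "vm_offset_t vm_page_alloc_contig();".
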
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index bce746b..5be3d61 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.4 1994/08/07 13:10:41 davidg Exp $
+ * $Id: vm_page.c,v 1.5 1994/08/10 03:09:37 davidg Exp $
*/
/*
@@ -320,7 +320,7 @@ vm_page_startup(starta, enda, vaddr)
++cnt.v_page_count;
++cnt.v_free_count;
m = PHYS_TO_VM_PAGE(pa);
- m->flags = 0;
+ m->flags = PG_CLEAN | PG_FREE;
m->object = 0;
m->phys_addr = pa;
m->hold_count = 0;
@@ -584,6 +584,75 @@ vm_page_alloc(object, offset)
return(mem);
}
+vm_offset_t
+vm_page_alloc_contig(size, low, high, alignment)
+ vm_offset_t size;
+ vm_offset_t low;
+ vm_offset_t high;
+ vm_offset_t alignment;
+{
+ int i, s, start = 0;
+ vm_offset_t addr, phys;
+ vm_page_t *pga = (vm_page_t *)vm_page_array;
+ extern vm_map_t kernel_map;
+
+ if ((alignment & (alignment - 1)) != 0)
+ panic("vm_page_alloc_contig: alignment must be a power of 2");
+
+ s = splhigh();
+again:
+ /*
+ * Find first page in array that is free, within range, and aligned.
+ */
+ for (i = start; i < cnt.v_page_count; i++) {
+ phys = VM_PAGE_TO_PHYS(pga[i]);
+ if (((pga[i]->flags & PG_FREE) == PG_FREE) &&
+ (phys >= low) && (phys < high) &&
+ ((phys & (alignment - 1)) == 0))
+ break;
+ }
+
+ /*
+ * If the above failed or we will exceed the upper bound, fail.
+ */
+ if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(pga[i]) + size) > high)) {
+ splx(s);
+ return (NULL);
+ }
+
+ start = i;
+
+ /*
+ * Check successive pages for contiguous and free.
+ */
+ for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
+ if ((VM_PAGE_TO_PHYS(pga[i]) !=
+ (VM_PAGE_TO_PHYS(pga[i - 1]) + PAGE_SIZE)) ||
+ ((pga[i]->flags & PG_FREE) != PG_FREE)) {
+ start++;
+ goto again;
+ }
+ }
+
+ /*
+ * We've found a contiguous chunk that meets our requirements.
+ * Allocate kernel VM, unfree and assign the physical pages to it
+ * and return kernel VM pointer.
+ */
+ addr = kmem_alloc_pageable(kernel_map, size);
+
+ for (i = start; i < (start + size / PAGE_SIZE); i++) {
+ TAILQ_REMOVE(&vm_page_queue_free, pga[i], pageq);
+ cnt.v_free_count--;
+ vm_page_wire(pga[i]);
+ pga[i]->flags = PG_CLEAN; /* shut off PG_FREE and any other flags */
+ }
+ pmap_qenter(addr, &pga[start], size / PAGE_SIZE);
+
+ splx(s);
+ return (addr);
+}
+
/*
* vm_page_free:
*
@@ -609,14 +678,22 @@ void vm_page_free(mem)
mem->flags &= ~PG_INACTIVE;
cnt.v_inactive_count--;
}
+ if (mem->flags & PG_FREE)
+ panic("vm_page_free: freeing free page");
if (!(mem->flags & PG_FICTITIOUS)) {
simple_lock(&vm_page_queue_free_lock);
if (mem->wire_count) {
+ if (mem->wire_count > 1) {
+ printf("vm_page_free: wire count > 1 (%d)", mem->wire_count);
+ panic("vm_page_free: invalid wire count");
+ }
cnt.v_wire_count--;
mem->wire_count = 0;
}
+
+ mem->flags |= PG_FREE;
TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
cnt.v_free_count++;
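
The scan in vm_page_alloc_contig above is a first-fit search over the page array, which is ordered by physical address. A stand-alone, user-space sketch of the same logic may make it easier to follow; it is not kernel code, and is_free[] and phys[] merely stand in for the PG_FREE flag and VM_PAGE_TO_PHYS():

    #include <stdio.h>

    #define PAGE_SIZE       4096UL

    /*
     * First-fit search mirroring vm_page_alloc_contig: find a free page
     * whose physical address lies in [low, high), is aligned, and starts
     * a run of physically contiguous free pages covering "size" bytes.
     * Returns the starting index, or -1 on failure.
     */
    static long
    find_contig(const int *is_free, const unsigned long *phys, long npages,
        unsigned long size, unsigned long low, unsigned long high,
        unsigned long align)
    {
            long start, i, j, need = (long)(size / PAGE_SIZE);

            for (start = 0; start + need <= npages; start++) {
                    /* Candidate start page: free, inside [low, high), aligned. */
                    if (!is_free[start] || phys[start] < low ||
                        phys[start] >= high || (phys[start] & (align - 1)) != 0)
                            continue;
                    /* Like the kernel code, give up if the run would pass "high"
                     * (the array is sorted by physical address, so no later
                     * candidate can fit either). */
                    if (phys[start] + size > high)
                            return (-1);
                    /* Remaining pages must be free and physically adjacent. */
                    for (i = 1, j = start + 1; i < need; i++, j++)
                            if (!is_free[j] || phys[j] != phys[j - 1] + PAGE_SIZE)
                                    break;
                    if (i == need)
                            return (start);
            }
            return (-1);
    }

    int
    main(void)
    {
            unsigned long phys[16];
            int is_free[16];
            long i, hit;

            /* 16 mock pages, physically contiguous from 1MB; page 2 is busy. */
            for (i = 0; i < 16; i++) {
                    phys[i] = 0x100000UL + (unsigned long)i * PAGE_SIZE;
                    is_free[i] = (i != 2);
            }
            hit = find_contig(is_free, phys, 16, 4 * PAGE_SIZE,
                0, 16UL * 1024 * 1024, PAGE_SIZE);
            printf("first fit at index %ld\n", hit);    /* prints 3 */
            return (0);
    }

On a contiguity failure the kernel version simply bumps the start index and rescans, which is what the outer loop's "continue" does here.
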
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index ee049aa..cbfb74c 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_page.h,v 1.4 1994/08/04 03:06:45 davidg Exp $
+ * $Id: vm_page.h,v 1.5 1994/09/06 11:28:44 davidg Exp $
*/
/*
@@ -135,7 +135,7 @@ struct vm_page {
#define PG_DIRTY 0x0800 /* client flag to set when dirty */
#define PG_REFERENCED 0x1000 /* page has been referenced */
#define PG_PAGEROWNED 0x4000 /* DEBUG: async paging op in progress */
-#define PG_PTPAGE 0x8000 /* DEBUG: is a user page table page */
+#define PG_FREE 0x8000 /* page is in free list */
#if VM_PAGE_DEBUG
#define VM_PAGE_CHECK(mem) { \
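
One footnote on the new flag: PG_FREE takes over bit 0x8000 from the debug-only PG_PTPAGE and, like the other page flags, is tested with a masked compare. A tiny illustrative program (not kernel code) shows the idiom and the operator-precedence trap it avoids:

    #include <stdio.h>

    #define PG_FREE 0x8000          /* value from the vm_page.h change above */

    int
    main(void)
    {
            int flags = PG_FREE;

            /* Correct test: parenthesize the '&'.  In C, '==' binds more
             * tightly than '&', so "flags & PG_FREE == PG_FREE" would parse
             * as "flags & (PG_FREE == PG_FREE)", i.e. a test of bit 0x0001. */
            if ((flags & PG_FREE) == PG_FREE)
                    printf("page is on the free list\n");
            return (0);
    }
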