author     alc <alc@FreeBSD.org>    2002-07-18 04:08:10 +0000
committer  alc <alc@FreeBSD.org>    2002-07-18 04:08:10 +0000
commit     bf14f2641b55a6ad0f889aa2b0ff3240ad1cc384 (patch)
tree       d36c6bd5e594479dd64d78868dd7e7d2fb35a878 /sys
parent     cc2882c16b223d38b64f938d27b4b686d3892590 (diff)
o Introduce an argument, VM_ALLOC_WIRED, that requests vm_page_alloc()
  to return a wired page.
o Use VM_ALLOC_WIRED within Alpha's pmap_growkernel().  Also, because
  Alpha's pmap_growkernel() calls vm_page_alloc() from within a critical
  section, specify VM_ALLOC_INTERRUPT instead of VM_ALLOC_SYSTEM.  (Only
  VM_ALLOC_INTERRUPT is implemented entirely with a spin mutex.)
o Assert that the page queues mutex is held in vm_page_wire() on Alpha,
  just like the other platforms.
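
For illustration only (not part of the commit): a minimal sketch of the calling
convention this change enables, modeled on the pmap_growkernel() hunks below.
The kptobj, pindex, and nkpg names and the panic message are taken from the
diff; the surrounding context is assumed.

	/*
	 * Old two-step pattern: allocate, then wire the page in a
	 * separate call (the caller must hold the page queues lock).
	 */
	nkpg = vm_page_alloc(kptobj, pindex, VM_ALLOC_SYSTEM);
	if (!nkpg)
		panic("pmap_growkernel: no memory to grow kernel");
	vm_page_wire(nkpg);

	/*
	 * New single-step pattern: OR the VM_ALLOC_WIRED flag into the
	 * allocation class, and vm_page_alloc() returns the page already
	 * wired (wire_count == 1, v_wire_count bumped).
	 */
	nkpg = vm_page_alloc(kptobj, pindex,
	    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
	if (!nkpg)
		panic("pmap_growkernel: no memory to grow kernel");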
Diffstat (limited to 'sys')
-rw-r--r--    sys/alpha/alpha/pmap.c    9
-rw-r--r--    sys/vm/vm_page.c          19
-rw-r--r--    sys/vm/vm_page.h          6
3 files changed, 19 insertions, 15 deletions
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 9bc5884..6c825c9 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -1561,14 +1561,14 @@ pmap_growkernel(vm_offset_t addr)
if (!pmap_pte_v(pte)) {
int pindex = NKLEV3MAPS + pmap_lev1_index(kernel_vm_end) - K1SEGLEV1I;
- nkpg = vm_page_alloc(kptobj, pindex, VM_ALLOC_SYSTEM);
+ nkpg = vm_page_alloc(kptobj, pindex,
+ VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
if (!nkpg)
panic("pmap_growkernel: no memory to grow kernel");
printf("pmap_growkernel: growing to %lx\n", addr);
printf("pmap_growkernel: adding new level2 page table\n");
nklev2++;
- vm_page_wire(nkpg);
pmap_zero_page(nkpg);
pa = VM_PAGE_TO_PHYS(nkpg);
@@ -1596,13 +1596,12 @@ pmap_growkernel(vm_offset_t addr)
/*
* This index is bogus, but out of the way
*/
- nkpg = vm_page_alloc(kptobj, nklev3, VM_ALLOC_SYSTEM);
+ nkpg = vm_page_alloc(kptobj, nklev3,
+ VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
if (!nkpg)
panic("pmap_growkernel: no memory to grow kernel");
nklev3++;
-
- vm_page_wire(nkpg);
pmap_zero_page(nkpg);
pa = VM_PAGE_TO_PHYS(nkpg);
newlev2 = pmap_phys_to_pte(pa) | PG_V | PG_ASM | PG_KRE | PG_KWE;
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 7dd2300..e62e0d1 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -834,19 +834,17 @@ vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zer
* the page cache in this case.
*/
vm_page_t
-vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
+vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
vm_page_t m = NULL;
- boolean_t prefer_zero;
- int s;
+ int page_req, s;
GIANT_REQUIRED;
KASSERT(!vm_page_lookup(object, pindex),
("vm_page_alloc: page already allocated"));
- prefer_zero = (page_req & VM_ALLOC_ZERO) != 0 ? TRUE : FALSE;
- page_req &= ~VM_ALLOC_ZERO;
+ page_req = req & VM_ALLOC_CLASS_MASK;
/*
* The pager is allowed to eat deeper into the free page list.
@@ -863,7 +861,8 @@ loop:
* Allocate from the free queue if there are plenty of pages
* in it.
*/
- m = vm_page_select_free(object, pindex, prefer_zero);
+ m = vm_page_select_free(object, pindex,
+ (req & VM_ALLOC_ZERO) != 0);
} else if (
(page_req == VM_ALLOC_SYSTEM &&
cnt.v_cache_count == 0 &&
@@ -934,7 +933,11 @@ loop:
} else {
m->flags = PG_BUSY;
}
- m->wire_count = 0;
+ if (req & VM_ALLOC_WIRED) {
+ cnt.v_wire_count++;
+ m->wire_count = 1;
+ } else
+ m->wire_count = 0;
m->hold_count = 0;
m->act_count = 0;
m->busy = 0;
@@ -1241,9 +1244,7 @@ vm_page_wire(vm_page_t m)
* it is already off the queues).
*/
s = splvm();
-#ifndef __alpha__
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-#endif
if (m->wire_count == 0) {
if ((m->flags & PG_UNMANAGED) == 0)
vm_pageq_remove(m);
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 17afdb8..fe215c0 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -307,11 +307,15 @@ extern struct mtx vm_page_queue_mtx;
#define VM_PAGE_BITS_ALL 0xffff
#endif
+/* page allocation classes: */
#define VM_ALLOC_NORMAL 0
#define VM_ALLOC_INTERRUPT 1
#define VM_ALLOC_SYSTEM 2
+#define VM_ALLOC_CLASS_MASK 3
+/* page allocation flags: */
+#define VM_ALLOC_WIRED 0x20 /* vm_page_alloc() only */
#define VM_ALLOC_ZERO 0x40
-#define VM_ALLOC_RETRY 0x80
+#define VM_ALLOC_RETRY 0x80 /* vm_page_grab() only */
void vm_page_flag_set(vm_page_t m, unsigned short bits);
void vm_page_flag_clear(vm_page_t m, unsigned short bits);
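
A small illustrative decomposition (not from the commit) of how a request word
splits under the vm_page.h definitions above: the low two bits carry the
allocation class, and the higher bits are flags, which is what the
req & VM_ALLOC_CLASS_MASK step in vm_page_alloc() relies on.

	/* Values as defined in the vm_page.h hunk above. */
	int req = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;	/* 0x21 */
	int page_req = req & VM_ALLOC_CLASS_MASK;	/* VM_ALLOC_INTERRUPT (1) */
	int wired = (req & VM_ALLOC_WIRED) != 0;	/* 1: return the page wired */
	int zeroed = (req & VM_ALLOC_ZERO) != 0;	/* 0: no zeroed-page preference */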