summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorjeff <jeff@FreeBSD.org>2002-11-01 00:59:03 +0000
committerjeff <jeff@FreeBSD.org>2002-11-01 00:59:03 +0000
commit827f7f802d6c9bb6e26502be3f21dc7aa7ce254e (patch)
tree309c689442dae3738f39ce26b69429d30a1f9ec1
parentc46715077793ed90d5eaf4f4ce3ebd59fe491d56 (diff)
downloadFreeBSD-src-827f7f802d6c9bb6e26502be3f21dc7aa7ce254e.zip
FreeBSD-src-827f7f802d6c9bb6e26502be3f21dc7aa7ce254e.tar.gz
- Add a new flag to vm_page_alloc, VM_ALLOC_NOOBJ. This tells
  vm_page_alloc not to insert this page into an object. The pindex is
  still used for colorization.
- Rework vm_page_select_* to accept a color instead of an object and
  pindex to work with VM_PAGE_NOOBJ.
- Document other VM_ALLOC_ flags.

Reviewed by: peter, jake
-rw-r--r--sys/vm/vm_page.c43
-rw-r--r--sys/vm/vm_page.h7
2 files changed, 28 insertions, 22 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 61af7e7..c207da4 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -796,17 +796,13 @@ vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
* This routine may not block.
*/
static vm_page_t
-vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
+vm_page_select_cache(vm_pindex_t color)
{
vm_page_t m;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
while (TRUE) {
- m = vm_pageq_find(
- PQ_CACHE,
- (pindex + object->pg_color) & PQ_L2_MASK,
- FALSE
- );
+ m = vm_pageq_find(PQ_CACHE, color & PQ_L2_MASK, FALSE);
if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
m->hold_count || m->wire_count)) {
vm_page_deactivate(m);
@@ -825,15 +821,11 @@ vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
* This routine may not block.
*/
static __inline vm_page_t
-vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
+vm_page_select_free(vm_pindex_t color, boolean_t prefer_zero)
{
vm_page_t m;
- m = vm_pageq_find(
- PQ_FREE,
- (pindex + object->pg_color) & PQ_L2_MASK,
- prefer_zero
- );
+ m = vm_pageq_find(PQ_FREE, color & PQ_L2_MASK, prefer_zero);
return (m);
}
@@ -859,15 +851,27 @@ vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
vm_page_t m = NULL;
+ vm_pindex_t color;
int page_req, s;
GIANT_REQUIRED;
- KASSERT(!vm_page_lookup(object, pindex),
- ("vm_page_alloc: page already allocated"));
+#ifdef INVARIANTS
+ if ((req & VM_ALLOC_NOOBJ) == 0) {
+ KASSERT(object != NULL,
+ ("vm_page_alloc: NULL object."));
+ KASSERT(!vm_page_lookup(object, pindex),
+ ("vm_page_alloc: page already allocated"));
+ }
+#endif
page_req = req & VM_ALLOC_CLASS_MASK;
+ if ((req & VM_ALLOC_NOOBJ) == 0)
+ color = pindex + object->pg_color;
+ else
+ color = pindex;
+
/*
* The pager is allowed to eat deeper into the free page list.
*/
@@ -883,8 +887,8 @@ loop:
* Allocate from the free queue if there are plenty of pages
* in it.
*/
- m = vm_page_select_free(object, pindex,
- (req & VM_ALLOC_ZERO) != 0);
+ m = vm_page_select_free(color, (req & VM_ALLOC_ZERO) != 0);
+
} else if (
(page_req == VM_ALLOC_SYSTEM &&
cnt.v_cache_count == 0 &&
@@ -894,7 +898,7 @@ loop:
/*
* Interrupt or system, dig deeper into the free list.
*/
- m = vm_page_select_free(object, pindex, FALSE);
+ m = vm_page_select_free(color, FALSE);
} else if (page_req != VM_ALLOC_INTERRUPT) {
mtx_unlock_spin(&vm_page_queue_free_mtx);
/*
@@ -903,7 +907,7 @@ loop:
* cnt.v_*_free_min counters are replenished.
*/
vm_page_lock_queues();
- if ((m = vm_page_select_cache(object, pindex)) == NULL) {
+ if ((m = vm_page_select_cache(color)) == NULL) {
vm_page_unlock_queues();
splx(s);
#if defined(DIAGNOSTIC)
@@ -973,7 +977,8 @@ loop:
* could cause us to block allocating memory). We cannot block
* anywhere.
*/
- vm_page_insert(m, object, pindex);
+ if ((req & VM_ALLOC_NOOBJ) == 0)
+ vm_page_insert(m, object, pindex);
/*
* Don't wakeup too often - wakeup the pageout daemon when
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 834d01e..b5bf5e9 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -314,9 +314,10 @@ extern struct mtx vm_page_queue_mtx;
#define VM_ALLOC_SYSTEM 2
#define VM_ALLOC_CLASS_MASK 3
/* page allocation flags: */
-#define VM_ALLOC_WIRED 0x20
-#define VM_ALLOC_ZERO 0x40
-#define VM_ALLOC_RETRY 0x80 /* vm_page_grab() only */
+#define VM_ALLOC_WIRED 0x0020 /* non pageable */
+#define VM_ALLOC_ZERO 0x0040 /* Try to obtain a zeroed page */
+#define VM_ALLOC_RETRY 0x0080 /* vm_page_grab() only */
+#define VM_ALLOC_NOOBJ 0x0100 /* No associated object */
void vm_page_flag_set(vm_page_t m, unsigned short bits);
void vm_page_flag_clear(vm_page_t m, unsigned short bits);
OpenPOWER on IntegriCloud