author     alc <alc@FreeBSD.org>    2007-02-25 06:14:58 +0000
committer  alc <alc@FreeBSD.org>    2007-02-25 06:14:58 +0000
commit     573a964db6deb3f61961bfe0f4586caa6b8cbb9e (patch)
tree       8f61202f24e3670c10df7262f4f001d14fbb32df /sys/vm
parent     7b83533659e4ccdf5e867d26a2d738717e00f2cc (diff)
Change the way that unmanaged pages are created. Specifically, immediately flag any page that is allocated to an OBJT_PHYS object as unmanaged in vm_page_alloc() rather than waiting for a later call to vm_page_unmanage(). This allows for the elimination of some uses of the page queues lock.

Change the type of the kernel and kmem objects from OBJT_DEFAULT to OBJT_PHYS. This allows us to take advantage of the above change to simplify the allocation of unmanaged pages in kmem_alloc() and kmem_malloc().

Remove vm_page_unmanage(). It is no longer used.
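For quick reference, the sketch below condenses the new flow described above. It is paraphrased from the vm_page.c and vm_kern.c hunks in this diff and elides the surrounding kernel code, so it is illustrative rather than buildable on its own.

    /*
     * vm_page_alloc(): a page backing an OBJT_PHYS object is flagged
     * unmanaged at allocation time, with no page queues lock held.
     */
    if (object != NULL && object->type == OBJT_PHYS)
            flags |= PG_UNMANAGED;
    m->flags = flags;

    /*
     * kmem_alloc()/kmem_malloc(): because kernel_object and kmem_object
     * are now OBJT_PHYS, the old vm_page_lock_queues()/
     * vm_page_unmanage()/vm_page_unlock_queues() sequence reduces to an
     * assertion that the invariant already holds.
     */
    KASSERT((mem->flags & PG_UNMANAGED) != 0,
        ("kmem_alloc: page %p is managed", mem));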
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/phys_pager.c    6
-rw-r--r--  sys/vm/vm_kern.c      10
-rw-r--r--  sys/vm/vm_map.c        3
-rw-r--r--  sys/vm/vm_object.c     7
-rw-r--r--  sys/vm/vm_page.c      32
-rw-r--r--  sys/vm/vm_page.h       1
6 files changed, 11 insertions(+), 48 deletions(-)
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index 2151bce..6d3ef28 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -150,11 +150,6 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
}
KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
("phys_pager_getpages: partially valid page %p", m[i]));
- }
- vm_page_lock_queues();
- for (i = 0; i < count; i++) {
- /* Switch off pv_entries */
- vm_page_unmanage(m[i]);
m[i]->dirty = 0;
/* The requested page must remain busy, the others not. */
if (reqpage != i) {
@@ -162,7 +157,6 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
m[i]->busy = 0;
}
}
- vm_page_unlock_queues();
return (VM_PAGER_OK);
}
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 6a78bb9..b6a4738 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -175,9 +175,8 @@ kmem_alloc(map, size)
mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
mem->valid = VM_PAGE_BITS_ALL;
- vm_page_lock_queues();
- vm_page_unmanage(mem);
- vm_page_unlock_queues();
+ KASSERT((mem->flags & PG_UNMANAGED) != 0,
+ ("kmem_alloc: page %p is managed", mem));
}
VM_OBJECT_UNLOCK(kernel_object);
@@ -364,9 +363,8 @@ retry:
if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
- vm_page_lock_queues();
- vm_page_unmanage(m);
- vm_page_unlock_queues();
+ KASSERT((m->flags & PG_UNMANAGED) != 0,
+ ("kmem_malloc: page %p is managed", m));
}
VM_OBJECT_UNLOCK(kmem_object);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 92b1fa9..dc96069 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2269,8 +2269,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
VM_OBJECT_LOCK(object);
if (object->ref_count != 1 &&
((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
- object == kernel_object || object == kmem_object) &&
- (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
+ object == kernel_object || object == kmem_object)) {
vm_object_collapse(object);
vm_object_page_remove(object, offidxstart, offidxend, FALSE);
if (object->type == OBJT_SWAP)
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index ce42b99..2dd28be 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -249,11 +249,11 @@ vm_object_init(void)
mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
- _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
+ _vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kernel_object);
VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
- _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
+ _vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kmem_object);
/*
@@ -1800,7 +1800,8 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
* remove pages from the object (we must instead remove the page
* references, and then destroy the object).
*/
- KASSERT(object->type != OBJT_PHYS,
+ KASSERT(object->type != OBJT_PHYS || object == kernel_object ||
+ object == kmem_object,
("attempt to remove pages from a physical object"));
vm_object_pip_add(object, 1);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 3badc5b..e97eca9 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -938,6 +938,8 @@ loop:
if (req & VM_ALLOC_ZERO)
flags = PG_ZERO;
}
+ if (object != NULL && object->type == OBJT_PHYS)
+ flags |= PG_UNMANAGED;
m->flags = flags;
if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ))
m->oflags = 0;
@@ -1169,36 +1171,6 @@ vm_page_free_toq(vm_page_t m)
}
/*
- * vm_page_unmanage:
- *
- * Prevent PV management from being done on the page. The page is
- * removed from the paging queues as if it were wired, and as a
- * consequence of no longer being managed the pageout daemon will not
- * touch it (since there is no way to locate the pte mappings for the
- * page). madvise() calls that mess with the pmap will also no longer
- * operate on the page.
- *
- * Beyond that the page is still reasonably 'normal'. Freeing the page
- * will clear the flag.
- *
- * This routine is used by OBJT_PHYS objects - objects using unswappable
- * physical memory as backing store rather then swap-backed memory and
- * will eventually be extended to support 4MB unmanaged physical
- * mappings.
- */
-void
-vm_page_unmanage(vm_page_t m)
-{
-
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- if ((m->flags & PG_UNMANAGED) == 0) {
- if (m->wire_count == 0)
- vm_pageq_remove(m);
- }
- vm_page_flag_set(m, PG_UNMANAGED);
-}
-
-/*
* vm_page_wire:
*
* Mark this page as wired down by yet
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index a3ab085..1b75f38 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -343,7 +343,6 @@ vm_page_t vm_page_select_cache(int);
void vm_page_sleep(vm_page_t m, const char *msg);
vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
-void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);