diff options
author | alc <alc@FreeBSD.org> | 2012-02-29 05:41:29 +0000 |
---|---|---|
committer | alc <alc@FreeBSD.org> | 2012-02-29 05:41:29 +0000 |
commit | 54c1d2e89ae3474ab82015cb6b3f6d53e899e322 (patch) | |
tree | ec276cb455e46f7bcb8b039ab89c346994d10fe6 /sys/vm/vm_kern.c | |
parent | 49fb0a40aae272b8183ccc0f087bd05e0ed20365 (diff) | |
download | FreeBSD-src-54c1d2e89ae3474ab82015cb6b3f6d53e899e322.zip FreeBSD-src-54c1d2e89ae3474ab82015cb6b3f6d53e899e322.tar.gz |
Simplify kmem_alloc() by eliminating code that existed on account of
external pagers in Mach. FreeBSD doesn't implement external pagers.
Moreover, it doesn't page out the kernel object. So, the reasons for
having this code no longer hold.
Reviewed by: kib
MFC after: 6 weeks
Diffstat (limited to 'sys/vm/vm_kern.c')
-rw-r--r-- | sys/vm/vm_kern.c | 30 |
1 file changed, 0 insertions, 30 deletions
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c index 54f86dd..5e157a6 100644 --- a/sys/vm/vm_kern.c +++ b/sys/vm/vm_kern.c @@ -161,7 +161,6 @@ kmem_alloc(map, size) { vm_offset_t addr; vm_offset_t offset; - vm_offset_t i; size = round_page(size); @@ -187,35 +186,6 @@ kmem_alloc(map, size) vm_map_unlock(map); /* - * Guarantee that there are pages already in this object before - * calling vm_map_wire. This is to prevent the following - * scenario: - * - * 1) Threads have swapped out, so that there is a pager for the - * kernel_object. 2) The kmsg zone is empty, and so we are - * kmem_allocing a new page for it. 3) vm_map_wire calls vm_fault; - * there is no page, but there is a pager, so we call - * pager_data_request. But the kmsg zone is empty, so we must - * kmem_alloc. 4) goto 1 5) Even if the kmsg zone is not empty: when - * we get the data back from the pager, it will be (very stale) - * non-zero data. kmem_alloc is defined to return zero-filled memory. - * - * We're intentionally not activating the pages we allocate to prevent a - * race with page-out. vm_map_wire will wire the pages. - */ - VM_OBJECT_LOCK(kernel_object); - for (i = 0; i < size; i += PAGE_SIZE) { - vm_page_t mem; - - mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i), - VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_RETRY); - mem->valid = VM_PAGE_BITS_ALL; - KASSERT((mem->oflags & VPO_UNMANAGED) != 0, - ("kmem_alloc: page %p is managed", mem)); - } - VM_OBJECT_UNLOCK(kernel_object); - - /* * And finally, mark the data as non-pageable. */ (void) vm_map_wire(map, addr, addr + size, |