path: root/sys/vm
author		jeff <jeff@FreeBSD.org>	2013-08-07 06:21:20 +0000
committer	jeff <jeff@FreeBSD.org>	2013-08-07 06:21:20 +0000
commit		de4ecca21340ce4d0bf9182cac133c14e031218e (patch)
tree		950bad07f0aeeeae78036d82b9aa11ae998c3654 /sys/vm
parent		e141f5c0bac3839e4886a26e1ba796f4e46e6455 (diff)
download	FreeBSD-src-de4ecca21340ce4d0bf9182cac133c14e031218e.zip
		FreeBSD-src-de4ecca21340ce4d0bf9182cac133c14e031218e.tar.gz
Replace kernel virtual address space allocation with vmem. This provides
transparent layering and better fragmentation.

 - Normalize functions that allocate memory to use kmem_*
 - Those that allocate address space are named kva_*
 - Those that operate on maps are named kmap_*
 - Implement recursive allocation handling for kmem_arena in vmem.

Reviewed by:	alc
Tested by:	pho
Sponsored by:	EMC / Isilon Storage Division
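The renamed interfaces split by what they manage: kva_* hands out bare kernel address space, kmem_* hands out address space already backed by wired pages, and kmap_* operates on pageable submaps. A minimal sketch of the two most common call patterns, using the prototypes this commit adds to vm_extern.h; the function name and sizes below are illustrative, not part of the change:

/* Hypothetical illustration only. */
static void
example_kernel_alloc(void)
{
	vm_offset_t va;

	/*
	 * kva_alloc()/kva_free(): address space only; any backing must be
	 * created explicitly, e.g. with pmap_qenter() or kmem_back().
	 */
	va = kva_alloc(4 * PAGE_SIZE);
	if (va != 0)
		kva_free(va, 4 * PAGE_SIZE);

	/*
	 * kmem_malloc()/kmem_free(): wired, mapped pages taken from the
	 * requested arena, here the general-purpose kernel arena.
	 */
	va = kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
	if (va != 0)
		kmem_free(kernel_arena, va, PAGE_SIZE);
}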
Diffstat (limited to 'sys/vm')
-rw-r--r--	sys/vm/memguard.c	75
-rw-r--r--	sys/vm/memguard.h	3
-rw-r--r--	sys/vm/pmap.h	3
-rw-r--r--	sys/vm/uma_core.c	12
-rw-r--r--	sys/vm/vm_extern.h	37
-rw-r--r--	sys/vm/vm_glue.c	14
-rw-r--r--	sys/vm/vm_init.c	77
-rw-r--r--	sys/vm/vm_kern.c	384
-rw-r--r--	sys/vm/vm_kern.h	3
-rw-r--r--	sys/vm/vm_map.c	31
-rw-r--r--	sys/vm/vm_map.h	4
-rw-r--r--	sys/vm/vm_object.c	2
12 files changed, 278 insertions, 367 deletions
diff --git a/sys/vm/memguard.c b/sys/vm/memguard.c
index b1740c3..ea2d925 100644
--- a/sys/vm/memguard.c
+++ b/sys/vm/memguard.c
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
+#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/uma.h>
@@ -99,8 +100,9 @@ SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
memguard_sysctl_desc, "A", "Short description of memory type to monitor");
-static vm_map_t memguard_map = NULL;
+static vmem_t *memguard_map = NULL;
static vm_offset_t memguard_cursor;
+static vm_offset_t memguard_base;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
@@ -112,7 +114,7 @@ static u_long memguard_fail_pgs;
SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
&memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
- &memguard_mapsize, 0, "MemGuard private vm_map size");
+ &memguard_mapsize, 0, "MemGuard private arena size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
&memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
@@ -200,21 +202,18 @@ memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
* out of a single VM map (contiguous chunk of address space).
*/
void
-memguard_init(vm_map_t parent_map)
+memguard_init(vmem_t *parent)
{
- vm_offset_t base, limit;
-
- memguard_map = kmem_suballoc(parent_map, &base, &limit,
- memguard_mapsize, FALSE);
- memguard_map->system_map = 1;
- KASSERT(memguard_mapsize == limit - base,
- ("Expected %lu, got %lu", (u_long)memguard_mapsize,
- (u_long)(limit - base)));
+ vm_offset_t base;
+
+ vmem_alloc(parent, memguard_mapsize, M_WAITOK, &base);
+ memguard_map = vmem_create("memguard arena", base, memguard_mapsize,
+ PAGE_SIZE, 0, M_WAITOK);
memguard_cursor = base;
+ memguard_base = base;
printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
- printf("\tMEMGUARD map limit: 0x%lx\n", (u_long)limit);
printf("\tMEMGUARD map size: %jd KBytes\n",
(uintmax_t)memguard_mapsize >> 10);
}
@@ -230,11 +229,13 @@ memguard_sysinit(void)
parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);
SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
- &memguard_map->min_offset, "MemGuard KVA base");
+ &memguard_base, "MemGuard KVA base");
SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
- &memguard_map->max_offset, "MemGuard KVA end");
+ &memguard_mapsize, "MemGuard KVA size");
+#if 0
SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
&memguard_map->size, "MemGuard KVA used");
+#endif
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);
@@ -263,6 +264,21 @@ v2sizep(vm_offset_t va)
return ((u_long *)&p->pageq.tqe_next);
}
+static u_long *
+v2sizev(vm_offset_t va)
+{
+ vm_paddr_t pa;
+ struct vm_page *p;
+
+ pa = pmap_kextract(va);
+ if (pa == 0)
+ panic("MemGuard detected double-free of %p", (void *)va);
+ p = PHYS_TO_VM_PAGE(pa);
+ KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
+ ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
+ return ((u_long *)&p->pageq.tqe_prev);
+}
+
/*
* Allocate a single object of specified size with specified flags
* (either M_WAITOK or M_NOWAIT).
@@ -289,14 +305,13 @@ memguard_alloc(unsigned long req_size, int flags)
if (do_guard)
size_v += 2 * PAGE_SIZE;
- vm_map_lock(memguard_map);
/*
* When we pass our memory limit, reject sub-page allocations.
* Page-size and larger allocations will use the same amount
* of physical memory whether we allocate or hand off to
* uma_large_alloc(), so keep those.
*/
- if (memguard_map->size >= memguard_physlimit &&
+ if (vmem_size(memguard_map, VMEM_ALLOC) >= memguard_physlimit &&
req_size < PAGE_SIZE) {
addr = (vm_offset_t)NULL;
memguard_fail_pgs++;
@@ -313,33 +328,34 @@ memguard_alloc(unsigned long req_size, int flags)
* map, unless vm_map_findspace() is tweaked.
*/
for (;;) {
- rv = vm_map_findspace(memguard_map, memguard_cursor,
- size_v, &addr);
- if (rv == KERN_SUCCESS)
+ if (vmem_xalloc(memguard_map, size_v, 0, 0, 0, memguard_cursor,
+ VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &addr) == 0)
break;
/*
* The map has no space. This may be due to
* fragmentation, or because the cursor is near the
* end of the map.
*/
- if (memguard_cursor == vm_map_min(memguard_map)) {
+ if (memguard_cursor == memguard_base) {
memguard_fail_kva++;
addr = (vm_offset_t)NULL;
goto out;
}
memguard_wrap++;
- memguard_cursor = vm_map_min(memguard_map);
+ memguard_cursor = memguard_base;
}
if (do_guard)
addr += PAGE_SIZE;
- rv = kmem_back(memguard_map, addr, size_p, flags);
+ rv = kmem_back(kmem_object, addr, size_p, flags);
if (rv != KERN_SUCCESS) {
+ vmem_xfree(memguard_map, addr, size_v);
memguard_fail_pgs++;
addr = (vm_offset_t)NULL;
goto out;
}
- memguard_cursor = addr + size_p;
+ memguard_cursor = addr + size_v;
*v2sizep(trunc_page(addr)) = req_size;
+ *v2sizev(trunc_page(addr)) = size_v;
memguard_succ++;
if (req_size < PAGE_SIZE) {
memguard_wasted += (PAGE_SIZE - req_size);
@@ -354,7 +370,6 @@ memguard_alloc(unsigned long req_size, int flags)
}
}
out:
- vm_map_unlock(memguard_map);
return ((void *)addr);
}
@@ -363,7 +378,7 @@ is_memguard_addr(void *addr)
{
vm_offset_t a = (vm_offset_t)(uintptr_t)addr;
- return (a >= memguard_map->min_offset && a < memguard_map->max_offset);
+ return (a >= memguard_base && a < memguard_base + memguard_mapsize);
}
/*
@@ -373,12 +388,13 @@ void
memguard_free(void *ptr)
{
vm_offset_t addr;
- u_long req_size, size;
+ u_long req_size, size, sizev;
char *temp;
int i;
addr = trunc_page((uintptr_t)ptr);
req_size = *v2sizep(addr);
+ sizev = *v2sizev(addr);
size = round_page(req_size);
/*
@@ -400,11 +416,12 @@ memguard_free(void *ptr)
* vm_map lock to serialize updates to memguard_wasted, since
* we had the lock at increment.
*/
- vm_map_lock(memguard_map);
+ kmem_unback(kmem_object, addr, size);
+ if (sizev > size)
+ addr -= PAGE_SIZE;
+ vmem_xfree(memguard_map, addr, sizev);
if (req_size < PAGE_SIZE)
memguard_wasted -= (PAGE_SIZE - req_size);
- (void)vm_map_delete(memguard_map, addr, addr + size);
- vm_map_unlock(memguard_map);
}
/*
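Note how memguard now carves its region out of the parent arena once and then manages it with a private vmem arena rather than a submap. A condensed sketch of that carve-out pattern, mirroring memguard_init() above; the arena name and size are illustrative:

static void
example_carveout(vmem_t *parent)
{
	vmem_t *sub;
	vm_offset_t base;
	vm_size_t len = 128 * PAGE_SIZE;	/* illustrative size */

	/* Reserve one contiguous KVA range from the parent arena... */
	vmem_alloc(parent, len, M_WAITOK, &base);
	/* ...and layer a private arena over it for fine-grained cuts. */
	sub = vmem_create("example arena", base, len, PAGE_SIZE, 0, M_WAITOK);
	/* Individual allocations then use vmem_xalloc()/vmem_xfree() on sub. */
	(void)sub;
}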
diff --git a/sys/vm/memguard.h b/sys/vm/memguard.h
index 9ec4ffd..9e99e98 100644
--- a/sys/vm/memguard.h
+++ b/sys/vm/memguard.h
@@ -33,10 +33,11 @@
struct malloc_type;
struct vm_map;
+struct vmem;
#ifdef DEBUG_MEMGUARD
unsigned long memguard_fudge(unsigned long, const struct vm_map *);
-void memguard_init(struct vm_map *);
+void memguard_init(struct vmem *);
void *memguard_alloc(unsigned long, int);
void *memguard_realloc(void *, unsigned long, struct malloc_type *, int);
void memguard_free(void *);
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index c64a549..c0f80a7 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -100,9 +100,6 @@ extern vm_offset_t kernel_vm_end;
void pmap_activate(struct thread *td);
void pmap_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
vm_size_t);
-#if defined(__mips__)
-void pmap_align_tlb(vm_offset_t *);
-#endif
void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
void pmap_clear_modify(vm_page_t m);
void pmap_clear_reference(vm_page_t m);
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 900209e..5db1816 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -1015,7 +1015,7 @@ page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
void *p; /* Returned page */
*pflag = UMA_SLAB_KMEM;
- p = (void *) kmem_malloc(kmem_map, bytes, wait);
+ p = (void *) kmem_malloc(kmem_arena, bytes, wait);
return (p);
}
@@ -1097,16 +1097,16 @@ noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
static void
page_free(void *mem, int size, uint8_t flags)
{
- vm_map_t map;
+ struct vmem *vmem;
if (flags & UMA_SLAB_KMEM)
- map = kmem_map;
+ vmem = kmem_arena;
else if (flags & UMA_SLAB_KERNEL)
- map = kernel_map;
+ vmem = kernel_arena;
else
panic("UMA: page_free used with invalid flags %d", flags);
- kmem_free(map, (vm_offset_t)mem, size);
+ kmem_free(vmem, (vm_offset_t)mem, size);
}
/*
@@ -2983,7 +2983,7 @@ uma_zone_reserve_kva(uma_zone_t zone, int count)
#else
if (1) {
#endif
- kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
+ kva = kva_alloc(pages * UMA_SLAB_SIZE);
if (kva == 0)
return (0);
} else
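With kmem_map gone, UMA's page back-end only has to map a slab flag onto an arena. A hypothetical helper naming the selection used by page_alloc()/page_free() above; page_free() itself open-codes it:

static vmem_t *
slab_flag_to_arena(uint8_t flags)
{

	if (flags & UMA_SLAB_KMEM)
		return (kmem_arena);
	if (flags & UMA_SLAB_KERNEL)
		return (kernel_arena);
	panic("unknown slab flags %d", flags);
}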
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 4a2dc04..fee55d0 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -36,27 +36,40 @@
struct proc;
struct vmspace;
struct vnode;
+struct vmem;
#ifdef _KERNEL
-int kernacc(void *, int, int);
-vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
-vm_offset_t kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags,
+/* These operate on kernel virtual addresses only. */
+vm_offset_t kva_alloc(vm_size_t);
+void kva_free(vm_offset_t, vm_size_t);
+
+/* These operate on pageable virtual addresses. */
+vm_offset_t kmap_alloc_wait(vm_map_t, vm_size_t);
+void kmap_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
+
+/* These operate on virtual addresses backed by memory. */
+vm_offset_t kmem_alloc_attr(struct vmem *, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
-vm_offset_t kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags,
+vm_offset_t kmem_alloc_contig(struct vmem *, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr);
-vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
-vm_offset_t kmem_alloc_nofault_space(vm_map_t, vm_size_t, int);
-vm_offset_t kmem_alloc_wait(vm_map_t, vm_size_t);
-void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
-void kmem_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
-void kmem_init(vm_offset_t, vm_offset_t);
-vm_offset_t kmem_malloc(vm_map_t map, vm_size_t size, int flags);
-int kmem_back(vm_map_t, vm_offset_t, vm_size_t, int);
+vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
+void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
+
+/* This provides memory for previously allocated address space. */
+int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int);
+void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
+
+/* Bootstrapping. */
vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
boolean_t);
+void kmem_init(vm_offset_t, vm_offset_t);
+void kmem_init_zero_region(void);
+void kmeminit(void);
+
void swapout_procs(int);
+int kernacc(void *, int, int);
int useracc(void *, int, int);
int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
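The header also separates getting address space from putting pages behind it: kmem_back() and kmem_unback() attach and detach wired pages for a range that was already allocated, against either kernel_object or kmem_object. A minimal sketch of that two-step pattern; the function name and length are illustrative:

static void
example_back_unback(void)
{
	vm_offset_t va;
	vm_size_t len = 2 * PAGE_SIZE;	/* illustrative */

	va = kva_alloc(len);		/* address space only */
	if (va == 0)
		return;
	if (kmem_back(kernel_object, va, len, M_WAITOK) == KERN_SUCCESS) {
		/* ... use the wired, mapped range at va ... */
		kmem_unback(kernel_object, va, len);
	}
	kva_free(va, len);
}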
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 948e2b3..94e07f9 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -67,6 +67,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
+#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
@@ -76,6 +77,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
+#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
@@ -359,11 +361,13 @@ vm_thread_new(struct thread *td, int pages)
* We need to align the kstack's mapped address to fit within
* a single TLB entry.
*/
- ks = kmem_alloc_nofault_space(kernel_map,
- (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
+ if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
+ PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
+ M_BESTFIT | M_NOWAIT, &ks)) {
+ ks = 0;
+ }
#else
- ks = kmem_alloc_nofault(kernel_map,
- (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+ ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
if (ks == 0) {
printf("vm_thread_new: kstack allocation failed\n");
@@ -422,7 +426,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
}
VM_OBJECT_WUNLOCK(ksobj);
vm_object_deallocate(ksobj);
- kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
+ kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}
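Kernel stacks that must fit a single TLB entry now express the alignment constraint directly to vmem_xalloc() instead of relying on the removed VMFS_TLB_ALIGNED_SPACE. A sketch of an aligned request against the kernel arena; the two-page alignment mirrors the hunk above and the helper name is illustrative:

static vm_offset_t
example_aligned_kstack(int pages)
{
	vm_offset_t ks;

	if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
	    2 * PAGE_SIZE /* alignment */, 0 /* phase */, 0 /* nocross */,
	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &ks) != 0)
		ks = 0;		/* no KVA available */
	return (ks);
}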
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index 2c4bcb6..b539f9d 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -70,6 +70,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
+#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/selinfo.h>
@@ -101,6 +102,26 @@ static void vm_mem_init(void *);
SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_FIRST, vm_mem_init, NULL);
/*
+ * Import kva into the kernel arena.
+ */
+static int
+kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp)
+{
+ vm_offset_t addr;
+ int result;
+
+ addr = vm_map_min(kernel_map);
+ result = vm_map_find(kernel_map, NULL, 0, &addr, size,
+ VMFS_ALIGNED_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
+ if (result != KERN_SUCCESS)
+ return (ENOMEM);
+
+ *addrp = addr;
+
+ return (0);
+}
+
+/*
* vm_init initializes the virtual memory system.
* This is done only by the first cpu up.
*
@@ -111,6 +132,7 @@ static void
vm_mem_init(dummy)
void *dummy;
{
+
/*
* Initializes resident memory structures. From here on, all physical
* memory is accounted for, and we use only virtual addresses.
@@ -125,6 +147,19 @@ vm_mem_init(dummy)
vm_object_init();
vm_map_startup();
kmem_init(virtual_avail, virtual_end);
+
+ /*
+ * Initialize the kernel_arena. This can grow on demand.
+ */
+ vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
+ vmem_set_import(kernel_arena, kva_import, NULL, NULL,
+#if VM_NRESERVLEVEL > 0
+ 1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT));
+#else
+ PAGE_SIZE);
+#endif
+
+ kmem_init_zero_region();
pmap_init();
vm_pager_init();
}
@@ -138,7 +173,6 @@ vm_ksubmap_init(struct kva_md_info *kmi)
long physmem_est;
vm_offset_t minaddr;
vm_offset_t maxaddr;
- vm_map_t clean_map;
/*
* Allocate space for system data structures.
@@ -146,8 +180,6 @@ vm_ksubmap_init(struct kva_md_info *kmi)
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
- * An index into the kernel page table corresponding to the
- * virtual memory address maintained in "v" is kept in "mapaddr".
*/
/*
@@ -173,7 +205,8 @@ again:
*/
if (firstaddr == 0) {
size = (vm_size_t)v;
- firstaddr = kmem_alloc(kernel_map, round_page(size));
+ firstaddr = kmem_malloc(kernel_arena, round_page(size),
+ M_ZERO | M_WAITOK);
if (firstaddr == 0)
panic("startup: no room for tables");
goto again;
@@ -185,31 +218,49 @@ again:
if ((vm_size_t)((char *)v - firstaddr) != size)
panic("startup: table size inconsistency");
+ /*
+ * Allocate the clean map to hold all of the paging and I/O virtual
+ * memory.
+ */
size = (long)nbuf * BKVASIZE + (long)nswbuf * MAXPHYS +
(long)bio_transient_maxcnt * MAXPHYS;
- clean_map = kmem_suballoc(kernel_map, &kmi->clean_sva, &kmi->clean_eva,
- size, TRUE);
+ kmi->clean_sva = firstaddr = kva_alloc(size);
+ kmi->clean_eva = firstaddr + size;
+ /*
+ * Allocate the buffer arena.
+ */
size = (long)nbuf * BKVASIZE;
- kmi->buffer_sva = kmem_alloc_nofault(clean_map, size);
+ kmi->buffer_sva = firstaddr;
kmi->buffer_eva = kmi->buffer_sva + size;
vmem_init(buffer_arena, "buffer arena", kmi->buffer_sva, size,
PAGE_SIZE, 0, 0);
+ firstaddr += size;
+ /*
+ * Now swap kva.
+ */
+ swapbkva = firstaddr;
size = (long)nswbuf * MAXPHYS;
- swapbkva = kmem_alloc_nofault(clean_map, size);
- if (!swapbkva)
- panic("Not enough clean_map VM space for pager buffers");
+ firstaddr += size;
+ /*
+ * And optionally transient bio space.
+ */
if (bio_transient_maxcnt != 0) {
size = (long)bio_transient_maxcnt * MAXPHYS;
vmem_init(transient_arena, "transient arena",
- kmem_alloc_nofault(clean_map, size),
- size, PAGE_SIZE, 0, 0);
+ firstaddr, size, PAGE_SIZE, 0, 0);
+ firstaddr += size;
}
+ if (firstaddr != kmi->clean_eva)
+ panic("Clean map calculation incorrect");
+
+ /*
+ * Allocate the pageable submaps.
+ */
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
exec_map_entries * round_page(PATH_MAX + ARG_MAX), FALSE);
pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva,
FALSE);
}
-
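kernel_arena starts empty and grows on demand: kva_import() above pulls MAP_NOFAULT, superpage-aligned KVA out of kernel_map whenever the arena runs dry, and vmem_set_import() installs that callback with the chosen import quantum. A reduced sketch of the same wiring for a hypothetical arena; the names and quantum are illustrative:

static int
example_import(void *arg, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	vm_offset_t addr;

	/* Pull fresh MAP_NOFAULT KVA out of kernel_map on demand. */
	addr = vm_map_min(kernel_map);
	if (vm_map_find(kernel_map, NULL, 0, &addr, size, VMFS_ALIGNED_SPACE,
	    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT) != KERN_SUCCESS)
		return (ENOMEM);
	*addrp = addr;
	return (0);
}

static void
example_arena_setup(void)
{
	vmem_t *arena;

	/* Start with no spans; the arena refills itself via the callback. */
	arena = vmem_create("example arena", 0, 0, PAGE_SIZE, 0, M_WAITOK);
	vmem_set_import(arena, example_import, NULL, NULL,
	    4 * PAGE_SIZE /* illustrative import quantum */);
}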
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 42cd699..c7cb409 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -74,9 +74,11 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
+#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
@@ -86,7 +88,6 @@ __FBSDID("$FreeBSD$");
#include <vm/uma.h>
vm_map_t kernel_map;
-vm_map_t kmem_map;
vm_map_t exec_map;
vm_map_t pipe_map;
@@ -105,7 +106,7 @@ SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
"Max kernel address");
/*
- * kmem_alloc_nofault:
+ * kva_alloc:
*
* Allocate a virtual address range with no underlying object and
* no initial mapping to physical memory. Any mapping from this
@@ -114,94 +115,35 @@ SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
* a mapping on demand through vm_fault() will result in a panic.
*/
vm_offset_t
-kmem_alloc_nofault(map, size)
- vm_map_t map;
+kva_alloc(size)
vm_size_t size;
{
vm_offset_t addr;
- int result;
size = round_page(size);
- addr = vm_map_min(map);
- result = vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
- VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
- if (result != KERN_SUCCESS) {
+ if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
return (0);
- }
+
return (addr);
}
/*
- * kmem_alloc_nofault_space:
+ * kva_free:
*
- * Allocate a virtual address range with no underlying object and
- * no initial mapping to physical memory within the specified
- * address space. Any mapping from this range to physical memory
- * must be explicitly created prior to its use, typically with
- * pmap_qenter(). Any attempt to create a mapping on demand
- * through vm_fault() will result in a panic.
+ * Release a region of kernel virtual memory allocated
+ * with kva_alloc, and return the physical pages
+ * associated with that region.
+ *
+ * This routine may not block on kernel maps.
*/
-vm_offset_t
-kmem_alloc_nofault_space(map, size, find_space)
- vm_map_t map;
- vm_size_t size;
- int find_space;
-{
+void
+kva_free(addr, size)
vm_offset_t addr;
- int result;
-
- size = round_page(size);
- addr = vm_map_min(map);
- result = vm_map_find(map, NULL, 0, &addr, size, find_space,
- VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
- if (result != KERN_SUCCESS) {
- return (0);
- }
- return (addr);
-}
-
-/*
- * Allocate wired-down memory in the kernel's address map
- * or a submap.
- */
-vm_offset_t
-kmem_alloc(map, size)
- vm_map_t map;
vm_size_t size;
{
- vm_offset_t addr;
- vm_offset_t offset;
size = round_page(size);
-
- /*
- * Use the kernel object for wired-down kernel pages. Assume that no
- * region of the kernel object is referenced more than once.
- */
-
- /*
- * Locate sufficient space in the map. This will give us the final
- * virtual address for the new memory, and thus will tell us the
- * offset within the kernel map.
- */
- vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
- vm_map_unlock(map);
- return (0);
- }
- offset = addr - VM_MIN_KERNEL_ADDRESS;
- vm_object_reference(kernel_object);
- vm_map_insert(map, kernel_object, offset, addr, addr + size,
- VM_PROT_ALL, VM_PROT_ALL, 0);
- vm_map_unlock(map);
-
- /*
- * And finally, mark the data as non-pageable.
- */
- (void) vm_map_wire(map, addr, addr + size,
- VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
-
- return (addr);
+ vmem_free(kernel_arena, addr, size);
}
/*
@@ -213,62 +155,57 @@ kmem_alloc(map, size)
* given flags, then the pages are zeroed before they are mapped.
*/
vm_offset_t
-kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
+kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, vm_memattr_t memattr)
{
- vm_object_t object = kernel_object;
+ vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
vm_offset_t addr;
- vm_ooffset_t end_offset, offset;
+ vm_ooffset_t offset;
vm_page_t m;
int pflags, tries;
+ int i;
size = round_page(size);
- vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
- vm_map_unlock(map);
+ if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
return (0);
- }
offset = addr - VM_MIN_KERNEL_ADDRESS;
- vm_object_reference(object);
- vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
- VM_PROT_ALL, 0);
- pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
+ pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
VM_OBJECT_WLOCK(object);
- end_offset = offset + size;
- for (; offset < end_offset; offset += PAGE_SIZE) {
+ for (i = 0; i < size; i += PAGE_SIZE) {
tries = 0;
retry:
- m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags, 1,
- low, high, PAGE_SIZE, 0, memattr);
+ m = vm_page_alloc_contig(object, OFF_TO_IDX(offset + i),
+ pflags, 1, low, high, PAGE_SIZE, 0, memattr);
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
- vm_map_unlock(map);
vm_pageout_grow_cache(tries, low, high);
- vm_map_lock(map);
VM_OBJECT_WLOCK(object);
tries++;
goto retry;
}
-
- /*
- * Since the pages that were allocated by any previous
- * iterations of this loop are not busy, they can be
- * freed by vm_object_page_remove(), which is called
- * by vm_map_delete().
+ /*
+ * Unmap and free the pages.
*/
- vm_map_delete(map, addr, addr + size);
- vm_map_unlock(map);
+ if (i != 0)
+ pmap_remove(kernel_pmap, addr, addr + i);
+ while (i != 0) {
+ i -= PAGE_SIZE;
+ m = vm_page_lookup(object,
+ OFF_TO_IDX(offset + i));
+ vm_page_unwire(m, 0);
+ vm_page_free(m);
+ }
+ vmem_free(vmem, addr, size);
return (0);
}
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
+ pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
+ TRUE);
}
VM_OBJECT_WUNLOCK(object);
- vm_map_unlock(map);
- vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
- VM_MAP_WIRE_NOHOLES);
return (addr);
}
@@ -281,27 +218,21 @@ retry:
* mapped.
*/
vm_offset_t
-kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
+kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr)
{
- vm_object_t object = kernel_object;
- vm_offset_t addr;
+ vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
+ vm_offset_t addr, tmp;
vm_ooffset_t offset;
vm_page_t end_m, m;
int pflags, tries;
size = round_page(size);
- vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
- vm_map_unlock(map);
+ if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
return (0);
- }
offset = addr - VM_MIN_KERNEL_ADDRESS;
- vm_object_reference(object);
- vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
- VM_PROT_ALL, 0);
- pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
+ pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
VM_OBJECT_WLOCK(object);
tries = 0;
retry:
@@ -310,50 +241,28 @@ retry:
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
- vm_map_unlock(map);
vm_pageout_grow_cache(tries, low, high);
- vm_map_lock(map);
VM_OBJECT_WLOCK(object);
tries++;
goto retry;
}
- vm_map_delete(map, addr, addr + size);
- vm_map_unlock(map);
+ vmem_free(vmem, addr, size);
return (0);
}
end_m = m + atop(size);
+ tmp = addr;
for (; m < end_m; m++) {
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
+ pmap_enter(kernel_pmap, tmp, VM_PROT_ALL, m, VM_PROT_ALL, true);
+ tmp += PAGE_SIZE;
}
VM_OBJECT_WUNLOCK(object);
- vm_map_unlock(map);
- vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
- VM_MAP_WIRE_NOHOLES);
return (addr);
}
/*
- * kmem_free:
- *
- * Release a region of kernel virtual memory allocated
- * with kmem_alloc, and return the physical pages
- * associated with that region.
- *
- * This routine may not block on kernel maps.
- */
-void
-kmem_free(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
-{
-
- (void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
-}
-
-/*
* kmem_suballoc:
*
* Allocates a map to manage a subrange
@@ -393,65 +302,25 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
/*
* kmem_malloc:
*
- * Allocate wired-down memory in the kernel's address map for the higher
- * level kernel memory allocator (kern/kern_malloc.c). We cannot use
- * kmem_alloc() because we may need to allocate memory at interrupt
- * level where we cannot block (canwait == FALSE).
- *
- * This routine has its own private kernel submap (kmem_map) and object
- * (kmem_object). This, combined with the fact that only malloc uses
- * this routine, ensures that we will never block in map or object waits.
- *
- * We don't worry about expanding the map (adding entries) since entries
- * for wired maps are statically allocated.
- *
- * `map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
- * which we never free.
+ * Allocate wired-down pages in the kernel's address space.
*/
vm_offset_t
-kmem_malloc(map, size, flags)
- vm_map_t map;
- vm_size_t size;
- int flags;
+kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
vm_offset_t addr;
- int i, rv;
+ int rv;
size = round_page(size);
- addr = vm_map_min(map);
+ if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
+ return (0);
- /*
- * Locate sufficient space in the map. This will give us the final
- * virtual address for the new memory, and thus will tell us the
- * offset within the kernel map.
- */
- vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
- vm_map_unlock(map);
- if ((flags & M_NOWAIT) == 0) {
- for (i = 0; i < 8; i++) {
- EVENTHANDLER_INVOKE(vm_lowmem, 0);
- uma_reclaim();
- vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map),
- size, &addr) == 0) {
- break;
- }
- vm_map_unlock(map);
- tsleep(&i, 0, "nokva", (hz / 4) * (i + 1));
- }
- if (i == 8) {
- panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
- (long)size, (long)map->size);
- }
- } else {
- return (0);
- }
+ rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
+ addr, size, flags);
+ if (rv != KERN_SUCCESS) {
+ vmem_free(vmem, addr, size);
+ return (0);
}
-
- rv = kmem_back(map, addr, size, flags);
- vm_map_unlock(map);
- return (rv == KERN_SUCCESS ? addr : 0);
+ return (addr);
}
/*
@@ -460,37 +329,22 @@ kmem_malloc(map, size, flags)
* Allocate physical pages for the specified virtual address range.
*/
int
-kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
+kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
vm_offset_t offset, i;
- vm_map_entry_t entry;
vm_page_t m;
int pflags;
- boolean_t found;
- KASSERT(vm_map_locked(map), ("kmem_back: map %p is not locked", map));
- offset = addr - VM_MIN_KERNEL_ADDRESS;
- vm_object_reference(kmem_object);
- vm_map_insert(map, kmem_object, offset, addr, addr + size,
- VM_PROT_ALL, VM_PROT_ALL, 0);
+ KASSERT(object == kmem_object || object == kernel_object,
+ ("kmem_back: only supports kernel objects."));
- /*
- * Assert: vm_map_insert() will never be able to extend the
- * previous entry so vm_map_lookup_entry() will find a new
- * entry exactly corresponding to this address range and it
- * will have wired_count == 0.
- */
- found = vm_map_lookup_entry(map, addr, &entry);
- KASSERT(found && entry->start == addr && entry->end == addr + size &&
- entry->wired_count == 0 && (entry->eflags & MAP_ENTRY_IN_TRANSITION)
- == 0, ("kmem_back: entry not found or misaligned"));
-
- pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
+ offset = addr - VM_MIN_KERNEL_ADDRESS;
+ pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
- VM_OBJECT_WLOCK(kmem_object);
+ VM_OBJECT_WLOCK(object);
for (i = 0; i < size; i += PAGE_SIZE) {
retry:
- m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);
+ m = vm_page_alloc(object, OFF_TO_IDX(offset + i), pflags);
/*
* Ran out of space, free everything up and return. Don't need
@@ -499,79 +353,78 @@ retry:
*/
if (m == NULL) {
if ((flags & M_NOWAIT) == 0) {
- VM_OBJECT_WUNLOCK(kmem_object);
- entry->eflags |= MAP_ENTRY_IN_TRANSITION;
- vm_map_unlock(map);
+ VM_OBJECT_WUNLOCK(object);
VM_WAIT;
- vm_map_lock(map);
- KASSERT(
-(entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_NEEDS_WAKEUP)) ==
- MAP_ENTRY_IN_TRANSITION,
- ("kmem_back: volatile entry"));
- entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
- VM_OBJECT_WLOCK(kmem_object);
+ VM_OBJECT_WLOCK(object);
goto retry;
}
/*
- * Free the pages before removing the map entry.
- * They are already marked busy. Calling
- * vm_map_delete before the pages has been freed or
- * unbusied will cause a deadlock.
+ * Unmap and free the pages.
*/
+ if (i != 0)
+ pmap_remove(kernel_pmap, addr, addr + i);
while (i != 0) {
i -= PAGE_SIZE;
- m = vm_page_lookup(kmem_object,
+ m = vm_page_lookup(object,
OFF_TO_IDX(offset + i));
vm_page_unwire(m, 0);
vm_page_free(m);
}
- VM_OBJECT_WUNLOCK(kmem_object);
- vm_map_delete(map, addr, addr + size);
+ VM_OBJECT_WUNLOCK(object);
return (KERN_NO_SPACE);
}
if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
- m->valid = VM_PAGE_BITS_ALL;
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
("kmem_malloc: page %p is managed", m));
+ m->valid = VM_PAGE_BITS_ALL;
+ pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
+ TRUE);
}
- VM_OBJECT_WUNLOCK(kmem_object);
+ VM_OBJECT_WUNLOCK(object);
- /*
- * Mark map entry as non-pageable. Repeat the assert.
- */
- KASSERT(entry->start == addr && entry->end == addr + size &&
- entry->wired_count == 0,
- ("kmem_back: entry not found or misaligned after allocation"));
- entry->wired_count = 1;
+ return (KERN_SUCCESS);
+}
- /*
- * At this point, the kmem_object must be unlocked because
- * vm_map_simplify_entry() calls vm_object_deallocate(), which
- * locks the kmem_object.
- */
- vm_map_simplify_entry(map, entry);
+void
+kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
+{
+ vm_page_t m;
+ vm_offset_t offset;
+ int i;
- /*
- * Loop thru pages, entering them in the pmap.
- */
- VM_OBJECT_WLOCK(kmem_object);
+ KASSERT(object == kmem_object || object == kernel_object,
+ ("kmem_unback: only supports kernel objects."));
+
+ offset = addr - VM_MIN_KERNEL_ADDRESS;
+ VM_OBJECT_WLOCK(object);
+ pmap_remove(kernel_pmap, addr, addr + size);
for (i = 0; i < size; i += PAGE_SIZE) {
- m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
- /*
- * Because this is kernel_pmap, this call will not block.
- */
- pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
- TRUE);
- vm_page_wakeup(m);
+ m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
+ vm_page_unwire(m, 0);
+ vm_page_free(m);
}
- VM_OBJECT_WUNLOCK(kmem_object);
+ VM_OBJECT_WUNLOCK(object);
+}
- return (KERN_SUCCESS);
+/*
+ * kmem_free:
+ *
+ * Free memory allocated with kmem_malloc. The size must match the
+ * original allocation.
+ */
+void
+kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
+{
+
+ size = round_page(size);
+ kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
+ addr, size);
+ vmem_free(vmem, addr, size);
}
/*
- * kmem_alloc_wait:
+ * kmap_alloc_wait:
*
* Allocates pageable memory from a sub-map of the kernel. If the submap
* has no room, the caller sleeps waiting for more memory in the submap.
@@ -579,7 +432,7 @@ retry:
* This routine may block.
*/
vm_offset_t
-kmem_alloc_wait(map, size)
+kmap_alloc_wait(map, size)
vm_map_t map;
vm_size_t size;
{
@@ -613,13 +466,13 @@ kmem_alloc_wait(map, size)
}
/*
- * kmem_free_wakeup:
+ * kmap_free_wakeup:
*
* Returns memory to a submap of the kernel, and wakes up any processes
* waiting for memory in that map.
*/
void
-kmem_free_wakeup(map, addr, size)
+kmap_free_wakeup(map, addr, size)
vm_map_t map;
vm_offset_t addr;
vm_size_t size;
@@ -634,28 +487,25 @@ kmem_free_wakeup(map, addr, size)
vm_map_unlock(map);
}
-static void
+void
kmem_init_zero_region(void)
{
vm_offset_t addr, i;
vm_page_t m;
- int error;
/*
* Map a single physical page of zeros to a larger virtual range.
* This requires less looping in places that want large amounts of
* zeros, while not using much more physical resources.
*/
- addr = kmem_alloc_nofault(kernel_map, ZERO_REGION_SIZE);
+ addr = kva_alloc(ZERO_REGION_SIZE);
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
pmap_qenter(addr + i, &m, 1);
- error = vm_map_protect(kernel_map, addr, addr + ZERO_REGION_SIZE,
- VM_PROT_READ, TRUE);
- KASSERT(error == 0, ("error=%d", error));
+ pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);
zero_region = (const void *)addr;
}
@@ -688,8 +538,6 @@ kmem_init(start, end)
start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
/* ... and ending with the completion of the above `insert' */
vm_map_unlock(m);
-
- kmem_init_zero_region();
}
#ifdef DIAGNOSTIC
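Several of the rewritten routines pick their backing VM object from the arena they were handed: kmem_arena allocations live in kmem_object, everything else in kernel_object. A hypothetical helper naming that recurring selection; kmem_malloc(), kmem_free() and kmem_alloc_attr() open-code it:

static vm_object_t
arena_to_object(struct vmem *vmem)
{

	return (vmem == kmem_arena ? kmem_object : kernel_object);
}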
diff --git a/sys/vm/vm_kern.h b/sys/vm/vm_kern.h
index 1479e5f..284b777 100644
--- a/sys/vm/vm_kern.h
+++ b/sys/vm/vm_kern.h
@@ -65,9 +65,10 @@
/* Kernel memory management definitions. */
extern vm_map_t kernel_map;
-extern vm_map_t kmem_map;
extern vm_map_t exec_map;
extern vm_map_t pipe_map;
+extern struct vmem *kernel_arena;
+extern struct vmem *kmem_arena;
extern struct vmem *buffer_arena;
extern struct vmem *transient_arena;
extern vm_offset_t swapbkva;
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index c43bce2..1d92965 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -197,9 +197,15 @@ vm_map_startup(void)
kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
- uma_prealloc(kmapentzone, MAX_KMAPENT);
mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
+#ifdef INVARIANTS
+ vmspace_zdtor,
+#else
+ NULL,
+#endif
+ vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}
static void
@@ -299,21 +305,6 @@ vmspace_alloc(min, max)
return (vm);
}
-void
-vm_init2(void)
-{
- uma_zone_reserve_kva(kmapentzone, lmin(cnt.v_page_count,
- (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 +
- maxproc * 2 + maxfiles);
- vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
-#ifdef INVARIANTS
- vmspace_zdtor,
-#else
- NULL,
-#endif
- vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
-}
-
static void
vmspace_container_reset(struct proc *p)
{
@@ -1469,11 +1460,6 @@ again:
pmap_align_superpage(object, offset, addr,
length);
break;
-#ifdef VMFS_TLB_ALIGNED_SPACE
- case VMFS_TLB_ALIGNED_SPACE:
- pmap_align_tlb(addr);
- break;
-#endif
default:
break;
}
@@ -1483,9 +1469,6 @@ again:
result = vm_map_insert(map, object, offset, start, start +
length, prot, max, cow);
} while (result == KERN_NO_SPACE && (find_space == VMFS_ALIGNED_SPACE ||
-#ifdef VMFS_TLB_ALIGNED_SPACE
- find_space == VMFS_TLB_ALIGNED_SPACE ||
-#endif
find_space == VMFS_OPTIMAL_SPACE));
vm_map_unlock(map);
return (result);
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 824a9a0..ed8864e 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -345,9 +345,6 @@ long vmspace_resident_count(struct vmspace *vmspace);
#define VMFS_ANY_SPACE 1 /* find a range with any alignment */
#define VMFS_OPTIMAL_SPACE 2 /* find a range with optimal alignment*/
#define VMFS_ALIGNED_SPACE 3 /* find a superpage-aligned range */
-#if defined(__mips__)
-#define VMFS_TLB_ALIGNED_SPACE 4 /* find a TLB entry aligned range */
-#endif
/*
* vm_map_wire and vm_map_unwire option flags
@@ -387,7 +384,6 @@ int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t);
-void vm_init2 (void);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
int vm_map_growstack (struct proc *p, vm_offset_t addr);
int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 1d128bf..7c3cad4 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -295,7 +295,7 @@ vm_object_init(void)
#else
NULL,
#endif
- vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
+ vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
vm_radix_init();
}