summary | refs | log | tree | commit | diff | stats
path: root/sys/vm
diff options
context:
space:
mode:
author: alc <alc@FreeBSD.org> 2008-05-10 21:46:20 +0000
committer: alc <alc@FreeBSD.org> 2008-05-10 21:46:20 +0000
commit: c251140c2683fff813ae3869939f7a510fa0c8a7 (patch)
tree: d12080f402f408af5398139c077822276131a22e /sys/vm
parent: a7ca76e7be2b0e3a9732e1ec875e2727098adae0 (diff)
download: FreeBSD-src-c251140c2683fff813ae3869939f7a510fa0c8a7.zip
download: FreeBSD-src-c251140c2683fff813ae3869939f7a510fa0c8a7.tar.gz
Introduce a new parameter "superpage_align" to kmem_suballoc() that is
used to request superpage alignment for the submap. Request superpage alignment for the kmem_map. Pass VMFS_ANY_SPACE instead of TRUE to vm_map_find(). (They are currently equivalent but VMFS_ANY_SPACE is the new preferred spelling.) Remove a stale comment from kmem_malloc().
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_extern.h3
-rw-r--r--sys/vm/vm_init.c11
-rw-r--r--sys/vm/vm_kern.c18
3 files changed, 15 insertions, 17 deletions
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 27580bd..0a54372 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -63,7 +63,8 @@ void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
void kmem_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
void kmem_init(vm_offset_t, vm_offset_t);
vm_offset_t kmem_malloc(vm_map_t, vm_size_t, boolean_t);
-vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t);
+vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
+ boolean_t);
void swapout_procs(int);
int useracc(void *, int, int);
int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index 0640602..d9f9d84 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -186,16 +186,17 @@ again:
panic("startup: table size inconsistency");
clean_map = kmem_suballoc(kernel_map, &kmi->clean_sva, &kmi->clean_eva,
- (nbuf*BKVASIZE) + (nswbuf*MAXPHYS));
+ nbuf * BKVASIZE + nswbuf * MAXPHYS, FALSE);
buffer_map = kmem_suballoc(clean_map, &kmi->buffer_sva,
- &kmi->buffer_eva, (nbuf*BKVASIZE));
+ &kmi->buffer_eva, nbuf * BKVASIZE, FALSE);
buffer_map->system_map = 1;
pager_map = kmem_suballoc(clean_map, &kmi->pager_sva, &kmi->pager_eva,
- (nswbuf*MAXPHYS));
+ nswbuf * MAXPHYS, FALSE);
pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- (exec_map_entries*(ARG_MAX+(PAGE_SIZE*3))));
- pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva);
+ exec_map_entries * (ARG_MAX + (PAGE_SIZE * 3)), FALSE);
+ pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva,
+ FALSE);
/*
* XXX: Mbuf system machine-specific initializations should
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 1a3863d..fc27e90 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -109,8 +109,8 @@ kmem_alloc_nofault(map, size)
size = round_page(size);
addr = vm_map_min(map);
- result = vm_map_find(map, NULL, 0,
- &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
+ result = vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
+ VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
if (result != KERN_SUCCESS) {
return (0);
}
@@ -221,12 +221,11 @@ kmem_free(map, addr, size)
* parent Map to take range from
* min, max Returned endpoints of map
* size Size of range to find
+ * superpage_align Request that min is superpage aligned
*/
vm_map_t
-kmem_suballoc(parent, min, max, size)
- vm_map_t parent;
- vm_offset_t *min, *max;
- vm_size_t size;
+kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
+ vm_size_t size, boolean_t superpage_align)
{
int ret;
vm_map_t result;
@@ -234,8 +233,8 @@ kmem_suballoc(parent, min, max, size)
size = round_page(size);
*min = vm_map_min(parent);
- ret = vm_map_find(parent, NULL, 0,
- min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
+ ret = vm_map_find(parent, NULL, 0, min, size, superpage_align ?
+ VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
if (ret != KERN_SUCCESS)
panic("kmem_suballoc: bad status return of %d", ret);
*max = *min + size;
@@ -259,9 +258,6 @@ kmem_suballoc(parent, min, max, size)
* (kmem_object). This, combined with the fact that only malloc uses
* this routine, ensures that we will never block in map or object waits.
*
- * Note that this still only works in a uni-processor environment and
- * when called at splhigh().
- *
* We don't worry about expanding the map (adding entries) since entries
* for wired maps are statically allocated.
*
OpenPOWER on IntegriCloud