path: root/sys/vm/vm_kern.c
author     alc <alc@FreeBSD.org>    2008-05-10 21:46:20 +0000
committer  alc <alc@FreeBSD.org>    2008-05-10 21:46:20 +0000
commit     c251140c2683fff813ae3869939f7a510fa0c8a7 (patch)
tree       d12080f402f408af5398139c077822276131a22e /sys/vm/vm_kern.c
parent     a7ca76e7be2b0e3a9732e1ec875e2727098adae0 (diff)
Introduce a new parameter "superpage_align" to kmem_suballoc() that is
used to request superpage alignment for the submap.

Request superpage alignment for the kmem_map.

Pass VMFS_ANY_SPACE instead of TRUE to vm_map_find(). (They are currently
equivalent, but VMFS_ANY_SPACE is the new preferred spelling.)

Remove a stale comment from kmem_malloc().
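For context, here is a minimal sketch of how a caller would use the new fifth
argument. The call site shown (a kmeminit()-style routine carving kmem_map out
of kernel_map) is an illustration only and is not part of this diff; the names
example_kmeminit, vm_kmem_size, kmembase, and kmemlimit are assumptions.

    /*
     * Hypothetical caller of the new kmem_suballoc() signature.  Passing
     * TRUE for superpage_align requests that the submap's base address be
     * superpage aligned; passing FALSE preserves the old behaviour.
     */
    #include <sys/param.h>
    #include <vm/vm.h>
    #include <vm/vm_kern.h>
    #include <vm/vm_map.h>

    static vm_offset_t kmembase, kmemlimit;

    static void
    example_kmeminit(vm_size_t vm_kmem_size)
    {
            kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
                vm_kmem_size, TRUE);
            kmem_map->system_map = 1;
    }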
Diffstat (limited to 'sys/vm/vm_kern.c')
-rw-r--r--  sys/vm/vm_kern.c  |  18
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 1a3863d..fc27e90 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -109,8 +109,8 @@ kmem_alloc_nofault(map, size)
size = round_page(size);
addr = vm_map_min(map);
- result = vm_map_find(map, NULL, 0,
- &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
+ result = vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
+ VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
if (result != KERN_SUCCESS) {
return (0);
}
@@ -221,12 +221,11 @@ kmem_free(map, addr, size)
* parent Map to take range from
* min, max Returned endpoints of map
* size Size of range to find
+ * superpage_align Request that min is superpage aligned
*/
vm_map_t
-kmem_suballoc(parent, min, max, size)
- vm_map_t parent;
- vm_offset_t *min, *max;
- vm_size_t size;
+kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
+ vm_size_t size, boolean_t superpage_align)
{
int ret;
vm_map_t result;
@@ -234,8 +233,8 @@ kmem_suballoc(parent, min, max, size)
size = round_page(size);
*min = vm_map_min(parent);
- ret = vm_map_find(parent, NULL, 0,
- min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
+ ret = vm_map_find(parent, NULL, 0, min, size, superpage_align ?
+ VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
if (ret != KERN_SUCCESS)
panic("kmem_suballoc: bad status return of %d", ret);
*max = *min + size;
@@ -259,9 +258,6 @@ kmem_suballoc(parent, min, max, size)
* (kmem_object). This, combined with the fact that only malloc uses
* this routine, ensures that we will never block in map or object waits.
*
- * Note that this still only works in a uni-processor environment and
- * when called at splhigh().
- *
* We don't worry about expanding the map (adding entries) since entries
* for wired maps are statically allocated.
*
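The parenthetical about TRUE and VMFS_ANY_SPACE being "currently equivalent"
comes down to the numeric values behind vm_map_find()'s find_space argument.
Below is a sketch of the relevant constants as they are assumed to appear in
sys/vm/vm_map.h of this era; the values and comments are illustrative, not
quoted from the tree.

    /*
     * Assumed definitions of the find_space selectors consumed by
     * vm_map_find().  VMFS_ANY_SPACE having the value 1 is what keeps the
     * old boolean TRUE working unchanged; kmem_suballoc() now maps its
     * superpage_align flag onto VMFS_ALIGNED_SPACE or VMFS_ANY_SPACE, as
     * the second hunk above shows.
     */
    #define VMFS_NO_SPACE           0       /* map at the given offset, no search */
    #define VMFS_ANY_SPACE          1       /* find free space, any alignment */
    #define VMFS_ALIGNED_SPACE      2       /* find superpage-aligned free space */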