author:    jhb <jhb@FreeBSD.org>  2013-08-16 21:13:55 +0000
committer: jhb <jhb@FreeBSD.org>  2013-08-16 21:13:55 +0000
commit:    3bfcb89de4b7ac6c54e2affe99eccd1482eb4327 (patch)
tree:      7565121f7abca9570dd1e0d308a2dd838e3c735b /sys/vm
parent:    efcf22ed8cf8c3e8d2e5fa37ae4c4a9935a3d597 (diff)
Add new mmap(2) flags to permit applications to request specific virtual
address alignment of mappings.

- MAP_ALIGNED(n) requests a mapping aligned on a boundary of (1 << n).
  Requests for n >= number of bits in a pointer or less than the size of
  a page fail with EINVAL.  This matches the API provided by NetBSD.

- MAP_ALIGNED_SUPER is a special case of MAP_ALIGNED.  It can be used to
  optimize the chances of using large pages.  By default it will align
  the mapping on a large page boundary (the system is free to choose any
  large page size to align to that seems best for the mapping request).
  However, if the object being mapped is already using large pages, then
  it will align the virtual mapping to match the existing large pages in
  the object instead.

- Internally, VMFS_ALIGNED_SPACE is now renamed to VMFS_SUPER_SPACE, and
  VMFS_ALIGNED_SPACE(n) is repurposed for specifying a specific alignment.
  MAP_ALIGNED(n) maps to using VMFS_ALIGNED_SPACE(n), while
  MAP_ALIGNED_SUPER maps to VMFS_SUPER_SPACE.

- mmap() of a device object now uses VMFS_OPTIMAL_SPACE rather than
  explicitly using VMFS_SUPER_SPACE.  All device objects are forced to
  use a specific color on creation, so VMFS_OPTIMAL_SPACE is effectively
  equivalent.

Reviewed by:	alc
MFC after:	1 month
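
For illustration only (not part of this commit), a minimal userland use of the
new flags could look like the sketch below; it assumes the MAP_ALIGNED() and
MAP_ALIGNED_SUPER definitions from <sys/mman.h> and picks a 1 MiB alignment
(n = 20) purely as an example:

#include <sys/mman.h>
#include <stdio.h>

int
main(void)
{
        /* Request an anonymous mapping aligned on a 1 MiB boundary. */
        void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE | MAP_ALIGNED(20), -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap MAP_ALIGNED(20)");
                return (1);
        }
        printf("1 MiB-aligned mapping at %p\n", p);

        /* Or let the kernel choose a superpage-friendly alignment. */
        void *q = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE | MAP_ALIGNED_SUPER, -1, 0);
        if (q == MAP_FAILED) {
                perror("mmap MAP_ALIGNED_SUPER");
                return (1);
        }
        printf("superpage-aligned mapping at %p\n", q);
        return (0);
}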
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_init.c2
-rw-r--r--sys/vm/vm_kern.c2
-rw-r--r--sys/vm/vm_map.c21
-rw-r--r--sys/vm/vm_map.h8
-rw-r--r--sys/vm/vm_mmap.c28
5 files changed, 44 insertions, 17 deletions
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index b539f9d..7ab1ee0 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -112,7 +112,7 @@ kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp)
addr = vm_map_min(kernel_map);
result = vm_map_find(kernel_map, NULL, 0, &addr, size,
- VMFS_ALIGNED_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
+ VMFS_SUPER_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
if (result != KERN_SUCCESS)
return (ENOMEM);
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index c7cb409..9790653 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -286,7 +286,7 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
*min = vm_map_min(parent);
ret = vm_map_find(parent, NULL, 0, min, size, superpage_align ?
- VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
+ VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
MAP_ACC_NO_CHARGE);
if (ret != KERN_SUCCESS)
panic("kmem_suballoc: bad status return of %d", ret);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 1d92965..1a6146e 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1434,12 +1434,17 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_size_t length, int find_space, vm_prot_t prot,
vm_prot_t max, int cow)
{
- vm_offset_t start, initial_addr;
+ vm_offset_t alignment, initial_addr, start;
int result;
if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
(object->flags & OBJ_COLORED) == 0))
- find_space = VMFS_ANY_SPACE;
+ find_space = VMFS_ANY_SPACE;
+ if (find_space >> 8 != 0) {
+ KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
+ alignment = (vm_offset_t)1 << (find_space >> 8);
+ } else
+ alignment = 0;
initial_addr = *addr;
again:
start = initial_addr;
@@ -1455,12 +1460,18 @@ again:
return (KERN_NO_SPACE);
}
switch (find_space) {
- case VMFS_ALIGNED_SPACE:
+ case VMFS_SUPER_SPACE:
case VMFS_OPTIMAL_SPACE:
pmap_align_superpage(object, offset, addr,
length);
break;
+ case VMFS_ANY_SPACE:
+ break;
default:
+ if ((*addr & (alignment - 1)) != 0) {
+ *addr &= ~(alignment - 1);
+ *addr += alignment;
+ }
break;
}
@@ -1468,8 +1479,8 @@ again:
}
result = vm_map_insert(map, object, offset, start, start +
length, prot, max, cow);
- } while (result == KERN_NO_SPACE && (find_space == VMFS_ALIGNED_SPACE ||
- find_space == VMFS_OPTIMAL_SPACE));
+ } while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
+ find_space != VMFS_ANY_SPACE);
vm_map_unlock(map);
return (result);
}
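
The default case in the switch above is what implements fixed alignment: the
requested power-of-two alignment is decoded from the high bits of find_space
and the candidate address is rounded up to the next boundary. A standalone
sketch of that arithmetic (hypothetical helper names, outside the kernel):

#include <stdint.h>

/* Decode the log2 alignment stored above the low byte of find_space. */
static uintptr_t
decode_alignment(int find_space)
{
        return (find_space >> 8 != 0 ? (uintptr_t)1 << (find_space >> 8) : 0);
}

/* Round addr up to the next multiple of a power-of-two alignment. */
static uintptr_t
round_up(uintptr_t addr, uintptr_t alignment)
{
        if (alignment != 0 && (addr & (alignment - 1)) != 0) {
                addr &= ~(alignment - 1);
                addr += alignment;
        }
        return (addr);
}

For example, round_up(0x12345, (uintptr_t)1 << 16) yields 0x20000.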
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index ed8864e..054c506 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -339,12 +339,16 @@ long vmspace_resident_count(struct vmspace *vmspace);
#define VM_FAULT_READ_AHEAD_MAX min(atop(MAXPHYS) - 1, UINT8_MAX)
/*
- * The following "find_space" options are supported by vm_map_find()
+ * The following "find_space" options are supported by vm_map_find().
+ *
+ * For VMFS_ALIGNED_SPACE, the desired alignment is specified to
+ * the macro argument as log base 2 of the desired alignment.
*/
#define VMFS_NO_SPACE 0 /* don't find; use the given range */
#define VMFS_ANY_SPACE 1 /* find a range with any alignment */
#define VMFS_OPTIMAL_SPACE 2 /* find a range with optimal alignment*/
-#define VMFS_ALIGNED_SPACE 3 /* find a superpage-aligned range */
+#define VMFS_SUPER_SPACE 3 /* find a superpage-aligned range */
+#define VMFS_ALIGNED_SPACE(x) ((x) << 8) /* find a range with fixed alignment */
/*
* vm_map_wire and vm_map_unwire option flags
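
The new VMFS_ALIGNED_SPACE(x) macro simply parks the log2 alignment above the
low byte that still carries the enumerated VMFS_* constants, so the two
encodings cannot collide. A couple of hypothetical compile-time checks,
assuming the definitions above:

_Static_assert(VMFS_ALIGNED_SPACE(12) == 0x0c00, "4 KiB alignment request");
_Static_assert(VMFS_ALIGNED_SPACE(21) == 0x1500, "2 MiB alignment request");
_Static_assert((VMFS_ALIGNED_SPACE(21) & 0xff) == 0, "low byte left for VMFS_* constants");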
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 1b08097..53a7be5 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -201,7 +201,7 @@ sys_mmap(td, uap)
vm_prot_t cap_maxprot, prot, maxprot;
void *handle;
objtype_t handle_type;
- int flags, error;
+ int align, error, flags;
off_t pos;
struct vmspace *vms = td->td_proc->p_vmspace;
cap_rights_t rights;
@@ -251,6 +251,13 @@ sys_mmap(td, uap)
size += pageoff; /* low end... */
size = (vm_size_t) round_page(size); /* hi end */
+ /* Ensure alignment is at least a page and fits in a pointer. */
+ align = flags & MAP_ALIGNMENT_MASK;
+ if (align != 0 && align != MAP_ALIGNED_SUPER &&
+ (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
+ align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT))
+ return (EINVAL);
+
/*
* Check for illegal addresses. Watch out for address wrap... Note
* that VM_*_ADDRESS are not constants due to casts (argh).
@@ -1490,7 +1497,7 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
boolean_t fitit;
vm_object_t object = NULL;
struct thread *td = curthread;
- int docow, error, rv;
+ int docow, error, findspace, rv;
boolean_t writecounted;
if (size == 0)
@@ -1605,12 +1612,17 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
if (flags & MAP_STACK)
rv = vm_map_stack(map, *addr, size, prot, maxprot,
docow | MAP_STACK_GROWS_DOWN);
- else if (fitit)
- rv = vm_map_find(map, object, foff, addr, size,
- object != NULL && object->type == OBJT_DEVICE ?
- VMFS_ALIGNED_SPACE : VMFS_OPTIMAL_SPACE, prot, maxprot,
- docow);
- else
+ else if (fitit) {
+ if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER)
+ findspace = VMFS_SUPER_SPACE;
+ else if ((flags & MAP_ALIGNMENT_MASK) != 0)
+ findspace = VMFS_ALIGNED_SPACE(flags >>
+ MAP_ALIGNMENT_SHIFT);
+ else
+ findspace = VMFS_OPTIMAL_SPACE;
+ rv = vm_map_find(map, object, foff, addr, size, findspace,
+ prot, maxprot, docow);
+ } else
rv = vm_map_fixed(map, object, foff, *addr, size,
prot, maxprot, docow);
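
Taken together, the sys_mmap() check and the vm_mmap() dispatch above boil
down to a single translation from mmap(2) flags to a vm_map_find() find_space
argument. A condensed restatement (illustrative only, not the committed code;
it assumes the MAP_ALIGNED*/MAP_ALIGNMENT_* definitions from <sys/mman.h> and
the kernel's PAGE_SHIFT and NBBY):

static int
mmap_flags_to_findspace(int flags, int *findspace)
{
        int align, n;

        align = flags & MAP_ALIGNMENT_MASK;
        if (align == MAP_ALIGNED_SUPER)
                *findspace = VMFS_SUPER_SPACE;
        else if (align != 0) {
                n = align >> MAP_ALIGNMENT_SHIFT;
                /* Alignment must be at least a page and fit in a pointer. */
                if (n < PAGE_SHIFT || n >= (int)(sizeof(void *) * NBBY))
                        return (EINVAL);
                *findspace = VMFS_ALIGNED_SPACE(n);
        } else
                *findspace = VMFS_OPTIMAL_SPACE;
        return (0);
}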