author    jeff <jeff@FreeBSD.org>    2013-08-07 06:21:20 +0000
committer jeff <jeff@FreeBSD.org>    2013-08-07 06:21:20 +0000
commit    de4ecca21340ce4d0bf9182cac133c14e031218e (patch)
tree      950bad07f0aeeeae78036d82b9aa11ae998c3654 /sys/i386/i386/sys_machdep.c
parent    e141f5c0bac3839e4886a26e1ba796f4e46e6455 (diff)
Replace kernel virtual address space allocation with vmem. This provides
transparent layering and better fragmentation.

 - Normalize functions that allocate memory to use kmem_*
 - Those that allocate address space are named kva_*
 - Those that operate on maps are named kmap_*
 - Implement recursive allocation handling for kmem_arena in vmem.

Reviewed by:    alc
Tested by:      pho
Sponsored by:   EMC / Isilon Storage Division
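For reference, a minimal sketch of the calling convention this commit adopts,
assuming the FreeBSD 10-era prototypes vm_offset_t kmem_malloc(struct vmem *,
vm_size_t, int) and void kmem_free(struct vmem *, vm_offset_t, vm_size_t); the
ldt_buf_alloc()/ldt_buf_free() helpers are illustrative only and are not part
of the commit:

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <machine/segments.h>

/*
 * Hypothetical helpers showing the new convention: kmem_malloc() and
 * kmem_free() now take a vmem arena (kernel_arena) as their first
 * argument rather than a vm_map (kernel_map).
 */
static union descriptor *
ldt_buf_alloc(int num)
{
	/* Was: kmem_malloc(kernel_map, num * sizeof(...), M_WAITOK) */
	return ((union descriptor *)kmem_malloc(kernel_arena,
	    num * sizeof(union descriptor), M_WAITOK));
}

static void
ldt_buf_free(union descriptor *lp, int num)
{
	/* Was: kmem_free(kernel_map, (vm_offset_t)lp, ...) */
	kmem_free(kernel_arena, (vm_offset_t)lp,
	    num * sizeof(union descriptor));
}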
Diffstat (limited to 'sys/i386/i386/sys_machdep.c')
-rw-r--r--  sys/i386/i386/sys_machdep.c  |  16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/sys/i386/i386/sys_machdep.c b/sys/i386/i386/sys_machdep.c
index 00d74d3..adf6ac4 100644
--- a/sys/i386/i386/sys_machdep.c
+++ b/sys/i386/i386/sys_machdep.c
@@ -164,7 +164,7 @@ sysarch(td, uap)
break;
case I386_SET_LDT:
if (kargs.largs.descs != NULL) {
- lp = (union descriptor *)kmem_malloc(kernel_map,
+ lp = (union descriptor *)kmem_malloc(kernel_arena,
kargs.largs.num * sizeof(union descriptor),
M_WAITOK);
if (lp == NULL) {
@@ -175,7 +175,7 @@ sysarch(td, uap)
kargs.largs.num * sizeof(union descriptor));
if (error == 0)
error = i386_set_ldt(td, &kargs.largs, lp);
- kmem_free(kernel_map, (vm_offset_t)lp,
+ kmem_free(kernel_arena, (vm_offset_t)lp,
kargs.largs.num * sizeof(union descriptor));
} else {
error = i386_set_ldt(td, &kargs.largs, NULL);
@@ -299,7 +299,7 @@ i386_extend_pcb(struct thread *td)
0 /* granularity */
};

- ext = (struct pcb_ext *)kmem_malloc(kernel_map, ctob(IOPAGES+1),
+ ext = (struct pcb_ext *)kmem_malloc(kernel_arena, ctob(IOPAGES+1),
M_WAITOK);
if (ext == 0)
return (ENOMEM);
@@ -473,7 +473,7 @@ user_ldt_alloc(struct mdproc *mdp, int len)
M_SUBPROC, M_WAITOK);

new_ldt->ldt_len = len = NEW_MAX_LD(len);
- new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_map,
+ new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
round_page(len * sizeof(union descriptor)), M_WAITOK);
if (new_ldt->ldt_base == NULL) {
free(new_ldt, M_SUBPROC);
@@ -513,7 +513,7 @@ user_ldt_alloc(struct mdproc *mdp, int len)
M_SUBPROC, M_WAITOK);

new_ldt->ldt_len = len = NEW_MAX_LD(len);
- new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_map,
+ new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
len * sizeof(union descriptor), M_WAITOK);
if (new_ldt->ldt_base == NULL) {
free(new_ldt, M_SUBPROC);
@@ -576,7 +576,7 @@ user_ldt_deref(struct proc_ldt *pldt)
mtx_assert(&dt_lock, MA_OWNED);
if (--pldt->ldt_refcnt == 0) {
mtx_unlock_spin(&dt_lock);
- kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
+ kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
pldt->ldt_len * sizeof(union descriptor));
free(pldt, M_SUBPROC);
} else
@@ -855,7 +855,7 @@ i386_ldt_grow(struct thread *td, int len)
* free the new object and return.
*/
mtx_unlock_spin(&dt_lock);
- kmem_free(kernel_map,
+ kmem_free(kernel_arena,
(vm_offset_t)new_ldt->ldt_base,
new_ldt->ldt_len * sizeof(union descriptor));
free(new_ldt, M_SUBPROC);
@@ -889,7 +889,7 @@ i386_ldt_grow(struct thread *td, int len)
mtx_unlock_spin(&dt_lock);
#endif
if (old_ldt_base != NULL_LDT_BASE) {
- kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
+ kmem_free(kernel_arena, (vm_offset_t)old_ldt_base,
old_ldt_len * sizeof(union descriptor));
free(new_ldt, M_SUBPROC);
}