summary | refs | log | tree | commit | diff | stats
path: root/sys/amd64
diff options
context:
space:
mode:
author: jeff <jeff@FreeBSD.org> 2013-08-07 06:21:20 +0000
committer: jeff <jeff@FreeBSD.org> 2013-08-07 06:21:20 +0000
commit: de4ecca21340ce4d0bf9182cac133c14e031218e (patch)
tree: 950bad07f0aeeeae78036d82b9aa11ae998c3654 /sys/amd64
parent: e141f5c0bac3839e4886a26e1ba796f4e46e6455 (diff)
download: FreeBSD-src-de4ecca21340ce4d0bf9182cac133c14e031218e.zip
FreeBSD-src-de4ecca21340ce4d0bf9182cac133c14e031218e.tar.gz
Replace kernel virtual address space allocation with vmem. This provides
transparent layering and better fragmentation.

- Normalize functions that allocate memory to use kmem_*
- Those that allocate address space are named kva_*
- Those that operate on maps are named kmap_*
- Implement recursive allocation handling for kmem_arena in vmem.

Reviewed by: alc
Tested by: pho
Sponsored by: EMC / Isilon Storage Division
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/mp_machdep.c   12
-rw-r--r--  sys/amd64/amd64/pmap.c          7
-rw-r--r--  sys/amd64/amd64/sys_machdep.c   8
-rw-r--r--  sys/amd64/amd64/vm_machdep.c    2
4 files changed, 17 insertions, 12 deletions
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 3ad00e7..79aeb9c 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -938,10 +938,14 @@ start_all_aps(void)
apic_id = cpu_apic_ids[cpu];
/* allocate and set up an idle stack data page */
- bootstacks[cpu] = (void *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
- doublefault_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
- nmi_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
- dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+ bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
+ KSTACK_PAGES * PAGE_SIZE, M_WAITOK | M_ZERO);
+ doublefault_stack = (char *)kmem_malloc(kernel_arena,
+ PAGE_SIZE, M_WAITOK | M_ZERO);
+ nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
+ M_WAITOK | M_ZERO);
+ dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO);
bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
bootAP = cpu;
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 46112b5..33b8e7b 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -860,7 +860,8 @@ pmap_init(void)
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
+ pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
+ M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
}
@@ -5060,7 +5061,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
}
offset = pa & PAGE_MASK;
size = round_page(offset + size);
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
pa = trunc_page(pa);
@@ -5096,7 +5097,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
base = trunc_page(va);
offset = va & PAGE_MASK;
size = round_page(offset + size);
- kmem_free(kernel_map, base, size);
+ kva_free(base, size);
}
/*
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
index ac691d0..5be8af5 100644
--- a/sys/amd64/amd64/sys_machdep.c
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -356,7 +356,7 @@ amd64_set_ioperm(td, uap)
*/
pcb = td->td_pcb;
if (pcb->pcb_tssp == NULL) {
- tssp = (struct amd64tss *)kmem_malloc(kernel_map,
+ tssp = (struct amd64tss *)kmem_malloc(kernel_arena,
ctob(IOPAGES+1), M_WAITOK);
if (tssp == NULL)
return (ENOMEM);
@@ -463,7 +463,7 @@ user_ldt_alloc(struct proc *p, int force)
return (mdp->md_ldt);
mtx_unlock(&dt_lock);
new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
- new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_map,
+ new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
max_ldt_segment * sizeof(struct user_segment_descriptor),
M_WAITOK);
if (new_ldt->ldt_base == NULL) {
@@ -484,7 +484,7 @@ user_ldt_alloc(struct proc *p, int force)
mtx_lock(&dt_lock);
pldt = mdp->md_ldt;
if (pldt != NULL && !force) {
- kmem_free(kernel_map, (vm_offset_t)new_ldt->ldt_base,
+ kmem_free(kernel_arena, (vm_offset_t)new_ldt->ldt_base,
max_ldt_segment * sizeof(struct user_segment_descriptor));
free(new_ldt, M_SUBPROC);
return (pldt);
@@ -529,7 +529,7 @@ user_ldt_derefl(struct proc_ldt *pldt)
{
if (--pldt->ldt_refcnt == 0) {
- kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
+ kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
max_ldt_segment * sizeof(struct user_segment_descriptor));
free(pldt, M_SUBPROC);
}
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index acb5b93..ed0e7e9 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -342,7 +342,7 @@ cpu_thread_clean(struct thread *td)
* Clean TSS/iomap
*/
if (pcb->pcb_tssp != NULL) {
- kmem_free(kernel_map, (vm_offset_t)pcb->pcb_tssp,
+ kva_free((vm_offset_t)pcb->pcb_tssp,
ctob(IOPAGES + 1));
pcb->pcb_tssp = NULL;
}
OpenPOWER on IntegriCloud