author     jeff <jeff@FreeBSD.org>    2013-08-07 06:21:20 +0000
committer  jeff <jeff@FreeBSD.org>    2013-08-07 06:21:20 +0000
commit     de4ecca21340ce4d0bf9182cac133c14e031218e (patch)
tree       950bad07f0aeeeae78036d82b9aa11ae998c3654 /sys/i386
parent     e141f5c0bac3839e4886a26e1ba796f4e46e6455 (diff)
Replace kernel virtual address space allocation with vmem. This provides
transparent layering and better fragmentation behavior.
- Normalize functions that allocate memory to use kmem_*
- Those that allocate address space are named kva_*
- Those that operate on maps are named kmap_*
- Implement recursive allocation handling for kmem_arena in vmem.
Reviewed by: alc
Tested by: pho
Sponsored by: EMC / Isilon Storage Division
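
For orientation, a minimal usage sketch (not part of the commit) of the first two naming families, assuming the post-commit declarations in vm/vm_extern.h and vm/vm_kern.h; the function name below is hypothetical. The third family, kmap_* (e.g. kmap_free_wakeup()), operates on vm_map-backed submaps such as exec_map and appears in the imgact_coff.c hunks below.

/*
 * Hedged sketch, illustrative only -- not taken from this commit.
 */
#include <sys/param.h>
#include <sys/malloc.h>		/* M_WAITOK, M_ZERO */
#include <vm/vm.h>
#include <vm/vm_extern.h>	/* kmem_malloc(), kmem_free(), kva_alloc(), kva_free() */
#include <vm/vm_kern.h>		/* kernel_arena */

static void
vmem_naming_sketch(void)	/* hypothetical function */
{
	vm_offset_t backed, bare;

	/* kmem_*: allocate wired, backed kernel memory from an arena. */
	backed = kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);

	/*
	 * kva_*: allocate raw kernel virtual address space only; no
	 * backing pages are mapped until the caller provides them.
	 */
	bare = kva_alloc(PAGE_SIZE);

	/* Frees pair with the allocator that produced the address. */
	kmem_free(kernel_arena, backed, PAGE_SIZE);
	kva_free(bare, PAGE_SIZE);
}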
Diffstat (limited to 'sys/i386')
-rw-r--r--  sys/i386/i386/machdep.c       |  8
-rw-r--r--  sys/i386/i386/mp_machdep.c    |  6
-rw-r--r--  sys/i386/i386/pmap.c          | 15
-rw-r--r--  sys/i386/i386/sys_machdep.c   | 16
-rw-r--r--  sys/i386/i386/vm_machdep.c    |  4
-rw-r--r--  sys/i386/ibcs2/imgact_coff.c  |  6
-rw-r--r--  sys/i386/pci/pci_cfgreg.c     |  2
-rw-r--r--  sys/i386/xen/mp_machdep.c     |  9
-rw-r--r--  sys/i386/xen/pmap.c           | 12
9 files changed, 38 insertions(+), 40 deletions(-)
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 51765e9..be391b6 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -3178,9 +3178,9 @@ f00f_hack(void *unused)
 
 	printf("Intel Pentium detected, installing workaround for F00F bug\n");
 
-	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
+	tmp = kmem_malloc(kernel_arena, PAGE_SIZE * 2, M_WAITOK | M_ZERO);
 	if (tmp == 0)
-		panic("kmem_alloc returned 0");
+		panic("kmem_malloc returned 0");
 
 	/* Put the problematic entry (#6) at the end of the lower page. */
 	new_idt = (struct gate_descriptor*)
@@ -3189,9 +3189,7 @@ f00f_hack(void *unused)
 	r_idt.rd_base = (u_int)new_idt;
 	lidt(&r_idt);
 	idt = new_idt;
-	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
-	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
-		panic("vm_map_protect failed");
+	pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
 }
 #endif /* defined(I586_CPU) && !NO_F00F_HACK */
 
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 9b832ed..19e67cf 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -959,8 +959,10 @@ start_all_aps(void)
 
 		/* allocate and set up a boot stack data page */
 		bootstacks[cpu] =
-		    (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
-		dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+		    (char *)kmem_malloc(kernel_arena, KSTACK_PAGES * PAGE_SIZE,
+		    M_WAITOK | M_ZERO);
+		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+		    M_WAITOK | M_ZERO);
 		/* setup a vector to our boot code */
 		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
 		*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 77b0235..25f98b8 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -655,7 +655,7 @@ pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 
 	/* Inform UMA that this allocator uses kernel_map/object. */
 	*flags = UMA_SLAB_KERNEL;
-	return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
+	return ((void *)kmem_alloc_contig(kernel_arena, bytes, wait, 0x0ULL,
 	    0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
 }
 #endif
@@ -783,13 +783,13 @@ pmap_init(void)
 	 */
 	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 	s = round_page(s);
-	pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
+	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
+	    M_WAITOK | M_ZERO);
 	for (i = 0; i < pv_npg; i++)
 		TAILQ_INIT(&pv_table[i].pv_list);
 
 	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
-	pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
-	    PAGE_SIZE * pv_maxchunks);
+	pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
 	if (pv_chunkbase == NULL)
 		panic("pmap_init: not enough kvm for pv chunks");
 	pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
@@ -1747,8 +1747,7 @@ pmap_pinit(pmap_t pmap)
 	 * page directory table.
 	 */
 	if (pmap->pm_pdir == NULL) {
-		pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
-		    NBPTD);
+		pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD);
 		if (pmap->pm_pdir == NULL) {
 			PMAP_LOCK_DESTROY(pmap);
 			return (0);
@@ -5044,7 +5043,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 	if (pa < KERNLOAD && pa + size <= KERNLOAD)
 		va = KERNBASE + pa;
 	else
-		va = kmem_alloc_nofault(kernel_map, size);
+		va = kva_alloc(size);
 	if (!va)
 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 
@@ -5079,7 +5078,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
 	base = trunc_page(va);
 	offset = va & PAGE_MASK;
 	size = round_page(offset + size);
-	kmem_free(kernel_map, base, size);
+	kva_free(base, size);
 }
 
 /*
diff --git a/sys/i386/i386/sys_machdep.c b/sys/i386/i386/sys_machdep.c
index 00d74d3..adf6ac4 100644
--- a/sys/i386/i386/sys_machdep.c
+++ b/sys/i386/i386/sys_machdep.c
@@ -164,7 +164,7 @@ sysarch(td, uap)
 		break;
 	case I386_SET_LDT:
 		if (kargs.largs.descs != NULL) {
-			lp = (union descriptor *)kmem_malloc(kernel_map,
+			lp = (union descriptor *)kmem_malloc(kernel_arena,
 			    kargs.largs.num * sizeof(union descriptor),
 			    M_WAITOK);
 			if (lp == NULL) {
@@ -175,7 +175,7 @@ sysarch(td, uap)
 			    kargs.largs.num * sizeof(union descriptor));
 			if (error == 0)
 				error = i386_set_ldt(td, &kargs.largs, lp);
-			kmem_free(kernel_map, (vm_offset_t)lp,
+			kmem_free(kernel_arena, (vm_offset_t)lp,
 			    kargs.largs.num * sizeof(union descriptor));
 		} else {
 			error = i386_set_ldt(td, &kargs.largs, NULL);
@@ -299,7 +299,7 @@ i386_extend_pcb(struct thread *td)
 		0			/* granularity */
 	};
 
-	ext = (struct pcb_ext *)kmem_malloc(kernel_map, ctob(IOPAGES+1),
+	ext = (struct pcb_ext *)kmem_malloc(kernel_arena, ctob(IOPAGES+1),
 	    M_WAITOK);
 	if (ext == 0)
 		return (ENOMEM);
@@ -473,7 +473,7 @@ user_ldt_alloc(struct mdproc *mdp, int len)
 	    M_SUBPROC, M_WAITOK);
 
 	new_ldt->ldt_len = len = NEW_MAX_LD(len);
-	new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_map,
+	new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
 	    round_page(len * sizeof(union descriptor)), M_WAITOK);
 	if (new_ldt->ldt_base == NULL) {
 		free(new_ldt, M_SUBPROC);
@@ -513,7 +513,7 @@ user_ldt_alloc(struct mdproc *mdp, int len)
 	    M_SUBPROC, M_WAITOK);
 
 	new_ldt->ldt_len = len = NEW_MAX_LD(len);
-	new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_map,
+	new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
 	    len * sizeof(union descriptor), M_WAITOK);
 	if (new_ldt->ldt_base == NULL) {
 		free(new_ldt, M_SUBPROC);
@@ -576,7 +576,7 @@ user_ldt_deref(struct proc_ldt *pldt)
 	mtx_assert(&dt_lock, MA_OWNED);
 	if (--pldt->ldt_refcnt == 0) {
 		mtx_unlock_spin(&dt_lock);
-		kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
+		kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
 		    pldt->ldt_len * sizeof(union descriptor));
 		free(pldt, M_SUBPROC);
 	} else
@@ -855,7 +855,7 @@ i386_ldt_grow(struct thread *td, int len)
 			 * free the new object and return.
 			 */
 			mtx_unlock_spin(&dt_lock);
-			kmem_free(kernel_map,
+			kmem_free(kernel_arena,
 			    (vm_offset_t)new_ldt->ldt_base,
 			    new_ldt->ldt_len * sizeof(union descriptor));
 			free(new_ldt, M_SUBPROC);
@@ -889,7 +889,7 @@ i386_ldt_grow(struct thread *td, int len)
 	mtx_unlock_spin(&dt_lock);
 #endif
 	if (old_ldt_base != NULL_LDT_BASE) {
-		kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
+		kmem_free(kernel_arena, (vm_offset_t)old_ldt_base,
 		    old_ldt_len * sizeof(union descriptor));
 		free(new_ldt, M_SUBPROC);
 	}
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 92e0f52..5d62b1c 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -355,7 +355,7 @@ cpu_thread_clean(struct thread *td)
 		 * XXX do we need to move the TSS off the allocated pages
 		 * before freeing them? (not done here)
 		 */
-		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
+		kva_free((vm_offset_t)pcb->pcb_ext,
 		    ctob(IOPAGES + 1));
 		pcb->pcb_ext = NULL;
 	}
@@ -751,7 +751,7 @@ sf_buf_init(void *arg)
 
 	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
 	TAILQ_INIT(&sf_buf_freelist);
-	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
 	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
 	    M_NOWAIT | M_ZERO);
 	for (i = 0; i < nsfbufs; i++) {
diff --git a/sys/i386/ibcs2/imgact_coff.c b/sys/i386/ibcs2/imgact_coff.c
index b155ef9..1e33536 100644
--- a/sys/i386/ibcs2/imgact_coff.c
+++ b/sys/i386/ibcs2/imgact_coff.c
@@ -146,7 +146,7 @@ load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
 
 	error = copyout(data_buf, (caddr_t) map_addr, copy_len);
 
-	kmem_free_wakeup(exec_map, (vm_offset_t)data_buf, PAGE_SIZE);
+	kmap_free_wakeup(exec_map, (vm_offset_t)data_buf, PAGE_SIZE);
 
 	return error;
 }
@@ -280,7 +280,7 @@ coff_load_file(struct thread *td, char *name)
 	error = 0;
 
 dealloc_and_fail:
-	kmem_free_wakeup(exec_map, (vm_offset_t)ptr, PAGE_SIZE);
+	kmap_free_wakeup(exec_map, (vm_offset_t)ptr, PAGE_SIZE);
 fail:
 	VOP_UNLOCK(vp, 0);
 unlocked_fail:
@@ -417,7 +417,7 @@ exec_coff_imgact(imgp)
 			}
 			free(libbuf, M_TEMP);
 		}
-		kmem_free_wakeup(exec_map, (vm_offset_t)buf, len);
+		kmap_free_wakeup(exec_map, (vm_offset_t)buf, len);
 		if (error)
 			goto fail;
 	}
diff --git a/sys/i386/pci/pci_cfgreg.c b/sys/i386/pci/pci_cfgreg.c
index 14558d8..bddaaa8 100644
--- a/sys/i386/pci/pci_cfgreg.c
+++ b/sys/i386/pci/pci_cfgreg.c
@@ -562,7 +562,7 @@ pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t maxbus)
 	if (pcie_array == NULL)
 		return (0);
 
-	va = kmem_alloc_nofault(kernel_map, PCIE_CACHE * PAGE_SIZE);
+	va = kva_alloc(PCIE_CACHE * PAGE_SIZE);
 	if (va == 0) {
 		free(pcie_array, M_DEVBUF);
 		return (0);
diff --git a/sys/i386/xen/mp_machdep.c b/sys/i386/xen/mp_machdep.c
index fdfa812..05531cb 100644
--- a/sys/i386/xen/mp_machdep.c
+++ b/sys/i386/xen/mp_machdep.c
@@ -746,7 +746,8 @@ start_all_aps(void)
 		/* Get per-cpu data */
 		pc = &__pcpu[bootAP];
 		pcpu_init(pc, bootAP, sizeof(struct pcpu));
-		dpcpu_init((void *)kmem_alloc(kernel_map, DPCPU_SIZE), bootAP);
+		dpcpu_init((void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+		    M_WAITOK | M_ZERO), bootAP);
 		pc->pc_apic_id = cpu_apic_ids[bootAP];
 		pc->pc_prvspace = pc;
 		pc->pc_curthread = 0;
@@ -833,8 +834,8 @@ cpu_initialize_context(unsigned int cpu)
 		pmap_zero_page(m[i]);
 	}
 
-	boot_stack = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
-	newPTD = kmem_alloc_nofault(kernel_map, NPGPTD * PAGE_SIZE);
+	boot_stack = kva_alloc(PAGE_SIZE);
+	newPTD = kva_alloc(NPGPTD * PAGE_SIZE);
 	ma[0] = VM_PAGE_TO_MACH(m[0])|PG_V;
 
 #ifdef PAE
@@ -856,7 +857,7 @@ cpu_initialize_context(unsigned int cpu)
 	    nkpt*sizeof(vm_paddr_t));
 
 	pmap_qremove(newPTD, 4);
-	kmem_free(kernel_map, newPTD, 4 * PAGE_SIZE);
+	kva_free(newPTD, 4 * PAGE_SIZE);
 	/*
 	 * map actual idle stack to boot_stack
 	 */
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 96988e2..208f5f6 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -620,8 +620,7 @@ pmap_init(void)
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
 
 	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
-	pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
-	    PAGE_SIZE * pv_maxchunks);
+	pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
 	if (pv_chunkbase == NULL)
 		panic("pmap_init: not enough kvm for pv chunks");
 	pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
@@ -1460,8 +1459,7 @@ pmap_pinit(pmap_t pmap)
 	 * page directory table.
 	 */
 	if (pmap->pm_pdir == NULL) {
-		pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
-		    NBPTD);
+		pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD);
 		if (pmap->pm_pdir == NULL) {
 			PMAP_LOCK_DESTROY(pmap);
 #ifdef HAMFISTED_LOCKING
@@ -1470,7 +1468,7 @@ pmap_pinit(pmap_t pmap)
 			return (0);
 		}
 #ifdef PAE
-		pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1);
+		pmap->pm_pdpt = (pd_entry_t *)kva_alloc(1);
 #endif
 	}
 
@@ -4022,7 +4020,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 	if (pa < KERNLOAD && pa + size <= KERNLOAD)
 		va = KERNBASE + pa;
 	else
-		va = kmem_alloc_nofault(kernel_map, size);
+		va = kva_alloc(size);
 	if (!va)
 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 
@@ -4057,7 +4055,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
 	base = trunc_page(va);
 	offset = va & PAGE_MASK;
 	size = round_page(offset + size);
-	kmem_free(kernel_map, base, size);
+	kva_free(base, size);
 }
 
 /*