-rw-r--r--  sys/amd64/amd64/mp_machdep.c | 12
-rw-r--r--  sys/amd64/amd64/pmap.c | 7
-rw-r--r--  sys/amd64/amd64/sys_machdep.c | 8
-rw-r--r--  sys/amd64/amd64/vm_machdep.c | 2
-rw-r--r--  sys/arm/arm/bus_space_generic.c | 4
-rw-r--r--  sys/arm/arm/busdma_machdep-v6.c | 6
-rw-r--r--  sys/arm/arm/busdma_machdep.c | 6
-rw-r--r--  sys/arm/arm/mp_machdep.c | 3
-rw-r--r--  sys/arm/arm/pmap-v6.c | 5
-rw-r--r--  sys/arm/arm/pmap.c | 2
-rw-r--r--  sys/arm/arm/vm_machdep.c | 7
-rw-r--r--  sys/arm/at91/at91.c | 2
-rw-r--r--  sys/arm/mv/armadaxp/armadaxp_mp.c | 4
-rw-r--r--  sys/arm/s3c2xx0/s3c2xx0_space.c | 4
-rw-r--r--  sys/arm/xscale/i80321/i80321_space.c | 2
-rw-r--r--  sys/arm/xscale/i8134x/i81342_space.c | 2
-rw-r--r--  sys/arm/xscale/ixp425/ixp425_pci_space.c | 2
-rw-r--r--  sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c | 4
-rw-r--r--  sys/cddl/compat/opensolaris/sys/kmem.h | 3
-rw-r--r--  sys/compat/linux/linux_misc.c | 2
-rw-r--r--  sys/compat/ndis/subr_ntoskrnl.c | 2
-rw-r--r--  sys/dev/bktr/bktr_core.c | 7
-rw-r--r--  sys/dev/drm/drm_scatter.c | 4
-rw-r--r--  sys/dev/drm2/drm_scatter.c | 4
-rw-r--r--  sys/dev/drm2/i915/intel_ringbuffer.c | 9
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_util.c | 5
-rw-r--r--  sys/dev/xen/blkback/blkback.c | 4
-rw-r--r--  sys/dev/xen/netback/netback.c | 4
-rw-r--r--  sys/dev/xen/xenpci/xenpci.c | 2
-rw-r--r--  sys/i386/i386/machdep.c | 8
-rw-r--r--  sys/i386/i386/mp_machdep.c | 6
-rw-r--r--  sys/i386/i386/pmap.c | 15
-rw-r--r--  sys/i386/i386/sys_machdep.c | 16
-rw-r--r--  sys/i386/i386/vm_machdep.c | 4
-rw-r--r--  sys/i386/ibcs2/imgact_coff.c | 6
-rw-r--r--  sys/i386/pci/pci_cfgreg.c | 2
-rw-r--r--  sys/i386/xen/mp_machdep.c | 9
-rw-r--r--  sys/i386/xen/pmap.c | 12
-rw-r--r--  sys/ia64/ia64/mp_machdep.c | 3
-rw-r--r--  sys/kern/imgact_gzip.c | 4
-rw-r--r--  sys/kern/init_main.c | 5
-rw-r--r--  sys/kern/kern_exec.c | 4
-rw-r--r--  sys/kern/kern_malloc.c | 66
-rw-r--r--  sys/kern/kern_mbuf.c | 5
-rw-r--r--  sys/kern/kern_sharedpage.c | 2
-rw-r--r--  sys/kern/subr_busdma_bufalloc.c | 6
-rw-r--r--  sys/kern/subr_vmem.c | 99
-rw-r--r--  sys/kern/vfs_bio.c | 2
-rw-r--r--  sys/mips/mips/mp_machdep.c | 3
-rw-r--r--  sys/mips/mips/pmap.c | 19
-rw-r--r--  sys/mips/mips/vm_machdep.c | 2
-rw-r--r--  sys/mips/sibyte/sb_zbpci.c | 2
-rw-r--r--  sys/ofed/include/linux/dma-mapping.h | 4
-rw-r--r--  sys/ofed/include/linux/gfp.h | 10
-rw-r--r--  sys/ofed/include/linux/linux_compat.c | 4
-rw-r--r--  sys/pc98/pc98/machdep.c | 6
-rw-r--r--  sys/powerpc/aim/mmu_oea.c | 4
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c | 4
-rw-r--r--  sys/powerpc/aim/vm_machdep.c | 2
-rw-r--r--  sys/powerpc/booke/pmap.c | 2
-rw-r--r--  sys/powerpc/booke/vm_machdep.c | 2
-rw-r--r--  sys/powerpc/powerpc/busdma_machdep.c | 4
-rw-r--r--  sys/powerpc/powerpc/mp_machdep.c | 4
-rw-r--r--  sys/sparc64/sparc64/bus_machdep.c | 4
-rw-r--r--  sys/sparc64/sparc64/mem.c | 9
-rw-r--r--  sys/sparc64/sparc64/mp_machdep.c | 6
-rw-r--r--  sys/sparc64/sparc64/pmap.c | 3
-rw-r--r--  sys/sparc64/sparc64/vm_machdep.c | 2
-rw-r--r--  sys/vm/memguard.c | 75
-rw-r--r--  sys/vm/memguard.h | 3
-rw-r--r--  sys/vm/pmap.h | 3
-rw-r--r--  sys/vm/uma_core.c | 12
-rw-r--r--  sys/vm/vm_extern.h | 37
-rw-r--r--  sys/vm/vm_glue.c | 14
-rw-r--r--  sys/vm/vm_init.c | 77
-rw-r--r--  sys/vm/vm_kern.c | 384
-rw-r--r--  sys/vm/vm_kern.h | 3
-rw-r--r--  sys/vm/vm_map.c | 31
-rw-r--r--  sys/vm/vm_map.h | 4
-rw-r--r--  sys/vm/vm_object.c | 2
-rw-r--r--  sys/x86/x86/busdma_machdep.c | 6
-rw-r--r--  sys/xen/gnttab.c | 6
82 files changed, 583 insertions, 572 deletions
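
The diff below retires the kernel_map/kmem_map allocation interfaces in favor of vmem arenas (kernel_arena, kmem_arena) and dedicated KVA helpers. As a reading aid only, the recurring renames are sketched here in plain kernel C; this sketch is not part of the commit, and the wrapper function is hypothetical:

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void
kva_rename_sketch(void)		/* hypothetical, for illustration only */
{
	void *p;
	vm_offset_t va;

	/* Was: kmem_alloc(kernel_map, PAGE_SIZE) / kmem_free(kernel_map, ...) */
	p = (void *)kmem_malloc(kernel_arena, PAGE_SIZE, M_WAITOK | M_ZERO);
	kmem_free(kernel_arena, (vm_offset_t)p, PAGE_SIZE);

	/* Was: kmem_alloc_nofault(kernel_map, PAGE_SIZE) / kmem_free(kernel_map, ...) */
	va = kva_alloc(PAGE_SIZE);
	kva_free(va, PAGE_SIZE);

	/* Was: kmem_alloc_wait(exec_map, ...) / kmem_free_wakeup(exec_map, ...) */
	va = kmap_alloc_wait(exec_map, PAGE_SIZE);
	kmap_free_wakeup(exec_map, va, PAGE_SIZE);
}
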
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 3ad00e7..79aeb9c 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -938,10 +938,14 @@ start_all_aps(void)
apic_id = cpu_apic_ids[cpu];
/* allocate and set up an idle stack data page */
- bootstacks[cpu] = (void *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
- doublefault_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
- nmi_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);
- dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+ bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
+ KSTACK_PAGES * PAGE_SIZE, M_WAITOK | M_ZERO);
+ doublefault_stack = (char *)kmem_malloc(kernel_arena,
+ PAGE_SIZE, M_WAITOK | M_ZERO);
+ nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
+ M_WAITOK | M_ZERO);
+ dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO);
bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
bootAP = cpu;
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 46112b5..33b8e7b 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -860,7 +860,8 @@ pmap_init(void)
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
+ pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
+ M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
}
@@ -5060,7 +5061,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
}
offset = pa & PAGE_MASK;
size = round_page(offset + size);
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
pa = trunc_page(pa);
@@ -5096,7 +5097,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
base = trunc_page(va);
offset = va & PAGE_MASK;
size = round_page(offset + size);
- kmem_free(kernel_map, base, size);
+ kva_free(base, size);
}
/*
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
index ac691d0..5be8af5 100644
--- a/sys/amd64/amd64/sys_machdep.c
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -356,7 +356,7 @@ amd64_set_ioperm(td, uap)
*/
pcb = td->td_pcb;
if (pcb->pcb_tssp == NULL) {
- tssp = (struct amd64tss *)kmem_malloc(kernel_map,
+ tssp = (struct amd64tss *)kmem_malloc(kernel_arena,
ctob(IOPAGES+1), M_WAITOK);
if (tssp == NULL)
return (ENOMEM);
@@ -463,7 +463,7 @@ user_ldt_alloc(struct proc *p, int force)
return (mdp->md_ldt);
mtx_unlock(&dt_lock);
new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
- new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_map,
+ new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
max_ldt_segment * sizeof(struct user_segment_descriptor),
M_WAITOK);
if (new_ldt->ldt_base == NULL) {
@@ -484,7 +484,7 @@ user_ldt_alloc(struct proc *p, int force)
mtx_lock(&dt_lock);
pldt = mdp->md_ldt;
if (pldt != NULL && !force) {
- kmem_free(kernel_map, (vm_offset_t)new_ldt->ldt_base,
+ kmem_free(kernel_arena, (vm_offset_t)new_ldt->ldt_base,
max_ldt_segment * sizeof(struct user_segment_descriptor));
free(new_ldt, M_SUBPROC);
return (pldt);
@@ -529,7 +529,7 @@ user_ldt_derefl(struct proc_ldt *pldt)
{
if (--pldt->ldt_refcnt == 0) {
- kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
+ kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
max_ldt_segment * sizeof(struct user_segment_descriptor));
free(pldt, M_SUBPROC);
}
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index acb5b93..ed0e7e9 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -342,7 +342,7 @@ cpu_thread_clean(struct thread *td)
* Clean TSS/iomap
*/
if (pcb->pcb_tssp != NULL) {
- kmem_free(kernel_map, (vm_offset_t)pcb->pcb_tssp,
+ kva_free((vm_offset_t)pcb->pcb_tssp,
ctob(IOPAGES + 1));
pcb->pcb_tssp = NULL;
}
diff --git a/sys/arm/arm/bus_space_generic.c b/sys/arm/arm/bus_space_generic.c
index 29638845..f269ac8 100644
--- a/sys/arm/arm/bus_space_generic.c
+++ b/sys/arm/arm/bus_space_generic.c
@@ -73,7 +73,7 @@ generic_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
offset = bpa & PAGE_MASK;
startpa = trunc_page(bpa);
- va = kmem_alloc_nofault(kernel_map, endpa - startpa);
+ va = kva_alloc(endpa - startpa);
if (va == 0)
return (ENOMEM);
@@ -118,7 +118,7 @@ generic_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
pmap_kremove(va);
va += PAGE_SIZE;
}
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
void
diff --git a/sys/arm/arm/busdma_machdep-v6.c b/sys/arm/arm/busdma_machdep-v6.c
index 485b44a..b7057a1 100644
--- a/sys/arm/arm/busdma_machdep-v6.c
+++ b/sys/arm/arm/busdma_machdep-v6.c
@@ -696,10 +696,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
*vaddr = uma_zalloc(bufzone->umazone, mflags);
} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
- *vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize,
+ *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, memattr);
} else {
- *vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
+ *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
memattr);
}
@@ -744,7 +744,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
!_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
uma_zfree(bufzone->umazone, vaddr);
else
- kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
+ kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
dmat->map_count--;
free(map, M_DEVBUF);
diff --git a/sys/arm/arm/busdma_machdep.c b/sys/arm/arm/busdma_machdep.c
index 10760b4..6181d35 100644
--- a/sys/arm/arm/busdma_machdep.c
+++ b/sys/arm/arm/busdma_machdep.c
@@ -752,10 +752,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
vaddr = uma_zalloc(bufzone->umazone, mflags);
} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
- vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize,
+ vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, memattr);
} else {
- vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
+ vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
memattr);
}
@@ -798,7 +798,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
!_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
uma_zfree(bufzone->umazone, vaddr);
else
- kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
+ kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
}
static void
diff --git a/sys/arm/arm/mp_machdep.c b/sys/arm/arm/mp_machdep.c
index 2193266..4db6da4 100644
--- a/sys/arm/arm/mp_machdep.c
+++ b/sys/arm/arm/mp_machdep.c
@@ -112,7 +112,8 @@ cpu_mp_start(void)
/* Reserve memory for application processors */
for(i = 0; i < (mp_ncpus - 1); i++)
- dpcpu[i] = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+ dpcpu[i] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO);
temp_pagetable_va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE,
M_TEMP, 0, 0x0, 0xffffffff, L1_TABLE_SIZE, 0);
addr = KERNPHYSADDR;
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 73b899c..158bff2 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -1255,8 +1255,7 @@ pmap_init(void)
pv_entry_high_water = 9 * (pv_entry_max / 10);
pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
- pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
- PAGE_SIZE * pv_maxchunks);
+ pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
if (pv_chunkbase == NULL)
panic("pmap_init: not enough kvm for pv chunks");
@@ -4103,7 +4102,7 @@ pmap_mapdev(vm_offset_t pa, vm_size_t size)
GIANT_REQUIRED;
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0;) {
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 33b643c..b4f107b 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -4718,7 +4718,7 @@ pmap_mapdev(vm_offset_t pa, vm_size_t size)
GIANT_REQUIRED;
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0;) {
diff --git a/sys/arm/arm/vm_machdep.c b/sys/arm/arm/vm_machdep.c
index c6526ab..4f8f00b 100644
--- a/sys/arm/arm/vm_machdep.c
+++ b/sys/arm/arm/vm_machdep.c
@@ -210,7 +210,7 @@ sf_buf_init(void *arg)
sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
TAILQ_INIT(&sf_buf_freelist);
- sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
M_NOWAIT | M_ZERO);
for (i = 0; i < nsfbufs; i++) {
@@ -667,7 +667,8 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
if (zone == l2zone &&
pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
*flags = UMA_SLAB_KMEM;
- ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
+ ret = ((void *)kmem_malloc(kmem_arena, bytes,
+ M_NOWAIT));
return (ret);
}
pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
@@ -701,7 +702,7 @@ uma_small_free(void *mem, int size, u_int8_t flags)
pt_entry_t *pt;
if (flags & UMA_SLAB_KMEM)
- kmem_free(kmem_map, (vm_offset_t)mem, size);
+ kmem_free(kmem_arena, (vm_offset_t)mem, size);
else {
struct arm_small_page *sp;
diff --git a/sys/arm/at91/at91.c b/sys/arm/at91/at91.c
index deb3c38..a3f9556 100644
--- a/sys/arm/at91/at91.c
+++ b/sys/arm/at91/at91.c
@@ -85,7 +85,7 @@ at91_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
endva = va + round_page(size);
/* Free the kernel virtual mapping. */
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
static int
diff --git a/sys/arm/mv/armadaxp/armadaxp_mp.c b/sys/arm/mv/armadaxp/armadaxp_mp.c
index 83332ba..1af598b 100644
--- a/sys/arm/mv/armadaxp/armadaxp_mp.c
+++ b/sys/arm/mv/armadaxp/armadaxp_mp.c
@@ -113,7 +113,7 @@ platform_mp_start_ap(void)
cputype = cpufunc_id();
cputype &= CPU_ID_CPU_MASK;
- smp_boot = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
+ smp_boot = kva_alloc(PAGE_SIZE);
pmap_kenter_nocache(smp_boot, 0xffff0000);
dst = (uint32_t *) smp_boot;
@@ -121,7 +121,7 @@ platform_mp_start_ap(void)
src++, dst++) {
*dst = *src;
}
- kmem_free(kernel_map, smp_boot, PAGE_SIZE);
+ kva_free(smp_boot, PAGE_SIZE);
if (cputype == CPU_ID_MV88SV584X_V7) {
/* Core rev A0 */
diff --git a/sys/arm/s3c2xx0/s3c2xx0_space.c b/sys/arm/s3c2xx0/s3c2xx0_space.c
index 958e658..ab72370 100644
--- a/sys/arm/s3c2xx0/s3c2xx0_space.c
+++ b/sys/arm/s3c2xx0/s3c2xx0_space.c
@@ -182,7 +182,7 @@ s3c2xx0_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
startpa = trunc_page(bpa);
endpa = round_page(bpa + size);
- va = kmem_alloc_nofault(kernel_map, endpa - startpa);
+ va = kva_alloc(endpa - startpa);
if (!va)
return (ENOMEM);
@@ -214,7 +214,7 @@ s3c2xx0_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
pmap_kremove(va);
va += PAGE_SIZE;
}
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
int
diff --git a/sys/arm/xscale/i80321/i80321_space.c b/sys/arm/xscale/i80321/i80321_space.c
index ebb0306..ce1db83 100644
--- a/sys/arm/xscale/i80321/i80321_space.c
+++ b/sys/arm/xscale/i80321/i80321_space.c
@@ -312,7 +312,7 @@ i80321_mem_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
endva = va + round_page(size);
/* Free the kernel virtual mapping. */
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
int
diff --git a/sys/arm/xscale/i8134x/i81342_space.c b/sys/arm/xscale/i8134x/i81342_space.c
index 5b08ef6..bd19a77 100644
--- a/sys/arm/xscale/i8134x/i81342_space.c
+++ b/sys/arm/xscale/i8134x/i81342_space.c
@@ -324,7 +324,7 @@ i81342_mem_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
endva = va + round_page(size);
/* Free the kernel virtual mapping. */
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
#endif
}
diff --git a/sys/arm/xscale/ixp425/ixp425_pci_space.c b/sys/arm/xscale/ixp425/ixp425_pci_space.c
index 4b0ca81..8617e8e 100644
--- a/sys/arm/xscale/ixp425/ixp425_pci_space.c
+++ b/sys/arm/xscale/ixp425/ixp425_pci_space.c
@@ -432,7 +432,7 @@ ixp425_pci_mem_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
endva = va + round_page(size);
/* Free the kernel virtual mapping. */
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
int
diff --git a/sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c b/sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c
index 3bcbc0b..543a3a6 100644
--- a/sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c
+++ b/sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c
@@ -62,6 +62,8 @@ static struct mtx kmem_items_mtx;
MTX_SYSINIT(kmem_items_mtx, &kmem_items_mtx, "kmem_items", MTX_DEF);
#endif /* KMEM_DEBUG */
+#include <sys/vmem.h>
+
void *
zfs_kmem_alloc(size_t size, int kmflags)
{
@@ -135,7 +137,7 @@ uint64_t
kmem_used(void)
{
- return (kmem_map->size);
+ return (vmem_size(kmem_arena, VMEM_ALLOC));
}
static int
diff --git a/sys/cddl/compat/opensolaris/sys/kmem.h b/sys/cddl/compat/opensolaris/sys/kmem.h
index ba8b42d..d6179a1 100644
--- a/sys/cddl/compat/opensolaris/sys/kmem.h
+++ b/sys/cddl/compat/opensolaris/sys/kmem.h
@@ -32,6 +32,7 @@
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/malloc.h>
+#include <sys/vmem.h>
#include <vm/uma.h>
#include <vm/vm.h>
@@ -61,8 +62,6 @@ typedef struct kmem_cache {
void *kc_private;
} kmem_cache_t;
-#define vmem_t void
-
void *zfs_kmem_alloc(size_t size, int kmflags);
void zfs_kmem_free(void *buf, size_t size);
uint64_t kmem_size(void);
diff --git a/sys/compat/linux/linux_misc.c b/sys/compat/linux/linux_misc.c
index 7587272..378bc37 100644
--- a/sys/compat/linux/linux_misc.c
+++ b/sys/compat/linux/linux_misc.c
@@ -467,7 +467,7 @@ cleanup:
/* Release the temporary mapping. */
if (a_out)
- kmem_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);
+ kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);
return (error);
}
diff --git a/sys/compat/ndis/subr_ntoskrnl.c b/sys/compat/ndis/subr_ntoskrnl.c
index 02e8e45..482392d 100644
--- a/sys/compat/ndis/subr_ntoskrnl.c
+++ b/sys/compat/ndis/subr_ntoskrnl.c
@@ -2489,7 +2489,7 @@ MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
break;
}
- ret = (void *)kmem_alloc_contig(kernel_map, size, M_ZERO | M_NOWAIT,
+ ret = (void *)kmem_alloc_contig(kernel_arena, size, M_ZERO | M_NOWAIT,
lowest, highest, PAGE_SIZE, boundary, memattr);
if (ret != NULL)
malloc_type_allocated(M_DEVBUF, round_page(size));
diff --git a/sys/dev/bktr/bktr_core.c b/sys/dev/bktr/bktr_core.c
index bc5bdcf..bb2ae33 100644
--- a/sys/dev/bktr/bktr_core.c
+++ b/sys/dev/bktr/bktr_core.c
@@ -109,6 +109,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
+#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
@@ -1801,8 +1802,10 @@ video_ioctl( bktr_ptr_t bktr, int unit, ioctl_cmd_t cmd, caddr_t arg, struct thr
#else
buf = get_bktr_mem(unit, temp*PAGE_SIZE);
if (buf != 0) {
- kmem_free(kernel_map, bktr->bigbuf,
- (bktr->alloc_pages * PAGE_SIZE));
+ contigfree(
+ (void *)(uintptr_t)bktr->bigbuf,
+ (bktr->alloc_pages * PAGE_SIZE),
+ M_DEVBUF);
#endif
bktr->bigbuf = buf;
diff --git a/sys/dev/drm/drm_scatter.c b/sys/dev/drm/drm_scatter.c
index 9a1a4b1..1a78616 100644
--- a/sys/dev/drm/drm_scatter.c
+++ b/sys/dev/drm/drm_scatter.c
@@ -52,7 +52,7 @@ drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather *request)
entry->busaddr = malloc(entry->pages * sizeof(*entry->busaddr),
DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
- entry->vaddr = kmem_alloc_attr(kernel_map, size, M_WAITOK | M_ZERO,
+ entry->vaddr = kmem_alloc_attr(kernel_arena, size, M_WAITOK | M_ZERO,
0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
if (entry->vaddr == 0) {
drm_sg_cleanup(entry);
@@ -99,7 +99,7 @@ drm_sg_cleanup(struct drm_sg_mem *entry)
return;
if (entry->vaddr != 0)
- kmem_free(kernel_map, entry->vaddr, IDX_TO_OFF(entry->pages));
+ kmem_free(kernel_arena, entry->vaddr, IDX_TO_OFF(entry->pages));
free(entry->busaddr, DRM_MEM_SGLISTS);
free(entry, DRM_MEM_DRIVER);
diff --git a/sys/dev/drm2/drm_scatter.c b/sys/dev/drm2/drm_scatter.c
index ecf231f..3ff923c 100644
--- a/sys/dev/drm2/drm_scatter.c
+++ b/sys/dev/drm2/drm_scatter.c
@@ -52,7 +52,7 @@ drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather *request)
entry->busaddr = malloc(entry->pages * sizeof(*entry->busaddr),
DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
- entry->vaddr = kmem_alloc_attr(kernel_map, size, M_WAITOK | M_ZERO,
+ entry->vaddr = kmem_alloc_attr(kernel_arena, size, M_WAITOK | M_ZERO,
0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
if (entry->vaddr == 0) {
drm_sg_cleanup(entry);
@@ -99,7 +99,7 @@ drm_sg_cleanup(struct drm_sg_mem *entry)
return;
if (entry->vaddr != 0)
- kmem_free(kernel_map, entry->vaddr, IDX_TO_OFF(entry->pages));
+ kmem_free(kernel_arena, entry->vaddr, IDX_TO_OFF(entry->pages));
free(entry->busaddr, DRM_MEM_SGLISTS);
free(entry, DRM_MEM_DRIVER);
diff --git a/sys/dev/drm2/i915/intel_ringbuffer.c b/sys/dev/drm2/i915/intel_ringbuffer.c
index 107a211..7d6bd94 100644
--- a/sys/dev/drm2/i915/intel_ringbuffer.c
+++ b/sys/dev/drm2/i915/intel_ringbuffer.c
@@ -361,7 +361,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
goto err_unref;
pc->gtt_offset = obj->gtt_offset;
- pc->cpu_page = (uint32_t *)kmem_alloc_nofault(kernel_map, PAGE_SIZE);
+ pc->cpu_page = (uint32_t *)kva_alloc(PAGE_SIZE);
if (pc->cpu_page == NULL)
goto err_unpin;
pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
@@ -392,7 +392,7 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
obj = pc->obj;
pmap_qremove((vm_offset_t)pc->cpu_page, 1);
- kmem_free(kernel_map, (uintptr_t)pc->cpu_page, PAGE_SIZE);
+ kva_free((uintptr_t)pc->cpu_page, PAGE_SIZE);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -968,7 +968,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
return;
pmap_qremove((vm_offset_t)ring->status_page.page_addr, 1);
- kmem_free(kernel_map, (vm_offset_t)ring->status_page.page_addr,
+ kva_free((vm_offset_t)ring->status_page.page_addr,
PAGE_SIZE);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -999,8 +999,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
}
ring->status_page.gfx_addr = obj->gtt_offset;
- ring->status_page.page_addr = (void *)kmem_alloc_nofault(kernel_map,
- PAGE_SIZE);
+ ring->status_page.page_addr = (void *)kva_alloc(PAGE_SIZE);
if (ring->status_page.page_addr == NULL) {
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
goto err_unpin;
diff --git a/sys/dev/drm2/ttm/ttm_bo_util.c b/sys/dev/drm2/ttm/ttm_bo_util.c
index b977acb..43b9d68 100644
--- a/sys/dev/drm2/ttm/ttm_bo_util.c
+++ b/sys/dev/drm2/ttm/ttm_bo_util.c
@@ -498,8 +498,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
ttm_io_prot(mem->placement);
map->bo_kmap_type = ttm_bo_map_vmap;
map->num_pages = num_pages;
- map->virtual = (void *)kmem_alloc_nofault(kernel_map,
- num_pages * PAGE_SIZE);
+ map->virtual = (void *)kva_alloc(num_pages * PAGE_SIZE);
if (map->virtual != NULL) {
for (i = 0; i < num_pages; i++) {
/* XXXKIB hack */
@@ -561,7 +560,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
break;
case ttm_bo_map_vmap:
pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
- kmem_free(kernel_map, (vm_offset_t)map->virtual,
+ kva_free((vm_offset_t)map->virtual,
map->num_pages * PAGE_SIZE);
break;
case ttm_bo_map_kmap:
diff --git a/sys/dev/xen/blkback/blkback.c b/sys/dev/xen/blkback/blkback.c
index 9c92309..4208702 100644
--- a/sys/dev/xen/blkback/blkback.c
+++ b/sys/dev/xen/blkback/blkback.c
@@ -2775,7 +2775,7 @@ xbb_free_communication_mem(struct xbb_softc *xbb)
{
if (xbb->kva != 0) {
#ifndef XENHVM
- kmem_free(kernel_map, xbb->kva, xbb->kva_size);
+ kva_free(xbb->kva, xbb->kva_size);
#else
if (xbb->pseudo_phys_res != NULL) {
bus_release_resource(xbb->dev, SYS_RES_MEMORY,
@@ -3014,7 +3014,7 @@ xbb_alloc_communication_mem(struct xbb_softc *xbb)
device_get_nameunit(xbb->dev), xbb->kva_size,
xbb->reqlist_kva_size);
#ifndef XENHVM
- xbb->kva = kmem_alloc_nofault(kernel_map, xbb->kva_size);
+ xbb->kva = kva_alloc(xbb->kva_size);
if (xbb->kva == 0)
return (ENOMEM);
xbb->gnt_base_addr = xbb->kva;
diff --git a/sys/dev/xen/netback/netback.c b/sys/dev/xen/netback/netback.c
index 4c78113..80a1d61 100644
--- a/sys/dev/xen/netback/netback.c
+++ b/sys/dev/xen/netback/netback.c
@@ -621,7 +621,7 @@ xnb_free_communication_mem(struct xnb_softc *xnb)
{
if (xnb->kva != 0) {
#ifndef XENHVM
- kmem_free(kernel_map, xnb->kva, xnb->kva_size);
+ kva_free(xnb->kva, xnb->kva_size);
#else
if (xnb->pseudo_phys_res != NULL) {
bus_release_resource(xnb->dev, SYS_RES_MEMORY,
@@ -811,7 +811,7 @@ xnb_alloc_communication_mem(struct xnb_softc *xnb)
xnb->kva_size += xnb->ring_configs[i].ring_pages * PAGE_SIZE;
}
#ifndef XENHVM
- xnb->kva = kmem_alloc_nofault(kernel_map, xnb->kva_size);
+ xnb->kva = kva_alloc(xnb->kva_size);
if (xnb->kva == 0)
return (ENOMEM);
xnb->gnt_base_addr = xnb->kva;
diff --git a/sys/dev/xen/xenpci/xenpci.c b/sys/dev/xen/xenpci/xenpci.c
index f4c9f73..2d74676 100644
--- a/sys/dev/xen/xenpci/xenpci.c
+++ b/sys/dev/xen/xenpci/xenpci.c
@@ -383,7 +383,7 @@ xenpci_attach(device_t dev)
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
panic("HYPERVISOR_memory_op failed");
- shared_va = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
+ shared_va = kva_alloc(PAGE_SIZE);
pmap_kenter(shared_va, shared_info_pa);
HYPERVISOR_shared_info = (void *) shared_va;
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 51765e9..be391b6 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -3178,9 +3178,9 @@ f00f_hack(void *unused)
printf("Intel Pentium detected, installing workaround for F00F bug\n");
- tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
+ tmp = kmem_malloc(kernel_arena, PAGE_SIZE * 2, M_WAITOK | M_ZERO);
if (tmp == 0)
- panic("kmem_alloc returned 0");
+ panic("kmem_malloc returned 0");
/* Put the problematic entry (#6) at the end of the lower page. */
new_idt = (struct gate_descriptor*)
@@ -3189,9 +3189,7 @@ f00f_hack(void *unused)
r_idt.rd_base = (u_int)new_idt;
lidt(&r_idt);
idt = new_idt;
- if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
- VM_PROT_READ, FALSE) != KERN_SUCCESS)
- panic("vm_map_protect failed");
+ pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 9b832ed..19e67cf 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -959,8 +959,10 @@ start_all_aps(void)
/* allocate and set up a boot stack data page */
bootstacks[cpu] =
- (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
- dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+ (char *)kmem_malloc(kernel_arena, KSTACK_PAGES * PAGE_SIZE,
+ M_WAITOK | M_ZERO);
+ dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO);
/* setup a vector to our boot code */
*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 77b0235..25f98b8 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -655,7 +655,7 @@ pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
- return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
+ return ((void *)kmem_alloc_contig(kernel_arena, bytes, wait, 0x0ULL,
0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif
@@ -783,13 +783,13 @@ pmap_init(void)
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
+ pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
+ M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
- pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
- PAGE_SIZE * pv_maxchunks);
+ pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
if (pv_chunkbase == NULL)
panic("pmap_init: not enough kvm for pv chunks");
pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
@@ -1747,8 +1747,7 @@ pmap_pinit(pmap_t pmap)
* page directory table.
*/
if (pmap->pm_pdir == NULL) {
- pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
- NBPTD);
+ pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD);
if (pmap->pm_pdir == NULL) {
PMAP_LOCK_DESTROY(pmap);
return (0);
@@ -5044,7 +5043,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
if (pa < KERNLOAD && pa + size <= KERNLOAD)
va = KERNBASE + pa;
else
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
@@ -5079,7 +5078,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
base = trunc_page(va);
offset = va & PAGE_MASK;
size = round_page(offset + size);
- kmem_free(kernel_map, base, size);
+ kva_free(base, size);
}
/*
diff --git a/sys/i386/i386/sys_machdep.c b/sys/i386/i386/sys_machdep.c
index 00d74d3..adf6ac4 100644
--- a/sys/i386/i386/sys_machdep.c
+++ b/sys/i386/i386/sys_machdep.c
@@ -164,7 +164,7 @@ sysarch(td, uap)
break;
case I386_SET_LDT:
if (kargs.largs.descs != NULL) {
- lp = (union descriptor *)kmem_malloc(kernel_map,
+ lp = (union descriptor *)kmem_malloc(kernel_arena,
kargs.largs.num * sizeof(union descriptor),
M_WAITOK);
if (lp == NULL) {
@@ -175,7 +175,7 @@ sysarch(td, uap)
kargs.largs.num * sizeof(union descriptor));
if (error == 0)
error = i386_set_ldt(td, &kargs.largs, lp);
- kmem_free(kernel_map, (vm_offset_t)lp,
+ kmem_free(kernel_arena, (vm_offset_t)lp,
kargs.largs.num * sizeof(union descriptor));
} else {
error = i386_set_ldt(td, &kargs.largs, NULL);
@@ -299,7 +299,7 @@ i386_extend_pcb(struct thread *td)
0 /* granularity */
};
- ext = (struct pcb_ext *)kmem_malloc(kernel_map, ctob(IOPAGES+1),
+ ext = (struct pcb_ext *)kmem_malloc(kernel_arena, ctob(IOPAGES+1),
M_WAITOK);
if (ext == 0)
return (ENOMEM);
@@ -473,7 +473,7 @@ user_ldt_alloc(struct mdproc *mdp, int len)
M_SUBPROC, M_WAITOK);
new_ldt->ldt_len = len = NEW_MAX_LD(len);
- new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_map,
+ new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
round_page(len * sizeof(union descriptor)), M_WAITOK);
if (new_ldt->ldt_base == NULL) {
free(new_ldt, M_SUBPROC);
@@ -513,7 +513,7 @@ user_ldt_alloc(struct mdproc *mdp, int len)
M_SUBPROC, M_WAITOK);
new_ldt->ldt_len = len = NEW_MAX_LD(len);
- new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_map,
+ new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
len * sizeof(union descriptor), M_WAITOK);
if (new_ldt->ldt_base == NULL) {
free(new_ldt, M_SUBPROC);
@@ -576,7 +576,7 @@ user_ldt_deref(struct proc_ldt *pldt)
mtx_assert(&dt_lock, MA_OWNED);
if (--pldt->ldt_refcnt == 0) {
mtx_unlock_spin(&dt_lock);
- kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
+ kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
pldt->ldt_len * sizeof(union descriptor));
free(pldt, M_SUBPROC);
} else
@@ -855,7 +855,7 @@ i386_ldt_grow(struct thread *td, int len)
* free the new object and return.
*/
mtx_unlock_spin(&dt_lock);
- kmem_free(kernel_map,
+ kmem_free(kernel_arena,
(vm_offset_t)new_ldt->ldt_base,
new_ldt->ldt_len * sizeof(union descriptor));
free(new_ldt, M_SUBPROC);
@@ -889,7 +889,7 @@ i386_ldt_grow(struct thread *td, int len)
mtx_unlock_spin(&dt_lock);
#endif
if (old_ldt_base != NULL_LDT_BASE) {
- kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
+ kmem_free(kernel_arena, (vm_offset_t)old_ldt_base,
old_ldt_len * sizeof(union descriptor));
free(new_ldt, M_SUBPROC);
}
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 92e0f52..5d62b1c 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -355,7 +355,7 @@ cpu_thread_clean(struct thread *td)
* XXX do we need to move the TSS off the allocated pages
* before freeing them? (not done here)
*/
- kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
+ kva_free((vm_offset_t)pcb->pcb_ext,
ctob(IOPAGES + 1));
pcb->pcb_ext = NULL;
}
@@ -751,7 +751,7 @@ sf_buf_init(void *arg)
sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
TAILQ_INIT(&sf_buf_freelist);
- sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
M_NOWAIT | M_ZERO);
for (i = 0; i < nsfbufs; i++) {
diff --git a/sys/i386/ibcs2/imgact_coff.c b/sys/i386/ibcs2/imgact_coff.c
index b155ef9..1e33536 100644
--- a/sys/i386/ibcs2/imgact_coff.c
+++ b/sys/i386/ibcs2/imgact_coff.c
@@ -146,7 +146,7 @@ load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
error = copyout(data_buf, (caddr_t) map_addr, copy_len);
- kmem_free_wakeup(exec_map, (vm_offset_t)data_buf, PAGE_SIZE);
+ kmap_free_wakeup(exec_map, (vm_offset_t)data_buf, PAGE_SIZE);
return error;
}
@@ -280,7 +280,7 @@ coff_load_file(struct thread *td, char *name)
error = 0;
dealloc_and_fail:
- kmem_free_wakeup(exec_map, (vm_offset_t)ptr, PAGE_SIZE);
+ kmap_free_wakeup(exec_map, (vm_offset_t)ptr, PAGE_SIZE);
fail:
VOP_UNLOCK(vp, 0);
unlocked_fail:
@@ -417,7 +417,7 @@ exec_coff_imgact(imgp)
}
free(libbuf, M_TEMP);
}
- kmem_free_wakeup(exec_map, (vm_offset_t)buf, len);
+ kmap_free_wakeup(exec_map, (vm_offset_t)buf, len);
if (error)
goto fail;
}
diff --git a/sys/i386/pci/pci_cfgreg.c b/sys/i386/pci/pci_cfgreg.c
index 14558d8..bddaaa8 100644
--- a/sys/i386/pci/pci_cfgreg.c
+++ b/sys/i386/pci/pci_cfgreg.c
@@ -562,7 +562,7 @@ pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t maxbus)
if (pcie_array == NULL)
return (0);
- va = kmem_alloc_nofault(kernel_map, PCIE_CACHE * PAGE_SIZE);
+ va = kva_alloc(PCIE_CACHE * PAGE_SIZE);
if (va == 0) {
free(pcie_array, M_DEVBUF);
return (0);
diff --git a/sys/i386/xen/mp_machdep.c b/sys/i386/xen/mp_machdep.c
index fdfa812..05531cb 100644
--- a/sys/i386/xen/mp_machdep.c
+++ b/sys/i386/xen/mp_machdep.c
@@ -746,7 +746,8 @@ start_all_aps(void)
/* Get per-cpu data */
pc = &__pcpu[bootAP];
pcpu_init(pc, bootAP, sizeof(struct pcpu));
- dpcpu_init((void *)kmem_alloc(kernel_map, DPCPU_SIZE), bootAP);
+ dpcpu_init((void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO), bootAP);
pc->pc_apic_id = cpu_apic_ids[bootAP];
pc->pc_prvspace = pc;
pc->pc_curthread = 0;
@@ -833,8 +834,8 @@ cpu_initialize_context(unsigned int cpu)
pmap_zero_page(m[i]);
}
- boot_stack = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
- newPTD = kmem_alloc_nofault(kernel_map, NPGPTD * PAGE_SIZE);
+ boot_stack = kva_alloc(PAGE_SIZE);
+ newPTD = kva_alloc(NPGPTD * PAGE_SIZE);
ma[0] = VM_PAGE_TO_MACH(m[0])|PG_V;
#ifdef PAE
@@ -856,7 +857,7 @@ cpu_initialize_context(unsigned int cpu)
nkpt*sizeof(vm_paddr_t));
pmap_qremove(newPTD, 4);
- kmem_free(kernel_map, newPTD, 4 * PAGE_SIZE);
+ kva_free(newPTD, 4 * PAGE_SIZE);
/*
* map actual idle stack to boot_stack
*/
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 96988e2..208f5f6 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -620,8 +620,7 @@ pmap_init(void)
pv_entry_high_water = 9 * (pv_entry_max / 10);
pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
- pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
- PAGE_SIZE * pv_maxchunks);
+ pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
if (pv_chunkbase == NULL)
panic("pmap_init: not enough kvm for pv chunks");
pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
@@ -1460,8 +1459,7 @@ pmap_pinit(pmap_t pmap)
* page directory table.
*/
if (pmap->pm_pdir == NULL) {
- pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
- NBPTD);
+ pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD);
if (pmap->pm_pdir == NULL) {
PMAP_LOCK_DESTROY(pmap);
#ifdef HAMFISTED_LOCKING
@@ -1470,7 +1468,7 @@ pmap_pinit(pmap_t pmap)
return (0);
}
#ifdef PAE
- pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1);
+ pmap->pm_pdpt = (pd_entry_t *)kva_alloc(1);
#endif
}
@@ -4022,7 +4020,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
if (pa < KERNLOAD && pa + size <= KERNLOAD)
va = KERNBASE + pa;
else
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
@@ -4057,7 +4055,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
base = trunc_page(va);
offset = va & PAGE_MASK;
size = round_page(offset + size);
- kmem_free(kernel_map, base, size);
+ kva_free(base, size);
}
/*
diff --git a/sys/ia64/ia64/mp_machdep.c b/sys/ia64/ia64/mp_machdep.c
index 8f92460..8e71b35 100644
--- a/sys/ia64/ia64/mp_machdep.c
+++ b/sys/ia64/ia64/mp_machdep.c
@@ -304,7 +304,8 @@ cpu_mp_add(u_int acpi_id, u_int id, u_int eid)
if (cpuid != 0) {
pc = (struct pcpu *)malloc(sizeof(*pc), M_SMP, M_WAITOK);
pcpu_init(pc, cpuid, sizeof(*pc));
- dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+ dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO);
dpcpu_init(dpcpu, cpuid);
} else
pc = pcpup;
diff --git a/sys/kern/imgact_gzip.c b/sys/kern/imgact_gzip.c
index 7c48ac6..230854b 100644
--- a/sys/kern/imgact_gzip.c
+++ b/sys/kern/imgact_gzip.c
@@ -137,7 +137,7 @@ exec_gzip_imgact(imgp)
}
if (igz.inbuf)
- kmem_free_wakeup(exec_map, (vm_offset_t)igz.inbuf, PAGE_SIZE);
+ kmap_free_wakeup(exec_map, (vm_offset_t)igz.inbuf, PAGE_SIZE);
if (igz.error || error) {
printf("Output=%lu ", igz.output);
printf("Inflate_error=%d igz.error=%d where=%d\n",
@@ -310,7 +310,7 @@ NextByte(void *vp)
return igz->inbuf[(igz->idx++) - igz->offset];
}
if (igz->inbuf)
- kmem_free_wakeup(exec_map, (vm_offset_t)igz->inbuf, PAGE_SIZE);
+ kmap_free_wakeup(exec_map, (vm_offset_t)igz->inbuf, PAGE_SIZE);
igz->offset = igz->idx & ~PAGE_MASK;
error = vm_mmap(exec_map, /* map */
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 1eb3647..247e431 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -461,11 +461,6 @@ proc0_init(void *dummy __unused)
sleepinit();
/*
- * additional VM structures
- */
- vm_init2();
-
- /*
* Create process 0 (the swapper).
*/
LIST_INSERT_HEAD(&allproc, p, p_list);
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index c0e1435..156c80d 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -1192,7 +1192,7 @@ int
exec_alloc_args(struct image_args *args)
{
- args->buf = (char *)kmem_alloc_wait(exec_map, PATH_MAX + ARG_MAX);
+ args->buf = (char *)kmap_alloc_wait(exec_map, PATH_MAX + ARG_MAX);
return (args->buf != NULL ? 0 : ENOMEM);
}
@@ -1201,7 +1201,7 @@ exec_free_args(struct image_args *args)
{
if (args->buf != NULL) {
- kmem_free_wakeup(exec_map, (vm_offset_t)args->buf,
+ kmap_free_wakeup(exec_map, (vm_offset_t)args->buf,
PATH_MAX + ARG_MAX);
args->buf = NULL;
}
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 5e4a502..63d8386 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -62,9 +62,11 @@ __FBSDID("$FreeBSD$");
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>
+#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/pmap.h>
+#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
@@ -113,12 +115,7 @@ MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
-static void kmeminit(void *);
-SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL);
-
static struct malloc_type *kmemstatistics;
-static vm_offset_t kmembase;
-static vm_offset_t kmemlimit;
static int kmemcount;
#define KMEM_ZSHIFT 4
@@ -203,12 +200,12 @@ SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale,
static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
- sysctl_kmem_map_size, "LU", "Current kmem_map allocation size");
+ sysctl_kmem_map_size, "LU", "Current kmem allocation size");
static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
- sysctl_kmem_map_free, "LU", "Largest contiguous free range in kmem_map");
+ sysctl_kmem_map_free, "LU", "Free space in kmem");
/*
* The malloc_mtx protects the kmemstatistics linked list.
@@ -253,7 +250,7 @@ sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
u_long size;
- size = kmem_map->size;
+ size = vmem_size(kmem_arena, VMEM_ALLOC);
return (sysctl_handle_long(oidp, &size, 0, req));
}
@@ -262,10 +259,7 @@ sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
u_long size;
- vm_map_lock_read(kmem_map);
- size = kmem_map->root != NULL ? kmem_map->root->max_free :
- kmem_map->max_offset - kmem_map->min_offset;
- vm_map_unlock_read(kmem_map);
+ size = vmem_size(kmem_arena, VMEM_FREE);
return (sysctl_handle_long(oidp, &size, 0, req));
}
@@ -420,7 +414,7 @@ contigmalloc(unsigned long size, struct malloc_type *type, int flags,
{
void *ret;
- ret = (void *)kmem_alloc_contig(kernel_map, size, flags, low, high,
+ ret = (void *)kmem_alloc_contig(kernel_arena, size, flags, low, high,
alignment, boundary, VM_MEMATTR_DEFAULT);
if (ret != NULL)
malloc_type_allocated(type, round_page(size));
@@ -438,7 +432,7 @@ void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
- kmem_free(kernel_map, (vm_offset_t)addr, size);
+ kmem_free(kernel_arena, (vm_offset_t)addr, size);
malloc_type_freed(type, round_page(size));
}
@@ -681,18 +675,24 @@ reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
}
/*
- * Initialize the kernel memory allocator
+ * Wake the page daemon when we exhaust KVA. It will call the lowmem handler
+ * and uma_reclaim() callbacks in a context that is safe.
*/
-/* ARGSUSED*/
static void
-kmeminit(void *dummy)
+kmem_reclaim(vmem_t *vm, int flags)
+{
+
+ pagedaemon_wakeup();
+}
+
+/*
+ * Initialize the kernel memory arena.
+ */
+void
+kmeminit(void)
{
- uint8_t indx;
u_long mem_size, tmp;
- int i;
- mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
-
/*
* Try to auto-tune the kernel memory size, so that it is
* more applicable for a wider range of machine sizes. The
@@ -745,9 +745,9 @@ kmeminit(void *dummy)
#else
tmp = vm_kmem_size;
#endif
- kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
- tmp, TRUE);
- kmem_map->system_map = 1;
+ vmem_init(kmem_arena, "kmem arena", kva_alloc(tmp), tmp, PAGE_SIZE,
+ PAGE_SIZE * 16, 0);
+ vmem_set_reclaim(kmem_arena, kmem_reclaim);
#ifdef DEBUG_MEMGUARD
/*
@@ -755,8 +755,23 @@ kmeminit(void *dummy)
* replacement allocator used for detecting tamper-after-free
* scenarios as they occur. It is only used for debugging.
*/
- memguard_init(kmem_map);
+ memguard_init(kmem_arena);
#endif
+}
+
+/*
+ * Initialize the kernel memory allocator
+ */
+/* ARGSUSED*/
+static void
+mallocinit(void *dummy)
+{
+ int i;
+ uint8_t indx;
+
+ mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
+
+ kmeminit();
uma_startup2();
@@ -787,6 +802,7 @@ kmeminit(void *dummy)
}
}
+SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, mallocinit, NULL);
void
malloc_init(void *data)
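
For orientation, kmeminit() above creates kmem_arena with the stock vmem interfaces (vmem_init, vmem_set_reclaim). A hypothetical private arena could be set up the same way; the names and sizes in this sketch are illustrative and not part of the commit:

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

static struct vmem example_arena_storage;	/* hypothetical arena */
static vmem_t *example_arena = &example_arena_storage;

/* Reclaim callback: nudge the page daemon when the arena runs low. */
static void
example_reclaim(vmem_t *vm, int flags)
{

	pagedaemon_wakeup();
}

static void
example_arena_setup(vm_size_t size)
{
	vmem_addr_t addr;

	/* Seed the arena with a span of KVA; PAGE_SIZE quantum, small qcache. */
	vmem_init(example_arena, "example arena", kva_alloc(size), size,
	    PAGE_SIZE, PAGE_SIZE * 16, 0);
	vmem_set_reclaim(example_arena, example_reclaim);

	/* Address space only; pages would still have to be backed separately. */
	if (vmem_xalloc(example_arena, PAGE_SIZE, 0, 0, 0, VMEM_ADDR_MIN,
	    VMEM_ADDR_MAX, M_NOWAIT | M_BESTFIT, &addr) == 0)
		vmem_xfree(example_arena, addr, PAGE_SIZE);
}
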
diff --git a/sys/kern/kern_mbuf.c b/sys/kern/kern_mbuf.c
index 9e85806..df9b854 100644
--- a/sys/kern/kern_mbuf.c
+++ b/sys/kern/kern_mbuf.c
@@ -121,8 +121,7 @@ tunable_mbinit(void *dummy)
* available kernel memory (physical or kmem).
* At most it can be 3/4 of available kernel memory.
*/
- realmem = qmin((quad_t)physmem * PAGE_SIZE,
- vm_map_max(kmem_map) - vm_map_min(kmem_map));
+ realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size);
maxmbufmem = realmem / 2;
TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
if (maxmbufmem > realmem / 4 * 3)
@@ -395,7 +394,7 @@ mbuf_jumbo_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
- return ((void *)kmem_alloc_contig(kernel_map, bytes, wait,
+ return ((void *)kmem_alloc_contig(kernel_arena, bytes, wait,
(vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}
diff --git a/sys/kern/kern_sharedpage.c b/sys/kern/kern_sharedpage.c
index 20b9038..622592b 100644
--- a/sys/kern/kern_sharedpage.c
+++ b/sys/kern/kern_sharedpage.c
@@ -112,7 +112,7 @@ shared_page_init(void *dummy __unused)
VM_ALLOC_ZERO);
m->valid = VM_PAGE_BITS_ALL;
VM_OBJECT_WUNLOCK(shared_page_obj);
- addr = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
+ addr = kva_alloc(PAGE_SIZE);
pmap_qenter(addr, &m, 1);
shared_page_mapping = (char *)addr;
}
diff --git a/sys/kern/subr_busdma_bufalloc.c b/sys/kern/subr_busdma_bufalloc.c
index 9406d95..a80a233 100644
--- a/sys/kern/subr_busdma_bufalloc.c
+++ b/sys/kern/subr_busdma_bufalloc.c
@@ -152,10 +152,10 @@ busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, int size, u_int8_t *pflag,
{
#ifdef VM_MEMATTR_UNCACHEABLE
- /* Inform UMA that this allocator uses kernel_map/object. */
+ /* Inform UMA that this allocator uses kernel_arena/object. */
*pflag = UMA_SLAB_KERNEL;
- return ((void *)kmem_alloc_attr(kernel_map, size, wait, 0,
+ return ((void *)kmem_alloc_attr(kernel_arena, size, wait, 0,
BUS_SPACE_MAXADDR, VM_MEMATTR_UNCACHEABLE));
#else
@@ -169,6 +169,6 @@ void
busdma_bufalloc_free_uncacheable(void *item, int size, u_int8_t pflag)
{
- kmem_free(kernel_map, (vm_offset_t)item, size);
+ kmem_free(kernel_arena, (vm_offset_t)item, size);
}
diff --git a/sys/kern/subr_vmem.c b/sys/kern/subr_vmem.c
index 6aa8ad3..d3a758d 100644
--- a/sys/kern/subr_vmem.c
+++ b/sys/kern/subr_vmem.c
@@ -61,6 +61,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
+#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
@@ -213,8 +214,12 @@ static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
static uma_zone_t vmem_bt_zone;
/* boot time arena storage. */
+static struct vmem kernel_arena_storage;
+static struct vmem kmem_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
+vmem_t *kernel_arena = &kernel_arena_storage;
+vmem_t *kmem_arena = &kmem_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;
@@ -231,6 +236,14 @@ bt_fill(vmem_t *vm, int flags)
VMEM_ASSERT_LOCKED(vm);
/*
+ * Only allow the kmem arena to dip into reserve tags. It is the
+ * vmem where new tags come from.
+ */
+ flags &= BT_FLAGS;
+ if (vm != kmem_arena)
+ flags &= ~M_USE_RESERVE;
+
+ /*
* Loop until we meet the reserve. To minimize the lock shuffle
* and prevent simultaneous fills we first try a NOWAIT regardless
* of the caller's flags. Specify M_NOVM so we don't recurse while
@@ -545,6 +558,77 @@ qc_drain(vmem_t *vm)
zone_drain(vm->vm_qcache[i].qc_cache);
}
+#ifndef UMA_MD_SMALL_ALLOC
+
+static struct mtx_padalign vmem_bt_lock;
+
+/*
+ * vmem_bt_alloc: Allocate a new page of boundary tags.
+ *
+ * On architectures with uma_small_alloc there is no recursion; no address
+ * space need be allocated to allocate boundary tags. For the others, we
+ * must handle recursion. Boundary tags are necessary to allocate new
+ * boundary tags.
+ *
+ * UMA guarantees that enough tags are held in reserve to allocate a new
+ * page of kva. We dip into this reserve by specifying M_USE_RESERVE only
+ * when allocating the page to hold new boundary tags. In this way the
+ * reserve is automatically filled by the allocation that uses the reserve.
+ *
+ * We still have to guarantee that the new tags are allocated atomically since
+ * many threads may try concurrently. The bt_lock provides this guarantee.
+ * We convert WAITOK allocations to NOWAIT and then handle the blocking here
+ * on failure. It's ok to return NULL for a WAITOK allocation as UMA will
+ * loop again after checking to see if we lost the race to allocate.
+ *
+ * There is a small race between vmem_bt_alloc() returning the page and the
+ * zone lock being acquired to add the page to the zone. For WAITOK
+ * allocations we just pause briefly. NOWAIT may experience a transient
+ * failure. To alleviate this we permit a small number of simultaneous
+ * fills to proceed concurrently so NOWAIT is less likely to fail unless
+ * we are really out of KVA.
+ */
+static void *
+vmem_bt_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
+{
+ vmem_addr_t addr;
+
+ *pflag = UMA_SLAB_KMEM;
+
+ /*
+ * Single thread boundary tag allocation so that the address space
+ * and memory are added in one atomic operation.
+ */
+ mtx_lock(&vmem_bt_lock);
+ if (vmem_xalloc(kmem_arena, bytes, 0, 0, 0, VMEM_ADDR_MIN,
+ VMEM_ADDR_MAX, M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT,
+ &addr) == 0) {
+ if (kmem_back(kmem_object, addr, bytes,
+ M_NOWAIT | M_USE_RESERVE) == 0) {
+ mtx_unlock(&vmem_bt_lock);
+ return ((void *)addr);
+ }
+ vmem_xfree(kmem_arena, addr, bytes);
+ mtx_unlock(&vmem_bt_lock);
+ /*
+ * Out of memory, not address space. This may not even be
+ * possible due to M_USE_RESERVE page allocation.
+ */
+ if (wait & M_WAITOK)
+ VM_WAIT;
+ return (NULL);
+ }
+ mtx_unlock(&vmem_bt_lock);
+ /*
+ * We're either out of address space or lost a fill race.
+ */
+ if (wait & M_WAITOK)
+ pause("btalloc", 1);
+
+ return (NULL);
+}
+#endif
+
void
vmem_startup(void)
{
@@ -553,6 +637,17 @@ vmem_startup(void)
vmem_bt_zone = uma_zcreate("vmem btag",
sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_VM);
+#ifndef UMA_MD_SMALL_ALLOC
+ mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
+ uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
+ /*
+ * Reserve enough tags to allocate new tags. We allow multiple
+ * CPUs to attempt to allocate new tags concurrently to limit
+ * false restarts in UMA.
+ */
+ uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
+ uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
+#endif
}
/* ---- rehash */
@@ -661,15 +756,15 @@ vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
btspan->bt_type = type;
btspan->bt_start = addr;
btspan->bt_size = size;
+ bt_insseg_tail(vm, btspan);
btfree = bt_alloc(vm);
btfree->bt_type = BT_TYPE_FREE;
btfree->bt_start = addr;
btfree->bt_size = size;
-
- bt_insseg_tail(vm, btspan);
bt_insseg(vm, btfree, btspan);
bt_insfree(vm, btfree);
+
vm->vm_size += size;
}
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 205e9b3..93fb27d 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -856,7 +856,7 @@ bufinit(void)
bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
- unmapped_buf = (caddr_t)kmem_alloc_nofault(kernel_map, MAXPHYS);
+ unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
}
#ifdef INVARIANTS
diff --git a/sys/mips/mips/mp_machdep.c b/sys/mips/mips/mp_machdep.c
index 88c2357..2a6bbb4 100644
--- a/sys/mips/mips/mp_machdep.c
+++ b/sys/mips/mips/mp_machdep.c
@@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$");
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/lock.h>
+#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
@@ -182,7 +183,7 @@ start_ap(int cpuid)
int cpus, ms;
cpus = mp_naps;
- dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+ dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO);
mips_sync();
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 3ef5186..48efb06 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -3015,7 +3015,7 @@ pmap_mapdev(vm_paddr_t pa, vm_size_t size)
offset = pa & PAGE_MASK;
size = roundup(size + offset, PAGE_SIZE);
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
pa = trunc_page(pa);
@@ -3043,7 +3043,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
base = trunc_page(va);
offset = va & PAGE_MASK;
size = roundup(size + offset, PAGE_SIZE);
- kmem_free(kernel_map, base, size);
+ kva_free(base, size);
#endif
}
@@ -3149,21 +3149,6 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
*addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset;
}
-/*
- * Increase the starting virtual address of the given mapping so
- * that it is aligned to not be the second page in a TLB entry.
- * This routine assumes that the length is appropriately-sized so
- * that the allocation does not share a TLB entry at all if required.
- */
-void
-pmap_align_tlb(vm_offset_t *addr)
-{
- if ((*addr & PAGE_SIZE) == 0)
- return;
- *addr += PAGE_SIZE;
- return;
-}
-
#ifdef DDB
DB_SHOW_COMMAND(ptable, ddb_pid_dump)
{
diff --git a/sys/mips/mips/vm_machdep.c b/sys/mips/mips/vm_machdep.c
index 0323bb3..86dfde9 100644
--- a/sys/mips/mips/vm_machdep.c
+++ b/sys/mips/mips/vm_machdep.c
@@ -514,7 +514,7 @@ sf_buf_init(void *arg)
mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
SLIST_INIT(&sf_freelist.sf_head);
- sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
M_NOWAIT | M_ZERO);
for (i = 0; i < nsfbufs; i++) {
diff --git a/sys/mips/sibyte/sb_zbpci.c b/sys/mips/sibyte/sb_zbpci.c
index f4df353..7852a41 100644
--- a/sys/mips/sibyte/sb_zbpci.c
+++ b/sys/mips/sibyte/sb_zbpci.c
@@ -137,7 +137,7 @@ zbpci_attach(device_t dev)
/*
* Allocate KVA for accessing PCI config space.
*/
- va = kmem_alloc_nofault(kernel_map, PAGE_SIZE * mp_ncpus);
+ va = kva_alloc(PAGE_SIZE * mp_ncpus);
if (va == 0) {
device_printf(dev, "Cannot allocate virtual addresses for "
"config space access.\n");
diff --git a/sys/ofed/include/linux/dma-mapping.h b/sys/ofed/include/linux/dma-mapping.h
index c653524..0f0ad9d 100644
--- a/sys/ofed/include/linux/dma-mapping.h
+++ b/sys/ofed/include/linux/dma-mapping.h
@@ -130,7 +130,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
else
high = BUS_SPACE_MAXADDR_32BIT;
align = PAGE_SIZE << get_order(size);
- mem = (void *)kmem_alloc_contig(kmem_map, size, flag, 0, high, align,
+ mem = (void *)kmem_alloc_contig(kmem_arena, size, flag, 0, high, align,
0, VM_MEMATTR_DEFAULT);
if (mem)
*dma_handle = vtophys(mem);
@@ -144,7 +144,7 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
- kmem_free(kmem_map, (vm_offset_t)cpu_addr, size);
+ kmem_free(kmem_arena, (vm_offset_t)cpu_addr, size);
}
/* XXX This only works with no iommu. */
diff --git a/sys/ofed/include/linux/gfp.h b/sys/ofed/include/linux/gfp.h
index 8c36c15..8d2b228 100644
--- a/sys/ofed/include/linux/gfp.h
+++ b/sys/ofed/include/linux/gfp.h
@@ -65,7 +65,7 @@ static inline unsigned long
_get_page(gfp_t mask)
{
- return kmem_malloc(kmem_map, PAGE_SIZE, mask);
+ return kmem_malloc(kmem_arena, PAGE_SIZE, mask);
}
#define get_zeroed_page(mask) _get_page((mask) | M_ZERO)
@@ -78,7 +78,7 @@ free_page(unsigned long page)
if (page == 0)
return;
- kmem_free(kmem_map, page, PAGE_SIZE);
+ kmem_free(kmem_arena, page, PAGE_SIZE);
}
static inline void
@@ -88,7 +88,7 @@ __free_page(struct page *m)
if (m->object != kmem_object)
panic("__free_page: Freed page %p not allocated via wrappers.",
m);
- kmem_free(kmem_map, (vm_offset_t)page_address(m), PAGE_SIZE);
+ kmem_free(kmem_arena, (vm_offset_t)page_address(m), PAGE_SIZE);
}
static inline void
@@ -99,7 +99,7 @@ __free_pages(void *p, unsigned int order)
if (p == 0)
return;
size = PAGE_SIZE << order;
- kmem_free(kmem_map, (vm_offset_t)p, size);
+ kmem_free(kmem_arena, (vm_offset_t)p, size);
}
/*
@@ -114,7 +114,7 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
size_t size;
size = PAGE_SIZE << order;
- page = kmem_alloc_contig(kmem_map, size, gfp_mask, 0, -1,
+ page = kmem_alloc_contig(kmem_arena, size, gfp_mask, 0, -1,
size, 0, VM_MEMATTR_DEFAULT);
if (page == 0)
return (NULL);
diff --git a/sys/ofed/include/linux/linux_compat.c b/sys/ofed/include/linux/linux_compat.c
index 7167b1c..95bd6c8 100644
--- a/sys/ofed/include/linux/linux_compat.c
+++ b/sys/ofed/include/linux/linux_compat.c
@@ -647,7 +647,7 @@ vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
size_t size;
size = count * PAGE_SIZE;
- off = kmem_alloc_nofault(kernel_map, size);
+ off = kva_alloc(size);
if (off == 0)
return (NULL);
vmmap_add((void *)off, size);
@@ -665,7 +665,7 @@ vunmap(void *addr)
if (vmmap == NULL)
return;
pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
- kmem_free(kernel_map, (vm_offset_t)addr, vmmap->vm_size);
+ kva_free((vm_offset_t)addr, vmmap->vm_size);
kfree(vmmap);
}
diff --git a/sys/pc98/pc98/machdep.c b/sys/pc98/pc98/machdep.c
index 7a6c951..92f0dcc 100644
--- a/sys/pc98/pc98/machdep.c
+++ b/sys/pc98/pc98/machdep.c
@@ -2479,7 +2479,7 @@ f00f_hack(void *unused)
printf("Intel Pentium detected, installing workaround for F00F bug\n");
- tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
+ tmp = kmem_malloc(kernel_arena, PAGE_SIZE * 2, M_WAITOK | M_ZERO);
if (tmp == 0)
panic("kmem_alloc returned 0");
@@ -2490,9 +2490,7 @@ f00f_hack(void *unused)
r_idt.rd_base = (u_int)new_idt;
lidt(&r_idt);
idt = new_idt;
- if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
- VM_PROT_READ, FALSE) != KERN_SUCCESS)
- panic("vm_map_protect failed");
+ pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 538642d..7ce9e54 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -2591,7 +2591,7 @@ moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
return ((void *) pa);
}
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("moea_mapdev: Couldn't alloc kernel virtual memory");
@@ -2619,7 +2619,7 @@ moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
base = trunc_page(va);
offset = va & PAGE_MASK;
size = roundup(offset + size, PAGE_SIZE);
- kmem_free(kernel_map, base, size);
+ kva_free(base, size);
}
}
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 5f3e4e0..baff276 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -2544,7 +2544,7 @@ moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
offset = pa & PAGE_MASK;
size = roundup2(offset + size, PAGE_SIZE);
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
@@ -2575,7 +2575,7 @@ moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
offset = va & PAGE_MASK;
size = roundup2(offset + size, PAGE_SIZE);
- kmem_free(kernel_map, base, size);
+ kva_free(base, size);
}
void
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index 785f22a..2deb4cb 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -253,7 +253,7 @@ sf_buf_init(void *arg)
sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
TAILQ_INIT(&sf_buf_freelist);
- sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP, M_NOWAIT | M_ZERO);
for (i = 0; i < nsfbufs; i++) {
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 7e3c29e..9c07d95 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -2681,7 +2681,7 @@ mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
base = trunc_page(va);
offset = va & PAGE_MASK;
size = roundup(offset + size, PAGE_SIZE);
- kmem_free(kernel_map, base, size);
+ kva_free(base, size);
}
}
diff --git a/sys/powerpc/booke/vm_machdep.c b/sys/powerpc/booke/vm_machdep.c
index 3303794..f3fcf21 100644
--- a/sys/powerpc/booke/vm_machdep.c
+++ b/sys/powerpc/booke/vm_machdep.c
@@ -260,7 +260,7 @@ sf_buf_init(void *arg)
sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
TAILQ_INIT(&sf_buf_freelist);
- sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP, M_NOWAIT | M_ZERO);
for (i = 0; i < nsfbufs; i++) {
diff --git a/sys/powerpc/powerpc/busdma_machdep.c b/sys/powerpc/powerpc/busdma_machdep.c
index 07508fb..4608729 100644
--- a/sys/powerpc/powerpc/busdma_machdep.c
+++ b/sys/powerpc/powerpc/busdma_machdep.c
@@ -532,7 +532,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
* multi-seg allocations yet though.
* XXX Certain AGP hardware does.
*/
- *vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
+ *vaddr = (void *)kmem_alloc_contig(kmem_arena, dmat->maxsize,
mflags, 0ul, dmat->lowaddr, dmat->alignment ?
dmat->alignment : 1ul, dmat->boundary, attr);
(*mapp)->contigalloc = 1;
@@ -560,7 +560,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
if (!map->contigalloc)
free(vaddr, M_DEVBUF);
else
- kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
+ kmem_free(kmem_arena, (vm_offset_t)vaddr, dmat->maxsize);
bus_dmamap_destroy(dmat, map);
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
diff --git a/sys/powerpc/powerpc/mp_machdep.c b/sys/powerpc/powerpc/mp_machdep.c
index db20a6f..b6d977b 100644
--- a/sys/powerpc/powerpc/mp_machdep.c
+++ b/sys/powerpc/powerpc/mp_machdep.c
@@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
+#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
@@ -163,7 +164,8 @@ cpu_mp_start(void)
void *dpcpu;
pc = &__pcpu[cpu.cr_cpuid];
- dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+ dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO);
pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
dpcpu_init(dpcpu, cpu.cr_cpuid);
} else {
diff --git a/sys/sparc64/sparc64/bus_machdep.c b/sys/sparc64/sparc64/bus_machdep.c
index 7f5e76b..415f43d 100644
--- a/sys/sparc64/sparc64/bus_machdep.c
+++ b/sys/sparc64/sparc64/bus_machdep.c
@@ -655,7 +655,7 @@ sparc64_bus_mem_map(bus_space_tag_t tag, bus_addr_t addr, bus_size_t size,
if (vaddr != 0L)
sva = trunc_page(vaddr);
else {
- if ((sva = kmem_alloc_nofault(kernel_map, size)) == 0)
+ if ((sva = kva_alloc(size)) == 0)
panic("%s: cannot allocate virtual memory", __func__);
}
@@ -701,7 +701,7 @@ sparc64_bus_mem_unmap(bus_space_tag_t tag, bus_space_handle_t handle,
for (va = sva; va < endva; va += PAGE_SIZE)
pmap_kremove_flags(va);
tlb_range_demap(kernel_pmap, sva, sva + size - 1);
- kmem_free(kernel_map, sva, size);
+ kva_free(sva, size);
return (0);
}
diff --git a/sys/sparc64/sparc64/mem.c b/sys/sparc64/sparc64/mem.c
index 68c397e..b78f705 100644
--- a/sys/sparc64/sparc64/mem.c
+++ b/sys/sparc64/sparc64/mem.c
@@ -137,8 +137,11 @@ memrw(struct cdev *dev, struct uio *uio, int flags)
if (ova == 0) {
if (dcache_color_ignore == 0)
colors = DCACHE_COLORS;
- ova = kmem_alloc_wait(kernel_map,
- PAGE_SIZE * colors);
+ ova = kva_alloc(PAGE_SIZE * colors);
+ if (ova == 0) {
+ error = ENOMEM;
+ break;
+ }
}
if (colors != 1 && m->md.color != -1)
va = ova + m->md.color * PAGE_SIZE;
@@ -179,6 +182,6 @@ memrw(struct cdev *dev, struct uio *uio, int flags)
/* else panic! */
}
if (ova != 0)
- kmem_free_wakeup(kernel_map, ova, PAGE_SIZE * colors);
+ kva_free(ova, PAGE_SIZE * colors);
return (error);
}
diff --git a/sys/sparc64/sparc64/mp_machdep.c b/sys/sparc64/sparc64/mp_machdep.c
index fccfb6f..8d2282e 100644
--- a/sys/sparc64/sparc64/mp_machdep.c
+++ b/sys/sparc64/sparc64/mp_machdep.c
@@ -336,10 +336,12 @@ ap_start(phandle_t node, u_int mid, u_int cpu_impl)
cpuid_to_mid[cpuid] = mid;
cpu_identify(csa->csa_ver, clock, cpuid);
- va = kmem_alloc(kernel_map, PCPU_PAGES * PAGE_SIZE);
+ va = kmem_malloc(kernel_arena, PCPU_PAGES * PAGE_SIZE,
+ M_WAITOK | M_ZERO);
pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
pcpu_init(pc, cpuid, sizeof(*pc));
- dpcpu_init((void *)kmem_alloc(kernel_map, DPCPU_SIZE), cpuid);
+ dpcpu_init((void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO), cpuid);
pc->pc_addr = va;
pc->pc_clock = clock;
pc->pc_impl = cpu_impl;
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 0b1e6b2..ebeedaa 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1210,8 +1210,7 @@ pmap_pinit(pmap_t pm)
* Allocate KVA space for the TSB.
*/
if (pm->pm_tsb == NULL) {
- pm->pm_tsb = (struct tte *)kmem_alloc_nofault(kernel_map,
- TSB_BSIZE);
+ pm->pm_tsb = (struct tte *)kva_alloc(TSB_BSIZE);
if (pm->pm_tsb == NULL) {
PMAP_LOCK_DESTROY(pm);
return (0);
diff --git a/sys/sparc64/sparc64/vm_machdep.c b/sys/sparc64/sparc64/vm_machdep.c
index 8cec001..261c131 100644
--- a/sys/sparc64/sparc64/vm_machdep.c
+++ b/sys/sparc64/sparc64/vm_machdep.c
@@ -421,7 +421,7 @@ sf_buf_init(void *arg)
mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
SLIST_INIT(&sf_freelist.sf_head);
- sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
M_NOWAIT | M_ZERO);
for (i = 0; i < nsfbufs; i++) {
diff --git a/sys/vm/memguard.c b/sys/vm/memguard.c
index b1740c3..ea2d925 100644
--- a/sys/vm/memguard.c
+++ b/sys/vm/memguard.c
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
+#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/uma.h>
@@ -99,8 +100,9 @@ SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
memguard_sysctl_desc, "A", "Short description of memory type to monitor");
-static vm_map_t memguard_map = NULL;
+static vmem_t *memguard_map = NULL;
static vm_offset_t memguard_cursor;
+static vm_offset_t memguard_base;
static vm_size_t memguard_mapsize;
static vm_size_t memguard_physlimit;
static u_long memguard_wasted;
@@ -112,7 +114,7 @@ static u_long memguard_fail_pgs;
SYSCTL_ULONG(_vm_memguard, OID_AUTO, cursor, CTLFLAG_RD,
&memguard_cursor, 0, "MemGuard cursor");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, mapsize, CTLFLAG_RD,
- &memguard_mapsize, 0, "MemGuard private vm_map size");
+ &memguard_mapsize, 0, "MemGuard private arena size");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, phys_limit, CTLFLAG_RD,
&memguard_physlimit, 0, "Limit on MemGuard memory consumption");
SYSCTL_ULONG(_vm_memguard, OID_AUTO, wasted, CTLFLAG_RD,
@@ -200,21 +202,18 @@ memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
* out of a single VM map (contiguous chunk of address space).
*/
void
-memguard_init(vm_map_t parent_map)
+memguard_init(vmem_t *parent)
{
- vm_offset_t base, limit;
-
- memguard_map = kmem_suballoc(parent_map, &base, &limit,
- memguard_mapsize, FALSE);
- memguard_map->system_map = 1;
- KASSERT(memguard_mapsize == limit - base,
- ("Expected %lu, got %lu", (u_long)memguard_mapsize,
- (u_long)(limit - base)));
+ vm_offset_t base;
+
+ vmem_alloc(parent, memguard_mapsize, M_WAITOK, &base);
+ memguard_map = vmem_create("memguard arena", base, memguard_mapsize,
+ PAGE_SIZE, 0, M_WAITOK);
memguard_cursor = base;
+ memguard_base = base;
printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
- printf("\tMEMGUARD map limit: 0x%lx\n", (u_long)limit);
printf("\tMEMGUARD map size: %jd KBytes\n",
(uintmax_t)memguard_mapsize >> 10);
}
@@ -230,11 +229,13 @@ memguard_sysinit(void)
parent = SYSCTL_STATIC_CHILDREN(_vm_memguard);
SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapstart", CTLFLAG_RD,
- &memguard_map->min_offset, "MemGuard KVA base");
+ &memguard_base, "MemGuard KVA base");
SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "maplimit", CTLFLAG_RD,
- &memguard_map->max_offset, "MemGuard KVA end");
+ &memguard_mapsize, "MemGuard KVA size");
+#if 0
SYSCTL_ADD_ULONG(NULL, parent, OID_AUTO, "mapused", CTLFLAG_RD,
&memguard_map->size, "MemGuard KVA used");
+#endif
}
SYSINIT(memguard, SI_SUB_KLD, SI_ORDER_ANY, memguard_sysinit, NULL);
@@ -263,6 +264,21 @@ v2sizep(vm_offset_t va)
return ((u_long *)&p->pageq.tqe_next);
}
+static u_long *
+v2sizev(vm_offset_t va)
+{
+ vm_paddr_t pa;
+ struct vm_page *p;
+
+ pa = pmap_kextract(va);
+ if (pa == 0)
+ panic("MemGuard detected double-free of %p", (void *)va);
+ p = PHYS_TO_VM_PAGE(pa);
+ KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
+ ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
+ return ((u_long *)&p->pageq.tqe_prev);
+}
+
/*
* Allocate a single object of specified size with specified flags
* (either M_WAITOK or M_NOWAIT).
@@ -289,14 +305,13 @@ memguard_alloc(unsigned long req_size, int flags)
if (do_guard)
size_v += 2 * PAGE_SIZE;
- vm_map_lock(memguard_map);
/*
* When we pass our memory limit, reject sub-page allocations.
* Page-size and larger allocations will use the same amount
* of physical memory whether we allocate or hand off to
* uma_large_alloc(), so keep those.
*/
- if (memguard_map->size >= memguard_physlimit &&
+ if (vmem_size(memguard_map, VMEM_ALLOC) >= memguard_physlimit &&
req_size < PAGE_SIZE) {
addr = (vm_offset_t)NULL;
memguard_fail_pgs++;
@@ -313,33 +328,34 @@ memguard_alloc(unsigned long req_size, int flags)
* map, unless vm_map_findspace() is tweaked.
*/
for (;;) {
- rv = vm_map_findspace(memguard_map, memguard_cursor,
- size_v, &addr);
- if (rv == KERN_SUCCESS)
+ if (vmem_xalloc(memguard_map, size_v, 0, 0, 0, memguard_cursor,
+ VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &addr) == 0)
break;
/*
* The map has no space. This may be due to
* fragmentation, or because the cursor is near the
* end of the map.
*/
- if (memguard_cursor == vm_map_min(memguard_map)) {
+ if (memguard_cursor == memguard_base) {
memguard_fail_kva++;
addr = (vm_offset_t)NULL;
goto out;
}
memguard_wrap++;
- memguard_cursor = vm_map_min(memguard_map);
+ memguard_cursor = memguard_base;
}
if (do_guard)
addr += PAGE_SIZE;
- rv = kmem_back(memguard_map, addr, size_p, flags);
+ rv = kmem_back(kmem_object, addr, size_p, flags);
if (rv != KERN_SUCCESS) {
+ vmem_xfree(memguard_map, addr, size_v);
memguard_fail_pgs++;
addr = (vm_offset_t)NULL;
goto out;
}
- memguard_cursor = addr + size_p;
+ memguard_cursor = addr + size_v;
*v2sizep(trunc_page(addr)) = req_size;
+ *v2sizev(trunc_page(addr)) = size_v;
memguard_succ++;
if (req_size < PAGE_SIZE) {
memguard_wasted += (PAGE_SIZE - req_size);
@@ -354,7 +370,6 @@ memguard_alloc(unsigned long req_size, int flags)
}
}
out:
- vm_map_unlock(memguard_map);
return ((void *)addr);
}
@@ -363,7 +378,7 @@ is_memguard_addr(void *addr)
{
vm_offset_t a = (vm_offset_t)(uintptr_t)addr;
- return (a >= memguard_map->min_offset && a < memguard_map->max_offset);
+ return (a >= memguard_base && a < memguard_base + memguard_mapsize);
}
/*
@@ -373,12 +388,13 @@ void
memguard_free(void *ptr)
{
vm_offset_t addr;
- u_long req_size, size;
+ u_long req_size, size, sizev;
char *temp;
int i;
addr = trunc_page((uintptr_t)ptr);
req_size = *v2sizep(addr);
+ sizev = *v2sizev(addr);
size = round_page(req_size);
/*
@@ -400,11 +416,12 @@ memguard_free(void *ptr)
* vm_map lock to serialize updates to memguard_wasted, since
* we had the lock at increment.
*/
- vm_map_lock(memguard_map);
+ kmem_unback(kmem_object, addr, size);
+ if (sizev > size)
+ addr -= PAGE_SIZE;
+ vmem_xfree(memguard_map, addr, sizev);
if (req_size < PAGE_SIZE)
memguard_wasted -= (PAGE_SIZE - req_size);
- (void)vm_map_delete(memguard_map, addr, addr + size);
- vm_map_unlock(memguard_map);
}
/*
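
A hedged sketch (not from the patch) of the arena pattern MemGuard adopts
above: carve a span out of a parent arena, wrap it in a private vmem, and
satisfy individual requests with vmem_xalloc()/vmem_xfree().  The name
"sub_arena" and the helper functions are hypothetical:

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/vmem.h>

static vmem_t *sub_arena;

static void
sub_arena_init(vmem_t *parent, vm_size_t arena_size)
{
        vmem_addr_t base;

        /* Reserve a contiguous span of KVA from the parent arena. */
        (void)vmem_alloc(parent, arena_size, M_BESTFIT | M_WAITOK, &base);
        sub_arena = vmem_create("sub arena", base, arena_size, PAGE_SIZE,
            0, M_WAITOK);
}

static vm_offset_t
sub_arena_get(vm_size_t size)
{
        vmem_addr_t addr;

        if (vmem_xalloc(sub_arena, size, 0, 0, 0, VMEM_ADDR_MIN,
            VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &addr) != 0)
                return (0);
        return (addr);
}
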
diff --git a/sys/vm/memguard.h b/sys/vm/memguard.h
index 9ec4ffd..9e99e98 100644
--- a/sys/vm/memguard.h
+++ b/sys/vm/memguard.h
@@ -33,10 +33,11 @@
struct malloc_type;
struct vm_map;
+struct vmem;
#ifdef DEBUG_MEMGUARD
unsigned long memguard_fudge(unsigned long, const struct vm_map *);
-void memguard_init(struct vm_map *);
+void memguard_init(struct vmem *);
void *memguard_alloc(unsigned long, int);
void *memguard_realloc(void *, unsigned long, struct malloc_type *, int);
void memguard_free(void *);
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index c64a549..c0f80a7 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -100,9 +100,6 @@ extern vm_offset_t kernel_vm_end;
void pmap_activate(struct thread *td);
void pmap_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
vm_size_t);
-#if defined(__mips__)
-void pmap_align_tlb(vm_offset_t *);
-#endif
void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
void pmap_clear_modify(vm_page_t m);
void pmap_clear_reference(vm_page_t m);
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 900209e..5db1816 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -1015,7 +1015,7 @@ page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
void *p; /* Returned page */
*pflag = UMA_SLAB_KMEM;
- p = (void *) kmem_malloc(kmem_map, bytes, wait);
+ p = (void *) kmem_malloc(kmem_arena, bytes, wait);
return (p);
}
@@ -1097,16 +1097,16 @@ noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
static void
page_free(void *mem, int size, uint8_t flags)
{
- vm_map_t map;
+ struct vmem *vmem;
if (flags & UMA_SLAB_KMEM)
- map = kmem_map;
+ vmem = kmem_arena;
else if (flags & UMA_SLAB_KERNEL)
- map = kernel_map;
+ vmem = kernel_arena;
else
panic("UMA: page_free used with invalid flags %d", flags);
- kmem_free(map, (vm_offset_t)mem, size);
+ kmem_free(vmem, (vm_offset_t)mem, size);
}
/*
@@ -2983,7 +2983,7 @@ uma_zone_reserve_kva(uma_zone_t zone, int count)
#else
if (1) {
#endif
- kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
+ kva = kva_alloc(pages * UMA_SLAB_SIZE);
if (kva == 0)
return (0);
} else
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 4a2dc04..fee55d0 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -36,27 +36,40 @@
struct proc;
struct vmspace;
struct vnode;
+struct vmem;
#ifdef _KERNEL
-int kernacc(void *, int, int);
-vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
-vm_offset_t kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags,
+/* These operate on kernel virtual addresses only. */
+vm_offset_t kva_alloc(vm_size_t);
+void kva_free(vm_offset_t, vm_size_t);
+
+/* These operate on pageable virtual addresses. */
+vm_offset_t kmap_alloc_wait(vm_map_t, vm_size_t);
+void kmap_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
+
+/* These operate on virtual addresses backed by memory. */
+vm_offset_t kmem_alloc_attr(struct vmem *, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
-vm_offset_t kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags,
+vm_offset_t kmem_alloc_contig(struct vmem *, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr);
-vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
-vm_offset_t kmem_alloc_nofault_space(vm_map_t, vm_size_t, int);
-vm_offset_t kmem_alloc_wait(vm_map_t, vm_size_t);
-void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
-void kmem_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
-void kmem_init(vm_offset_t, vm_offset_t);
-vm_offset_t kmem_malloc(vm_map_t map, vm_size_t size, int flags);
-int kmem_back(vm_map_t, vm_offset_t, vm_size_t, int);
+vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
+void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
+
+/* This provides memory for previously allocated address space. */
+int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int);
+void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
+
+/* Bootstrapping. */
vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
boolean_t);
+void kmem_init(vm_offset_t, vm_offset_t);
+void kmem_init_zero_region(void);
+void kmeminit(void);
+
void swapout_procs(int);
+int kernacc(void *, int, int);
int useracc(void *, int, int);
int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 948e2b3..94e07f9 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -67,6 +67,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
+#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
@@ -76,6 +77,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
+#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
@@ -359,11 +361,13 @@ vm_thread_new(struct thread *td, int pages)
* We need to align the kstack's mapped address to fit within
* a single TLB entry.
*/
- ks = kmem_alloc_nofault_space(kernel_map,
- (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
+ if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
+ PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
+ M_BESTFIT | M_NOWAIT, &ks)) {
+ ks = 0;
+ }
#else
- ks = kmem_alloc_nofault(kernel_map,
- (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
+ ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
if (ks == 0) {
printf("vm_thread_new: kstack allocation failed\n");
@@ -422,7 +426,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
}
VM_OBJECT_WUNLOCK(ksobj);
vm_object_deallocate(ksobj);
- kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
+ kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}
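
A minimal sketch (not from the patch) of the explicitly aligned KVA request
that the kstack path above makes with vmem_xalloc(); "aligned_kva_sketch"
and "npages" are hypothetical, and the alignment here is simply the
allocation size rather than the kstack-specific value used above:

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/vmem.h>
#include <vm/vm_kern.h>

static vm_offset_t
aligned_kva_sketch(int npages)
{
        vmem_addr_t va;

        /* Ask for KVA whose start address is aligned to the request size. */
        if (vmem_xalloc(kernel_arena, npages * PAGE_SIZE,
            npages * PAGE_SIZE /* align */, 0 /* phase */, 0 /* nocross */,
            VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &va) != 0)
                return (0);
        return (va);
}
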
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index 2c4bcb6..b539f9d 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -70,6 +70,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
+#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/selinfo.h>
@@ -101,6 +102,26 @@ static void vm_mem_init(void *);
SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_FIRST, vm_mem_init, NULL);
/*
+ * Import kva into the kernel arena.
+ */
+static int
+kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp)
+{
+ vm_offset_t addr;
+ int result;
+
+ addr = vm_map_min(kernel_map);
+ result = vm_map_find(kernel_map, NULL, 0, &addr, size,
+ VMFS_ALIGNED_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
+ if (result != KERN_SUCCESS)
+ return (ENOMEM);
+
+ *addrp = addr;
+
+ return (0);
+}
+
+/*
* vm_init initializes the virtual memory system.
* This is done only by the first cpu up.
*
@@ -111,6 +132,7 @@ static void
vm_mem_init(dummy)
void *dummy;
{
+
/*
* Initializes resident memory structures. From here on, all physical
* memory is accounted for, and we use only virtual addresses.
@@ -125,6 +147,19 @@ vm_mem_init(dummy)
vm_object_init();
vm_map_startup();
kmem_init(virtual_avail, virtual_end);
+
+ /*
+ * Initialize the kernel_arena. This can grow on demand.
+ */
+ vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
+ vmem_set_import(kernel_arena, kva_import, NULL, NULL,
+#if VM_NRESERVLEVEL > 0
+ 1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT));
+#else
+ PAGE_SIZE);
+#endif
+
+ kmem_init_zero_region();
pmap_init();
vm_pager_init();
}
@@ -138,7 +173,6 @@ vm_ksubmap_init(struct kva_md_info *kmi)
long physmem_est;
vm_offset_t minaddr;
vm_offset_t maxaddr;
- vm_map_t clean_map;
/*
* Allocate space for system data structures.
@@ -146,8 +180,6 @@ vm_ksubmap_init(struct kva_md_info *kmi)
* As pages of kernel virtual memory are allocated, "v" is incremented.
* As pages of memory are allocated and cleared,
* "firstaddr" is incremented.
- * An index into the kernel page table corresponding to the
- * virtual memory address maintained in "v" is kept in "mapaddr".
*/
/*
@@ -173,7 +205,8 @@ again:
*/
if (firstaddr == 0) {
size = (vm_size_t)v;
- firstaddr = kmem_alloc(kernel_map, round_page(size));
+ firstaddr = kmem_malloc(kernel_arena, round_page(size),
+ M_ZERO | M_WAITOK);
if (firstaddr == 0)
panic("startup: no room for tables");
goto again;
@@ -185,31 +218,49 @@ again:
if ((vm_size_t)((char *)v - firstaddr) != size)
panic("startup: table size inconsistency");
+ /*
+ * Allocate the clean map to hold all of the paging and I/O virtual
+ * memory.
+ */
size = (long)nbuf * BKVASIZE + (long)nswbuf * MAXPHYS +
(long)bio_transient_maxcnt * MAXPHYS;
- clean_map = kmem_suballoc(kernel_map, &kmi->clean_sva, &kmi->clean_eva,
- size, TRUE);
+ kmi->clean_sva = firstaddr = kva_alloc(size);
+ kmi->clean_eva = firstaddr + size;
+ /*
+ * Allocate the buffer arena.
+ */
size = (long)nbuf * BKVASIZE;
- kmi->buffer_sva = kmem_alloc_nofault(clean_map, size);
+ kmi->buffer_sva = firstaddr;
kmi->buffer_eva = kmi->buffer_sva + size;
vmem_init(buffer_arena, "buffer arena", kmi->buffer_sva, size,
PAGE_SIZE, 0, 0);
+ firstaddr += size;
+ /*
+ * Now swap kva.
+ */
+ swapbkva = firstaddr;
size = (long)nswbuf * MAXPHYS;
- swapbkva = kmem_alloc_nofault(clean_map, size);
- if (!swapbkva)
- panic("Not enough clean_map VM space for pager buffers");
+ firstaddr += size;
+ /*
+ * And optionally transient bio space.
+ */
if (bio_transient_maxcnt != 0) {
size = (long)bio_transient_maxcnt * MAXPHYS;
vmem_init(transient_arena, "transient arena",
- kmem_alloc_nofault(clean_map, size),
- size, PAGE_SIZE, 0, 0);
+ firstaddr, size, PAGE_SIZE, 0, 0);
+ firstaddr += size;
}
+ if (firstaddr != kmi->clean_eva)
+ panic("Clean map calculation incorrect");
+
+ /*
+ * Allocate the pageable submaps.
+ */
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
exec_map_entries * round_page(PATH_MAX + ARG_MAX), FALSE);
pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva,
FALSE);
}
-
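
A hedged sketch (not from the patch) of the import-hook pattern that
kva_import() plugs into: an arena created empty can be told to pull fresh
spans from a parent resource whenever vmem_alloc() runs dry.  The names
"example_arena" and "example_import" are hypothetical; the commit itself
wires kva_import into kernel_arena so the kernel map is consulted only to
grow the arena:

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/vmem.h>

static vmem_t *example_arena;

static int
example_import(void *arg, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
        vmem_t *parent = arg;

        /* Hand a span from the parent arena up to the importing arena. */
        return (vmem_alloc(parent, size, M_BESTFIT | flags, addrp));
}

static void
example_arena_init(vmem_t *parent)
{

        example_arena = vmem_create("example arena", 0, 0, PAGE_SIZE, 0,
            M_WAITOK);
        vmem_set_import(example_arena, example_import, NULL, parent,
            PAGE_SIZE);
}
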
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 42cd699..c7cb409 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -74,9 +74,11 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
+#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
@@ -86,7 +88,6 @@ __FBSDID("$FreeBSD$");
#include <vm/uma.h>
vm_map_t kernel_map;
-vm_map_t kmem_map;
vm_map_t exec_map;
vm_map_t pipe_map;
@@ -105,7 +106,7 @@ SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
"Max kernel address");
/*
- * kmem_alloc_nofault:
+ * kva_alloc:
*
* Allocate a virtual address range with no underlying object and
* no initial mapping to physical memory. Any mapping from this
@@ -114,94 +115,35 @@ SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
* a mapping on demand through vm_fault() will result in a panic.
*/
vm_offset_t
-kmem_alloc_nofault(map, size)
- vm_map_t map;
+kva_alloc(size)
vm_size_t size;
{
vm_offset_t addr;
- int result;
size = round_page(size);
- addr = vm_map_min(map);
- result = vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
- VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
- if (result != KERN_SUCCESS) {
+ if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
return (0);
- }
+
return (addr);
}
/*
- * kmem_alloc_nofault_space:
+ * kva_free:
*
- * Allocate a virtual address range with no underlying object and
- * no initial mapping to physical memory within the specified
- * address space. Any mapping from this range to physical memory
- * must be explicitly created prior to its use, typically with
- * pmap_qenter(). Any attempt to create a mapping on demand
- * through vm_fault() will result in a panic.
+ * Release a region of kernel virtual address space allocated
+ * with kva_alloc.  No physical pages are freed; any mappings
+ * must have been removed by the caller beforehand.
+ *
+ * This routine may not block.
*/
-vm_offset_t
-kmem_alloc_nofault_space(map, size, find_space)
- vm_map_t map;
- vm_size_t size;
- int find_space;
-{
+void
+kva_free(addr, size)
vm_offset_t addr;
- int result;
-
- size = round_page(size);
- addr = vm_map_min(map);
- result = vm_map_find(map, NULL, 0, &addr, size, find_space,
- VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
- if (result != KERN_SUCCESS) {
- return (0);
- }
- return (addr);
-}
-
-/*
- * Allocate wired-down memory in the kernel's address map
- * or a submap.
- */
-vm_offset_t
-kmem_alloc(map, size)
- vm_map_t map;
vm_size_t size;
{
- vm_offset_t addr;
- vm_offset_t offset;
size = round_page(size);
-
- /*
- * Use the kernel object for wired-down kernel pages. Assume that no
- * region of the kernel object is referenced more than once.
- */
-
- /*
- * Locate sufficient space in the map. This will give us the final
- * virtual address for the new memory, and thus will tell us the
- * offset within the kernel map.
- */
- vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
- vm_map_unlock(map);
- return (0);
- }
- offset = addr - VM_MIN_KERNEL_ADDRESS;
- vm_object_reference(kernel_object);
- vm_map_insert(map, kernel_object, offset, addr, addr + size,
- VM_PROT_ALL, VM_PROT_ALL, 0);
- vm_map_unlock(map);
-
- /*
- * And finally, mark the data as non-pageable.
- */
- (void) vm_map_wire(map, addr, addr + size,
- VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
-
- return (addr);
+ vmem_free(kernel_arena, addr, size);
}
/*
@@ -213,62 +155,57 @@ kmem_alloc(map, size)
* given flags, then the pages are zeroed before they are mapped.
*/
vm_offset_t
-kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
+kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, vm_memattr_t memattr)
{
- vm_object_t object = kernel_object;
+ vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
vm_offset_t addr;
- vm_ooffset_t end_offset, offset;
+ vm_ooffset_t offset;
vm_page_t m;
int pflags, tries;
+ int i;
size = round_page(size);
- vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
- vm_map_unlock(map);
+ if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
return (0);
- }
offset = addr - VM_MIN_KERNEL_ADDRESS;
- vm_object_reference(object);
- vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
- VM_PROT_ALL, 0);
- pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
+ pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
VM_OBJECT_WLOCK(object);
- end_offset = offset + size;
- for (; offset < end_offset; offset += PAGE_SIZE) {
+ for (i = 0; i < size; i += PAGE_SIZE) {
tries = 0;
retry:
- m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags, 1,
- low, high, PAGE_SIZE, 0, memattr);
+ m = vm_page_alloc_contig(object, OFF_TO_IDX(offset + i),
+ pflags, 1, low, high, PAGE_SIZE, 0, memattr);
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
- vm_map_unlock(map);
vm_pageout_grow_cache(tries, low, high);
- vm_map_lock(map);
VM_OBJECT_WLOCK(object);
tries++;
goto retry;
}
-
- /*
- * Since the pages that were allocated by any previous
- * iterations of this loop are not busy, they can be
- * freed by vm_object_page_remove(), which is called
- * by vm_map_delete().
+ /*
+ * Unmap and free the pages.
*/
- vm_map_delete(map, addr, addr + size);
- vm_map_unlock(map);
+ if (i != 0)
+ pmap_remove(kernel_pmap, addr, addr + i);
+ while (i != 0) {
+ i -= PAGE_SIZE;
+ m = vm_page_lookup(object,
+ OFF_TO_IDX(offset + i));
+ vm_page_unwire(m, 0);
+ vm_page_free(m);
+ }
+ vmem_free(vmem, addr, size);
return (0);
}
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
+ pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
+ TRUE);
}
VM_OBJECT_WUNLOCK(object);
- vm_map_unlock(map);
- vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
- VM_MAP_WIRE_NOHOLES);
return (addr);
}
@@ -281,27 +218,21 @@ retry:
* mapped.
*/
vm_offset_t
-kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
+kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr)
{
- vm_object_t object = kernel_object;
- vm_offset_t addr;
+ vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
+ vm_offset_t addr, tmp;
vm_ooffset_t offset;
vm_page_t end_m, m;
int pflags, tries;
size = round_page(size);
- vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
- vm_map_unlock(map);
+ if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
return (0);
- }
offset = addr - VM_MIN_KERNEL_ADDRESS;
- vm_object_reference(object);
- vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
- VM_PROT_ALL, 0);
- pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
+ pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
VM_OBJECT_WLOCK(object);
tries = 0;
retry:
@@ -310,50 +241,28 @@ retry:
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
- vm_map_unlock(map);
vm_pageout_grow_cache(tries, low, high);
- vm_map_lock(map);
VM_OBJECT_WLOCK(object);
tries++;
goto retry;
}
- vm_map_delete(map, addr, addr + size);
- vm_map_unlock(map);
+ vmem_free(vmem, addr, size);
return (0);
}
end_m = m + atop(size);
+ tmp = addr;
for (; m < end_m; m++) {
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
+ pmap_enter(kernel_pmap, tmp, VM_PROT_ALL, m, VM_PROT_ALL, true);
+ tmp += PAGE_SIZE;
}
VM_OBJECT_WUNLOCK(object);
- vm_map_unlock(map);
- vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
- VM_MAP_WIRE_NOHOLES);
return (addr);
}
/*
- * kmem_free:
- *
- * Release a region of kernel virtual memory allocated
- * with kmem_alloc, and return the physical pages
- * associated with that region.
- *
- * This routine may not block on kernel maps.
- */
-void
-kmem_free(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
-{
-
- (void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
-}
-
-/*
* kmem_suballoc:
*
* Allocates a map to manage a subrange
@@ -393,65 +302,25 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
/*
* kmem_malloc:
*
- * Allocate wired-down memory in the kernel's address map for the higher
- * level kernel memory allocator (kern/kern_malloc.c). We cannot use
- * kmem_alloc() because we may need to allocate memory at interrupt
- * level where we cannot block (canwait == FALSE).
- *
- * This routine has its own private kernel submap (kmem_map) and object
- * (kmem_object). This, combined with the fact that only malloc uses
- * this routine, ensures that we will never block in map or object waits.
- *
- * We don't worry about expanding the map (adding entries) since entries
- * for wired maps are statically allocated.
- *
- * `map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
- * which we never free.
+ * Allocate wired-down pages in the kernel's address space.
*/
vm_offset_t
-kmem_malloc(map, size, flags)
- vm_map_t map;
- vm_size_t size;
- int flags;
+kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
vm_offset_t addr;
- int i, rv;
+ int rv;
size = round_page(size);
- addr = vm_map_min(map);
+ if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
+ return (0);
- /*
- * Locate sufficient space in the map. This will give us the final
- * virtual address for the new memory, and thus will tell us the
- * offset within the kernel map.
- */
- vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
- vm_map_unlock(map);
- if ((flags & M_NOWAIT) == 0) {
- for (i = 0; i < 8; i++) {
- EVENTHANDLER_INVOKE(vm_lowmem, 0);
- uma_reclaim();
- vm_map_lock(map);
- if (vm_map_findspace(map, vm_map_min(map),
- size, &addr) == 0) {
- break;
- }
- vm_map_unlock(map);
- tsleep(&i, 0, "nokva", (hz / 4) * (i + 1));
- }
- if (i == 8) {
- panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
- (long)size, (long)map->size);
- }
- } else {
- return (0);
- }
+ rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
+ addr, size, flags);
+ if (rv != KERN_SUCCESS) {
+ vmem_free(vmem, addr, size);
+ return (0);
}
-
- rv = kmem_back(map, addr, size, flags);
- vm_map_unlock(map);
- return (rv == KERN_SUCCESS ? addr : 0);
+ return (addr);
}
/*
@@ -460,37 +329,22 @@ kmem_malloc(map, size, flags)
* Allocate physical pages for the specified virtual address range.
*/
int
-kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
+kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
vm_offset_t offset, i;
- vm_map_entry_t entry;
vm_page_t m;
int pflags;
- boolean_t found;
- KASSERT(vm_map_locked(map), ("kmem_back: map %p is not locked", map));
- offset = addr - VM_MIN_KERNEL_ADDRESS;
- vm_object_reference(kmem_object);
- vm_map_insert(map, kmem_object, offset, addr, addr + size,
- VM_PROT_ALL, VM_PROT_ALL, 0);
+ KASSERT(object == kmem_object || object == kernel_object,
+ ("kmem_back: only supports kernel objects."));
- /*
- * Assert: vm_map_insert() will never be able to extend the
- * previous entry so vm_map_lookup_entry() will find a new
- * entry exactly corresponding to this address range and it
- * will have wired_count == 0.
- */
- found = vm_map_lookup_entry(map, addr, &entry);
- KASSERT(found && entry->start == addr && entry->end == addr + size &&
- entry->wired_count == 0 && (entry->eflags & MAP_ENTRY_IN_TRANSITION)
- == 0, ("kmem_back: entry not found or misaligned"));
-
- pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
+ offset = addr - VM_MIN_KERNEL_ADDRESS;
+ pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
- VM_OBJECT_WLOCK(kmem_object);
+ VM_OBJECT_WLOCK(object);
for (i = 0; i < size; i += PAGE_SIZE) {
retry:
- m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);
+ m = vm_page_alloc(object, OFF_TO_IDX(offset + i), pflags);
/*
* Ran out of space, free everything up and return. Don't need
@@ -499,79 +353,78 @@ retry:
*/
if (m == NULL) {
if ((flags & M_NOWAIT) == 0) {
- VM_OBJECT_WUNLOCK(kmem_object);
- entry->eflags |= MAP_ENTRY_IN_TRANSITION;
- vm_map_unlock(map);
+ VM_OBJECT_WUNLOCK(object);
VM_WAIT;
- vm_map_lock(map);
- KASSERT(
-(entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_NEEDS_WAKEUP)) ==
- MAP_ENTRY_IN_TRANSITION,
- ("kmem_back: volatile entry"));
- entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
- VM_OBJECT_WLOCK(kmem_object);
+ VM_OBJECT_WLOCK(object);
goto retry;
}
/*
- * Free the pages before removing the map entry.
- * They are already marked busy. Calling
- * vm_map_delete before the pages has been freed or
- * unbusied will cause a deadlock.
+ * Unmap and free the pages.
*/
+ if (i != 0)
+ pmap_remove(kernel_pmap, addr, addr + i);
while (i != 0) {
i -= PAGE_SIZE;
- m = vm_page_lookup(kmem_object,
+ m = vm_page_lookup(object,
OFF_TO_IDX(offset + i));
vm_page_unwire(m, 0);
vm_page_free(m);
}
- VM_OBJECT_WUNLOCK(kmem_object);
- vm_map_delete(map, addr, addr + size);
+ VM_OBJECT_WUNLOCK(object);
return (KERN_NO_SPACE);
}
if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
- m->valid = VM_PAGE_BITS_ALL;
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
("kmem_malloc: page %p is managed", m));
+ m->valid = VM_PAGE_BITS_ALL;
+ pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
+ TRUE);
}
- VM_OBJECT_WUNLOCK(kmem_object);
+ VM_OBJECT_WUNLOCK(object);
- /*
- * Mark map entry as non-pageable. Repeat the assert.
- */
- KASSERT(entry->start == addr && entry->end == addr + size &&
- entry->wired_count == 0,
- ("kmem_back: entry not found or misaligned after allocation"));
- entry->wired_count = 1;
+ return (KERN_SUCCESS);
+}
- /*
- * At this point, the kmem_object must be unlocked because
- * vm_map_simplify_entry() calls vm_object_deallocate(), which
- * locks the kmem_object.
- */
- vm_map_simplify_entry(map, entry);
+void
+kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
+{
+ vm_page_t m;
+ vm_offset_t offset;
+ int i;
- /*
- * Loop thru pages, entering them in the pmap.
- */
- VM_OBJECT_WLOCK(kmem_object);
+ KASSERT(object == kmem_object || object == kernel_object,
+ ("kmem_unback: only supports kernel objects."));
+
+ offset = addr - VM_MIN_KERNEL_ADDRESS;
+ VM_OBJECT_WLOCK(object);
+ pmap_remove(kernel_pmap, addr, addr + size);
for (i = 0; i < size; i += PAGE_SIZE) {
- m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
- /*
- * Because this is kernel_pmap, this call will not block.
- */
- pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
- TRUE);
- vm_page_wakeup(m);
+ m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
+ vm_page_unwire(m, 0);
+ vm_page_free(m);
}
- VM_OBJECT_WUNLOCK(kmem_object);
+ VM_OBJECT_WUNLOCK(object);
+}
- return (KERN_SUCCESS);
+/*
+ * kmem_free:
+ *
+ * Free memory allocated with kmem_malloc. The size must match the
+ * original allocation.
+ */
+void
+kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
+{
+
+ size = round_page(size);
+ kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
+ addr, size);
+ vmem_free(vmem, addr, size);
}
/*
- * kmem_alloc_wait:
+ * kmap_alloc_wait:
*
* Allocates pageable memory from a sub-map of the kernel. If the submap
* has no room, the caller sleeps waiting for more memory in the submap.
@@ -579,7 +432,7 @@ retry:
* This routine may block.
*/
vm_offset_t
-kmem_alloc_wait(map, size)
+kmap_alloc_wait(map, size)
vm_map_t map;
vm_size_t size;
{
@@ -613,13 +466,13 @@ kmem_alloc_wait(map, size)
}
/*
- * kmem_free_wakeup:
+ * kmap_free_wakeup:
*
* Returns memory to a submap of the kernel, and wakes up any processes
* waiting for memory in that map.
*/
void
-kmem_free_wakeup(map, addr, size)
+kmap_free_wakeup(map, addr, size)
vm_map_t map;
vm_offset_t addr;
vm_size_t size;
@@ -634,28 +487,25 @@ kmem_free_wakeup(map, addr, size)
vm_map_unlock(map);
}
-static void
+void
kmem_init_zero_region(void)
{
vm_offset_t addr, i;
vm_page_t m;
- int error;
/*
* Map a single physical page of zeros to a larger virtual range.
* This requires less looping in places that want large amounts of
* zeros, while not using much more physical resources.
*/
- addr = kmem_alloc_nofault(kernel_map, ZERO_REGION_SIZE);
+ addr = kva_alloc(ZERO_REGION_SIZE);
m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
pmap_qenter(addr + i, &m, 1);
- error = vm_map_protect(kernel_map, addr, addr + ZERO_REGION_SIZE,
- VM_PROT_READ, TRUE);
- KASSERT(error == 0, ("error=%d", error));
+ pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);
zero_region = (const void *)addr;
}
@@ -688,8 +538,6 @@ kmem_init(start, end)
start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
/* ... and ending with the completion of the above `insert' */
vm_map_unlock(m);
-
- kmem_init_zero_region();
}
#ifdef DIAGNOSTIC
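
A minimal sketch (not from the patch) of how a consumer allocates and frees
wired kernel memory through the reworked interface above; the function names
and "nbytes" are hypothetical:

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

static void *
wired_alloc_sketch(vm_size_t nbytes)
{

        /* Allocates KVA from the arena and backs it with wired pages. */
        return ((void *)kmem_malloc(kmem_arena, nbytes, M_WAITOK | M_ZERO));
}

static void
wired_free_sketch(void *p, vm_size_t nbytes)
{

        /* The size must match the original request (see kmem_free above). */
        kmem_free(kmem_arena, (vm_offset_t)p, nbytes);
}
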
diff --git a/sys/vm/vm_kern.h b/sys/vm/vm_kern.h
index 1479e5f..284b777 100644
--- a/sys/vm/vm_kern.h
+++ b/sys/vm/vm_kern.h
@@ -65,9 +65,10 @@
/* Kernel memory management definitions. */
extern vm_map_t kernel_map;
-extern vm_map_t kmem_map;
extern vm_map_t exec_map;
extern vm_map_t pipe_map;
+extern struct vmem *kernel_arena;
+extern struct vmem *kmem_arena;
extern struct vmem *buffer_arena;
extern struct vmem *transient_arena;
extern vm_offset_t swapbkva;
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index c43bce2..1d92965 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -197,9 +197,15 @@ vm_map_startup(void)
kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
- uma_prealloc(kmapentzone, MAX_KMAPENT);
mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
+#ifdef INVARIANTS
+ vmspace_zdtor,
+#else
+ NULL,
+#endif
+ vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}
static void
@@ -299,21 +305,6 @@ vmspace_alloc(min, max)
return (vm);
}
-void
-vm_init2(void)
-{
- uma_zone_reserve_kva(kmapentzone, lmin(cnt.v_page_count,
- (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 +
- maxproc * 2 + maxfiles);
- vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
-#ifdef INVARIANTS
- vmspace_zdtor,
-#else
- NULL,
-#endif
- vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
-}
-
static void
vmspace_container_reset(struct proc *p)
{
@@ -1469,11 +1460,6 @@ again:
pmap_align_superpage(object, offset, addr,
length);
break;
-#ifdef VMFS_TLB_ALIGNED_SPACE
- case VMFS_TLB_ALIGNED_SPACE:
- pmap_align_tlb(addr);
- break;
-#endif
default:
break;
}
@@ -1483,9 +1469,6 @@ again:
result = vm_map_insert(map, object, offset, start, start +
length, prot, max, cow);
} while (result == KERN_NO_SPACE && (find_space == VMFS_ALIGNED_SPACE ||
-#ifdef VMFS_TLB_ALIGNED_SPACE
- find_space == VMFS_TLB_ALIGNED_SPACE ||
-#endif
find_space == VMFS_OPTIMAL_SPACE));
vm_map_unlock(map);
return (result);
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 824a9a0..ed8864e 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -345,9 +345,6 @@ long vmspace_resident_count(struct vmspace *vmspace);
#define VMFS_ANY_SPACE 1 /* find a range with any alignment */
#define VMFS_OPTIMAL_SPACE 2 /* find a range with optimal alignment*/
#define VMFS_ALIGNED_SPACE 3 /* find a superpage-aligned range */
-#if defined(__mips__)
-#define VMFS_TLB_ALIGNED_SPACE 4 /* find a TLB entry aligned range */
-#endif
/*
* vm_map_wire and vm_map_unwire option flags
@@ -387,7 +384,6 @@ int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t);
-void vm_init2 (void);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
int vm_map_growstack (struct proc *p, vm_offset_t addr);
int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 1d128bf..7c3cad4 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -295,7 +295,7 @@ vm_object_init(void)
#else
NULL,
#endif
- vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
+ vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
vm_radix_init();
}
diff --git a/sys/x86/x86/busdma_machdep.c b/sys/x86/x86/busdma_machdep.c
index a4de15a..0b59616 100644
--- a/sys/x86/x86/busdma_machdep.c
+++ b/sys/x86/x86/busdma_machdep.c
@@ -544,11 +544,11 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
dmat->alignment <= PAGE_SIZE &&
(dmat->boundary == 0 || dmat->boundary >= dmat->lowaddr)) {
/* Page-based multi-segment allocations allowed */
- *vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize,
+ *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
mflags, 0ul, dmat->lowaddr, attr);
*mapp = &contig_dmamap;
} else {
- *vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
+ *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
mflags, 0ul, dmat->lowaddr, dmat->alignment ?
dmat->alignment : 1ul, dmat->boundary, attr);
*mapp = &contig_dmamap;
@@ -582,7 +582,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
if (map == NULL)
free(vaddr, M_DEVBUF);
else
- kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
+ kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
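
A minimal sketch (not from the patch) of the constrained allocation the
busdma path above relies on: pages may be scattered in physical memory but
must all lie below a device's addressing limit.  "dma_pages_sketch", "sz"
and "lowaddr" are hypothetical:

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

static void *
dma_pages_sketch(vm_size_t sz, vm_paddr_t lowaddr)
{

        return ((void *)kmem_alloc_attr(kernel_arena, sz, M_WAITOK | M_ZERO,
            0, lowaddr, VM_MEMATTR_DEFAULT));
}
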
diff --git a/sys/xen/gnttab.c b/sys/xen/gnttab.c
index 4ece182..d8eb381 100644
--- a/sys/xen/gnttab.c
+++ b/sys/xen/gnttab.c
@@ -527,8 +527,7 @@ gnttab_map(unsigned int start_idx, unsigned int end_idx)
if (shared == NULL) {
vm_offset_t area;
- area = kmem_alloc_nofault(kernel_map,
- PAGE_SIZE * max_nr_grant_frames());
+ area = kva_alloc(PAGE_SIZE * max_nr_grant_frames());
KASSERT(area, ("can't allocate VM space for grant table"));
shared = (grant_entry_t *)area;
}
@@ -590,8 +589,7 @@ gnttab_map(unsigned int start_idx, unsigned int end_idx)
if (shared == NULL) {
vm_offset_t area;
- area = kmem_alloc_nofault(kernel_map,
- PAGE_SIZE * max_nr_grant_frames());
+ area = kva_alloc(PAGE_SIZE * max_nr_grant_frames());
KASSERT(area, ("can't allocate VM space for grant table"));
shared = (grant_entry_t *)area;
}