author	jeff <jeff@FreeBSD.org>	2013-08-07 06:21:20 +0000
committer	jeff <jeff@FreeBSD.org>	2013-08-07 06:21:20 +0000
commit	de4ecca21340ce4d0bf9182cac133c14e031218e (patch)
tree	950bad07f0aeeeae78036d82b9aa11ae998c3654 /sys/arm
parent	e141f5c0bac3839e4886a26e1ba796f4e46e6455 (diff)
Replace kernel virtual address space allocation with vmem.  This provides
transparent layering and better fragmentation.

 - Normalize functions that allocate memory to use kmem_*
 - Those that allocate address space are named kva_*
 - Those that operate on maps are named kmap_*
 - Implement recursive allocation handling for kmem_arena in vmem.

Reviewed by:	alc
Tested by:	pho
Sponsored by:	EMC / Isilon Storage Division
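As a reader's aid, below is a minimal sketch of the two allocation flavors
after the rename, in the style of the bus_space and busdma callers changed in
this diff. The demo_* function names are hypothetical, and the prototypes are
inferred from the call sites below rather than quoted from the VM headers:

	#include <sys/param.h>
	#include <sys/malloc.h>		/* M_NOWAIT */
	#include <vm/vm.h>
	#include <vm/vm_extern.h>	/* kva_alloc(), kva_free(), kmem_malloc(), kmem_free() */
	#include <vm/vm_kern.h>		/* kernel_arena, kmem_arena */
	#include <vm/pmap.h>

	/*
	 * Address space only, no backing pages: the kva_* interfaces
	 * replace kmem_alloc_nofault(kernel_map, ...) and the matching
	 * kmem_free(kernel_map, ...).
	 */
	static vm_offset_t
	demo_map(vm_paddr_t startpa, vm_paddr_t endpa)
	{
		vm_offset_t va;

		va = kva_alloc(endpa - startpa);
		if (va == 0)
			return (0);
		/* ... pmap_kenter() each page, as generic_bs_map() does ... */
		return (va);
	}

	static void
	demo_unmap(vm_offset_t va, vm_size_t size)
	{
		/* ... pmap_kremove() each page ... */
		kva_free(va, size);
	}

	/*
	 * Backed memory: the kmem_* interfaces now take a vmem arena
	 * (kernel_arena, or kmem_arena for malloc-style buffers) where
	 * the old calls took a vm_map.
	 */
	static void *
	demo_buf_alloc(vm_size_t size)
	{
		return ((void *)kmem_malloc(kmem_arena, size, M_NOWAIT));
	}

	static void
	demo_buf_free(void *mem, vm_size_t size)
	{
		kmem_free(kmem_arena, (vm_offset_t)mem, size);
	}

The split makes it explicit at each call site whether backing pages are
consumed or only address space is reserved, which is the transparent layering
the commit message refers to.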
Diffstat (limited to 'sys/arm')
-rw-r--r--	sys/arm/arm/bus_space_generic.c	4
-rw-r--r--	sys/arm/arm/busdma_machdep-v6.c	6
-rw-r--r--	sys/arm/arm/busdma_machdep.c	6
-rw-r--r--	sys/arm/arm/mp_machdep.c	3
-rw-r--r--	sys/arm/arm/pmap-v6.c	5
-rw-r--r--	sys/arm/arm/pmap.c	2
-rw-r--r--	sys/arm/arm/vm_machdep.c	7
-rw-r--r--	sys/arm/at91/at91.c	2
-rw-r--r--	sys/arm/mv/armadaxp/armadaxp_mp.c	4
-rw-r--r--	sys/arm/s3c2xx0/s3c2xx0_space.c	4
-rw-r--r--	sys/arm/xscale/i80321/i80321_space.c	2
-rw-r--r--	sys/arm/xscale/i8134x/i81342_space.c	2
-rw-r--r--	sys/arm/xscale/ixp425/ixp425_pci_space.c	2
13 files changed, 25 insertions, 24 deletions
diff --git a/sys/arm/arm/bus_space_generic.c b/sys/arm/arm/bus_space_generic.c
index 29638845..f269ac8 100644
--- a/sys/arm/arm/bus_space_generic.c
+++ b/sys/arm/arm/bus_space_generic.c
@@ -73,7 +73,7 @@ generic_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
offset = bpa & PAGE_MASK;
startpa = trunc_page(bpa);
- va = kmem_alloc_nofault(kernel_map, endpa - startpa);
+ va = kva_alloc(endpa - startpa);
if (va == 0)
return (ENOMEM);
@@ -118,7 +118,7 @@ generic_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
pmap_kremove(va);
va += PAGE_SIZE;
}
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
void
diff --git a/sys/arm/arm/busdma_machdep-v6.c b/sys/arm/arm/busdma_machdep-v6.c
index 485b44a..b7057a1 100644
--- a/sys/arm/arm/busdma_machdep-v6.c
+++ b/sys/arm/arm/busdma_machdep-v6.c
@@ -696,10 +696,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
*vaddr = uma_zalloc(bufzone->umazone, mflags);
} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
- *vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize,
+ *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, memattr);
} else {
- *vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
+ *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
memattr);
}
@@ -744,7 +744,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
!_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
uma_zfree(bufzone->umazone, vaddr);
else
- kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
+ kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
dmat->map_count--;
free(map, M_DEVBUF);
diff --git a/sys/arm/arm/busdma_machdep.c b/sys/arm/arm/busdma_machdep.c
index 10760b4..6181d35 100644
--- a/sys/arm/arm/busdma_machdep.c
+++ b/sys/arm/arm/busdma_machdep.c
@@ -752,10 +752,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
vaddr = uma_zalloc(bufzone->umazone, mflags);
} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
- vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize,
+ vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, memattr);
} else {
- vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
+ vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
memattr);
}
@@ -798,7 +798,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
!_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
uma_zfree(bufzone->umazone, vaddr);
else
- kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
+ kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
}
static void
diff --git a/sys/arm/arm/mp_machdep.c b/sys/arm/arm/mp_machdep.c
index 2193266..4db6da4 100644
--- a/sys/arm/arm/mp_machdep.c
+++ b/sys/arm/arm/mp_machdep.c
@@ -112,7 +112,8 @@ cpu_mp_start(void)
/* Reserve memory for application processors */
for(i = 0; i < (mp_ncpus - 1); i++)
- dpcpu[i] = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
+ dpcpu[i] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+ M_WAITOK | M_ZERO);
temp_pagetable_va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE,
M_TEMP, 0, 0x0, 0xffffffff, L1_TABLE_SIZE, 0);
addr = KERNPHYSADDR;
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 73b899c..158bff2 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -1255,8 +1255,7 @@ pmap_init(void)
pv_entry_high_water = 9 * (pv_entry_max / 10);
pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
- pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
- PAGE_SIZE * pv_maxchunks);
+ pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
if (pv_chunkbase == NULL)
panic("pmap_init: not enough kvm for pv chunks");
@@ -4103,7 +4102,7 @@ pmap_mapdev(vm_offset_t pa, vm_size_t size)
GIANT_REQUIRED;
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0;) {
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 33b643c..b4f107b 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -4718,7 +4718,7 @@ pmap_mapdev(vm_offset_t pa, vm_size_t size)
GIANT_REQUIRED;
- va = kmem_alloc_nofault(kernel_map, size);
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0;) {
diff --git a/sys/arm/arm/vm_machdep.c b/sys/arm/arm/vm_machdep.c
index c6526ab..4f8f00b 100644
--- a/sys/arm/arm/vm_machdep.c
+++ b/sys/arm/arm/vm_machdep.c
@@ -210,7 +210,7 @@ sf_buf_init(void *arg)
sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
TAILQ_INIT(&sf_buf_freelist);
- sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
+ sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
M_NOWAIT | M_ZERO);
for (i = 0; i < nsfbufs; i++) {
@@ -667,7 +667,8 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
if (zone == l2zone &&
pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt) {
*flags = UMA_SLAB_KMEM;
- ret = ((void *)kmem_malloc(kmem_map, bytes, M_NOWAIT));
+ ret = ((void *)kmem_malloc(kmem_arena, bytes,
+ M_NOWAIT));
return (ret);
}
pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
@@ -701,7 +702,7 @@ uma_small_free(void *mem, int size, u_int8_t flags)
pt_entry_t *pt;
if (flags & UMA_SLAB_KMEM)
- kmem_free(kmem_map, (vm_offset_t)mem, size);
+ kmem_free(kmem_arena, (vm_offset_t)mem, size);
else {
struct arm_small_page *sp;
diff --git a/sys/arm/at91/at91.c b/sys/arm/at91/at91.c
index deb3c38..a3f9556 100644
--- a/sys/arm/at91/at91.c
+++ b/sys/arm/at91/at91.c
@@ -85,7 +85,7 @@ at91_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
endva = va + round_page(size);
/* Free the kernel virtual mapping. */
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
static int
diff --git a/sys/arm/mv/armadaxp/armadaxp_mp.c b/sys/arm/mv/armadaxp/armadaxp_mp.c
index 83332ba..1af598b 100644
--- a/sys/arm/mv/armadaxp/armadaxp_mp.c
+++ b/sys/arm/mv/armadaxp/armadaxp_mp.c
@@ -113,7 +113,7 @@ platform_mp_start_ap(void)
cputype = cpufunc_id();
cputype &= CPU_ID_CPU_MASK;
- smp_boot = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
+ smp_boot = kva_alloc(PAGE_SIZE);
pmap_kenter_nocache(smp_boot, 0xffff0000);
dst = (uint32_t *) smp_boot;
@@ -121,7 +121,7 @@ platform_mp_start_ap(void)
src++, dst++) {
*dst = *src;
}
- kmem_free(kernel_map, smp_boot, PAGE_SIZE);
+ kva_free(smp_boot, PAGE_SIZE);
if (cputype == CPU_ID_MV88SV584X_V7) {
/* Core rev A0 */
diff --git a/sys/arm/s3c2xx0/s3c2xx0_space.c b/sys/arm/s3c2xx0/s3c2xx0_space.c
index 958e658..ab72370 100644
--- a/sys/arm/s3c2xx0/s3c2xx0_space.c
+++ b/sys/arm/s3c2xx0/s3c2xx0_space.c
@@ -182,7 +182,7 @@ s3c2xx0_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
startpa = trunc_page(bpa);
endpa = round_page(bpa + size);
- va = kmem_alloc_nofault(kernel_map, endpa - startpa);
+ va = kva_alloc(endpa - startpa);
if (!va)
return (ENOMEM);
@@ -214,7 +214,7 @@ s3c2xx0_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
pmap_kremove(va);
va += PAGE_SIZE;
}
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
int
diff --git a/sys/arm/xscale/i80321/i80321_space.c b/sys/arm/xscale/i80321/i80321_space.c
index ebb0306..ce1db83 100644
--- a/sys/arm/xscale/i80321/i80321_space.c
+++ b/sys/arm/xscale/i80321/i80321_space.c
@@ -312,7 +312,7 @@ i80321_mem_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
endva = va + round_page(size);
/* Free the kernel virtual mapping. */
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
int
diff --git a/sys/arm/xscale/i8134x/i81342_space.c b/sys/arm/xscale/i8134x/i81342_space.c
index 5b08ef6..bd19a77 100644
--- a/sys/arm/xscale/i8134x/i81342_space.c
+++ b/sys/arm/xscale/i8134x/i81342_space.c
@@ -324,7 +324,7 @@ i81342_mem_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
endva = va + round_page(size);
/* Free the kernel virtual mapping. */
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
#endif
}
diff --git a/sys/arm/xscale/ixp425/ixp425_pci_space.c b/sys/arm/xscale/ixp425/ixp425_pci_space.c
index 4b0ca81..8617e8e 100644
--- a/sys/arm/xscale/ixp425/ixp425_pci_space.c
+++ b/sys/arm/xscale/ixp425/ixp425_pci_space.c
@@ -432,7 +432,7 @@ ixp425_pci_mem_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
endva = va + round_page(size);
/* Free the kernel virtual mapping. */
- kmem_free(kernel_map, va, endva - va);
+ kva_free(va, endva - va);
}
int