summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorcognet <cognet@FreeBSD.org>2006-03-01 23:04:25 +0000
committercognet <cognet@FreeBSD.org>2006-03-01 23:04:25 +0000
commitdc462dee1c02c758f3e0a40bb1f3782608b73d16 (patch)
tree9a3f2b62ed9dc4a5c68f80d377b612e22015ea01
parent64dd56c81dc47fb507c9c5918ab298e109d5f919 (diff)
downloadFreeBSD-src-dc462dee1c02c758f3e0a40bb1f3782608b73d16.zip
FreeBSD-src-dc462dee1c02c758f3e0a40bb1f3782608b73d16.tar.gz
Try to honor BUS_DMA_COHERENT: if the flag is set, allocate memory
with malloc() or contigmalloc() as usual, but try to re-map the allocated memory into a VA outside the KVA, non-cached, thus making calls to bus_dmamap_sync() for these buffers unnecessary.
-rw-r--r--sys/arm/arm/busdma_machdep.c25
-rw-r--r--sys/arm/arm/pmap.c12
-rw-r--r--sys/arm/arm/vm_machdep.c60
-rw-r--r--sys/arm/include/pmap.h7
4 files changed, 102 insertions, 2 deletions
diff --git a/sys/arm/arm/busdma_machdep.c b/sys/arm/arm/busdma_machdep.c
index 39d4d82..f3f89ae 100644
--- a/sys/arm/arm/busdma_machdep.c
+++ b/sys/arm/arm/busdma_machdep.c
@@ -93,6 +93,8 @@ struct bus_dmamap {
bus_dma_tag_t dmat;
int flags;
void *buffer;
+ void *origbuffer;
+ void *allocbuffer;
TAILQ_ENTRY(bus_dmamap) freelist;
int len;
};
@@ -416,6 +418,23 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
*mapp = NULL;
return (ENOMEM);
}
+ if (flags & BUS_DMA_COHERENT) {
+ void *tmpaddr = arm_remap_nocache(
+ (void *)((vm_offset_t)*vaddr &~ PAGE_MASK),
+ dmat->maxsize + ((vm_offset_t)*vaddr & PAGE_MASK));
+
+ if (tmpaddr) {
+ tmpaddr = (void *)((vm_offset_t)(tmpaddr) +
+ ((vm_offset_t)*vaddr & PAGE_MASK));
+ newmap->origbuffer = *vaddr;
+ newmap->allocbuffer = tmpaddr;
+ cpu_idcache_wbinv_range((vm_offset_t)*vaddr,
+ dmat->maxsize);
+ *vaddr = tmpaddr;
+ } else
+ newmap->origbuffer = newmap->allocbuffer = NULL;
+ } else
+ newmap->origbuffer = newmap->allocbuffer = NULL;
return (0);
}
@@ -426,6 +445,12 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
+ if (map->allocbuffer) {
+ KASSERT(map->allocbuffer == vaddr,
+ ("Trying to freeing the wrong DMA buffer"));
+ vaddr = map->origbuffer;
+ arm_unmap_nocache(map->allocbuffer, dmat->maxsize);
+ }
if (dmat->maxsize <= PAGE_SIZE)
free(vaddr, M_DEVBUF);
else {
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index a682f8d..7ab6e3f 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -2553,9 +2553,12 @@ pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt
virtual_avail = round_page(virtual_avail);
virtual_end = lastaddr;
kernel_vm_end = pmap_curmaxkvaddr;
+ arm_nocache_startaddr = lastaddr;
+
#ifdef ARM_USE_SMALL_ALLOC
mtx_init(&smallalloc_mtx, "Small alloc page list", NULL, MTX_DEF);
- alloc_firstaddr = alloc_curaddr = lastaddr;
+ alloc_firstaddr = alloc_curaddr = arm_nocache_startaddr +
+ ARM_NOCACHE_KVA_SIZE;
#endif
}
@@ -2916,6 +2919,13 @@ pmap_kenter(vm_offset_t va, vm_paddr_t pa)
}
void
+pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa)
+{
+
+ pmap_kenter_internal(va, pa, 0);
+}
+
+void
pmap_kenter_user(vm_offset_t va, vm_paddr_t pa)
{
diff --git a/sys/arm/arm/vm_machdep.c b/sys/arm/arm/vm_machdep.c
index 3e7debc..ebc8563 100644
--- a/sys/arm/arm/vm_machdep.c
+++ b/sys/arm/arm/vm_machdep.c
@@ -368,6 +368,64 @@ cpu_exit(struct thread *td)
{
}
+vm_offset_t arm_nocache_startaddr;
+static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE * 32)];
+
+/*
+ * Functions to map and unmap memory non-cached into KVA the kernel won't try
+ * to allocate. The goal is to provide uncached memory to busdma, to honor
+ * BUS_DMA_COHERENT.
+ * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
+ * The allocator is deliberately simple: each page is represented by a bit in
+ * a bitfield, 0 meaning the page is not allocated, 1 meaning it is.
+ * As soon as it finds enough contiguous pages to satisfy the request,
+ * it returns the address.
+ */
+void *
+arm_remap_nocache(void *addr, vm_size_t size)
+{
+ int i, j;
+
+ size = round_page(size);
+ for (i = 0; i < MIN(ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE * 32),
+ ARM_TP_ADDRESS); i++) {
+ if (!(arm_nocache_allocated[i / 32] & (1 << (i % 32)))) {
+ for (j = i; j < i + (size / (PAGE_SIZE)); j++)
+ if (arm_nocache_allocated[j / 32] &
+ (1 << (j % 32)))
+ break;
+ if (j == i + (size / (PAGE_SIZE)))
+ break;
+ }
+ }
+ if (i < MIN(ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE * 32),
+ ARM_TP_ADDRESS)) {
+ vm_offset_t tomap = arm_nocache_startaddr + i * PAGE_SIZE;
+ void *ret = (void *)tomap;
+ vm_paddr_t physaddr = vtophys((vm_offset_t)addr);
+
+ for (; tomap < (vm_offset_t)ret + size; tomap += PAGE_SIZE,
+ physaddr += PAGE_SIZE, i++) {
+ pmap_kenter_nocache(tomap, physaddr);
+ arm_nocache_allocated[i / 32] |= 1 << (i % 32);
+ }
+ return (ret);
+ }
+ return (NULL);
+}
+
+void
+arm_unmap_nocache(void *addr, vm_size_t size)
+{
+ vm_offset_t raddr = (vm_offset_t)addr;
+ int i;
+
+ size = round_page(size);
+ i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
+ for (; size > 0; size -= PAGE_SIZE, i++)
+ arm_nocache_allocated[i / 32] &= ~(1 << (i % 32));
+}
+
#ifdef ARM_USE_SMALL_ALLOC
static TAILQ_HEAD(,arm_small_page) pages_normal =
@@ -393,7 +451,7 @@ arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
struct arm_small_page *pg;
- bytes &= ~PAGE_SIZE;
+ bytes &= ~PAGE_MASK;
while (bytes > 0) {
pg = (struct arm_small_page *)list;
pg->addr = mem;
diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h
index fc85ef0..06f6f7c 100644
--- a/sys/arm/include/pmap.h
+++ b/sys/arm/include/pmap.h
@@ -210,6 +210,7 @@ extern vm_offset_t virtual_end;
void pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
+void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
void pmap_kremove(vm_offset_t);
void *pmap_mapdev(vm_offset_t, vm_size_t);
@@ -525,6 +526,12 @@ struct arm_small_page {
};
#endif
+
+#define ARM_NOCACHE_KVA_SIZE 0x600000
+extern vm_offset_t arm_nocache_startaddr;
+void *arm_remap_nocache(void *, vm_size_t);
+void arm_unmap_nocache(void *, vm_size_t);
+
extern vm_paddr_t dump_avail[];
#endif /* _KERNEL */
OpenPOWER on IntegriCloud