-rw-r--r-- | sys/arm/arm/busdma_machdep-v6.c | 38
1 file changed, 21 insertions, 17 deletions
diff --git a/sys/arm/arm/busdma_machdep-v6.c b/sys/arm/arm/busdma_machdep-v6.c
index fefc4e5..b6629fa 100644
--- a/sys/arm/arm/busdma_machdep-v6.c
+++ b/sys/arm/arm/busdma_machdep-v6.c
@@ -1,5 +1,5 @@
 /*-
- * Copyright (c) 2012 Ian Lepore
+ * Copyright (c) 2012-2014 Ian Lepore
  * Copyright (c) 2010 Mark Tinguely
  * Copyright (c) 2004 Olivier Houchard
  * Copyright (c) 2002 Peter Grehan
@@ -346,6 +346,7 @@ static __inline int
 might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr,
     bus_size_t size)
 {
+
 	return ((dmat->flags & BUS_DMA_EXCL_BOUNCE) ||
 	    alignment_bounce(dmat, addr) ||
 	    cacheline_bounce(map, addr, size));
@@ -444,6 +445,7 @@ busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
 static void
 dflt_lock(void *arg, bus_dma_lock_op_t op)
 {
+
 	panic("driver error: busdma dflt_lock called");
 }
 
@@ -625,7 +627,7 @@ out:
 static int
 allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp)
 {
-	struct bounce_zone *bz;
+	struct bounce_zone *bz;
 	int maxpages;
 	int error;
 
@@ -1252,13 +1254,13 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
 }
 
 #ifdef notyetbounceuser
-	/* If busdma uses user pages, then the interrupt handler could
-	 * be use the kernel vm mapping. Both bounce pages and sync list
-	 * do not cross page boundaries.
-	 * Below is a rough sequence that a person would do to fix the
-	 * user page reference in the kernel vmspace. This would be
-	 * done in the dma post routine.
-	 */
+/* If busdma uses user pages, then the interrupt handler could
+ * be use the kernel vm mapping. Both bounce pages and sync list
+ * do not cross page boundaries.
+ * Below is a rough sequence that a person would do to fix the
+ * user page reference in the kernel vmspace. This would be
+ * done in the dma post routine.
+ */
 void
 _bus_dmamap_fix_user(vm_offset_t buf, bus_size_t len,
     pmap_t pmap, int op)
@@ -1267,10 +1269,10 @@ _bus_dmamap_fix_user(vm_offset_t buf, bus_size_t len,
 	bus_addr_t curaddr;
 	vm_offset_t va;
 
-	/* each synclist entry is contained within a single page.
-	 *
-	 * this would be needed if BUS_DMASYNC_POSTxxxx was implemented
-	 */
+	/*
+	 * each synclist entry is contained within a single page.
+	 * this would be needed if BUS_DMASYNC_POSTxxxx was implemented
+	 */
 	curaddr = pmap_extract(pmap, buf);
 	va = pmap_dma_map(curaddr);
 	switch (op) {
@@ -1411,10 +1413,10 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 	switch (op) {
 	case BUS_DMASYNC_PREWRITE:
 		while (sl != end) {
-			cpu_dcache_wb_range(sl->vaddr, sl->datacount);
-			l2cache_wb_range(sl->vaddr, sl->busaddr,
-			    sl->datacount);
-			sl++;
+			cpu_dcache_wb_range(sl->vaddr, sl->datacount);
+			l2cache_wb_range(sl->vaddr, sl->busaddr,
+			    sl->datacount);
+			sl++;
 		}
 		break;
 
@@ -1462,12 +1464,14 @@ SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
 static struct sysctl_ctx_list *
 busdma_sysctl_tree(struct bounce_zone *bz)
 {
+
 	return (&bz->sysctl_tree);
 }
 
 static struct sysctl_oid *
 busdma_sysctl_tree_top(struct bounce_zone *bz)
 {
+
 	return (bz->sysctl_tree_top);
 }
 
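
For context on the code touched above: the might_bounce() hunk shows the bounce decision used by this allocator (force bouncing when the tag is marked BUS_DMA_EXCL_BOUNCE, otherwise bounce on an alignment or cache-line conflict). The following is a minimal standalone C sketch of that decision logic only; the struct layouts, the BUS_DMA_EXCL_BOUNCE value, and the bodies of alignment_bounce()/cacheline_bounce() are simplified assumptions for illustration, not the kernel's definitions.

/*
 * Standalone sketch of the bounce decision visible in the hunk above.
 * All types and helper bodies here are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BUS_DMA_EXCL_BOUNCE 0x1000	/* assumed value for illustration */

typedef uint64_t bus_addr_t;
typedef uint64_t bus_size_t;

struct bus_dma_tag {			/* stand-in for bus_dma_tag_t */
	int        flags;
	bus_size_t alignment;
};

struct bus_dmamap {			/* stand-in for bus_dmamap_t */
	int flags;
};

/* Assumed helper: bounce if the address violates the tag's alignment. */
static bool
alignment_bounce(struct bus_dma_tag *dmat, bus_addr_t addr)
{
	return ((addr & (dmat->alignment - 1)) != 0);
}

/* Assumed helper: bounce if the buffer is not cache-line safe (64B lines). */
static bool
cacheline_bounce(struct bus_dmamap *map, bus_addr_t addr, bus_size_t size)
{
	(void)map;
	return (((addr | size) & 63) != 0);
}

/*
 * Mirrors the logic shown in might_bounce(): a tag marked
 * BUS_DMA_EXCL_BOUNCE always bounces; otherwise the alignment and
 * cache-line checks decide.
 */
static bool
might_bounce(struct bus_dma_tag *dmat, struct bus_dmamap *map,
    bus_addr_t addr, bus_size_t size)
{
	return ((dmat->flags & BUS_DMA_EXCL_BOUNCE) ||
	    alignment_bounce(dmat, addr) ||
	    cacheline_bounce(map, addr, size));
}

int
main(void)
{
	struct bus_dma_tag tag = { .flags = 0, .alignment = 4 };
	struct bus_dmamap map = { .flags = 0 };

	/* Aligned, cache-line-sized buffer: no bounce needed. */
	printf("aligned 64-byte buffer: %d\n",
	    (int)might_bounce(&tag, &map, 0x1000, 64));

	/* Same buffer, but the tag demands exclusive bouncing. */
	tag.flags |= BUS_DMA_EXCL_BOUNCE;
	printf("same buffer, EXCL_BOUNCE set: %d\n",
	    (int)might_bounce(&tag, &map, 0x1000, 64));
	return (0);
}

The remaining hunks are style cleanups: blank lines added after opening braces of functions with no local declarations, re-indented comment blocks in the notyetbounceuser section, and whitespace fixes in allocate_bz_and_pages() and the PREWRITE sync loop.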