author    kib <kib@FreeBSD.org>  2013-02-12 16:57:20 +0000
committer kib <kib@FreeBSD.org>  2013-02-12 16:57:20 +0000
commit    bd7f0fa0bb4b7b0f87227e0c4d49a4bd9b113cf0 (patch)
tree      e550f2c754f1edf951a8b93963ebcfc4fa0d20ce /sys/mips
parent    e0a463e76c719f11788ec107b5aa3e2da4e57c0b (diff)
download  FreeBSD-src-bd7f0fa0bb4b7b0f87227e0c4d49a4bd9b113cf0.zip
          FreeBSD-src-bd7f0fa0bb4b7b0f87227e0c4d49a4bd9b113cf0.tar.gz
Reform the busdma API so that new types may be added without modifying
every architecture's busdma_machdep.c.  It is done by unifying the
bus_dmamap_load_buffer() routines so that they may be called from MI
code.  The MD busdma is then given a chance to do any final processing
in the complete() callback.

The cam changes unify the bus_dmamap_load* handling in cam drivers.

The arm and mips implementations are updated to track virtual
addresses for sync().  Previously this was done in a type specific
way.  Now it is done in a generic way by recording the list of
virtuals in the map.

Submitted by:	jeff (sponsored by EMC/Isilon)
Reviewed by:	kan (previous version), scottl, mjacob (isp(4), no objections for target mode changes)
Discussed with:	ian (arm changes)
Tested by:	marius (sparc64), mips (jmallet), isci(4) on x86 (jharris), amd64 (Fabian Keil <freebsd-listen@fabiankeil.de>)
Diffstat (limited to 'sys/mips')
-rw-r--r--   sys/mips/mips/busdma_machdep.c   594
1 files changed, 261 insertions, 333 deletions
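
The split the commit message describes can be pictured with a short, assumed sketch (this is not the actual MI wrapper code added elsewhere in the tree): the machine-dependent _bus_dmamap_load_buffer() introduced below fills the segment array, _bus_dmamap_complete() gives the MD layer a final look at it, and only then does the driver callback run. The helper name example_load_kva() is hypothetical, and the __bus_dmamap_waitok() bookkeeping used for deferred (EINPROGRESS) loads is omitted for brevity.

#include <sys/param.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>

static int
example_load_kva(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	int error, nsegs;

	nsegs = -1;			/* index of the last segment filled */
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
	    kernel_pmap, flags, NULL, &nsegs);	/* NULL: use dmat->segments */
	nsegs++;			/* convert index to count */
	if (error == EINPROGRESS)	/* queued while waiting on bounce pages */
		return (error);

	/* MD code gets a chance to post-process the segment list. */
	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error != 0)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);
	return (error);
}
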
diff --git a/sys/mips/mips/busdma_machdep.c b/sys/mips/mips/busdma_machdep.c
index 2ad118b..f3275e6 100644
--- a/sys/mips/mips/busdma_machdep.c
+++ b/sys/mips/mips/busdma_machdep.c
@@ -40,12 +40,12 @@ __FBSDID("$FreeBSD$");
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
+#include <sys/memdesc.h>
#include <sys/mutex.h>
-#include <sys/mbuf.h>
-#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
+#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -89,10 +89,17 @@ struct bounce_page {
vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */
bus_addr_t busaddr; /* Physical address */
vm_offset_t datavaddr; /* kva of client data */
+ bus_addr_t dataaddr; /* client physical address */
bus_size_t datacount; /* client data count */
STAILQ_ENTRY(bounce_page) links;
};
+struct sync_list {
+ vm_offset_t vaddr; /* kva of bounce buffer */
+ bus_addr_t busaddr; /* Physical address */
+ bus_size_t datacount; /* client data count */
+};
+
int busdma_swi_pending;
struct bounce_zone {
@@ -122,10 +129,6 @@ static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
"Total bounce pages");
-#define DMAMAP_LINEAR 0x1
-#define DMAMAP_MBUF 0x2
-#define DMAMAP_UIO 0x4
-#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_UNCACHEABLE 0x8
#define DMAMAP_ALLOCATED 0x10
#define DMAMAP_MALLOCUSED 0x20
@@ -135,16 +138,16 @@ struct bus_dmamap {
int pagesneeded;
int pagesreserved;
bus_dma_tag_t dmat;
+ struct memdesc mem;
int flags;
- void *buffer;
void *origbuffer;
void *allocbuffer;
TAILQ_ENTRY(bus_dmamap) freelist;
- int len;
STAILQ_ENTRY(bus_dmamap) links;
bus_dmamap_callback_t *callback;
void *callback_arg;
-
+ int sync_count;
+ struct sync_list *slist;
};
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
@@ -166,7 +169,8 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
- vm_offset_t vaddr, bus_size_t size);
+ vm_offset_t vaddr, bus_addr_t addr,
+ bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
/* Default tag, as most drivers provide no parent tag. */
@@ -215,11 +219,6 @@ SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);
*/
static __inline int
-bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
- bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
- int flags, vm_offset_t *lastaddrp, int *segp);
-
-static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
int i;
@@ -273,10 +272,14 @@ dflt_lock(void *arg, bus_dma_lock_op_t op)
}
static __inline bus_dmamap_t
-_busdma_alloc_dmamap(void)
+_busdma_alloc_dmamap(bus_dma_tag_t dmat)
{
+ struct sync_list *slist;
bus_dmamap_t map;
+ slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
+ if (slist == NULL)
+ return (NULL);
mtx_lock(&busdma_mtx);
map = TAILQ_FIRST(&dmamap_freelist);
if (map)
@@ -288,13 +291,18 @@ _busdma_alloc_dmamap(void)
map->flags = DMAMAP_ALLOCATED;
} else
map->flags = 0;
- STAILQ_INIT(&map->bpages);
+ if (map != NULL) {
+ STAILQ_INIT(&map->bpages);
+ map->slist = slist;
+ } else
+ free(slist, M_DEVBUF);
return (map);
}
static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
+ free(map->slist, M_DEVBUF);
if (map->flags & DMAMAP_ALLOCATED)
free(map, M_DEVBUF);
else {
@@ -477,7 +485,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
}
}
- newmap = _busdma_alloc_dmamap();
+ newmap = _busdma_alloc_dmamap(dmat);
if (newmap == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
return (ENOMEM);
@@ -485,6 +493,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
*mapp = newmap;
newmap->dmat = dmat;
newmap->allocbuffer = NULL;
+ newmap->sync_count = 0;
dmat->map_count++;
/*
@@ -549,7 +558,7 @@ int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
- if (STAILQ_FIRST(&map->bpages) != NULL) {
+ if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, EBUSY);
return (EBUSY);
@@ -592,7 +601,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
if (flags & BUS_DMA_ZERO)
mflags |= M_ZERO;
- newmap = _busdma_alloc_dmamap();
+ newmap = _busdma_alloc_dmamap(dmat);
if (newmap == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->flags, ENOMEM);
@@ -601,6 +610,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
dmat->map_count++;
*mapp = newmap;
newmap->dmat = dmat;
+ newmap->sync_count = 0;
/*
* If all the memory is coherent with DMA then we don't need to
@@ -684,7 +694,37 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
-static int
+static void
+_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+ bus_size_t buflen, int flags)
+{
+ bus_addr_t curaddr;
+ bus_size_t sgsize;
+
+ if ((map->pagesneeded == 0)) {
+ CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
+ dmat->lowaddr, dmat->boundary, dmat->alignment);
+ CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
+ map, map->pagesneeded);
+ /*
+ * Count the number of bounce pages
+ * needed in order to complete this transfer
+ */
+ curaddr = buf;
+ while (buflen != 0) {
+ sgsize = MIN(buflen, dmat->maxsegsz);
+ if (run_filter(dmat, curaddr) != 0) {
+ sgsize = MIN(sgsize, PAGE_SIZE);
+ map->pagesneeded++;
+ }
+ curaddr += sgsize;
+ buflen -= sgsize;
+ }
+ CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+ }
+}
+
+static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
void *buf, bus_size_t buflen, int flags)
{
@@ -719,60 +759,157 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
+}
+
+static int
+_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,int flags)
+{
/* Reserve Necessary Bounce Pages */
- if (map->pagesneeded != 0) {
- mtx_lock(&bounce_lock);
- if (flags & BUS_DMA_NOWAIT) {
- if (reserve_bounce_pages(dmat, map, 0) != 0) {
- mtx_unlock(&bounce_lock);
- return (ENOMEM);
- }
- } else {
- if (reserve_bounce_pages(dmat, map, 1) != 0) {
- /* Queue us for resources */
- STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
- map, links);
- mtx_unlock(&bounce_lock);
- return (EINPROGRESS);
- }
+ mtx_lock(&bounce_lock);
+ if (flags & BUS_DMA_NOWAIT) {
+ if (reserve_bounce_pages(dmat, map, 0) != 0) {
+ mtx_unlock(&bounce_lock);
+ return (ENOMEM);
+ }
+ } else {
+ if (reserve_bounce_pages(dmat, map, 1) != 0) {
+ /* Queue us for resources */
+ STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
+ map, links);
+ mtx_unlock(&bounce_lock);
+ return (EINPROGRESS);
}
- mtx_unlock(&bounce_lock);
}
+ mtx_unlock(&bounce_lock);
+
+ return (0);
+}
+
+/*
+ * Add a single contiguous physical range to the segment list.
+ */
+static int
+_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
+ bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
+{
+ bus_addr_t baddr, bmask;
+ int seg;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ bmask = ~(dmat->boundary - 1);
+ if (dmat->boundary > 0) {
+ baddr = (curaddr + dmat->boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+ /*
+ * Insert chunk into a segment, coalescing with
+ * the previous segment if possible.
+ */
+ seg = *segp;
+ if (seg >= 0 &&
+ curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
+ (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
+ (dmat->boundary == 0 ||
+ (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
+ segs[seg].ds_len += sgsize;
+ } else {
+ if (++seg >= dmat->nsegments)
+ return (0);
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ }
+ *segp = seg;
+ return (sgsize);
+}
+
+/*
+ * Utility function to load a physical buffer. segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ */
+int
+_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ bus_addr_t curaddr;
+ bus_size_t sgsize;
+ int error;
+
+ if (segs == NULL)
+ segs = dmat->segments;
+ if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
+ _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
+ if (map->pagesneeded != 0) {
+ error = _bus_dmamap_reserve_pages(dmat, map, flags);
+ if (error)
+ return (error);
+ }
+ }
+
+ while (buflen > 0) {
+ curaddr = buf;
+ sgsize = MIN(buflen, dmat->maxsegsz);
+ if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
+ sgsize = MIN(sgsize, PAGE_SIZE);
+ curaddr = add_bounce_page(dmat, map, 0, curaddr,
+ sgsize);
+ }
+ sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+ segp);
+ if (sgsize == 0)
+ break;
+ buf += sgsize;
+ buflen -= sgsize;
+ }
+
+ /*
+ * Did we fit?
+ */
+ if (buflen != 0) {
+ _bus_dmamap_unload(dmat, map);
+ return (EFBIG); /* XXX better return value here? */
+ }
return (0);
}
/*
- * Utility function to load a linear buffer. lastaddrp holds state
- * between invocations (for multiple-buffer loads). segp contains
+ * Utility function to load a linear buffer. segp contains
* the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
*/
-static __inline int
-bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
- bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
- int flags, vm_offset_t *lastaddrp, int *segp)
+int
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
+ int *segp)
{
bus_size_t sgsize;
- bus_addr_t curaddr, lastaddr, baddr, bmask;
+ bus_addr_t curaddr;
+ struct sync_list *sl;
vm_offset_t vaddr = (vm_offset_t)buf;
- int seg;
int error = 0;
- lastaddr = *lastaddrp;
- bmask = ~(dmat->boundary - 1);
+
+ if (segs == NULL)
+ segs = dmat->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
- error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
- flags);
- if (error)
- return (error);
+ _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
+ if (map->pagesneeded != 0) {
+ error = _bus_dmamap_reserve_pages(dmat, map, flags);
+ if (error)
+ return (error);
+ }
}
CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
"alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
- for (seg = *segp; buflen > 0 ; ) {
+ while (buflen > 0) {
/*
* Get the physical address for this segment.
*
@@ -791,237 +928,62 @@ bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
if (buflen < sgsize)
sgsize = buflen;
- /*
- * Make sure we don't cross any boundaries.
- */
- if (dmat->boundary > 0) {
- baddr = (curaddr + dmat->boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
- curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
- }
-
- /*
- * Insert chunk into a segment, coalescing with
- * the previous segment if possible.
- */
- if (seg >= 0 && curaddr == lastaddr &&
- (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
- (dmat->boundary == 0 ||
- (segs[seg].ds_addr & bmask) ==
- (curaddr & bmask))) {
- segs[seg].ds_len += sgsize;
- goto segdone;
+ curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
+ sgsize);
} else {
- if (++seg >= dmat->nsegments)
- break;
- segs[seg].ds_addr = curaddr;
- segs[seg].ds_len = sgsize;
+ sl = &map->slist[map->sync_count - 1];
+ if (map->sync_count == 0 ||
+ vaddr != sl->vaddr + sl->datacount) {
+ if (++map->sync_count > dmat->nsegments)
+ goto cleanup;
+ sl++;
+ sl->vaddr = vaddr;
+ sl->datacount = sgsize;
+ sl->busaddr = curaddr;
+ } else
+ sl->datacount += sgsize;
}
- if (error)
+ sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+ segp);
+ if (sgsize == 0)
break;
-segdone:
- lastaddr = curaddr + sgsize;
vaddr += sgsize;
buflen -= sgsize;
}
- *segp = seg;
- *lastaddrp = lastaddr;
-
+cleanup:
/*
* Did we fit?
*/
- if (buflen != 0)
+ if (buflen != 0) {
+ _bus_dmamap_unload(dmat, map);
error = EFBIG; /* XXX better return value here? */
+ }
return (error);
}
-/*
- * Map the buffer buf into bus space using the dmamap map.
- */
-int
-bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
- bus_size_t buflen, bus_dmamap_callback_t *callback,
- void *callback_arg, int flags)
+void
+__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
- vm_offset_t lastaddr = 0;
- int error, nsegs = -1;
KASSERT(dmat != NULL, ("dmatag is NULL"));
KASSERT(map != NULL, ("dmamap is NULL"));
+ map->mem = *mem;
map->callback = callback;
map->callback_arg = callback_arg;
- map->flags &= ~DMAMAP_TYPE_MASK;
- map->flags |= DMAMAP_LINEAR;
- map->buffer = buf;
- map->len = buflen;
- error = bus_dmamap_load_buffer(dmat,
- dmat->segments, map, buf, buflen, kernel_pmap,
- flags, &lastaddr, &nsegs);
- if (error == EINPROGRESS)
- return (error);
- if (error)
- (*callback)(callback_arg, NULL, 0, error);
- else
- (*callback)(callback_arg, dmat->segments, nsegs + 1, error);
-
- CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
- __func__, dmat, dmat->flags, nsegs + 1, error);
-
- return (error);
-}
-
-/*
- * Like bus_dmamap_load(), but for mbufs.
- */
-int
-bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
- bus_dmamap_callback2_t *callback, void *callback_arg,
- int flags)
-{
- int nsegs = -1, error = 0;
-
- M_ASSERTPKTHDR(m0);
-
- map->flags &= ~DMAMAP_TYPE_MASK;
- map->flags |= DMAMAP_MBUF;
- map->buffer = m0;
- map->len = 0;
- if (m0->m_pkthdr.len <= dmat->maxsize) {
- vm_offset_t lastaddr = 0;
- struct mbuf *m;
-
- for (m = m0; m != NULL && error == 0; m = m->m_next) {
- if (m->m_len > 0) {
- error = bus_dmamap_load_buffer(dmat,
- dmat->segments, map, m->m_data, m->m_len,
- kernel_pmap, flags, &lastaddr, &nsegs);
- map->len += m->m_len;
- }
- }
- } else {
- error = EINVAL;
- }
-
- if (error) {
- /*
- * force "no valid mappings" on error in callback.
- */
- (*callback)(callback_arg, dmat->segments, 0, 0, error);
- } else {
- (*callback)(callback_arg, dmat->segments, nsegs + 1,
- m0->m_pkthdr.len, error);
- }
- CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
- __func__, dmat, dmat->flags, error, nsegs + 1);
-
- return (error);
-}
-
-int
-bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
- struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
- int flags)
-{
- int error = 0;
- M_ASSERTPKTHDR(m0);
-
- flags |= BUS_DMA_NOWAIT;
- *nsegs = -1;
- map->flags &= ~DMAMAP_TYPE_MASK;
- map->flags |= DMAMAP_MBUF;
- map->buffer = m0;
- map->len = 0;
- if (m0->m_pkthdr.len <= dmat->maxsize) {
- vm_offset_t lastaddr = 0;
- struct mbuf *m;
-
- for (m = m0; m != NULL && error == 0; m = m->m_next) {
- if (m->m_len > 0) {
- error = bus_dmamap_load_buffer(dmat, segs, map,
- m->m_data, m->m_len,
- kernel_pmap, flags, &lastaddr,
- nsegs);
- map->len += m->m_len;
- }
- }
- } else {
- error = EINVAL;
- }
-
- /* XXX FIXME: Having to increment nsegs is really annoying */
- ++*nsegs;
- CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
- __func__, dmat, dmat->flags, error, *nsegs);
- return (error);
}
-/*
- * Like bus_dmamap_load(), but for uios.
- */
-int
-bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
- bus_dmamap_callback2_t *callback, void *callback_arg,
- int flags)
+bus_dma_segment_t *
+_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, int error)
{
- vm_offset_t lastaddr = 0;
- int nsegs, i, error;
- bus_size_t resid;
- struct iovec *iov;
- struct pmap *pmap;
-
- resid = uio->uio_resid;
- iov = uio->uio_iov;
- map->flags &= ~DMAMAP_TYPE_MASK;
- map->flags |= DMAMAP_UIO;
- map->buffer = uio;
- map->len = 0;
-
- if (uio->uio_segflg == UIO_USERSPACE) {
- KASSERT(uio->uio_td != NULL,
- ("bus_dmamap_load_uio: USERSPACE but no proc"));
- /* XXX: pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); */
- panic("can't do it yet");
- } else
- pmap = kernel_pmap;
-
- error = 0;
- nsegs = -1;
- for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
- /*
- * Now at the first iovec to load. Load each iovec
- * until we have exhausted the residual count.
- */
- bus_size_t minlen =
- resid < iov[i].iov_len ? resid : iov[i].iov_len;
- caddr_t addr = (caddr_t) iov[i].iov_base;
-
- if (minlen > 0) {
- error = bus_dmamap_load_buffer(dmat, dmat->segments,
- map, addr, minlen, pmap, flags, &lastaddr, &nsegs);
-
- map->len += minlen;
- resid -= minlen;
- }
- }
-
- if (error) {
- /*
- * force "no valid mappings" on error in callback.
- */
- (*callback)(callback_arg, dmat->segments, 0, 0, error);
- } else {
- (*callback)(callback_arg, dmat->segments, nsegs+1,
- uio->uio_resid, error);
- }
- CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
- __func__, dmat, dmat->flags, error, nsegs + 1);
- return (error);
+ if (segs == NULL)
+ segs = dmat->segments;
+ return (segs);
}
/*
@@ -1032,16 +994,16 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
struct bounce_page *bpage;
- map->flags &= ~DMAMAP_TYPE_MASK;
while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
STAILQ_REMOVE_HEAD(&map->bpages, links);
free_bounce_page(dmat, bpage);
}
+ map->sync_count = 0;
return;
}
static void
-bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
+bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op)
{
char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
vm_offset_t buf_cl, buf_clend;
@@ -1055,9 +1017,9 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
* prevent a data loss we save these chunks in temporary buffer
* before invalidation and restore them afer it
*/
- buf_cl = (vm_offset_t)buf & ~cache_linesize_mask;
- size_cl = (vm_offset_t)buf & cache_linesize_mask;
- buf_clend = (vm_offset_t)buf + len;
+ buf_cl = buf & ~cache_linesize_mask;
+ size_cl = buf & cache_linesize_mask;
+ buf_clend = buf + len;
size_clend = (mips_pdcache_linesize -
(buf_clend & cache_linesize_mask)) & cache_linesize_mask;
@@ -1072,7 +1034,7 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
memcpy (tmp_cl, (void*)buf_cl, size_cl);
if (size_clend)
memcpy (tmp_clend, (void*)buf_clend, size_clend);
- mips_dcache_inv_range((vm_offset_t)buf, len);
+ mips_dcache_inv_range(buf, len);
/*
* Restore them
*/
@@ -1087,15 +1049,14 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
* necessary.
*/
if (size_cl)
- mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
+ mips_dcache_wbinv_range(buf_cl, size_cl);
if (size_clend && (size_cl == 0 ||
buf_clend - buf_cl > mips_pdcache_linesize))
- mips_dcache_wbinv_range((vm_offset_t)buf_clend,
- size_clend);
+ mips_dcache_wbinv_range(buf_clend, size_clend);
break;
case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
- mips_dcache_wbinv_range((vm_offset_t)buf_cl, len);
+ mips_dcache_wbinv_range(buf_cl, len);
break;
case BUS_DMASYNC_PREREAD:
@@ -1106,7 +1067,7 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
memcpy (tmp_cl, (void *)buf_cl, size_cl);
if (size_clend)
memcpy (tmp_clend, (void *)buf_clend, size_clend);
- mips_dcache_inv_range((vm_offset_t)buf, len);
+ mips_dcache_inv_range(buf, len);
/*
* Restore them
*/
@@ -1121,15 +1082,14 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
* necessary.
*/
if (size_cl)
- mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
+ mips_dcache_wbinv_range(buf_cl, size_cl);
if (size_clend && (size_cl == 0 ||
buf_clend - buf_cl > mips_pdcache_linesize))
- mips_dcache_wbinv_range((vm_offset_t)buf_clend,
- size_clend);
+ mips_dcache_wbinv_range(buf_clend, size_clend);
break;
case BUS_DMASYNC_PREWRITE:
- mips_dcache_wb_range((vm_offset_t)buf, len);
+ mips_dcache_wb_range(buf, len);
break;
}
}
@@ -1141,10 +1101,18 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
STAILQ_FOREACH(bpage, &map->bpages, links) {
if (op & BUS_DMASYNC_PREWRITE) {
- bcopy((void *)bpage->datavaddr,
- (void *)(bpage->vaddr_nocache != 0 ?
- bpage->vaddr_nocache : bpage->vaddr),
- bpage->datacount);
+ if (bpage->datavaddr != 0)
+ bcopy((void *)bpage->datavaddr,
+ (void *)(bpage->vaddr_nocache != 0 ?
+ bpage->vaddr_nocache :
+ bpage->vaddr),
+ bpage->datacount);
+ else
+ physcopyout(bpage->dataaddr,
+ (void *)(bpage->vaddr_nocache != 0 ?
+ bpage->vaddr_nocache :
+ bpage->vaddr),
+ bpage->datacount);
if (bpage->vaddr_nocache == 0) {
mips_dcache_wb_range(bpage->vaddr,
bpage->datacount);
@@ -1156,36 +1124,23 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
mips_dcache_inv_range(bpage->vaddr,
bpage->datacount);
}
- bcopy((void *)(bpage->vaddr_nocache != 0 ?
- bpage->vaddr_nocache : bpage->vaddr),
- (void *)bpage->datavaddr, bpage->datacount);
+ if (bpage->datavaddr != 0)
+ bcopy((void *)(bpage->vaddr_nocache != 0 ?
+ bpage->vaddr_nocache : bpage->vaddr),
+ (void *)bpage->datavaddr, bpage->datacount);
+ else
+ physcopyin((void *)(bpage->vaddr_nocache != 0 ?
+ bpage->vaddr_nocache : bpage->vaddr),
+ bpage->dataaddr, bpage->datacount);
dmat->bounce_zone->total_bounced++;
}
}
}
-static __inline int
-_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
-{
- struct bounce_page *bpage;
-
- STAILQ_FOREACH(bpage, &map->bpages, links) {
- if ((vm_offset_t)buf >= bpage->datavaddr &&
- (vm_offset_t)buf + len <= bpage->datavaddr +
- bpage->datacount)
- return (1);
- }
- return (0);
-
-}
-
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
- struct mbuf *m;
- struct uio *uio;
- int resid;
- struct iovec *iov;
+ struct sync_list *sl, *end;
if (op == BUS_DMASYNC_POSTWRITE)
return;
@@ -1199,38 +1154,10 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
return;
CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
- switch(map->flags & DMAMAP_TYPE_MASK) {
- case DMAMAP_LINEAR:
- if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
- bus_dmamap_sync_buf(map->buffer, map->len, op);
- break;
- case DMAMAP_MBUF:
- m = map->buffer;
- while (m) {
- if (m->m_len > 0 &&
- !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
- bus_dmamap_sync_buf(m->m_data, m->m_len, op);
- m = m->m_next;
- }
- break;
- case DMAMAP_UIO:
- uio = map->buffer;
- iov = uio->uio_iov;
- resid = uio->uio_resid;
- for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
- bus_size_t minlen = resid < iov[i].iov_len ? resid :
- iov[i].iov_len;
- if (minlen > 0) {
- if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
- minlen))
- bus_dmamap_sync_buf(iov[i].iov_base,
- minlen, op);
- resid -= minlen;
- }
- }
- break;
- default:
- break;
+ if (map->sync_count) {
+ end = &map->slist[map->sync_count];
+ for (sl = &map->slist[0]; sl != end; sl++)
+ bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op);
}
}
@@ -1393,7 +1320,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
- bus_size_t size)
+ bus_addr_t addr, bus_size_t size)
{
struct bounce_zone *bz;
struct bounce_page *bpage;
@@ -1426,6 +1353,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bpage->busaddr |= vaddr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
+ bpage->dataaddr = addr;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
return (bpage->busaddr);
@@ -1479,8 +1407,8 @@ busdma_swi(void)
mtx_unlock(&bounce_lock);
dmat = map->dmat;
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
- bus_dmamap_load(map->dmat, map, map->buffer, map->len,
- map->callback, map->callback_arg, /*flags*/0);
+ bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
+ map->callback_arg, BUS_DMA_WAITOK);
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
mtx_lock(&bounce_lock);
}
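
For the load types whose MD implementations are removed above (the bus_dmamap_load_mbuf*() and bus_dmamap_load_uio() bodies), the same unified hook is assumed to be driven once per chunk by MI code. A simplified mbuf-chain sketch, with a hypothetical helper name and not the real MI wrapper, looks like this:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>

static int
example_load_mbuf_chain(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	M_ASSERTPKTHDR(m0);
	error = 0;
	*nsegs = -1;
	/* One _bus_dmamap_load_buffer() call per mbuf, sharing the segment index. */
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0)
			error = _bus_dmamap_load_buffer(dmat, map, m->m_data,
			    m->m_len, kernel_pmap, flags | BUS_DMA_NOWAIT,
			    segs, nsegs);
	}
	(*nsegs)++;		/* convert last index to a segment count */
	return (error);
}

Because every chunk loaded this way is recorded in map->slist via the new sync_count bookkeeping, _bus_dmamap_sync() no longer needs to know whether the map held a linear buffer, an mbuf chain, or a uio.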