author     kib <kib@FreeBSD.org>  2013-02-12 16:57:20 +0000
committer  kib <kib@FreeBSD.org>  2013-02-12 16:57:20 +0000
commit     bd7f0fa0bb4b7b0f87227e0c4d49a4bd9b113cf0 (patch)
tree       e550f2c754f1edf951a8b93963ebcfc4fa0d20ce /sys/sparc64
parent     e0a463e76c719f11788ec107b5aa3e2da4e57c0b (diff)
Reform the busdma API so that new types may be added without modifying
every architecture's busdma_machdep.c.  It is done by unifying the
bus_dmamap_load_buffer() routines so that they may be called from MI
code.  The MD busdma is then given a chance to do any final processing
in the complete() callback.

The cam changes unify the bus_dmamap_load* handling in cam drivers.

The arm and mips implementations are updated to track virtual addresses
for sync().  Previously this was done in a type specific way.  Now it
is done in a generic way by recording the list of virtuals in the map.

Submitted by:	jeff (sponsored by EMC/Isilon)
Reviewed by:	kan (previous version), scottl, mjacob (isp(4), no
		objections for target mode changes)
Discussed with:	ian (arm changes)
Tested by:	marius (sparc64), mips (jmallet), isci(4) on x86 (jharris),
		amd64 (Fabian Keil <freebsd-listen@fabiankeil.de>)
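To make the reform concrete: after this change each architecture exports only
the two low-level loaders plus the waitok()/complete() hooks, and the MI layer
owns the callback plumbing.  Below is a simplified sketch of how the MI code
(sys/kern/subr_bus_dma.c in this commit) can drive the MD hooks; error paths
and the deferred-load machinery are trimmed, so treat the details as
illustrative rather than the verbatim MI implementation.

int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error, nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		/* Save enough state for the MD code to retry a deferral. */
		mem = memdesc_vaddr(buf, buflen);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

	/* The segment counter starts at -1; the MD loader appends. */
	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;

	if (error == EINPROGRESS)	/* bounce-buffer MDs may defer */
		return (error);

	/* The MD hook finishes up (e.g. requeue the map, set DMF_LOADED). */
	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error != 0)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);
	return (0);	/* historical busdma convention */
}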
Diffstat (limited to 'sys/sparc64')
-rw-r--r--  sys/sparc64/include/bus_dma.h        37
-rw-r--r--  sys/sparc64/sparc64/bus_machdep.c   313
-rw-r--r--  sys/sparc64/sparc64/iommu.c         339
3 files changed, 277 insertions, 412 deletions
diff --git a/sys/sparc64/include/bus_dma.h b/sys/sparc64/include/bus_dma.h
index 5f981a7..7395794 100644
--- a/sys/sparc64/include/bus_dma.h
+++ b/sys/sparc64/include/bus_dma.h
@@ -78,14 +78,17 @@
struct bus_dma_methods {
int (*dm_dmamap_create)(bus_dma_tag_t, int, bus_dmamap_t *);
int (*dm_dmamap_destroy)(bus_dma_tag_t, bus_dmamap_t);
- int (*dm_dmamap_load)(bus_dma_tag_t, bus_dmamap_t, void *,
- bus_size_t, bus_dmamap_callback_t *, void *, int);
- int (*dm_dmamap_load_mbuf)(bus_dma_tag_t, bus_dmamap_t,
- struct mbuf *, bus_dmamap_callback2_t *, void *, int);
- int (*dm_dmamap_load_mbuf_sg)(bus_dma_tag_t, bus_dmamap_t,
- struct mbuf *, bus_dma_segment_t *segs, int *nsegs, int);
- int (*dm_dmamap_load_uio)(bus_dma_tag_t, bus_dmamap_t, struct uio *,
- bus_dmamap_callback2_t *, void *, int);
+ int (*dm_dmamap_load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_paddr_t buf, bus_size_t buflen, int flags,
+ bus_dma_segment_t *segs, int *segp);
+ int (*dm_dmamap_load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ void *buf, bus_size_t buflen, struct pmap *pmap, int flags,
+ bus_dma_segment_t *segs, int *segp);
+ void (*dm_dmamap_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct memdesc *mem, bus_dmamap_callback_t *callback,
+ void *callback_arg);
+ bus_dma_segment_t *(*dm_dmamap_complete)(bus_dma_tag_t dmat,
+ bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error);
void (*dm_dmamap_unload)(bus_dma_tag_t, bus_dmamap_t);
void (*dm_dmamap_sync)(bus_dma_tag_t, bus_dmamap_t,
bus_dmasync_op_t);
@@ -125,14 +128,16 @@ struct bus_dma_tag {
((t)->dt_mt->dm_dmamap_create((t), (f), (p)))
#define bus_dmamap_destroy(t, p) \
((t)->dt_mt->dm_dmamap_destroy((t), (p)))
-#define bus_dmamap_load(t, m, p, s, cb, cba, f) \
- ((t)->dt_mt->dm_dmamap_load((t), (m), (p), (s), (cb), (cba), (f)))
-#define bus_dmamap_load_mbuf(t, m, mb, cb, cba, f) \
- ((t)->dt_mt->dm_dmamap_load_mbuf((t), (m), (mb), (cb), (cba), (f)))
-#define bus_dmamap_load_mbuf_sg(t, m, mb, segs, nsegs, f) \
- ((t)->dt_mt->dm_dmamap_load_mbuf_sg((t), (m), (mb), (segs), (nsegs), (f)))
-#define bus_dmamap_load_uio(t, m, ui, cb, cba, f) \
- ((t)->dt_mt->dm_dmamap_load_uio((t), (m), (ui), (cb), (cba), (f)))
+#define _bus_dmamap_load_phys(t, m, b, l, f, s, sp) \
+ ((t)->dt_mt->dm_dmamap_load_phys((t), (m), (b), (l), \
+ (f), (s), (sp)))
+#define _bus_dmamap_load_buffer(t, m, b, l, p, f, s, sp) \
+ ((t)->dt_mt->dm_dmamap_load_buffer((t), (m), (b), (l), (p), \
+ (f), (s), (sp)))
+#define _bus_dmamap_waitok(t, m, mem, c, ca) \
+ ((t)->dt_mt->dm_dmamap_waitok((t), (m), (mem), (c), (ca)))
+#define _bus_dmamap_complete(t, m, s, n, e) \
+ ((t)->dt_mt->dm_dmamap_complete((t), (m), (s), (n), (e)))
#define bus_dmamap_unload(t, p) \
((t)->dt_mt->dm_dmamap_unload((t), (p)))
#define bus_dmamap_sync(t, m, op) \
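Because dm_dmamap_load_buffer is now reachable from MI code through the
_bus_dmamap_load_buffer() macro above, a single MI loop can walk an mbuf
chain (or a uio) and feed each piece to the MD loader, which is why the
per-architecture dm_dmamap_load_mbuf*/dm_dmamap_load_uio methods could be
deleted.  A minimal sketch of that MI mbuf walk (simplified, not the
verbatim MI helper):

static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	M_ASSERTPKTHDR(m0);

	/* The caller initialized *nsegs to -1 (first-call convention). */
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0)
			error = _bus_dmamap_load_buffer(dmat, map, m->m_data,
			    m->m_len, kernel_pmap, flags, segs, nsegs);
	}
	return (error);
}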
diff --git a/sys/sparc64/sparc64/bus_machdep.c b/sys/sparc64/sparc64/bus_machdep.c
index 31d28b2..7f5e76b 100644
--- a/sys/sparc64/sparc64/bus_machdep.c
+++ b/sys/sparc64/sparc64/bus_machdep.c
@@ -98,13 +98,11 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
-#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/systm.h>
-#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
@@ -326,38 +324,106 @@ nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
}
/*
- * Utility function to load a linear buffer. lastaddrp holds state
- * between invocations (for multiple-buffer loads). segp contains
+ * Add a single contiguous physical range to the segment list.
+ */
+static int
+nexus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
+ bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
+{
+ bus_addr_t baddr, bmask;
+ int seg;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ bmask = ~(dmat->dt_boundary - 1);
+ if (dmat->dt_boundary > 0) {
+ baddr = (curaddr + dmat->dt_boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+
+ /*
+ * Insert chunk into a segment, coalescing with
+ * previous segment if possible.
+ */
+ seg = *segp;
+ if (seg == -1) {
+ seg = 0;
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ } else {
+ if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
+ (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz &&
+ (dmat->dt_boundary == 0 ||
+ (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ segs[seg].ds_len += sgsize;
+ else {
+ if (++seg >= dmat->dt_nsegments)
+ return (0);
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ }
+ }
+ *segp = seg;
+ return (sgsize);
+}
+
+/*
+ * Utility function to load a physical buffer. segp contains
 * the starting segment on entrance, and the ending segment on exit.
- * first indicates if this is the first invocation of this function.
*/
static int
-_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, void *buf, bus_size_t buflen,
- struct thread *td, int flags, bus_addr_t *lastaddrp,
- bus_dma_segment_t *segs, int *segp, int first)
+nexus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+ bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
{
+ bus_addr_t curaddr;
bus_size_t sgsize;
- bus_addr_t curaddr, lastaddr, baddr, bmask;
- vm_offset_t vaddr = (vm_offset_t)buf;
- int seg;
- pmap_t pmap;
- if (td != NULL)
- pmap = vmspace_pmap(td->td_proc->p_vmspace);
- else
- pmap = NULL;
+ if (segs == NULL)
+ segs = dmat->dt_segments;
+
+ curaddr = buf;
+ while (buflen > 0) {
+ sgsize = MIN(buflen, dmat->dt_maxsegsz);
+ sgsize = nexus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+ segp);
+ if (sgsize == 0)
+ break;
+ curaddr += sgsize;
+ buflen -= sgsize;
+ }
- lastaddr = *lastaddrp;
- bmask = ~(dmat->dt_boundary - 1);
+ /*
+ * Did we fit?
+ */
+ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+/*
+ * Utility function to load a linear buffer. segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ */
+static int
+nexus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+ bus_size_t sgsize;
+ bus_addr_t curaddr;
+ vm_offset_t vaddr = (vm_offset_t)buf;
- for (seg = *segp; buflen > 0 ; ) {
+ if (segs == NULL)
+ segs = dmat->dt_segments;
+
+ while (buflen > 0) {
/*
* Get the physical address for this segment.
*/
- if (pmap)
- curaddr = pmap_extract(pmap, vaddr);
- else
+ if (pmap == kernel_pmap)
curaddr = pmap_kextract(vaddr);
+ else
+ curaddr = pmap_extract(pmap, vaddr);
/*
* Compute the segment size, and adjust counts.
@@ -368,205 +434,36 @@ _nexus_dmamap_load_buffer(bus_dma_tag_t dmat, void *buf, bus_size_t buflen,
if (buflen < sgsize)
sgsize = buflen;
- /*
- * Make sure we don't cross any boundaries.
- */
- if (dmat->dt_boundary > 0) {
- baddr = (curaddr + dmat->dt_boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ sgsize = nexus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+ segp);
+ if (sgsize == 0)
+ break;
- /*
- * Insert chunk into a segment, coalescing with
- * previous segment if possible.
- */
- if (first) {
- segs[seg].ds_addr = curaddr;
- segs[seg].ds_len = sgsize;
- first = 0;
- } else {
- if (curaddr == lastaddr &&
- (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz &&
- (dmat->dt_boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
- segs[seg].ds_len += sgsize;
- else {
- if (++seg >= dmat->dt_nsegments)
- break;
- segs[seg].ds_addr = curaddr;
- segs[seg].ds_len = sgsize;
- }
- }
-
- lastaddr = curaddr + sgsize;
vaddr += sgsize;
buflen -= sgsize;
}
- *segp = seg;
- *lastaddrp = lastaddr;
-
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
-/*
- * Common function for loading a DMA map with a linear buffer. May
- * be called by bus-specific DMA map load functions.
- *
- * Most SPARCs have IOMMUs in the bus controllers. In those cases
- * they only need one segment and will use virtual addresses for DVMA.
- * Those bus controllers should intercept these vectors and should
- * *NEVER* call nexus_dmamap_load() which is used only by devices that
- * bypass DVMA.
- */
-static int
-nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
- bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
- int flags)
-{
- bus_addr_t lastaddr;
- int error, nsegs;
-
- error = _nexus_dmamap_load_buffer(dmat, buf, buflen, NULL, flags,
- &lastaddr, dmat->dt_segments, &nsegs, 1);
-
- if (error == 0) {
- (*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 0);
- map->dm_flags |= DMF_LOADED;
- } else
- (*callback)(callback_arg, NULL, 0, error);
-
- return (0);
-}
-
-/*
- * Like nexus_dmamap_load(), but for mbufs.
- */
-static int
-nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
- bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
-{
- int nsegs, error;
-
- M_ASSERTPKTHDR(m0);
-
- nsegs = 0;
- error = 0;
- if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
- int first = 1;
- bus_addr_t lastaddr = 0;
- struct mbuf *m;
-
- for (m = m0; m != NULL && error == 0; m = m->m_next) {
- if (m->m_len > 0) {
- error = _nexus_dmamap_load_buffer(dmat,
- m->m_data, m->m_len,NULL, flags, &lastaddr,
- dmat->dt_segments, &nsegs, first);
- first = 0;
- }
- }
- } else {
- error = EINVAL;
- }
-
- if (error) {
- /* force "no valid mappings" in callback */
- (*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
- } else {
- map->dm_flags |= DMF_LOADED;
- (*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
- m0->m_pkthdr.len, error);
- }
- return (error);
-}
-
-static int
-nexus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
- bus_dma_segment_t *segs, int *nsegs, int flags)
+static void
+nexus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
- int error;
-
- M_ASSERTPKTHDR(m0);
-
- *nsegs = 0;
- error = 0;
- if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
- int first = 1;
- bus_addr_t lastaddr = 0;
- struct mbuf *m;
-
- for (m = m0; m != NULL && error == 0; m = m->m_next) {
- if (m->m_len > 0) {
- error = _nexus_dmamap_load_buffer(dmat,
- m->m_data, m->m_len,NULL, flags, &lastaddr,
- segs, nsegs, first);
- first = 0;
- }
- }
- } else {
- error = EINVAL;
- }
- ++*nsegs;
- return (error);
}
-/*
- * Like nexus_dmamap_load(), but for uios.
- */
-static int
-nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
- bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
+static bus_dma_segment_t *
+nexus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, int error)
{
- bus_addr_t lastaddr;
- int nsegs, error, first, i;
- bus_size_t resid;
- struct iovec *iov;
- struct thread *td = NULL;
-
- resid = uio->uio_resid;
- iov = uio->uio_iov;
-
- if (uio->uio_segflg == UIO_USERSPACE) {
- td = uio->uio_td;
- KASSERT(td != NULL, ("%s: USERSPACE but no proc", __func__));
- }
-
- nsegs = 0;
- error = 0;
- first = 1;
- for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
- /*
- * Now at the first iovec to load. Load each iovec
- * until we have exhausted the residual count.
- */
- bus_size_t minlen =
- resid < iov[i].iov_len ? resid : iov[i].iov_len;
- caddr_t addr = (caddr_t) iov[i].iov_base;
- if (minlen > 0) {
- error = _nexus_dmamap_load_buffer(dmat, addr, minlen,
- td, flags, &lastaddr, dmat->dt_segments, &nsegs,
- first);
- first = 0;
-
- resid -= minlen;
- }
- }
-
- if (error) {
- /* force "no valid mappings" in callback */
- (*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
- } else {
- map->dm_flags |= DMF_LOADED;
- (*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
- uio->uio_resid, error);
- }
- return (error);
+ if (segs == NULL)
+ segs = dmat->dt_segments;
+ return (segs);
}
/*
@@ -669,10 +566,10 @@ nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
static struct bus_dma_methods nexus_dma_methods = {
nexus_dmamap_create,
nexus_dmamap_destroy,
- nexus_dmamap_load,
- nexus_dmamap_load_mbuf,
- nexus_dmamap_load_mbuf_sg,
- nexus_dmamap_load_uio,
+ nexus_dmamap_load_phys,
+ nexus_dmamap_load_buffer,
+ nexus_dmamap_waitok,
+ nexus_dmamap_complete,
nexus_dmamap_unload,
nexus_dmamap_sync,
nexus_dmamem_alloc,
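A note on nexus_dmamap_addseg() above: it centralizes the boundary clipping
and segment coalescing that the old load/load_mbuf/load_uio routines each
duplicated.  A standalone illustration of the clipping arithmetic, using
invented values (userland C, assuming a 64 KB dt_boundary):

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t boundary = 0x10000;		/* dt_boundary: 64 KB */
	uint64_t curaddr = 0x2fff0;		/* 16 bytes below 0x30000 */
	uint64_t sgsize = 0x100;		/* requested chunk size */
	uint64_t bmask, baddr;

	bmask = ~(boundary - 1);
	baddr = (curaddr + boundary) & bmask;	/* next boundary: 0x30000 */
	if (sgsize > baddr - curaddr)
		sgsize = baddr - curaddr;	/* clip at the boundary */
	assert(sgsize == 0x10);	/* remaining 0xf0 bytes start a new segment */
	return (0);
}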
diff --git a/sys/sparc64/sparc64/iommu.c b/sys/sparc64/sparc64/iommu.c
index ace74ec..42aa258 100644
--- a/sys/sparc64/sparc64/iommu.c
+++ b/sys/sparc64/sparc64/iommu.c
@@ -847,31 +847,50 @@ iommu_dvmamap_destroy(bus_dma_tag_t dt, bus_dmamap_t map)
}
/*
- * IOMMU DVMA operations, common to PCI and SBus
+ * Utility function to load a physical buffer. segp contains
+ * the starting segment on entrance, and the ending segment on exit.
*/
static int
-iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
- bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
- int flags, bus_dma_segment_t *segs, int *segp, int align)
+iommu_dvmamap_load_phys(bus_dma_tag_t dt, bus_dmamap_t map, vm_paddr_t buf,
+ bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t amask, dvmaddr, dvmoffs;
bus_size_t sgsize, esize;
- vm_offset_t vaddr, voffs;
+ struct iommu_state *is;
+ vm_offset_t voffs;
vm_paddr_t curaddr;
- pmap_t pmap = NULL;
int error, firstpg, sgcnt;
u_int slot;
+ is = dt->dt_cookie;
+ if (*segp == -1) {
+ if ((map->dm_flags & DMF_LOADED) != 0) {
+#ifdef DIAGNOSTIC
+ printf("%s: map still in use\n", __func__);
+#endif
+ bus_dmamap_unload(dt, map);
+ }
+
+ /*
+ * Make sure that the map is not on a queue so that the
+ * resource list may be safely accessed and modified without
+ * needing the lock to cover the whole operation.
+ */
+ IS_LOCK(is);
+ iommu_map_remq(is, map);
+ IS_UNLOCK(is);
+
+ amask = dt->dt_alignment - 1;
+ } else
+ amask = 0;
KASSERT(buflen != 0, ("%s: buflen == 0!", __func__));
if (buflen > dt->dt_maxsize)
return (EINVAL);
- if (td != NULL)
- pmap = vmspace_pmap(td->td_proc->p_vmspace);
+ if (segs == NULL)
+ segs = dt->dt_segments;
- vaddr = (vm_offset_t)buf;
- voffs = vaddr & IO_PAGE_MASK;
- amask = align ? dt->dt_alignment - 1 : 0;
+ voffs = buf & IO_PAGE_MASK;
/* Try to find a slab that is large enough. */
error = iommu_dvma_vallocseg(dt, is, map, voffs, buflen, amask,
@@ -885,23 +904,17 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
map->dm_flags |= iommu_use_streaming(is, map, buflen) != 0 ?
DMF_STREAMED : 0;
for (; buflen > 0; ) {
- /*
- * Get the physical address for this page.
- */
- if (pmap != NULL)
- curaddr = pmap_extract(pmap, vaddr);
- else
- curaddr = pmap_kextract(vaddr);
+ curaddr = buf;
/*
* Compute the segment size, and adjust counts.
*/
- sgsize = IO_PAGE_SIZE - ((u_long)vaddr & IO_PAGE_MASK);
+ sgsize = IO_PAGE_SIZE - ((u_long)buf & IO_PAGE_MASK);
if (buflen < sgsize)
sgsize = buflen;
buflen -= sgsize;
- vaddr += sgsize;
+ buf += sgsize;
dvmoffs = trunc_io_page(dvmaddr);
iommu_enter(is, dvmoffs, trunc_io_page(curaddr),
@@ -949,203 +962,153 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
return (0);
}
+/*
+ * IOMMU DVMA operations, common to PCI and SBus
+ */
static int
-iommu_dvmamap_load(bus_dma_tag_t dt, bus_dmamap_t map, void *buf,
- bus_size_t buflen, bus_dmamap_callback_t *cb, void *cba,
- int flags)
+iommu_dvmamap_load_buffer(bus_dma_tag_t dt, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
+ int *segp)
{
- struct iommu_state *is = dt->dt_cookie;
- int error, seg = -1;
+ bus_addr_t amask, dvmaddr, dvmoffs;
+ bus_size_t sgsize, esize;
+ struct iommu_state *is;
+ vm_offset_t vaddr, voffs;
+ vm_paddr_t curaddr;
+ int error, firstpg, sgcnt;
+ u_int slot;
- if ((map->dm_flags & DMF_LOADED) != 0) {
+ is = dt->dt_cookie;
+ if (*segp == -1) {
+ if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
- printf("%s: map still in use\n", __func__);
+ printf("%s: map still in use\n", __func__);
#endif
- bus_dmamap_unload(dt, map);
- }
+ bus_dmamap_unload(dt, map);
+ }
- /*
- * Make sure that the map is not on a queue so that the resource list
- * may be safely accessed and modified without needing the lock to
- * cover the whole operation.
- */
- IS_LOCK(is);
- iommu_map_remq(is, map);
- IS_UNLOCK(is);
+ /*
+ * Make sure that the map is not on a queue so that the
+ * resource list may be safely accessed and modified without
+ * needing the lock to cover the whole operation.
+ */
+ IS_LOCK(is);
+ iommu_map_remq(is, map);
+ IS_UNLOCK(is);
- error = iommu_dvmamap_load_buffer(dt, is, map, buf, buflen, NULL,
- flags, dt->dt_segments, &seg, 1);
+ amask = dt->dt_alignment - 1;
+ } else
+ amask = 0;
+ KASSERT(buflen != 0, ("%s: buflen == 0!", __func__));
+ if (buflen > dt->dt_maxsize)
+ return (EINVAL);
- IS_LOCK(is);
- iommu_map_insq(is, map);
- if (error != 0) {
- iommu_dvmamap_vunload(is, map);
- IS_UNLOCK(is);
- (*cb)(cba, dt->dt_segments, 0, error);
- } else {
- IS_UNLOCK(is);
- map->dm_flags |= DMF_LOADED;
- (*cb)(cba, dt->dt_segments, seg + 1, 0);
- }
+ if (segs == NULL)
+ segs = dt->dt_segments;
- return (error);
-}
+ vaddr = (vm_offset_t)buf;
+ voffs = vaddr & IO_PAGE_MASK;
-static int
-iommu_dvmamap_load_mbuf(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
- bus_dmamap_callback2_t *cb, void *cba, int flags)
-{
- struct iommu_state *is = dt->dt_cookie;
- struct mbuf *m;
- int error = 0, first = 1, nsegs = -1;
+ /* Try to find a slab that is large enough. */
+ error = iommu_dvma_vallocseg(dt, is, map, voffs, buflen, amask,
+ &dvmaddr);
+ if (error != 0)
+ return (error);
- M_ASSERTPKTHDR(m0);
+ sgcnt = *segp;
+ firstpg = 1;
+ map->dm_flags &= ~DMF_STREAMED;
+ map->dm_flags |= iommu_use_streaming(is, map, buflen) != 0 ?
+ DMF_STREAMED : 0;
+ for (; buflen > 0; ) {
+ /*
+ * Get the physical address for this page.
+ */
+ if (pmap == kernel_pmap)
+ curaddr = pmap_kextract(vaddr);
+ else
+ curaddr = pmap_extract(pmap, vaddr);
- if ((map->dm_flags & DMF_LOADED) != 0) {
-#ifdef DIAGNOSTIC
- printf("%s: map still in use\n", __func__);
-#endif
- bus_dmamap_unload(dt, map);
- }
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = IO_PAGE_SIZE - ((u_long)vaddr & IO_PAGE_MASK);
+ if (buflen < sgsize)
+ sgsize = buflen;
- IS_LOCK(is);
- iommu_map_remq(is, map);
- IS_UNLOCK(is);
+ buflen -= sgsize;
+ vaddr += sgsize;
- if (m0->m_pkthdr.len <= dt->dt_maxsize) {
- for (m = m0; m != NULL && error == 0; m = m->m_next) {
- if (m->m_len == 0)
- continue;
- error = iommu_dvmamap_load_buffer(dt, is, map,
- m->m_data, m->m_len, NULL, flags, dt->dt_segments,
- &nsegs, first);
- first = 0;
+ dvmoffs = trunc_io_page(dvmaddr);
+ iommu_enter(is, dvmoffs, trunc_io_page(curaddr),
+ (map->dm_flags & DMF_STREAMED) != 0, flags);
+ if ((is->is_flags & IOMMU_FLUSH_CACHE) != 0) {
+ slot = IOTSBSLOT(dvmoffs);
+ if (buflen <= 0 || slot % 8 == 7)
+ IOMMU_WRITE8(is, is_iommu, IMR_CACHE_FLUSH,
+ is->is_ptsb + slot * 8);
}
- } else
- error = EINVAL;
- IS_LOCK(is);
- iommu_map_insq(is, map);
- if (error != 0) {
- iommu_dvmamap_vunload(is, map);
- IS_UNLOCK(is);
- /* force "no valid mappings" in callback */
- (*cb)(cba, dt->dt_segments, 0, 0, error);
- } else {
- IS_UNLOCK(is);
- map->dm_flags |= DMF_LOADED;
- (*cb)(cba, dt->dt_segments, nsegs + 1, m0->m_pkthdr.len, 0);
+ /*
+ * Chop the chunk up into segments of at most maxsegsz, but try
+ * to fill each segment as well as possible.
+ */
+ if (!firstpg) {
+ esize = ulmin(sgsize,
+ dt->dt_maxsegsz - segs[sgcnt].ds_len);
+ segs[sgcnt].ds_len += esize;
+ sgsize -= esize;
+ dvmaddr += esize;
+ }
+ while (sgsize > 0) {
+ sgcnt++;
+ if (sgcnt >= dt->dt_nsegments)
+ return (EFBIG);
+ /*
+ * No extra alignment here - the common practice in
+ * the busdma code seems to be that only the first
+ * segment needs to satisfy the alignment constraints
+ * (and that only for bus_dmamem_alloc()ed maps).
+ * It is assumed that such tags have maxsegsize >=
+ * maxsize.
+ */
+ esize = ulmin(sgsize, dt->dt_maxsegsz);
+ segs[sgcnt].ds_addr = dvmaddr;
+ segs[sgcnt].ds_len = esize;
+ sgsize -= esize;
+ dvmaddr += esize;
+ }
+
+ firstpg = 0;
}
- return (error);
+ *segp = sgcnt;
+ return (0);
}
-static int
-iommu_dvmamap_load_mbuf_sg(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
- bus_dma_segment_t *segs, int *nsegs, int flags)
+static void
+iommu_dvmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
- struct iommu_state *is = dt->dt_cookie;
- struct mbuf *m;
- int error = 0, first = 1;
-
- M_ASSERTPKTHDR(m0);
-
- *nsegs = -1;
- if ((map->dm_flags & DMF_LOADED) != 0) {
-#ifdef DIAGNOSTIC
- printf("%s: map still in use\n", __func__);
-#endif
- bus_dmamap_unload(dt, map);
- }
-
- IS_LOCK(is);
- iommu_map_remq(is, map);
- IS_UNLOCK(is);
-
- if (m0->m_pkthdr.len <= dt->dt_maxsize) {
- for (m = m0; m != NULL && error == 0; m = m->m_next) {
- if (m->m_len == 0)
- continue;
- error = iommu_dvmamap_load_buffer(dt, is, map,
- m->m_data, m->m_len, NULL, flags, segs,
- nsegs, first);
- first = 0;
- }
- } else
- error = EINVAL;
-
- IS_LOCK(is);
- iommu_map_insq(is, map);
- if (error != 0) {
- iommu_dvmamap_vunload(is, map);
- IS_UNLOCK(is);
- } else {
- IS_UNLOCK(is);
- map->dm_flags |= DMF_LOADED;
- ++*nsegs;
- }
- return (error);
}
-static int
-iommu_dvmamap_load_uio(bus_dma_tag_t dt, bus_dmamap_t map, struct uio *uio,
- bus_dmamap_callback2_t *cb, void *cba, int flags)
+static bus_dma_segment_t *
+iommu_dvmamap_complete(bus_dma_tag_t dt, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, int error)
{
struct iommu_state *is = dt->dt_cookie;
- struct iovec *iov;
- struct thread *td = NULL;
- bus_size_t minlen, resid;
- int nsegs = -1, error = 0, first = 1, i;
-
- if ((map->dm_flags & DMF_LOADED) != 0) {
-#ifdef DIAGNOSTIC
- printf("%s: map still in use\n", __func__);
-#endif
- bus_dmamap_unload(dt, map);
- }
-
- IS_LOCK(is);
- iommu_map_remq(is, map);
- IS_UNLOCK(is);
-
- resid = uio->uio_resid;
- iov = uio->uio_iov;
-
- if (uio->uio_segflg == UIO_USERSPACE) {
- td = uio->uio_td;
- KASSERT(td != NULL,
- ("%s: USERSPACE but no proc", __func__));
- }
-
- for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
- /*
- * Now at the first iovec to load. Load each iovec
- * until we have exhausted the residual count.
- */
- minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
- if (minlen == 0)
- continue;
-
- error = iommu_dvmamap_load_buffer(dt, is, map,
- iov[i].iov_base, minlen, td, flags, dt->dt_segments,
- &nsegs, first);
- first = 0;
-
- resid -= minlen;
- }
IS_LOCK(is);
iommu_map_insq(is, map);
- if (error) {
+ if (error != 0) {
iommu_dvmamap_vunload(is, map);
IS_UNLOCK(is);
- /* force "no valid mappings" in callback */
- (*cb)(cba, dt->dt_segments, 0, 0, error);
} else {
IS_UNLOCK(is);
map->dm_flags |= DMF_LOADED;
- (*cb)(cba, dt->dt_segments, nsegs + 1, uio->uio_resid, 0);
}
- return (error);
+ if (segs == NULL)
+ segs = dt->dt_segments;
+ return (segs);
}
static void
@@ -1241,10 +1204,10 @@ iommu_diag(struct iommu_state *is, vm_offset_t va)
struct bus_dma_methods iommu_dma_methods = {
iommu_dvmamap_create,
iommu_dvmamap_destroy,
- iommu_dvmamap_load,
- iommu_dvmamap_load_mbuf,
- iommu_dvmamap_load_mbuf_sg,
- iommu_dvmamap_load_uio,
+ iommu_dvmamap_load_phys,
+ iommu_dvmamap_load_buffer,
+ iommu_dvmamap_waitok,
+ iommu_dvmamap_complete,
iommu_dvmamap_unload,
iommu_dvmamap_sync,
iommu_dvmamem_alloc,
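The *segp == -1 test in both IOMMU loaders above implements the MI convention
that the segment counter starts at -1: the first loader call for a map does
the one-time setup (unloading a stale map, dequeuing it, applying the
alignment mask), later calls for the same load simply append segments, and a
final increment turns the last index into a count.  iommu_dvmamap_complete()
then requeues the map and sets DMF_LOADED exactly once per load, work each
deleted loader used to repeat.  A toy illustration of just that convention
(userland C, not kernel code):

#include <stdio.h>

static void
load_chunk(int *segp)
{
	if (*segp == -1)
		*segp = 0;	/* first call: one-time setup goes here */
	else
		(*segp)++;	/* later calls append another segment */
}

int
main(void)
{
	int nsegs = -1;

	load_chunk(&nsegs);	/* e.g. first mbuf in a chain */
	load_chunk(&nsegs);	/* second mbuf */
	nsegs++;		/* index of last segment -> segment count */
	printf("%d segments\n", nsegs);	/* prints "2 segments" */
	return (0);
}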