author     sam <sam@FreeBSD.org>  2002-10-04 20:40:39 +0000
committer  sam <sam@FreeBSD.org>  2002-10-04 20:40:39 +0000
commit     1ba0866904d81527e87e7ca185303d932dcb43fc (patch)
tree       2a99279aecd745558985c36259960a14ab6452aa /sys/alpha
parent     451a9f90e16e09921a9b6bb02f0bc45c9272fba6 (diff)
New bus_dma interfaces for use by crypto device drivers:

o bus_dmamap_load_mbuf
o bus_dmamap_load_uio

Tested on i386. Known to compile on alpha and sparc64, but not tested there.
Otherwise untried.
Diffstat (limited to 'sys/alpha')
-rw-r--r--  sys/alpha/alpha/busdma_machdep.c   207
-rw-r--r--  sys/alpha/include/bus.h             23
2 files changed, 230 insertions(+), 0 deletions(-)
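
As a usage sketch only (not part of this commit): a driver would typically create a DMA tag and map, hand an outbound mbuf chain to bus_dmamap_load_mbuf(), and collect the resulting scatter/gather list in a bus_dmamap_callback2_t. Everything named xcrypt_* below, the softc layout, and the choice of BUS_DMA_NOWAIT are assumptions for illustration; only the calling convention comes from this change.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

/* Hypothetical driver state; the field names are illustrative only. */
struct xcrypt_softc {
	bus_dma_tag_t	sc_dmat;	/* from bus_dma_tag_create() */
	bus_dmamap_t	sc_map;		/* from bus_dmamap_create() */
};

/*
 * bus_dmamap_callback2_t: same idea as bus_dmamap_callback_t, but the
 * fourth argument carries the mapped size in bytes.
 */
static void
xcrypt_mbuf_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct xcrypt_softc *sc = arg;

	if (error != 0)
		return;		/* load failed: nseg and mapsize are 0 */
	/*
	 * Program the hardware with segs[0..nseg-1], which together
	 * cover mapsize bytes of the packet.  (Driver-specific.)
	 */
	(void)sc;
}

static int
xcrypt_start(struct xcrypt_softc *sc, struct mbuf *m)
{
	/* m must carry M_PKTHDR; the tag must not require bouncing. */
	return (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_map, m,
	    xcrypt_mbuf_cb, sc, BUS_DMA_NOWAIT));
}

Note that the callback runs even on failure, with zero segments and the error code; that is the "no valid mappings" convention visible in the implementation below.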
diff --git a/sys/alpha/alpha/busdma_machdep.c b/sys/alpha/alpha/busdma_machdep.c
index 119d89b..22b39a4 100644
--- a/sys/alpha/alpha/busdma_machdep.c
+++ b/sys/alpha/alpha/busdma_machdep.c
@@ -32,10 +32,14 @@
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/malloc.h>
+#include <sys/mbuf.h>
#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
+#include <vm/vm_map.h>
#include <machine/bus.h>
#include <machine/sgmap.h>
@@ -535,6 +539,209 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
}
/*
+ * Utility function to load a linear buffer. lastaddrp holds state
+ * between invocations (for multiple-buffer loads). segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+static int
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
+ bus_dma_segment_t segs[],
+ void *buf, bus_size_t buflen,
+ struct thread *td,
+ int flags,
+ vm_offset_t *lastaddrp,
+ int *segp,
+ int first)
+{
+ bus_size_t sgsize;
+ bus_addr_t curaddr, lastaddr, baddr, bmask;
+ vm_offset_t vaddr = (vm_offset_t)buf;
+ int seg;
+ pmap_t pmap;
+
+ if (td != NULL)
+ pmap = vmspace_pmap(td->td_proc->p_vmspace);
+ else
+ pmap = NULL;
+
+ lastaddr = *lastaddrp;
+ bmask = ~(dmat->boundary - 1);
+
+ for (seg = *segp; buflen > 0 ; ) {
+ /*
+ * Get the physical address for this segment.
+ */
+ if (pmap)
+ curaddr = pmap_extract(pmap, vaddr);
+ else
+ curaddr = pmap_kextract(vaddr);
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
+ if (buflen < sgsize)
+ sgsize = buflen;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ if (dmat->boundary > 0) {
+ baddr = (curaddr + dmat->boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+
+ /*
+ * Insert chunk into a segment, coalescing with
+ * previous segment if possible.
+ */
+ if (first) {
+ segs[seg].ds_addr = curaddr + alpha_XXX_dmamap_or;
+ segs[seg].ds_len = sgsize;
+ first = 0;
+ } else {
+ if (curaddr == lastaddr &&
+ (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
+ (dmat->boundary == 0 ||
+ (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ segs[seg].ds_len += sgsize;
+ else {
+ if (++seg >= dmat->nsegments)
+ break;
+ segs[seg].ds_addr = curaddr + alpha_XXX_dmamap_or;
+ segs[seg].ds_len = sgsize;
+ }
+ }
+
+ lastaddr = curaddr + sgsize;
+ vaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ *segp = seg;
+ *lastaddrp = lastaddr;
+
+ /*
+ * Did we fit?
+ */
+ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+/*
+ * Like bus_dmamap_load(), but for mbufs.
+ */
+int
+bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct mbuf *m0,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags)
+{
+#ifdef __GNUC__
+ bus_dma_segment_t dm_segments[dmat->nsegments];
+#else
+ bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
+#endif
+ int nsegs, error;
+
+ KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
+ ("bus_dmamap_load_mbuf: No support for bounce pages!"));
+ KASSERT(m0->m_flags & M_PKTHDR,
+ ("bus_dmamap_load_mbuf: no packet header"));
+
+ nsegs = 0;
+ error = 0;
+ if (m0->m_pkthdr.len <= dmat->maxsize) {
+ int first = 1;
+ vm_offset_t lastaddr = 0;
+ struct mbuf *m;
+
+ for (m = m0; m != NULL && error == 0; m = m->m_next) {
+ error = _bus_dmamap_load_buffer(dmat,
+ dm_segments,
+ m->m_data, m->m_len,
+ NULL, flags, &lastaddr, &nsegs, first);
+ first = 0;
+ }
+ } else {
+ error = EINVAL;
+ }
+
+ if (error) {
+ /* force "no valid mappings" in callback */
+ (*callback)(callback_arg, dm_segments, 0, 0, error);
+ } else {
+ (*callback)(callback_arg, dm_segments,
+ nsegs+1, m0->m_pkthdr.len, error);
+ }
+ return (error);
+}
+
+/*
+ * Like bus_dmamap_load(), but for uios.
+ */
+int
+bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct uio *uio,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags)
+{
+ vm_offset_t lastaddr;
+#ifdef __GNUC__
+ bus_dma_segment_t dm_segments[dmat->nsegments];
+#else
+ bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
+#endif
+ int nsegs, error, first, i;
+ bus_size_t resid;
+ struct iovec *iov;
+ struct thread *td = NULL;
+
+ KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
+ ("bus_dmamap_load_uio: No support for bounce pages!"));
+
+ resid = uio->uio_resid;
+ iov = uio->uio_iov;
+
+ if (uio->uio_segflg == UIO_USERSPACE) {
+ td = uio->uio_td;
+ KASSERT(td != NULL,
+ ("bus_dmamap_load_uio: USERSPACE but no proc"));
+ }
+
+ nsegs = 0;
+ error = 0;
+ first = 1;
+ for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
+ /*
+ * Now at the first iovec to load. Load each iovec
+ * until we have exhausted the residual count.
+ */
+ bus_size_t minlen =
+ resid < iov[i].iov_len ? resid : iov[i].iov_len;
+ caddr_t addr = (caddr_t) iov[i].iov_base;
+
+ error = _bus_dmamap_load_buffer(dmat,
+ dm_segments,
+ addr, minlen,
+ td, flags, &lastaddr, &nsegs, first);
+ first = 0;
+
+ resid -= minlen;
+ }
+
+ if (error) {
+ /* force "no valid mappings" in callback */
+ (*callback)(callback_arg, dm_segments, 0, 0, error);
+ } else {
+ (*callback)(callback_arg, dm_segments,
+ nsegs+1, uio->uio_resid, error);
+ }
+ return (error);
+}
+
+/*
* Release the mapping held by map.
*/
void
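
Aside from the diff: the two size clamps in _bus_dmamap_load_buffer() above (stop at the end of the current page, then stop at the next DMA boundary) can be hard to follow in isolation. The standalone userland sketch below reproduces just that arithmetic with made-up numbers (an 8KB page, a 4KB boundary, and a sample physical address); it is not driver code.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint64_t page_size = 8192;	/* alpha-sized page, for example */
	const uint64_t page_mask = page_size - 1;
	uint64_t boundary = 0x1000;		/* sample 4KB DMA boundary */
	uint64_t curaddr = 0x12f80;		/* sample physical address */
	uint64_t buflen = 0x400;		/* bytes left in this buffer */
	uint64_t sgsize, baddr, bmask = ~(boundary - 1);

	/* Clamp 1: never run past the end of the current page. */
	sgsize = page_size - (curaddr & page_mask);	/* 0x1080 */
	if (buflen < sgsize)
		sgsize = buflen;			/* 0x400 */

	/* Clamp 2: never cross the next 'boundary' line. */
	if (boundary > 0) {
		baddr = (curaddr + boundary) & bmask;	/* 0x13000 */
		if (sgsize > baddr - curaddr)
			sgsize = baddr - curaddr;	/* 0x80 */
	}

	printf("segment: addr 0x%jx len 0x%jx\n",
	    (uintmax_t)curaddr, (uintmax_t)sgsize);
	return (0);
}

With these numbers the first segment is only 0x80 bytes, ending exactly at the boundary at 0x13000; in the loader the leftover 0x380 bytes then start a new segment, because coalescing across the boundary is rejected by the bmask comparison.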
diff --git a/sys/alpha/include/bus.h b/sys/alpha/include/bus.h
index 1a805b1..4c1d55b 100644
--- a/sys/alpha/include/bus.h
+++ b/sys/alpha/include/bus.h
@@ -560,6 +560,29 @@ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
void *callback_arg, int flags);
/*
+ * Like bus_dmamap_callback but includes map size in bytes. This is
+ * defined as a separate interface to maintain compatibility for users
+ * of bus_dmamap_callback_t--at some point these interfaces should be merged.
+ */
+typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);
+/*
+ * Like bus_dmamap_load but for mbufs. Note the use of the
+ * bus_dmamap_callback2_t interface.
+ */
+int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct mbuf *mbuf,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags);
+/*
+ * Like bus_dmamap_load but for uios. Note the use of the
+ * bus_dmamap_callback2_t interface.
+ */
+int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct uio *ui,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags);
+
+/*
* Perform a synchronization operation on the given map.
*/
void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
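
A possible caller of the uio flavor declared above (again a sketch, not from the tree): wrap a user buffer in a single-entry uio and pass it to bus_dmamap_load_uio(). The xcrypt_* names and the softc are hypothetical; the requirement that uio_td be set for UIO_USERSPACE follows from the busdma_machdep.c implementation, which extracts physical pages through that thread's pmap.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <machine/bus.h>

/* Hypothetical softc, as in the mbuf sketch earlier. */
struct xcrypt_softc {
	bus_dma_tag_t	sc_dmat;
	bus_dmamap_t	sc_map;
};

static void
xcrypt_uio_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	(void)arg;
	if (error != 0)
		return;		/* nseg and mapsize are 0 on failure */
	/* Hand segs[0..nseg-1] (mapsize bytes total) to the device. */
	printf("mapped %d segment(s), %ld bytes, first at 0x%lx\n",
	    nseg, (long)mapsize, (long)segs[0].ds_addr);
}

/* Map 'len' bytes at user address 'uaddr' on behalf of thread 'td'. */
static int
xcrypt_map_user(struct xcrypt_softc *sc, void *uaddr, size_t len,
    struct thread *td)
{
	struct iovec iov;
	struct uio uio;

	iov.iov_base = uaddr;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_USERSPACE;	/* pages come from td's pmap */
	uio.uio_rw = UIO_WRITE;		/* not examined by the loader */
	uio.uio_td = td;		/* required for UIO_USERSPACE */

	return (bus_dmamap_load_uio(sc->sc_dmat, sc->sc_map, &uio,
	    xcrypt_uio_cb, sc, BUS_DMA_NOWAIT));
}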