From 1ba0866904d81527e87e7ca185303d932dcb43fc Mon Sep 17 00:00:00 2001 From: sam Date: Fri, 4 Oct 2002 20:40:39 +0000 Subject: New bus_dma interfaces for use by crypto device drivers: o bus_dmamap_load_mbuf o bus_dmamap_load_uio Test on i386. Known to compile on alpha and sparc64, but not tested. Otherwise untried. --- sys/alpha/alpha/busdma_machdep.c | 207 +++++++++++++++++++++++++++++++++++++ sys/alpha/include/bus.h | 23 +++++ sys/amd64/amd64/busdma_machdep.c | 206 +++++++++++++++++++++++++++++++++++++ sys/amd64/include/bus_dma.h | 23 +++++ sys/i386/i386/busdma_machdep.c | 206 +++++++++++++++++++++++++++++++++++++ sys/i386/include/bus_dma.h | 23 +++++ sys/ia64/ia64/busdma_machdep.c | 207 +++++++++++++++++++++++++++++++++++++ sys/ia64/include/bus.h | 23 +++++ sys/sparc64/include/bus.h | 37 +++++++ sys/sparc64/sparc64/bus_machdep.c | 211 ++++++++++++++++++++++++++++++++++++++ sys/sys/bus_dma.h | 23 +++++ 11 files changed, 1189 insertions(+) diff --git a/sys/alpha/alpha/busdma_machdep.c b/sys/alpha/alpha/busdma_machdep.c index 119d89b..22b39a4 100644 --- a/sys/alpha/alpha/busdma_machdep.c +++ b/sys/alpha/alpha/busdma_machdep.c @@ -32,10 +32,14 @@ #include #include #include +#include #include +#include +#include #include #include +#include #include #include @@ -535,6 +539,209 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, } /* + * Utility function to load a linear buffer. lastaddrp holds state + * between invocations (for multiple-buffer loads). segp contains + * the starting segment on entrace, and the ending segment on exit. + * first indicates if this is the first invocation of this function. 
+ */ +static int +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, + bus_dma_segment_t segs[], + void *buf, bus_size_t buflen, + struct thread *td, + int flags, + vm_offset_t *lastaddrp, + int *segp, + int first) +{ + bus_size_t sgsize; + bus_addr_t curaddr, lastaddr, baddr, bmask; + vm_offset_t vaddr = (vm_offset_t)buf; + int seg; + pmap_t pmap; + + if (td != NULL) + pmap = vmspace_pmap(td->td_proc->p_vmspace); + else + pmap = NULL; + + lastaddr = *lastaddrp; + bmask = ~(dmat->boundary - 1); + + for (seg = *segp; buflen > 0 ; ) { + /* + * Get the physical address for this segment. + */ + if (pmap) + curaddr = pmap_extract(pmap, vaddr); + else + curaddr = pmap_kextract(vaddr); + + /* + * Compute the segment size, and adjust counts. + */ + sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); + if (buflen < sgsize) + sgsize = buflen; + + /* + * Make sure we don't cross any boundaries. + */ + if (dmat->boundary > 0) { + baddr = (curaddr + dmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + if (first) { + segs[seg].ds_addr = curaddr + alpha_XXX_dmamap_or; + segs[seg].ds_len = sgsize; + first = 0; + } else { + if (curaddr == lastaddr && + (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && + (dmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) + segs[seg].ds_len += sgsize; + else { + if (++seg >= dmat->nsegments) + break; + segs[seg].ds_addr = curaddr + alpha_XXX_dmamap_or; + segs[seg].ds_len = sgsize; + } + } + + lastaddr = curaddr + sgsize; + vaddr += sgsize; + buflen -= sgsize; + } + + *segp = seg; + *lastaddrp = lastaddr; + + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ +} + +/* + * Like _bus_dmamap_load(), but for mbufs. 
+ */ +int +bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, + struct mbuf *m0, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags) +{ +#ifdef __GNUC__ + bus_dma_segment_t dm_segments[dmat->nsegments]; +#else + bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; +#endif + int nsegs, error; + + KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, + ("bus_dmamap_load_mbuf: No support for bounce pages!")); + KASSERT(m0->m_flags & M_PKTHDR, + ("bus_dmamap_load_mbuf: no packet header")); + + nsegs = 0; + error = 0; + if (m0->m_pkthdr.len <= dmat->maxsize) { + int first = 1; + vm_offset_t lastaddr = 0; + struct mbuf *m; + + for (m = m0; m != NULL && error == 0; m = m->m_next) { + error = _bus_dmamap_load_buffer(dmat, + dm_segments, + m->m_data, m->m_len, + NULL, flags, &lastaddr, &nsegs, first); + first = 0; + } + } else { + error = EINVAL; + } + + if (error) { + /* force "no valid mappings" in callback */ + (*callback)(callback_arg, dm_segments, 0, 0, error); + } else { + (*callback)(callback_arg, dm_segments, + nsegs+1, m0->m_pkthdr.len, error); + } + return (error); +} + +/* + * Like _bus_dmamap_load(), but for uios. 
+ */ +int +bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, + struct uio *uio, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags) +{ + vm_offset_t lastaddr; +#ifdef __GNUC__ + bus_dma_segment_t dm_segments[dmat->nsegments]; +#else + bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; +#endif + int nsegs, error, first, i; + bus_size_t resid; + struct iovec *iov; + struct thread *td = NULL; + + KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, + ("bus_dmamap_load_uio: No support for bounce pages!")); + + resid = uio->uio_resid; + iov = uio->uio_iov; + + if (uio->uio_segflg == UIO_USERSPACE) { + td = uio->uio_td; + KASSERT(td != NULL, + ("bus_dmamap_load_uio: USERSPACE but no proc")); + } + + nsegs = 0; + error = 0; + first = 1; + for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { + /* + * Now at the first iovec to load. Load each iovec + * until we have exhausted the residual count. + */ + bus_size_t minlen = + resid < iov[i].iov_len ? resid : iov[i].iov_len; + caddr_t addr = (caddr_t) iov[i].iov_base; + + error = _bus_dmamap_load_buffer(dmat, + dm_segments, + addr, minlen, + td, flags, &lastaddr, &nsegs, first); + first = 0; + + resid -= minlen; + } + + if (error) { + /* force "no valid mappings" in callback */ + (*callback)(callback_arg, dm_segments, 0, 0, error); + } else { + (*callback)(callback_arg, dm_segments, + nsegs+1, uio->uio_resid, error); + } + return (error); +} + +/* * Release the mapping held by map. */ void diff --git a/sys/alpha/include/bus.h b/sys/alpha/include/bus.h index 1a805b1..4c1d55b 100644 --- a/sys/alpha/include/bus.h +++ b/sys/alpha/include/bus.h @@ -560,6 +560,29 @@ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, void *callback_arg, int flags); /* + * Like bus_dmamap_callback but includes map size in bytes. This is + * defined as a separate interface to maintain compatiiblity for users + * of bus_dmamap_callback_t--at some point these interfaces should be merged. 
+ */ +typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int); +/* + * Like bus_dmamap_load but for mbufs. Note the use of the + * bus_dmamap_callback2_t interface. + */ +int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, + struct mbuf *mbuf, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags); +/* + * Like bus_dmamap_load but for uios. Note the use of the + * bus_dmamap_callback2_t interface. + */ +int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, + struct uio *ui, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags); + +/* * Perform a syncronization operation on the given map. */ void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t); diff --git a/sys/amd64/amd64/busdma_machdep.c b/sys/amd64/amd64/busdma_machdep.c index d0cb697..3158726 100644 --- a/sys/amd64/amd64/busdma_machdep.c +++ b/sys/amd64/amd64/busdma_machdep.c @@ -34,9 +34,12 @@ #include #include #include +#include +#include #include #include +#include #include #include @@ -494,6 +497,209 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, } /* + * Utility function to load a linear buffer. lastaddrp holds state + * between invocations (for multiple-buffer loads). segp contains + * the starting segment on entrace, and the ending segment on exit. + * first indicates if this is the first invocation of this function. + */ +static int +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, + bus_dma_segment_t segs[], + void *buf, bus_size_t buflen, + struct thread *td, + int flags, + vm_offset_t *lastaddrp, + int *segp, + int first) +{ + bus_size_t sgsize; + bus_addr_t curaddr, lastaddr, baddr, bmask; + vm_offset_t vaddr = (vm_offset_t)buf; + int seg; + pmap_t pmap; + + if (td != NULL) + pmap = vmspace_pmap(td->td_proc->p_vmspace); + else + pmap = NULL; + + lastaddr = *lastaddrp; + bmask = ~(dmat->boundary - 1); + + for (seg = *segp; buflen > 0 ; ) { + /* + * Get the physical address for this segment. 
+ */ + if (pmap) + curaddr = pmap_extract(pmap, vaddr); + else + curaddr = pmap_kextract(vaddr); + + /* + * Compute the segment size, and adjust counts. + */ + sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); + if (buflen < sgsize) + sgsize = buflen; + + /* + * Make sure we don't cross any boundaries. + */ + if (dmat->boundary > 0) { + baddr = (curaddr + dmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + if (first) { + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + first = 0; + } else { + if (curaddr == lastaddr && + (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && + (dmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) + segs[seg].ds_len += sgsize; + else { + if (++seg >= dmat->nsegments) + break; + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } + } + + lastaddr = curaddr + sgsize; + vaddr += sgsize; + buflen -= sgsize; + } + + *segp = seg; + *lastaddrp = lastaddr; + + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ +} + +/* + * Like _bus_dmamap_load(), but for mbufs. 
+ */ +int +bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, + struct mbuf *m0, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags) +{ +#ifdef __GNUC__ + bus_dma_segment_t dm_segments[dmat->nsegments]; +#else + bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; +#endif + int nsegs, error; + + KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, + ("bus_dmamap_load_mbuf: No support for bounce pages!")); + KASSERT(m0->m_flags & M_PKTHDR, + ("bus_dmamap_load_mbuf: no packet header")); + + nsegs = 0; + error = 0; + if (m0->m_pkthdr.len <= dmat->maxsize) { + int first = 1; + vm_offset_t lastaddr = 0; + struct mbuf *m; + + for (m = m0; m != NULL && error == 0; m = m->m_next) { + error = _bus_dmamap_load_buffer(dmat, + dm_segments, + m->m_data, m->m_len, + NULL, flags, &lastaddr, &nsegs, first); + first = 0; + } + } else { + error = EINVAL; + } + + if (error) { + /* force "no valid mappings" in callback */ + (*callback)(callback_arg, dm_segments, 0, 0, error); + } else { + (*callback)(callback_arg, dm_segments, + nsegs+1, m0->m_pkthdr.len, error); + } + return (error); +} + +/* + * Like _bus_dmamap_load(), but for uios. 
+ */ +int +bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, + struct uio *uio, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags) +{ + vm_offset_t lastaddr; +#ifdef __GNUC__ + bus_dma_segment_t dm_segments[dmat->nsegments]; +#else + bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; +#endif + int nsegs, error, first, i; + bus_size_t resid; + struct iovec *iov; + struct thread *td = NULL; + + KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, + ("bus_dmamap_load_uio: No support for bounce pages!")); + + resid = uio->uio_resid; + iov = uio->uio_iov; + + if (uio->uio_segflg == UIO_USERSPACE) { + td = uio->uio_td; + KASSERT(td != NULL, + ("bus_dmamap_load_uio: USERSPACE but no proc")); + } + + nsegs = 0; + error = 0; + first = 1; + for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { + /* + * Now at the first iovec to load. Load each iovec + * until we have exhausted the residual count. + */ + bus_size_t minlen = + resid < iov[i].iov_len ? resid : iov[i].iov_len; + caddr_t addr = (caddr_t) iov[i].iov_base; + + error = _bus_dmamap_load_buffer(dmat, + dm_segments, + addr, minlen, + td, flags, &lastaddr, &nsegs, first); + first = 0; + + resid -= minlen; + } + + if (error) { + /* force "no valid mappings" in callback */ + (*callback)(callback_arg, dm_segments, 0, 0, error); + } else { + (*callback)(callback_arg, dm_segments, + nsegs+1, uio->uio_resid, error); + } + return (error); +} + +/* * Release the mapping held by map. */ void diff --git a/sys/amd64/include/bus_dma.h b/sys/amd64/include/bus_dma.h index cb41b17..6790731 100644 --- a/sys/amd64/include/bus_dma.h +++ b/sys/amd64/include/bus_dma.h @@ -203,6 +203,29 @@ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, void *callback_arg, int flags); /* + * Like bus_dmamap_callback but includes map size in bytes. 
This is + * defined as a separate interface to maintain compatibility for users + * of bus_dmamap_callback_t--at some point these interfaces should be merged. + */ +typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int); +/* + * Like bus_dmamap_load but for mbufs. Note the use of the + * bus_dmamap_callback2_t interface. + */ +int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, + struct mbuf *mbuf, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags); +/* + * Like bus_dmamap_load but for uios. Note the use of the + * bus_dmamap_callback2_t interface. + */ +int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, + struct uio *ui, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags); + +/* * Perform a synchronization operation on the given map. */ void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t); diff --git a/sys/i386/i386/busdma_machdep.c b/sys/i386/i386/busdma_machdep.c index d0cb697..3158726 100644 --- a/sys/i386/i386/busdma_machdep.c +++ b/sys/i386/i386/busdma_machdep.c @@ -34,9 +34,12 @@ #include #include #include +#include +#include #include #include +#include #include #include @@ -494,6 +497,209 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, } /* + * Utility function to load a linear buffer. lastaddrp holds state + * between invocations (for multiple-buffer loads). segp contains + * the starting segment on entrance, and the ending segment on exit. + * first indicates if this is the first invocation of this function. 
+ */ +static int +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, + bus_dma_segment_t segs[], + void *buf, bus_size_t buflen, + struct thread *td, + int flags, + vm_offset_t *lastaddrp, + int *segp, + int first) +{ + bus_size_t sgsize; + bus_addr_t curaddr, lastaddr, baddr, bmask; + vm_offset_t vaddr = (vm_offset_t)buf; + int seg; + pmap_t pmap; + + if (td != NULL) + pmap = vmspace_pmap(td->td_proc->p_vmspace); + else + pmap = NULL; + + lastaddr = *lastaddrp; + bmask = ~(dmat->boundary - 1); + + for (seg = *segp; buflen > 0 ; ) { + /* + * Get the physical address for this segment. + */ + if (pmap) + curaddr = pmap_extract(pmap, vaddr); + else + curaddr = pmap_kextract(vaddr); + + /* + * Compute the segment size, and adjust counts. + */ + sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); + if (buflen < sgsize) + sgsize = buflen; + + /* + * Make sure we don't cross any boundaries. + */ + if (dmat->boundary > 0) { + baddr = (curaddr + dmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + if (first) { + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + first = 0; + } else { + if (curaddr == lastaddr && + (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && + (dmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) + segs[seg].ds_len += sgsize; + else { + if (++seg >= dmat->nsegments) + break; + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } + } + + lastaddr = curaddr + sgsize; + vaddr += sgsize; + buflen -= sgsize; + } + + *segp = seg; + *lastaddrp = lastaddr; + + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ +} + +/* + * Like _bus_dmamap_load(), but for mbufs. 
+ */ +int +bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, + struct mbuf *m0, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags) +{ +#ifdef __GNUC__ + bus_dma_segment_t dm_segments[dmat->nsegments]; +#else + bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; +#endif + int nsegs, error; + + KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, + ("bus_dmamap_load_mbuf: No support for bounce pages!")); + KASSERT(m0->m_flags & M_PKTHDR, + ("bus_dmamap_load_mbuf: no packet header")); + + nsegs = 0; + error = 0; + if (m0->m_pkthdr.len <= dmat->maxsize) { + int first = 1; + vm_offset_t lastaddr = 0; + struct mbuf *m; + + for (m = m0; m != NULL && error == 0; m = m->m_next) { + error = _bus_dmamap_load_buffer(dmat, + dm_segments, + m->m_data, m->m_len, + NULL, flags, &lastaddr, &nsegs, first); + first = 0; + } + } else { + error = EINVAL; + } + + if (error) { + /* force "no valid mappings" in callback */ + (*callback)(callback_arg, dm_segments, 0, 0, error); + } else { + (*callback)(callback_arg, dm_segments, + nsegs+1, m0->m_pkthdr.len, error); + } + return (error); +} + +/* + * Like _bus_dmamap_load(), but for uios. 
+ */ +int +bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, + struct uio *uio, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags) +{ + vm_offset_t lastaddr; +#ifdef __GNUC__ + bus_dma_segment_t dm_segments[dmat->nsegments]; +#else + bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; +#endif + int nsegs, error, first, i; + bus_size_t resid; + struct iovec *iov; + struct thread *td = NULL; + + KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, + ("bus_dmamap_load_uio: No support for bounce pages!")); + + resid = uio->uio_resid; + iov = uio->uio_iov; + + if (uio->uio_segflg == UIO_USERSPACE) { + td = uio->uio_td; + KASSERT(td != NULL, + ("bus_dmamap_load_uio: USERSPACE but no proc")); + } + + nsegs = 0; + error = 0; + first = 1; + for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { + /* + * Now at the first iovec to load. Load each iovec + * until we have exhausted the residual count. + */ + bus_size_t minlen = + resid < iov[i].iov_len ? resid : iov[i].iov_len; + caddr_t addr = (caddr_t) iov[i].iov_base; + + error = _bus_dmamap_load_buffer(dmat, + dm_segments, + addr, minlen, + td, flags, &lastaddr, &nsegs, first); + first = 0; + + resid -= minlen; + } + + if (error) { + /* force "no valid mappings" in callback */ + (*callback)(callback_arg, dm_segments, 0, 0, error); + } else { + (*callback)(callback_arg, dm_segments, + nsegs+1, uio->uio_resid, error); + } + return (error); +} + +/* * Release the mapping held by map. */ void diff --git a/sys/i386/include/bus_dma.h b/sys/i386/include/bus_dma.h index cb41b17..6790731 100644 --- a/sys/i386/include/bus_dma.h +++ b/sys/i386/include/bus_dma.h @@ -203,6 +203,29 @@ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, void *callback_arg, int flags); /* + * Like bus_dmamap_callback but includes map size in bytes. This is + * defined as a separate interface to maintain compatiiblity for users + * of bus_dmamap_callback_t--at some point these interfaces should be merged. 
+ */ +typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int); +/* + * Like bus_dmamap_load but for mbufs. Note the use of the + * bus_dmamap_callback2_t interface. + */ +int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, + struct mbuf *mbuf, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags); +/* + * Like bus_dmamap_load but for uios. Note the use of the + * bus_dmamap_callback2_t interface. + */ +int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, + struct uio *ui, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags); + +/* * Perform a synchronization operation on the given map. */ void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t); diff --git a/sys/ia64/ia64/busdma_machdep.c b/sys/ia64/ia64/busdma_machdep.c index 2247e35..95b9521 100644 --- a/sys/ia64/ia64/busdma_machdep.c +++ b/sys/ia64/ia64/busdma_machdep.c @@ -29,11 +29,15 @@ #include #include #include +#include #include #include +#include +#include #include #include +#include #include #include @@ -523,6 +527,209 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, } /* + * Utility function to load a linear buffer. lastaddrp holds state + * between invocations (for multiple-buffer loads). segp contains + * the starting segment on entrance, and the ending segment on exit. + * first indicates if this is the first invocation of this function. 
+ */ +static int +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, + bus_dma_segment_t segs[], + void *buf, bus_size_t buflen, + struct thread *td, + int flags, + vm_offset_t *lastaddrp, + int *segp, + int first) +{ + bus_size_t sgsize; + bus_addr_t curaddr, lastaddr, baddr, bmask; + vm_offset_t vaddr = (vm_offset_t)buf; + int seg; + pmap_t pmap; + + if (td != NULL) + pmap = vmspace_pmap(td->td_proc->p_vmspace); + else + pmap = NULL; + + lastaddr = *lastaddrp; + bmask = ~(dmat->boundary - 1); + + for (seg = *segp; buflen > 0 ; ) { + /* + * Get the physical address for this segment. + */ + if (pmap) + curaddr = pmap_extract(pmap, vaddr); + else + curaddr = pmap_kextract(vaddr); + + /* + * Compute the segment size, and adjust counts. + */ + sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); + if (buflen < sgsize) + sgsize = buflen; + + /* + * Make sure we don't cross any boundaries. + */ + if (dmat->boundary > 0) { + baddr = (curaddr + dmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + if (first) { + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + first = 0; + } else { + if (curaddr == lastaddr && + (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && + (dmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) + segs[seg].ds_len += sgsize; + else { + if (++seg >= dmat->nsegments) + break; + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } + } + + lastaddr = curaddr + sgsize; + vaddr += sgsize; + buflen -= sgsize; + } + + *segp = seg; + *lastaddrp = lastaddr; + + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ +} + +/* + * Like _bus_dmamap_load(), but for mbufs. 
+ */ +int +bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, + struct mbuf *m0, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags) +{ +#ifdef __GNUC__ + bus_dma_segment_t dm_segments[dmat->nsegments]; +#else + bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; +#endif + int nsegs, error; + + KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, + ("bus_dmamap_load_mbuf: No support for bounce pages!")); + KASSERT(m0->m_flags & M_PKTHDR, + ("bus_dmamap_load_mbuf: no packet header")); + + nsegs = 0; + error = 0; + if (m0->m_pkthdr.len <= dmat->maxsize) { + int first = 1; + vm_offset_t lastaddr = 0; + struct mbuf *m; + + for (m = m0; m != NULL && error == 0; m = m->m_next) { + error = _bus_dmamap_load_buffer(dmat, + dm_segments, + m->m_data, m->m_len, + NULL, flags, &lastaddr, &nsegs, first); + first = 0; + } + } else { + error = EINVAL; + } + + if (error) { + /* force "no valid mappings" in callback */ + (*callback)(callback_arg, dm_segments, 0, 0, error); + } else { + (*callback)(callback_arg, dm_segments, + nsegs+1, m0->m_pkthdr.len, error); + } + return (error); +} + +/* + * Like _bus_dmamap_load(), but for uios. 
+ */ +int +bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, + struct uio *uio, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags) +{ + vm_offset_t lastaddr; +#ifdef __GNUC__ + bus_dma_segment_t dm_segments[dmat->nsegments]; +#else + bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; +#endif + int nsegs, error, first, i; + bus_size_t resid; + struct iovec *iov; + struct thread *td = NULL; + + KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, + ("bus_dmamap_load_uio: No support for bounce pages!")); + + resid = uio->uio_resid; + iov = uio->uio_iov; + + if (uio->uio_segflg == UIO_USERSPACE) { + td = uio->uio_td; + KASSERT(td != NULL, + ("bus_dmamap_load_uio: USERSPACE but no proc")); + } + + nsegs = 0; + error = 0; + first = 1; + for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { + /* + * Now at the first iovec to load. Load each iovec + * until we have exhausted the residual count. + */ + bus_size_t minlen = + resid < iov[i].iov_len ? resid : iov[i].iov_len; + caddr_t addr = (caddr_t) iov[i].iov_base; + + error = _bus_dmamap_load_buffer(dmat, + dm_segments, + addr, minlen, + td, flags, &lastaddr, &nsegs, first); + first = 0; + + resid -= minlen; + } + + if (error) { + /* force "no valid mappings" in callback */ + (*callback)(callback_arg, dm_segments, 0, 0, error); + } else { + (*callback)(callback_arg, dm_segments, + nsegs+1, uio->uio_resid, error); + } + return (error); +} + +/* * Release the mapping held by map. */ void diff --git a/sys/ia64/include/bus.h b/sys/ia64/include/bus.h index d7231c4..457aa2a 100644 --- a/sys/ia64/include/bus.h +++ b/sys/ia64/include/bus.h @@ -1218,6 +1218,29 @@ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, void *callback_arg, int flags); /* + * Like bus_dmamap_callback but includes map size in bytes. This is + * defined as a separate interface to maintain compatiiblity for users + * of bus_dmamap_callback_t--at some point these interfaces should be merged. 
+ */ +typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int); +/* + * Like bus_dmamap_load but for mbufs. Note the use of the + * bus_dmamap_callback2_t interface. + */ +int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, + struct mbuf *mbuf, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags); +/* + * Like bus_dmamap_load but for uios. Note the use of the + * bus_dmamap_callback2_t interface. + */ +int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, + struct uio *ui, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags); + +/* * Perform a syncronization operation on the given map. */ void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t); diff --git a/sys/sparc64/include/bus.h b/sys/sparc64/include/bus.h index 5552597..2231f22 100644 --- a/sys/sparc64/include/bus.h +++ b/sys/sparc64/include/bus.h @@ -913,6 +913,13 @@ typedef struct bus_dma_segment bus_dma_segment_t; typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int); /* + * Like bus_dmamap_callback but includes map size in bytes. This is + * defined as a separate interface to maintain compatiiblity for users + * of bus_dmamap_callback_t--at some point these interfaces should be merged. 
+ */ +typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int); + +/* * bus_dma_tag_t * * A machine-dependent opaque type describing the implementation of * @@ -942,6 +949,10 @@ struct bus_dma_tag { int (*dmamap_destroy)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t); int (*dmamap_load)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t, bus_dmamap_callback_t *, void *, int); + int (*dmamap_load_mbuf)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, + struct mbuf *, bus_dmamap_callback2_t *, void *, int); + int (*dmamap_load_uio)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, + struct uio *, bus_dmamap_callback2_t *, void *, int); void (*dmamap_unload)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t); void (*dmamap_sync)(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t); @@ -1010,6 +1021,32 @@ sparc64_dmamap_load(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m, #define bus_dmamap_load(t, m, p, s, cb, cba, f) \ sparc64_dmamap_load((t), (t), (m), (p), (s), (cb), (cba), (f)) +static __inline int +sparc64_dmamap_load_mbuf(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m, + struct mbuf *mb, bus_dmamap_callback2_t *cb, void *cba, int f) +{ + bus_dma_tag_t lt; + + for (lt = pt; lt->dmamap_load_mbuf == NULL; lt = lt->parent) + ; + return ((*lt->dmamap_load_mbuf)(lt, dt, m, mb, cb, cba, f)); +} +#define bus_dmamap_load_mbuf(t, m, mb, cb, cba, f) \ + sparc64_dmamap_load_mbuf((t), (t), (m), (mb), (cb), (cba), (f)) + +static __inline int +sparc64_dmamap_load_uio(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t m, + struct uio *ui, bus_dmamap_callback2_t *cb, void *cba, int f) +{ + bus_dma_tag_t lt; + + for (lt = pt; lt->dmamap_load_uio == NULL; lt = lt->parent) + ; + return ((*lt->dmamap_load_uio)(lt, dt, m, ui, cb, cba, f)); +} +#define bus_dmamap_load_uio(t, m, ui, cb, cba, f) \ + sparc64_dmamap_load_uio((t), (t), (m), (ui), (cb), (cba), (f)) + static __inline void sparc64_dmamap_unload(bus_dma_tag_t pt, bus_dma_tag_t dt, bus_dmamap_t p) { 
diff --git a/sys/sparc64/sparc64/bus_machdep.c b/sys/sparc64/sparc64/bus_machdep.c index 1397992..3fac41e 100644 --- a/sys/sparc64/sparc64/bus_machdep.c +++ b/sys/sparc64/sparc64/bus_machdep.c @@ -112,15 +112,18 @@ #include #include #include +#include #include #include #include +#include #include #include #include #include #include +#include #include #include @@ -159,6 +162,10 @@ static int nexus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, int, static int nexus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t); static int nexus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t, bus_dmamap_callback_t *, void *, int); +static int nexus_dmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, + struct mbuf *, bus_dmamap_callback2_t *, void *, int); +static int nexus_dmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, + struct uio *, bus_dmamap_callback2_t *, void *, int); static void nexus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t); static void nexus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t); @@ -211,6 +218,8 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, newtag->dmamap_create = NULL; newtag->dmamap_destroy = NULL; newtag->dmamap_load = NULL; + newtag->dmamap_load_mbuf = NULL; + newtag->dmamap_load_uio = NULL; newtag->dmamap_unload = NULL; newtag->dmamap_sync = NULL; newtag->dmamem_alloc = NULL; @@ -368,6 +377,206 @@ nexus_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map, } /* + * Utility function to load a linear buffer. lastaddrp holds state + * between invocations (for multiple-buffer loads). segp contains + * the starting segment on entrace, and the ending segment on exit. + * first indicates if this is the first invocation of this function. 
+ */ +static int +_nexus_dmamap_load_buffer(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, + bus_dma_segment_t segs[], + void *buf, bus_size_t buflen, + struct thread *td, + int flags, + vm_offset_t *lastaddrp, + int *segp, + int first) +{ + bus_size_t sgsize; + bus_addr_t curaddr, lastaddr, baddr, bmask; + vm_offset_t vaddr = (vm_offset_t)buf; + int seg; + pmap_t pmap; + + if (td != NULL) + pmap = vmspace_pmap(td->td_proc->p_vmspace); + else + pmap = NULL; + + lastaddr = *lastaddrp; + bmask = ~(ddmat->boundary - 1); + + for (seg = *segp; buflen > 0 ; ) { + /* + * Get the physical address for this segment. + */ + if (pmap) + curaddr = pmap_extract(pmap, vaddr); + else + curaddr = pmap_kextract(vaddr); + + /* + * Compute the segment size, and adjust counts. + */ + sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); + if (buflen < sgsize) + sgsize = buflen; + + /* + * Make sure we don't cross any boundaries. + */ + if (ddmat->boundary > 0) { + baddr = (curaddr + ddmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + if (first) { + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + first = 0; + } else { + if (curaddr == lastaddr && + (segs[seg].ds_len + sgsize) <= ddmat->maxsegsz && + (ddmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) + segs[seg].ds_len += sgsize; + else { + if (++seg >= ddmat->nsegments) + break; + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } + } + + lastaddr = curaddr + sgsize; + vaddr += sgsize; + buflen -= sgsize; + } + + *segp = seg; + *lastaddrp = lastaddr; + + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ +} + +/* + * Like _bus_dmamap_load(), but for mbufs. 
+ */ +static int +nexus_dmamap_load_mbuf(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, + bus_dmamap_t map, + struct mbuf *m0, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags) +{ +#ifdef __GNUC__ + bus_dma_segment_t dm_segments[ddmat->nsegments]; +#else + bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; +#endif + int nsegs, error; + + KASSERT(m0->m_flags & M_PKTHDR, + ("nexus_dmamap_load_mbuf: no packet header")); + + nsegs = 0; + error = 0; + if (m0->m_pkthdr.len <= ddmat->maxsize) { + int first = 1; + vm_offset_t lastaddr = 0; + struct mbuf *m; + + for (m = m0; m != NULL && error == 0; m = m->m_next) { + error = _nexus_dmamap_load_buffer(pdmat, ddmat, + dm_segments, + m->m_data, m->m_len, + NULL, flags, &lastaddr, &nsegs, first); + first = 0; + } + } else { + error = EINVAL; + } + + if (error) { + /* force "no valid mappings" in callback */ + (*callback)(callback_arg, dm_segments, 0, 0, error); + } else { + (*callback)(callback_arg, dm_segments, + nsegs+1, m0->m_pkthdr.len, error); + } + return (error); +} + +/* + * Like _bus_dmamap_load(), but for uios. + */ +static int +nexus_dmamap_load_uio(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, + bus_dmamap_t map, + struct uio *uio, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags) +{ + vm_offset_t lastaddr; +#ifdef __GNUC__ + bus_dma_segment_t dm_segments[ddmat->nsegments]; +#else + bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; +#endif + int nsegs, error, first, i; + bus_size_t resid; + struct iovec *iov; + struct thread *td = NULL; + + resid = uio->uio_resid; + iov = uio->uio_iov; + + if (uio->uio_segflg == UIO_USERSPACE) { + td = uio->uio_td; + KASSERT(td != NULL, + ("nexus_dmamap_load_uio: USERSPACE but no proc")); + } + + nsegs = 0; + error = 0; + first = 1; + for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { + /* + * Now at the first iovec to load. Load each iovec + * until we have exhausted the residual count. + */ + bus_size_t minlen = + resid < iov[i].iov_len ? 
resid : iov[i].iov_len; + caddr_t addr = (caddr_t) iov[i].iov_base; + + error = _nexus_dmamap_load_buffer(pdmat, ddmat, + dm_segments, + addr, minlen, + td, flags, &lastaddr, &nsegs, first); + first = 0; + + resid -= minlen; + } + + if (error) { + /* force "no valid mappings" in callback */ + (*callback)(callback_arg, dm_segments, 0, 0, error); + } else { + (*callback)(callback_arg, dm_segments, + nsegs+1, uio->uio_resid, error); + } + return (error); +} + +/* * Common function for unloading a DMA map. May be called by * bus-specific DMA map unload functions. */ @@ -506,6 +715,8 @@ struct bus_dma_tag nexus_dmatag = { nexus_dmamap_create, nexus_dmamap_destroy, nexus_dmamap_load, + nexus_dmamap_load_mbuf, + nexus_dmamap_load_uio, nexus_dmamap_unload, nexus_dmamap_sync, diff --git a/sys/sys/bus_dma.h b/sys/sys/bus_dma.h index cb41b17..6790731 100644 --- a/sys/sys/bus_dma.h +++ b/sys/sys/bus_dma.h @@ -203,6 +203,29 @@ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, void *callback_arg, int flags); /* + * Like bus_dmamap_callback but includes map size in bytes. This is + * defined as a separate interface to maintain compatiiblity for users + * of bus_dmamap_callback_t--at some point these interfaces should be merged. + */ +typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int); +/* + * Like bus_dmamap_load but for mbufs. Note the use of the + * bus_dmamap_callback2_t interface. + */ +int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, + struct mbuf *mbuf, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags); +/* + * Like bus_dmamap_load but for uios. Note the use of the + * bus_dmamap_callback2_t interface. + */ +int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, + struct uio *ui, + bus_dmamap_callback2_t *callback, void *callback_arg, + int flags); + +/* * Perform a syncronization operation on the given map. 
*/ void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t); -- cgit v1.1