author     nwhitehorn <nwhitehorn@FreeBSD.org>    2010-07-08 15:38:55 +0000
committer  nwhitehorn <nwhitehorn@FreeBSD.org>    2010-07-08 15:38:55 +0000
commit     b8a3f884f5f76d7cbeafa82dcc64a92139bd0451 (patch)
tree       cf8c3431de4e0239f3df7acb987572fb388d6b00 /sys/powerpc
parent     9903687954d730f5450b28e92332102e97ae531e (diff)
Replace the existing PowerPC busdma implementation with the one from amd64
(with slight modifications). This provides support for bounce buffers,
which are required on systems with RAM above 4 GB.
Diffstat (limited to 'sys/powerpc')
-rw-r--r--  sys/powerpc/aim/vm_machdep.c          |    2
-rw-r--r--  sys/powerpc/booke/vm_machdep.c        |    2
-rw-r--r--  sys/powerpc/powerpc/busdma_machdep.c  | 1041
3 files changed, 834 insertions, 211 deletions
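
For context on the log message: a bounce buffer substitutes a DMA-reachable
page for one the device cannot address, copying data through it around each
transfer. The sketch below illustrates the idea only; dma_reachable(),
start_dma(), bounce_va and bounce_pa are hypothetical names, not part of
this commit.

	/* Sketch of the bounce-buffer idea for a device-bound write. */
	static void
	dma_write_sketch(void *buf, size_t len)
	{
		vm_paddr_t pa = vtophys((vm_offset_t)buf);

		if (dma_reachable(pa)) {
			start_dma(pa, len);		/* device can see the buffer */
		} else {
			bcopy(buf, bounce_va, len);	/* BUS_DMASYNC_PREWRITE copy */
			start_dma(bounce_pa, len);	/* device sees the bounce page */
		}
	}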
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index 3967176..9f51d0b 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -358,10 +358,8 @@ sf_buf_free(struct sf_buf *sf)
void
swi_vm(void *dummy)
{
-#if 0 /* XXX: Don't have busdma stuff yet */
if (busdma_swi_pending != 0)
busdma_swi();
-#endif
}
/*
diff --git a/sys/powerpc/booke/vm_machdep.c b/sys/powerpc/booke/vm_machdep.c
index 7d2b88b..8696c9c 100644
--- a/sys/powerpc/booke/vm_machdep.c
+++ b/sys/powerpc/booke/vm_machdep.c
@@ -357,10 +357,8 @@ sf_buf_free(struct sf_buf *sf)
void
swi_vm(void *dummy)
{
-#if 0 /* XXX: Don't have busdma stuff yet */
if (busdma_swi_pending != 0)
busdma_swi();
-#endif
}
/*
diff --git a/sys/powerpc/powerpc/busdma_machdep.c b/sys/powerpc/powerpc/busdma_machdep.c
index f12de3d..4223dc4 100644
--- a/sys/powerpc/powerpc/busdma_machdep.c
+++ b/sys/powerpc/powerpc/busdma_machdep.c
@@ -1,5 +1,4 @@
/*-
- * Copyright (c) 2002 Peter Grehan
* Copyright (c) 1997, 1998 Justin T. Gibbs.
* All rights reserved.
*
@@ -23,27 +22,28 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
- * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
*/
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
/*
- * Bus dma support routines
+ * From amd64/busdma_machdep.c, r204214
*/
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
+#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -51,34 +51,122 @@ __FBSDID("$FreeBSD$");
#include <machine/atomic.h>
#include <machine/bus.h>
-#include <machine/cpufunc.h>
+#include <machine/md_var.h>
+
+#define MAX_BPAGES 8192
+
+struct bounce_zone;
struct bus_dma_tag {
- bus_dma_tag_t parent;
- bus_size_t alignment;
- bus_size_t boundary;
- bus_addr_t lowaddr;
- bus_addr_t highaddr;
+ bus_dma_tag_t parent;
+ bus_size_t alignment;
+ bus_size_t boundary;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
bus_dma_filter_t *filter;
- void *filterarg;
- bus_size_t maxsize;
- u_int nsegments;
- bus_size_t maxsegsz;
- int flags;
- int ref_count;
- int map_count;
+ void *filterarg;
+ bus_size_t maxsize;
+ u_int nsegments;
+ bus_size_t maxsegsz;
+ int flags;
+ int ref_count;
+ int map_count;
bus_dma_lock_t *lockfunc;
void *lockfuncarg;
+ bus_dma_segment_t *segments;
+ struct bounce_zone *bounce_zone;
+};
+
+struct bounce_page {
+ vm_offset_t vaddr; /* kva of bounce buffer */
+ bus_addr_t busaddr; /* Physical address */
+ vm_offset_t datavaddr; /* kva of client data */
+ bus_size_t datacount; /* client data count */
+ STAILQ_ENTRY(bounce_page) links;
+};
+
+int busdma_swi_pending;
+
+struct bounce_zone {
+ STAILQ_ENTRY(bounce_zone) links;
+ STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
+ int total_bpages;
+ int free_bpages;
+ int reserved_bpages;
+ int active_bpages;
+ int total_bounced;
+ int total_deferred;
+ int map_count;
+ bus_size_t alignment;
+ bus_addr_t lowaddr;
+ char zoneid[8];
+ char lowaddrid[20];
+ struct sysctl_ctx_list sysctl_tree;
+ struct sysctl_oid *sysctl_tree_top;
};
+static struct mtx bounce_lock;
+static int total_bpages;
+static int busdma_zonecount;
+static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
+
+SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
+SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
+ "Total bounce pages");
+
struct bus_dmamap {
- bus_dma_tag_t dmat;
- void *buf; /* unmapped buffer pointer */
- bus_size_t buflen; /* unmapped buffer length */
- bus_dmamap_callback_t *callback;
- void *callback_arg;
+ struct bp_list bpages;
+ int pagesneeded;
+ int pagesreserved;
+ bus_dma_tag_t dmat;
+ void *buf; /* unmapped buffer pointer */
+ bus_size_t buflen; /* unmapped buffer length */
+ bus_dmamap_callback_t *callback;
+ void *callback_arg;
+ STAILQ_ENTRY(bus_dmamap) links;
};
+static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
+static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
+static struct bus_dmamap nobounce_dmamap;
+
+static void init_bounce_pages(void *dummy);
+static int alloc_bounce_zone(bus_dma_tag_t dmat);
+static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
+static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+ int commit);
+static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_offset_t vaddr, bus_size_t size);
+static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
+static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
+
+/*
+ * Return true if a match is made.
+ *
+ * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
+ *
+ * If paddr is within the bounds of the dma tag, call the filter callback
+ * to check for a match; if there is no filter callback, assume a match.
+ */
+static __inline int
+run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
+{
+ int retval;
+
+ retval = 0;
+
+ do {
+ if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
+ || ((paddr & (dmat->alignment - 1)) != 0))
+ && (dmat->filter == NULL
+ || (*dmat->filter)(dmat->filterarg, paddr) != 0))
+ retval = 1;
+
+ dmat = dmat->parent;
+ } while (retval == 0 && dmat != NULL);
+ return (retval);
+}
+
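
A note on the filter contract used by run_filter() above: a driver filter
has the bus_dma_filter_t signature and returns nonzero for a physical
address the device cannot use, which forces that page to bounce. A
hypothetical example:

	/* Hypothetical filter: bounce anything at or above a device limit. */
	static int
	example_filter(void *arg, bus_addr_t paddr)
	{
		bus_addr_t limit = *(bus_addr_t *)arg;

		return (paddr >= limit);
	}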
/*
* Convenience function for manipulating driver locks from busdma (during
* busdma_swi, for example). Drivers that don't provide their own locks
@@ -112,42 +200,53 @@ busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
-#ifdef INVARIANTS
panic("driver error: busdma dflt_lock called");
-#else
- printf("DRIVER_ERROR: busdma dflt_lock called\n");
-#endif
}
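
For reference, the busdma_lock_mutex() convenience function discussed above
simply maps the two lock ops onto a driver-supplied mutex; a sketch of its
behavior (the full body falls outside this hunk):

	void
	busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
	{
		struct mtx *dmtx;

		dmtx = (struct mtx *)arg;
		switch (op) {
		case BUS_DMA_LOCK:
			mtx_lock(dmtx);
			break;
		case BUS_DMA_UNLOCK:
			mtx_unlock(dmtx);
			break;
		default:
			panic("Unknown operation 0x%x", op);
		}
	}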
+#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3
+#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
* Allocate a device specific dma_tag.
*/
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
- bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
- bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
- int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
- void *lockfuncarg, bus_dma_tag_t *dmat)
+ bus_size_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_dma_filter_t *filter,
+ void *filterarg, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat)
{
bus_dma_tag_t newtag;
int error = 0;
+ /* Basic sanity checking */
+ if (boundary != 0 && boundary < maxsegsz)
+ maxsegsz = boundary;
+
+ if (maxsegsz == 0) {
+ return (EINVAL);
+ }
+
/* Return a NULL tag on failure */
*dmat = NULL;
- newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
- if (newtag == NULL)
+ newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
+ M_ZERO | M_NOWAIT);
+ if (newtag == NULL) {
+ CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
+ __func__, newtag, 0, error);
return (ENOMEM);
+ }
newtag->parent = parent;
newtag->alignment = alignment;
newtag->boundary = boundary;
- newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
- newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
+ newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
+ newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
newtag->filter = filter;
newtag->filterarg = filterarg;
- newtag->maxsize = maxsize;
- newtag->nsegments = nsegments;
+ newtag->maxsize = maxsize;
+ newtag->nsegments = nsegments;
newtag->maxsegsz = maxsegsz;
newtag->flags = flags;
newtag->ref_count = 1; /* Count ourself */
@@ -159,61 +258,107 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
newtag->lockfunc = dflt_lock;
newtag->lockfuncarg = NULL;
}
+ newtag->segments = NULL;
- /*
- * Take into account any restrictions imposed by our parent tag
- */
- if (parent != NULL) {
- newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
- newtag->highaddr = max(parent->highaddr, newtag->highaddr);
+ /* Take into account any restrictions imposed by our parent tag */
+ if (parent != NULL) {
+ newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
+ newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
if (newtag->boundary == 0)
newtag->boundary = parent->boundary;
else if (parent->boundary != 0)
- newtag->boundary = MIN(parent->boundary,
+ newtag->boundary = MIN(parent->boundary,
newtag->boundary);
- if (newtag->filter == NULL) {
- /*
- * Short circuit looking at our parent directly
- * since we have encapsulated all of its information
- */
- newtag->filter = parent->filter;
- newtag->filterarg = parent->filterarg;
- newtag->parent = parent->parent;
+ if (newtag->filter == NULL) {
+ /*
+ * Short circuit looking at our parent directly
+ * since we have encapsulated all of its information
+ */
+ newtag->filter = parent->filter;
+ newtag->filterarg = parent->filterarg;
+ newtag->parent = parent->parent;
}
if (newtag->parent != NULL)
atomic_add_int(&parent->ref_count, 1);
}
- *dmat = newtag;
+ if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
+ || newtag->alignment > 1)
+ newtag->flags |= BUS_DMA_COULD_BOUNCE;
+
+ if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ (flags & BUS_DMA_ALLOCNOW) != 0) {
+ struct bounce_zone *bz;
+
+ /* Must bounce */
+
+ if ((error = alloc_bounce_zone(newtag)) != 0) {
+ free(newtag, M_DEVBUF);
+ return (error);
+ }
+ bz = newtag->bounce_zone;
+
+ if (ptoa(bz->total_bpages) < maxsize) {
+ int pages;
+
+ pages = atop(maxsize) - bz->total_bpages;
+
+ /* Add pages to our bounce pool */
+ if (alloc_bounce_pages(newtag, pages) < pages)
+ error = ENOMEM;
+ }
+ /* Performed initial allocation */
+ newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
+ }
+
+ if (error != 0) {
+ free(newtag, M_DEVBUF);
+ } else {
+ *dmat = newtag;
+ }
+ CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
+ __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
return (error);
}
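
BUS_DMA_COULD_BOUNCE now fires whenever a tag's lowaddr excludes part of
RAM, so an ordinary 32-bit-capable device tag triggers the bounce machinery
on a large-memory box. A hypothetical driver call (sc_mtx and sc_dmat are
assumed softc fields):

	error = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr: 32-bit DMA engine */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1, MCLBYTES,	/* maxsize, nsegments, maxsegsz */
	    0, busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);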
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
+ bus_dma_tag_t dmat_copy;
+ int error;
+
+ error = 0;
+ dmat_copy = dmat;
+
if (dmat != NULL) {
-
- if (dmat->map_count != 0)
- return (EBUSY);
-
- while (dmat != NULL) {
- bus_dma_tag_t parent;
-
- parent = dmat->parent;
- atomic_subtract_int(&dmat->ref_count, 1);
- if (dmat->ref_count == 0) {
- free(dmat, M_DEVBUF);
- /*
- * Last reference count, so
- * release our reference
- * count on our parent.
- */
- dmat = parent;
- } else
- dmat = NULL;
- }
- }
- return (0);
+
+ if (dmat->map_count != 0) {
+ error = EBUSY;
+ goto out;
+ }
+
+ while (dmat != NULL) {
+ bus_dma_tag_t parent;
+
+ parent = dmat->parent;
+ atomic_subtract_int(&dmat->ref_count, 1);
+ if (dmat->ref_count == 0) {
+ if (dmat->segments != NULL)
+ free(dmat->segments, M_DEVBUF);
+ free(dmat, M_DEVBUF);
+ /*
+ * Last reference count, so
+ * release our reference
+ * count on our parent.
+ */
+ dmat = parent;
+ } else
+ dmat = NULL;
+ }
+ }
+out:
+ CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
+ return (error);
}
/*
@@ -223,10 +368,83 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
- *mapp = NULL;
- dmat->map_count++;
+ int error;
+
+ error = 0;
+
+ if (dmat->segments == NULL) {
+ dmat->segments = (bus_dma_segment_t *)malloc(
+ sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
+ M_NOWAIT);
+ if (dmat->segments == NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+ __func__, dmat, ENOMEM);
+ return (ENOMEM);
+ }
+ }
- return (0);
+ /*
+ * Bouncing might be required if the driver asks for an active
+ * exclusion region, a data alignment that is stricter than 1, and/or
+ * an active address boundary.
+ */
+ if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
+
+ /* Must bounce */
+ struct bounce_zone *bz;
+ int maxpages;
+
+ if (dmat->bounce_zone == NULL) {
+ if ((error = alloc_bounce_zone(dmat)) != 0)
+ return (error);
+ }
+ bz = dmat->bounce_zone;
+
+ *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+ if (*mapp == NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+ __func__, dmat, ENOMEM);
+ return (ENOMEM);
+ }
+
+ /* Initialize the new map */
+ STAILQ_INIT(&((*mapp)->bpages));
+
+ /*
+ * Attempt to add pages to our pool on a per-instance
+ * basis up to a sane limit.
+ */
+ if (dmat->alignment > 1)
+ maxpages = MAX_BPAGES;
+ else
+ maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
+ if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
+ || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
+ int pages;
+
+ pages = MAX(atop(dmat->maxsize), 1);
+ pages = MIN(maxpages - bz->total_bpages, pages);
+ pages = MAX(pages, 1);
+ if (alloc_bounce_pages(dmat, pages) < pages)
+ error = ENOMEM;
+
+ if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
+ if (error == 0)
+ dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
+ } else {
+ error = 0;
+ }
+ }
+ bz->map_count++;
+ } else {
+ *mapp = NULL;
+ }
+ if (error == 0)
+ dmat->map_count++;
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->flags, error);
+ return (error);
}
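
Usage note: per-buffer maps come from bus_dmamap_create(), and with a
bounce-capable tag this is also where the zone's page pool is grown. A
hypothetical driver fragment:

	error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_map);
	if (error != 0)
		return (error);
	/* ... and on teardown: bus_dmamap_destroy(sc->sc_dmat, sc->sc_map); */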
/*
@@ -236,13 +454,22 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
- if (map != NULL) {
- panic("dmamap_destroy: NULL?\n");
- }
- dmat->map_count--;
- return (0);
+ if (map != NULL && map != &nobounce_dmamap) {
+ if (STAILQ_FIRST(&map->bpages) != NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+ __func__, dmat, EBUSY);
+ return (EBUSY);
+ }
+ if (dmat->bounce_zone)
+ dmat->bounce_zone->map_count--;
+ free(map, M_DEVBUF);
+ }
+ dmat->map_count--;
+ CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
+ return (0);
}
+
/*
* Allocate a piece of memory that can be efficiently mapped into
* bus device space based on the constraints listed in the dma tag.
@@ -250,7 +477,7 @@ bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
*/
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
- bus_dmamap_t *mapp)
+ bus_dmamap_t *mapp)
{
int mflags;
@@ -258,56 +485,87 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
mflags = M_NOWAIT;
else
mflags = M_WAITOK;
+
+ /* If we succeed, no mapping/bouncing will be required */
+ *mapp = NULL;
+
+ if (dmat->segments == NULL) {
+ dmat->segments = (bus_dma_segment_t *)malloc(
+ sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
+ mflags);
+ if (dmat->segments == NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->flags, ENOMEM);
+ return (ENOMEM);
+ }
+ }
if (flags & BUS_DMA_ZERO)
mflags |= M_ZERO;
- *mapp = NULL;
-
/*
* XXX:
* (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
* alignment guarantees of malloc need to be nailed down, and the
* code below should be rewritten to take that into account.
*
- * In the meantime, we'll return an error if malloc gets it wrong.
+ * In the meantime, we'll warn the user if malloc gets it wrong.
*/
- if (dmat->maxsize <= PAGE_SIZE &&
- dmat->alignment < dmat->maxsize) {
+ if ((dmat->maxsize <= PAGE_SIZE) &&
+ (dmat->alignment < dmat->maxsize) &&
+ dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
- } else {
- /*
- * XXX Use Contigmalloc until it is merged into this facility
- * and handles multi-seg allocations. Nobody is doing
- * multi-seg allocations yet though.
- */
- *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
- 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
- dmat->boundary);
- }
-
- if (*vaddr == NULL)
- return (ENOMEM);
-
- if ((uintptr_t)*vaddr % dmat->alignment)
- printf("XXX: %s: alignment not respected!\n", __func__);
-
- return (0);
+ } else {
+ /*
+ * XXX Use Contigmalloc until it is merged into this facility
+ * and handles multi-seg allocations. Nobody is doing
+ * multi-seg allocations yet though.
+ * XXX Certain AGP hardware does.
+ */
+ *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
+ 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
+ dmat->boundary);
+ }
+ if (*vaddr == NULL) {
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->flags, ENOMEM);
+ return (ENOMEM);
+ } else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
+ printf("bus_dmamem_alloc failed to align memory properly.\n");
+ }
+#ifdef NOTYET
+ if (flags & BUS_DMA_NOCACHE)
+ pmap_change_attr((vm_offset_t)*vaddr, dmat->maxsize,
+ PAT_UNCACHEABLE);
+#endif
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+ __func__, dmat, dmat->flags, 0);
+ return (0);
}
/*
- * Free a piece of memory and it's allocated dmamap, that was allocated
+ * Free a piece of memory and its allocated dmamap, that was allocated
* via bus_dmamem_alloc. Make the same choice for free/contigfree.
*/
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
- if (map != NULL)
- panic("bus_dmamem_free: Invalid map freed\n");
- if (dmat->maxsize <= PAGE_SIZE &&
- dmat->alignment < dmat->maxsize)
+ /*
+ * dmamem does not need to be bounced, so the map should be
+ * NULL
+ */
+ if (map != NULL)
+ panic("bus_dmamem_free: Invalid map freed\n");
+#ifdef NOTYET
+ pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, PAT_WRITE_BACK);
+#endif
+ if ((dmat->maxsize <= PAGE_SIZE) &&
+ (dmat->alignment < dmat->maxsize) &&
+ dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
free(vaddr, M_DEVBUF);
- else
+ else {
contigfree(vaddr, dmat->maxsize, M_DEVBUF);
+ }
+ CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
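
The alloc/free pair above stays symmetric; a descriptor ring, for example,
would be handled as in this hypothetical fragment (sc_ring and sc_ring_map
are assumed fields):

	error = bus_dmamem_alloc(sc->sc_dmat, (void **)&sc->sc_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_ring_map);
	if (error != 0)
		return (error);
	/* ... use the ring, then: */
	bus_dmamem_free(sc->sc_dmat, sc->sc_ring, sc->sc_ring_map);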
/*
@@ -316,26 +574,89 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
* the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
*/
-static int
-bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
- void *buf, bus_size_t buflen, struct thread *td, int flags,
- vm_offset_t *lastaddrp, int *segp, int first)
+static __inline int
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
+ bus_dmamap_t map,
+ void *buf, bus_size_t buflen,
+ pmap_t pmap,
+ int flags,
+ bus_addr_t *lastaddrp,
+ bus_dma_segment_t *segs,
+ int *segp,
+ int first)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
- vm_offset_t vaddr = (vm_offset_t)buf;
+ vm_offset_t vaddr;
+ bus_addr_t paddr;
int seg;
- pmap_t pmap;
- if (td != NULL)
- pmap = vmspace_pmap(td->td_proc->p_vmspace);
- else
- pmap = NULL;
+ if (map == NULL)
+ map = &nobounce_dmamap;
+
+ if ((map != &nobounce_dmamap && map->pagesneeded == 0)
+ && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
+ vm_offset_t vendaddr;
+ CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
+ "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
+ dmat->boundary, dmat->alignment);
+ CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
+ map, &nobounce_dmamap, map->pagesneeded);
+ /*
+ * Count the number of bounce pages
+ * needed in order to complete this transfer
+ */
+ vaddr = (vm_offset_t)buf;
+ vendaddr = (vm_offset_t)buf + buflen;
+
+ while (vaddr < vendaddr) {
+ bus_size_t sg_len;
+
+ sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
+ if (pmap)
+ paddr = pmap_extract(pmap, vaddr);
+ else
+ paddr = pmap_kextract(vaddr);
+ if (run_filter(dmat, paddr) != 0) {
+ sg_len = roundup2(sg_len, dmat->alignment);
+ map->pagesneeded++;
+ }
+ vaddr += sg_len;
+ }
+ CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+ }
+
+ /* Reserve Necessary Bounce Pages */
+ if (map->pagesneeded != 0) {
+ mtx_lock(&bounce_lock);
+ if (flags & BUS_DMA_NOWAIT) {
+ if (reserve_bounce_pages(dmat, map, 0) != 0) {
+ mtx_unlock(&bounce_lock);
+ return (ENOMEM);
+ }
+ } else {
+ if (reserve_bounce_pages(dmat, map, 1) != 0) {
+ /* Queue us for resources */
+ map->dmat = dmat;
+ map->buf = buf;
+ map->buflen = buflen;
+ STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
+ map, links);
+ mtx_unlock(&bounce_lock);
+ return (EINPROGRESS);
+ }
+ }
+ mtx_unlock(&bounce_lock);
+ }
+
+ vaddr = (vm_offset_t)buf;
lastaddr = *lastaddrp;
bmask = ~(dmat->boundary - 1);
for (seg = *segp; buflen > 0 ; ) {
+ bus_size_t max_sgsize;
+
/*
* Get the physical address for this segment.
*/
@@ -347,11 +668,15 @@ bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
/*
* Compute the segment size, and adjust counts.
*/
- sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
- if (sgsize > dmat->maxsegsz)
- sgsize = dmat->maxsegsz;
- if (buflen < sgsize)
- sgsize = buflen;
+ max_sgsize = MIN(buflen, dmat->maxsegsz);
+ sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
+ if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
+ sgsize = roundup2(sgsize, dmat->alignment);
+ sgsize = MIN(sgsize, max_sgsize);
+ curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
+ } else {
+ sgsize = MIN(sgsize, max_sgsize);
+ }
/*
* Make sure we don't cross any boundaries.
@@ -364,7 +689,7 @@ bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
/*
* Insert chunk into a segment, coalescing with
- * the previous segment if possible.
+ * previous segment if possible.
*/
if (first) {
segs[seg].ds_addr = curaddr;
@@ -403,59 +728,71 @@ bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
*/
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
- bus_size_t buflen, bus_dmamap_callback_t *callback,
- void *callback_arg, int flags)
+ bus_size_t buflen, bus_dmamap_callback_t *callback,
+ void *callback_arg, int flags)
{
-#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
- bus_dma_segment_t dm_segments[dmat->nsegments];
-#else
- bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
-#endif
- vm_offset_t lastaddr;
- int error, nsegs;
+ bus_addr_t lastaddr = 0;
+ int error, nsegs = 0;
- if (map != NULL)
- panic("bus_dmamap_load: Invalid map\n");
+ if (map != NULL) {
+ flags |= BUS_DMA_WAITOK;
+ map->callback = callback;
+ map->callback_arg = callback_arg;
+ }
- lastaddr = (vm_offset_t)0;
- nsegs = 0;
- error = bus_dmamap_load_buffer(dmat, dm_segments, buf, buflen,
- NULL, flags, &lastaddr, &nsegs, 1);
+ error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
+ &lastaddr, dmat->segments, &nsegs, 1);
- if (error == 0)
- (*callback)(callback_arg, dm_segments, nsegs + 1, 0);
+ CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+ __func__, dmat, dmat->flags, error, nsegs + 1);
+
+ if (error == EINPROGRESS) {
+ return (error);
+ }
+
+ if (error)
+ (*callback)(callback_arg, dmat->segments, 0, error);
else
- (*callback)(callback_arg, NULL, 0, error);
+ (*callback)(callback_arg, dmat->segments, nsegs + 1, 0);
+
+ /*
+ * Return ENOMEM to the caller so that it can pass it up the stack.
+ * This error only happens when NOWAIT is set, so deferral is disabled.
+ */
+ if (error == ENOMEM)
+ return (error);
return (0);
}
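
Since a load can now return EINPROGRESS and finish later from busdma_swi(),
the callback is the real completion point. A hypothetical single-segment
callback and load call:

	/* Hypothetical callback: record the lone segment's bus address. */
	static void
	example_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
	{
		bus_addr_t *addrp = arg;

		if (error == 0 && nseg == 1)
			*addrp = segs[0].ds_addr;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, len,
	    example_dmamap_cb, &sc->sc_busaddr, 0);
	/* EINPROGRESS means pages were short; the callback runs later. */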
+
/*
- * Like bus_dmamap_load(), but for mbufs.
+ * Like _bus_dmamap_load(), but for mbufs.
*/
int
-bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
- bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
+bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct mbuf *m0,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags)
{
-#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
- bus_dma_segment_t dm_segments[dmat->nsegments];
-#else
- bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
-#endif
- int nsegs = 0, error = 0;
+ int nsegs, error;
M_ASSERTPKTHDR(m0);
+ flags |= BUS_DMA_NOWAIT;
+ nsegs = 0;
+ error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
- vm_offset_t lastaddr = 0;
+ bus_addr_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
- error = bus_dmamap_load_buffer(dmat,
- dm_segments, m->m_data, m->m_len, NULL,
- flags, &lastaddr, &nsegs, first);
+ error = _bus_dmamap_load_buffer(dmat, map,
+ m->m_data, m->m_len,
+ NULL, flags, &lastaddr,
+ dmat->segments, &nsegs, first);
first = 0;
}
}
@@ -464,90 +801,96 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
}
if (error) {
- /*
- * force "no valid mappings" on error in callback.
- */
- (*callback)(callback_arg, dm_segments, 0, 0, error);
+ /* force "no valid mappings" in callback */
+ (*callback)(callback_arg, dmat->segments, 0, 0, error);
} else {
- (*callback)(callback_arg, dm_segments, nsegs+1,
- m0->m_pkthdr.len, error);
+ (*callback)(callback_arg, dmat->segments,
+ nsegs+1, m0->m_pkthdr.len, error);
}
+ CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+ __func__, dmat, dmat->flags, error, nsegs + 1);
return (error);
}
int
-bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
- bus_dma_segment_t *segs, int *nsegs, int flags)
+bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
+ int flags)
{
- int error = 0;
+ int error;
M_ASSERTPKTHDR(m0);
+ flags |= BUS_DMA_NOWAIT;
*nsegs = 0;
-
+ error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
- vm_offset_t lastaddr = 0;
+ bus_addr_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
- error = bus_dmamap_load_buffer(dmat,
- segs, m->m_data, m->m_len, NULL,
- flags, &lastaddr, nsegs, first);
+ error = _bus_dmamap_load_buffer(dmat, map,
+ m->m_data, m->m_len,
+ NULL, flags, &lastaddr,
+ segs, nsegs, first);
first = 0;
}
}
- ++*nsegs;
} else {
error = EINVAL;
}
+ /* XXX FIXME: Having to increment nsegs is really annoying */
+ ++*nsegs;
+ CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+ __func__, dmat, dmat->flags, error, *nsegs);
return (error);
}
/*
- * Like bus_dmamap_load(), but for uios.
+ * Like _bus_dmamap_load(), but for uios.
*/
int
-bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
- bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
+bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct uio *uio,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags)
{
- vm_offset_t lastaddr;
-#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
- bus_dma_segment_t dm_segments[dmat->nsegments];
-#else
- bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
-#endif
- int nsegs, i, error, first;
+ bus_addr_t lastaddr = 0;
+ int nsegs, error, first, i;
bus_size_t resid;
struct iovec *iov;
- struct thread *td = NULL;
+ pmap_t pmap;
+ flags |= BUS_DMA_NOWAIT;
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
- td = uio->uio_td;
- KASSERT(td != NULL,
- ("bus_dmamap_load_uio: USERSPACE but no proc"));
- }
+ KASSERT(uio->uio_td != NULL,
+ ("bus_dmamap_load_uio: USERSPACE but no proc"));
+ pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
+ } else
+ pmap = NULL;
+ nsegs = 0;
+ error = 0;
first = 1;
- nsegs = error = 0;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
- resid < iov[i].iov_len ? resid : iov[i].iov_len;
+ resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
if (minlen > 0) {
- error = bus_dmamap_load_buffer(dmat, dm_segments, addr,
- minlen, td, flags, &lastaddr, &nsegs, first);
-
+ error = _bus_dmamap_load_buffer(dmat, map,
+ addr, minlen, pmap, flags, &lastaddr,
+ dmat->segments, &nsegs, first);
first = 0;
resid -= minlen;
@@ -555,31 +898,315 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
}
if (error) {
- /*
- * force "no valid mappings" on error in callback.
- */
- (*callback)(callback_arg, dm_segments, 0, 0, error);
+ /* force "no valid mappings" in callback */
+ (*callback)(callback_arg, dmat->segments, 0, 0, error);
} else {
- (*callback)(callback_arg, dm_segments, nsegs+1,
- uio->uio_resid, error);
+ (*callback)(callback_arg, dmat->segments,
+ nsegs+1, uio->uio_resid, error);
}
-
+ CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+ __func__, dmat, dmat->flags, error, nsegs + 1);
return (error);
}
/*
- * Release the mapping held by map. A no-op on PowerPC.
+ * Release the mapping held by map.
*/
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
+ struct bounce_page *bpage;
- return;
+ while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+ STAILQ_REMOVE_HEAD(&map->bpages, links);
+ free_bounce_page(dmat, bpage);
+ }
}
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
+ struct bounce_page *bpage;
+
+ if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+ /*
+ * Handle data bouncing. We might also
+ * want to add support for invalidating
+ * the caches on broken hardware
+ */
+ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
+ "performing bounce", __func__, op, dmat, dmat->flags);
+
+ if (op & BUS_DMASYNC_PREWRITE) {
+ while (bpage != NULL) {
+ bcopy((void *)bpage->datavaddr,
+ (void *)bpage->vaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ dmat->bounce_zone->total_bounced++;
+ }
+
+ if (op & BUS_DMASYNC_POSTREAD) {
+ while (bpage != NULL) {
+ bcopy((void *)bpage->vaddr,
+ (void *)bpage->datavaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
+ dmat->bounce_zone->total_bounced++;
+ }
+ }
+}
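
With real bouncing behind it, the PREWRITE/POSTREAD copies above make the
usual sync discipline mandatory rather than advisory. A driver brackets a
device-to-memory transfer as in this hypothetical RX path:

	bus_dmamap_sync(sc->sc_dmat, map, BUS_DMASYNC_PREREAD);
	/* ... start the transfer, wait for the completion interrupt ... */
	bus_dmamap_sync(sc->sc_dmat, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, map);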
+
+static void
+init_bounce_pages(void *dummy __unused)
+{
+
+ total_bpages = 0;
+ STAILQ_INIT(&bounce_zone_list);
+ STAILQ_INIT(&bounce_map_waitinglist);
+ STAILQ_INIT(&bounce_map_callbacklist);
+ mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
+}
+SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
+
+static struct sysctl_ctx_list *
+busdma_sysctl_tree(struct bounce_zone *bz)
+{
+ return (&bz->sysctl_tree);
+}
+
+static struct sysctl_oid *
+busdma_sysctl_tree_top(struct bounce_zone *bz)
+{
+ return (bz->sysctl_tree_top);
+}
+
+static int
+alloc_bounce_zone(bus_dma_tag_t dmat)
+{
+ struct bounce_zone *bz;
+
+ /* Check to see if we already have a suitable zone */
+ STAILQ_FOREACH(bz, &bounce_zone_list, links) {
+ if ((dmat->alignment <= bz->alignment)
+ && (dmat->lowaddr >= bz->lowaddr)) {
+ dmat->bounce_zone = bz;
+ return (0);
+ }
+ }
+
+ if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
+ M_NOWAIT | M_ZERO)) == NULL)
+ return (ENOMEM);
+
+ STAILQ_INIT(&bz->bounce_page_list);
+ bz->free_bpages = 0;
+ bz->reserved_bpages = 0;
+ bz->active_bpages = 0;
+ bz->lowaddr = dmat->lowaddr;
+ bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
+ bz->map_count = 0;
+ snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
+ busdma_zonecount++;
+ snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
+ STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
+ dmat->bounce_zone = bz;
+
+ sysctl_ctx_init(&bz->sysctl_tree);
+ bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
+ SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
+ CTLFLAG_RD, 0, "");
+ if (bz->sysctl_tree_top == NULL) {
+ sysctl_ctx_free(&bz->sysctl_tree);
+ return (0); /* XXX error code? */
+ }
+
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
+ "Total bounce pages");
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
+ "Free bounce pages");
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
+ "Reserved bounce pages");
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
+ "Active bounce pages");
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
+ "Total bounce requests");
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
+ "Total bounce requests that were deferred");
+ SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
+ SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
+ SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
+ "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
+
+ return (0);
+}
+
+static int
+alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
+{
+ struct bounce_zone *bz;
+ int count;
+
+ bz = dmat->bounce_zone;
+ count = 0;
+ while (numpages > 0) {
+ struct bounce_page *bpage;
+
+ bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+
+ if (bpage == NULL)
+ break;
+ bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
+ M_NOWAIT, 0ul,
+ bz->lowaddr,
+ PAGE_SIZE,
+ 0);
+ if (bpage->vaddr == 0) {
+ free(bpage, M_DEVBUF);
+ break;
+ }
+ bpage->busaddr = pmap_kextract(bpage->vaddr);
+ mtx_lock(&bounce_lock);
+ STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
+ total_bpages++;
+ bz->total_bpages++;
+ bz->free_bpages++;
+ mtx_unlock(&bounce_lock);
+ count++;
+ numpages--;
+ }
+ return (count);
+}
+
+static int
+reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
+{
+ struct bounce_zone *bz;
+ int pages;
+
+ mtx_assert(&bounce_lock, MA_OWNED);
+ bz = dmat->bounce_zone;
+ pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
+ if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
+ return (map->pagesneeded - (map->pagesreserved + pages));
+ bz->free_bpages -= pages;
+ bz->reserved_bpages += pages;
+ map->pagesreserved += pages;
+ pages = map->pagesneeded - map->pagesreserved;
+
+ return (pages);
+}
- return;
+static bus_addr_t
+add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
+ bus_size_t size)
+{
+ struct bounce_zone *bz;
+ struct bounce_page *bpage;
+
+ KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
+ KASSERT(map != NULL && map != &nobounce_dmamap,
+ ("add_bounce_page: bad map %p", map));
+
+ bz = dmat->bounce_zone;
+ if (map->pagesneeded == 0)
+ panic("add_bounce_page: map doesn't need any pages");
+ map->pagesneeded--;
+
+ if (map->pagesreserved == 0)
+ panic("add_bounce_page: map doesn't need any pages");
+ map->pagesreserved--;
+
+ mtx_lock(&bounce_lock);
+ bpage = STAILQ_FIRST(&bz->bounce_page_list);
+ if (bpage == NULL)
+ panic("add_bounce_page: free page list is empty");
+
+ STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
+ bz->reserved_bpages--;
+ bz->active_bpages++;
+ mtx_unlock(&bounce_lock);
+
+ if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /* Page offset needs to be preserved. */
+ bpage->vaddr |= vaddr & PAGE_MASK;
+ bpage->busaddr |= vaddr & PAGE_MASK;
+ }
+ bpage->datavaddr = vaddr;
+ bpage->datacount = size;
+ STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
+ return (bpage->busaddr);
+}
+
+static void
+free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
+{
+ struct bus_dmamap *map;
+ struct bounce_zone *bz;
+
+ bz = dmat->bounce_zone;
+ bpage->datavaddr = 0;
+ bpage->datacount = 0;
+ if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /*
+ * Reset the bounce page to start at offset 0. Other uses
+ * of this bounce page may need to store a full page of
+ * data and/or assume it starts on a page boundary.
+ */
+ bpage->vaddr &= ~PAGE_MASK;
+ bpage->busaddr &= ~PAGE_MASK;
+ }
+
+ mtx_lock(&bounce_lock);
+ STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
+ bz->free_bpages++;
+ bz->active_bpages--;
+ if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
+ if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
+ STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
+ STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
+ map, links);
+ busdma_swi_pending = 1;
+ bz->total_deferred++;
+ swi_sched(vm_ih, 0);
+ }
+ }
+ mtx_unlock(&bounce_lock);
+}
+
+void
+busdma_swi(void)
+{
+ bus_dma_tag_t dmat;
+ struct bus_dmamap *map;
+
+ mtx_lock(&bounce_lock);
+ while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
+ STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
+ mtx_unlock(&bounce_lock);
+ dmat = map->dmat;
+ (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
+ bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
+ map->callback, map->callback_arg, /*flags*/0);
+ (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
+ mtx_lock(&bounce_lock);
+ }
+ mtx_unlock(&bounce_lock);
}