summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authormux <mux@FreeBSD.org>2003-07-01 19:02:26 +0000
committermux <mux@FreeBSD.org>2003-07-01 19:02:26 +0000
commit391326ab2b73473a4e17f06f716f07c916d9ff78 (patch)
tree1991593be4e0098edb5760e4eeac40fb5c163828 /sys
parent7f5998c70714e529c85460b27b3b334ab6617cd4 (diff)
downloadFreeBSD-src-391326ab2b73473a4e17f06f716f07c916d9ff78.zip
FreeBSD-src-391326ab2b73473a4e17f06f716f07c916d9ff78.tar.gz
Sync with i386's busdma backend. This fixes a few bugs and adds
support for the BUS_DMA_NOWAIT flag in bus_dmamap_load().
Diffstat (limited to 'sys')
-rw-r--r--sys/alpha/alpha/busdma_machdep.c81
1 file changed, 48 insertions, 33 deletions
diff --git a/sys/alpha/alpha/busdma_machdep.c b/sys/alpha/alpha/busdma_machdep.c
index 4df7bd2..e540e1c 100644
--- a/sys/alpha/alpha/busdma_machdep.c
+++ b/sys/alpha/alpha/busdma_machdep.c
@@ -106,12 +106,21 @@ static struct bus_dmamap nobounce_dmamap;
static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
-static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
-static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
+static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+ int commit);
+static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
+/*
+ * Return true if a match is made.
+ *
+ * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
+ *
+ * If paddr is within the bounds of the dma tag then call the filter callback
+ * to check for a match; if there is no filter callback, then assume a match.
+ */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
@@ -282,8 +291,14 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
atomic_subtract_int(&dmat->ref_count, 1);
if (dmat->ref_count == 0) {
free(dmat, M_DEVBUF);
- }
- dmat = parent;
+ /*
+ * Last reference count, so
+ * release our reference
+ * count on our parent.
+ */
+ dmat = parent;
+ } else
+ dmat = NULL;
}
}
return (0);
@@ -379,16 +394,9 @@ bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
sgmap_free_region(chipset.sgmap, map->sgmaphandle);
}
- if (map != NULL) {
+ if (map != NULL && map != &nobounce_dmamap) {
if (STAILQ_FIRST(&map->bpages) != NULL)
return (EBUSY);
- /*
- * The nobounce_dmamap map is not dynamically
- * allocated, thus we should on no account try to
- * free it.
- */
- if (map != &nobounce_dmamap)
- free(map, M_DEVBUF);
}
dmat->map_count--;
return (0);
@@ -430,7 +438,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
/*
* Free a piece of memory and its associated dmamap, that was allocated
- * via bus_dmamem_alloc.
+ * via bus_dmamem_alloc. Make the same choice for free/contigfree.
*/
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
@@ -525,18 +533,24 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
/* Reserve Necessary Bounce Pages */
if (map->pagesneeded != 0) {
mtx_lock(&bounce_lock);
- if (reserve_bounce_pages(dmat, map) != 0) {
-
- /* Queue us for resources */
- map->dmat = dmat;
- map->buf = buf;
- map->buflen = buflen;
- map->callback = callback;
- map->callback_arg = callback_arg;
-
- STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
- mtx_unlock(&bounce_lock);
- return (EINPROGRESS);
+ if (flags & BUS_DMA_NOWAIT) {
+ if (reserve_bounce_pages(dmat, map, 0) != 0) {
+ mtx_unlock(&bounce_lock);
+ return (ENOMEM);
+ }
+ } else {
+ if (reserve_bounce_pages(dmat, map, 1) != 0) {
+ /* Queue us for resources */
+ map->dmat = dmat;
+ map->buf = buf;
+ map->buflen = buflen;
+ map->callback = callback;
+ map->callback_arg = callback_arg;
+ STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
+ map, links);
+ mtx_unlock(&bounce_lock);
+ return (EINPROGRESS);
+ }
}
mtx_unlock(&bounce_lock);
}
@@ -619,7 +633,7 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
pmap = NULL;
lastaddr = *lastaddrp;
- bmask = ~(dmat->boundary - 1);
+ bmask = ~(dmat->boundary - 1);
for (seg = *segp; buflen > 0 ; ) {
/*
@@ -706,7 +720,7 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
- vm_offset_t lastaddr = 0;
+ bus_addr_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
@@ -742,7 +756,7 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
- vm_offset_t lastaddr;
+ bus_addr_t lastaddr;
#ifdef __GNUC__
bus_dma_segment_t dm_segments[dmat->nsegments];
#else
@@ -825,7 +839,6 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
struct bounce_page *bpage;
if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
-
/*
* Handle data bouncing. We might also
* want to add support for invalidating
@@ -887,7 +900,7 @@ alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
PAGE_SIZE,
dmat->boundary);
mtx_unlock(&Giant);
- if (bpage->vaddr == NULL) {
+ if (bpage->vaddr == 0) {
free(bpage, M_DEVBUF);
break;
}
@@ -904,12 +917,14 @@ alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
}
static int
-reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
+reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
int pages;
mtx_assert(&bounce_lock, MA_OWNED);
pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
+ if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
+ return (map->pagesneeded - (map->pagesreserved + pages));
free_bpages -= pages;
reserved_bpages += pages;
map->pagesreserved += pages;
@@ -918,7 +933,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
return (pages);
}
-static vm_offset_t
+static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bus_size_t size)
{
@@ -961,7 +976,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
free_bpages++;
active_bpages--;
if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
- if (reserve_bounce_pages(map->dmat, map) == 0) {
+ if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
map, links);
OpenPOWER on IntegriCloud