From 1f03137b2328854e1cb42e11321acccac19501fc Mon Sep 17 00:00:00 2001
From: scottl
Date: Fri, 2 Jul 2004 03:47:28 +0000
Subject: Allocate s/g segments in the tag rather than on the stack.

---
 sys/alpha/alpha/busdma_machdep.c | 65 +++++++++++++++++++++-------------------
 1 file changed, 35 insertions(+), 30 deletions(-)

diff --git a/sys/alpha/alpha/busdma_machdep.c b/sys/alpha/alpha/busdma_machdep.c
index 7b9a185..4497167 100644
--- a/sys/alpha/alpha/busdma_machdep.c
+++ b/sys/alpha/alpha/busdma_machdep.c
@@ -66,6 +66,7 @@ struct bus_dma_tag {
 	int		map_count;
 	bus_dma_lock_t	*lockfunc;
 	void		*lockfuncarg;
+	bus_dma_segment_t *segments;
 };
 
 struct bounce_page {
@@ -221,7 +222,9 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
 		newtag->lockfunc = dflt_lock;
 		newtag->lockfuncarg = NULL;
 	}
-	
+
+	newtag->segments = NULL;
+
 	/* Take into account any restrictions imposed by our parent tag */
 	if (parent != NULL) {
 		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
@@ -290,6 +293,8 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
 		parent = dmat->parent;
 		atomic_subtract_int(&dmat->ref_count, 1);
 		if (dmat->ref_count == 0) {
+			if (dmat->segments != NULL)
+				free(dmat->segments, M_DEVBUF);
 			free(dmat, M_DEVBUF);
 			/*
 			 * Last reference count, so
@@ -332,6 +337,14 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
 		return (0);
 	}
 
+	if (dmat->segments == NULL) {
+		dmat->segments = (bus_dma_segment_t *)malloc(
+		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
+		    M_NOWAIT);
+		if (dmat->segments == NULL)
+			return (ENOMEM);
+	}
+
 	if (dmat->lowaddr < ptoa(Maxmem)) {
 		/* Must bounce */
 		int maxpages;
@@ -425,6 +438,14 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
 	/* If we succeed, no mapping/bouncing will be required */
 	*mapp = &nobounce_dmamap;
 
+	if (dmat->segments == NULL) {
+		dmat->segments = (bus_dma_segment_t *)malloc(
+		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
+		    M_NOWAIT);
+		if (dmat->segments == NULL)
+			return (ENOMEM);
+	}
+
 	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
 		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
 	} else {
@@ -477,11 +498,6 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 {
 	vm_offset_t	vaddr;
 	vm_offset_t	paddr;
-#ifdef __GNUC__
-	bus_dma_segment_t	dm_segments[dmat->nsegments];
-#else
-	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
-#endif
 	bus_dma_segment_t *sg;
 	int		seg;
 	int		error;
@@ -496,16 +512,16 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 		 * of the bus address space.
 		 */
 		vaddr = trunc_page((vm_offset_t) buf);
-		dm_segments[0].ds_addr =
+		dmat->segments[0].ds_addr =
 		    map->busaddress + (vm_offset_t) buf - vaddr;
-		dm_segments[0].ds_len = buflen;
+		dmat->segments[0].ds_len = buflen;
 		buflen = round_page((vm_offset_t) buf + buflen) - vaddr;
 		sgmap_load_region(chipset.sgmap, map->busaddress,
 		    vaddr, buflen);
 		map->buflen = buflen;
-		(*callback)(callback_arg, dm_segments, 1, error);
+		(*callback)(callback_arg, dmat->segments, 1, error);
 
 		return (0);
 	}
@@ -560,7 +576,7 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 	}
 
 	vaddr = (vm_offset_t)buf;
-	sg = &dm_segments[0];
+	sg = &dmat->segments[0];
 	seg = 1;
 	sg->ds_len = 0;
 
@@ -604,7 +620,7 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 		error = EFBIG;
 	}
 
-	(*callback)(callback_arg, dm_segments, seg, error);
+	(*callback)(callback_arg, dmat->segments, seg, error);
 
 	return (0);
 }
@@ -617,7 +633,6 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
  */
 static int
 _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
-			bus_dma_segment_t segs[],
 			void *buf, bus_size_t buflen,
 			struct thread *td,
 			int flags,
@@ -625,12 +640,15 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
 			int *segp,
 			int first)
 {
+	bus_dma_segment_t *segs;
 	bus_size_t sgsize;
 	bus_addr_t curaddr, lastaddr, baddr, bmask;
 	vm_offset_t vaddr = (vm_offset_t)buf;
 	int seg;
 	pmap_t pmap;
 
+	segs = dmat->segments;
+
 	if (td != NULL)
 		pmap = vmspace_pmap(td->td_proc->p_vmspace);
 	else
@@ -709,11 +727,6 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
 		     bus_dmamap_callback2_t *callback, void *callback_arg,
 		     int flags)
 {
-#ifdef __GNUC__
-	bus_dma_segment_t dm_segments[dmat->nsegments];
-#else
-	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
-#endif
 	int nsegs, error;
 
 	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
@@ -730,7 +743,6 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
 		for (m = m0; m != NULL && error == 0; m = m->m_next) {
 			if (m->m_len > 0) {
 				error = _bus_dmamap_load_buffer(dmat,
-						dm_segments,
 						m->m_data, m->m_len,
 						NULL, flags, &lastaddr,
 						&nsegs, first);
@@ -743,9 +755,9 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
 
 	if (error) {
 		/* force "no valid mappings" in callback */
-		(*callback)(callback_arg, dm_segments, 0, 0, error);
+		(*callback)(callback_arg, dmat->segments, 0, 0, error);
 	} else {
-		(*callback)(callback_arg, dm_segments,
+		(*callback)(callback_arg, dmat->segments,
 			    nsegs+1, m0->m_pkthdr.len, error);
 	}
 	return (error);
@@ -761,11 +773,6 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
 		    int flags)
 {
 	bus_addr_t lastaddr;
-#ifdef __GNUC__
-	bus_dma_segment_t dm_segments[dmat->nsegments];
-#else
-	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
-#endif
 	int nsegs, error, first, i;
 	bus_size_t resid;
 	struct iovec *iov;
@@ -796,9 +803,7 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
 		caddr_t addr = (caddr_t) iov[i].iov_base;
 
 		if (minlen > 0) {
-			error = _bus_dmamap_load_buffer(dmat,
-					dm_segments,
-					addr, minlen,
+			error = _bus_dmamap_load_buffer(dmat, addr, minlen,
 					td, flags, &lastaddr, &nsegs, first);
 			first = 0;
@@ -808,9 +813,9 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
 
 	if (error) {
 		/* force "no valid mappings" in callback */
-		(*callback)(callback_arg, dm_segments, 0, 0, error);
+		(*callback)(callback_arg, dmat->segments, 0, 0, error);
 	} else {
-		(*callback)(callback_arg, dm_segments,
+		(*callback)(callback_arg, dmat->segments,
 			    nsegs+1, uio->uio_resid, error);
 	}
 	return (error);
--
cgit v1.1
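
For context, not part of the patch itself: the change drops the per-call dm_segments[] scratch array, which was a variable-length array on the kernel stack under GCC or a fixed BUS_DMAMAP_NSEGS-sized array otherwise, and instead hangs one bus_dma_segment_t array off the tag. That array is allocated lazily the first time a map or DMA memory is created for the tag and freed only when the tag's last reference goes away. Below is a minimal userland sketch of that allocate-once-per-tag pattern; the dma_tag and dma_segment types and the helper names are illustrative stand-ins, not the FreeBSD busdma API, and kernel malloc(9)/M_DEVBUF is replaced by plain calloc()/free().

/*
 * Illustrative sketch only; stand-in types, not FreeBSD busdma.
 * Shows the "allocate the segment array once per tag, on first use"
 * pattern that the patch applies to struct bus_dma_tag.
 */
#include <errno.h>
#include <stdlib.h>

struct dma_segment {			/* stand-in for bus_dma_segment_t */
	unsigned long ds_addr;
	unsigned long ds_len;
};

struct dma_tag {			/* stand-in for struct bus_dma_tag */
	int nsegments;			/* maximum S/G entries per mapping */
	struct dma_segment *segments;	/* lazily allocated scratch array */
};

/* Allocate the per-tag scratch array the first time it is needed. */
static int
tag_get_segments(struct dma_tag *tag)
{

	if (tag->segments == NULL) {
		tag->segments = calloc(tag->nsegments,
		    sizeof(*tag->segments));
		if (tag->segments == NULL)
			return (ENOMEM);
	}
	return (0);
}

/* Every "load" reuses the same array instead of a stack VLA. */
static int
tag_load(struct dma_tag *tag /* , buffer, callback, ... */)
{
	int error;

	error = tag_get_segments(tag);
	if (error != 0)
		return (error);
	/* ... fill tag->segments[] and hand it to the callback ... */
	return (0);
}

/* Freed exactly once, when the tag itself goes away. */
static void
tag_destroy(struct dma_tag *tag)
{

	free(tag->segments);
	tag->segments = NULL;
}

int
main(void)
{
	struct dma_tag tag = { .nsegments = 8, .segments = NULL };

	if (tag_load(&tag) == 0)
		tag.segments[0].ds_len = 4096;	/* example use of the array */
	tag_destroy(&tag);
	return (0);
}

As the subject line says, the point is to keep the segment array, whose size is set by dmat->nsegments, off the kernel stack; it is sized once per tag and reused by every subsequent load on that tag.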