| author | scottl <scottl@FreeBSD.org> | 2004-10-19 02:39:27 +0000 |
|---|---|---|
| committer | scottl <scottl@FreeBSD.org> | 2004-10-19 02:39:27 +0000 |
| commit | 57d3e400ee18162a2b75af4a328175684cc1f30e (patch) | |
| tree | fb9fd0a6e4251f62f47ab1b09a45147d91bd264d /sys | |
| parent | 55c32119bfac1fedea62a735065a603a9444b288 (diff) | |
Use an alignment of 1 instead of PAGE_SIZE for the rx and tx buffer tags.
Since the e1000 DMA engines have no constraints on the alignment of buffer
transfers, there is no reason to tell busdma that there are any. This saves
a minimum of one malloc call per packet, which translates to eliminating
four locks. It also means that buffers are not needlessly bounced when
transferred. The end result is a 38% improvement in pps in a 4-way bridging
environment.
Obtained from: Sandvine, Inc.
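
For context, here is a minimal sketch of a busdma tag created with an alignment of 1, in the style of the em(4) calls changed in the diff below. It is not the driver's exact call: the `example_packet_tag` name, the MCLBYTES size, and the single-segment limit are illustrative assumptions.

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <machine/bus.h>

/*
 * Sketch only: an alignment of 1 tells busdma that packet buffers need no
 * particular alignment, so ordinary mbuf data can be mapped in place rather
 * than bounced into page-aligned memory.
 */
static int
example_packet_tag(bus_dma_tag_t *tagp)
{
	return (bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment (1 = none), bounds */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES,				/* maxsize (illustrative) */
	    1,					/* nsegments (illustrative) */
	    MCLBYTES,				/* maxsegsz (illustrative) */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    tagp));
}
```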
Diffstat (limited to 'sys')
| -rw-r--r-- | sys/dev/em/if_em.c | 14 |

1 file changed, 2 insertions, 12 deletions
```diff
diff --git a/sys/dev/em/if_em.c b/sys/dev/em/if_em.c
index 2964c62..a90eb7a 100644
--- a/sys/dev/em/if_em.c
+++ b/sys/dev/em/if_em.c
@@ -2056,13 +2056,6 @@ em_dma_malloc(struct adapter *adapter, bus_size_t size,
 		goto fail_0;
 	}
 
-	r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map);
-	if (r != 0) {
-		printf("em%d: em_dma_malloc: bus_dmamap_create failed; "
-			"error %u\n", adapter->unit, r);
-		goto fail_1;
-	}
-
 	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
 			     BUS_DMA_NOWAIT, &dma->dma_map);
 	if (r != 0) {
@@ -2090,8 +2083,6 @@ fail_3:
 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
 fail_2:
 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
-fail_1:
-	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
 	bus_dma_tag_destroy(dma->dma_tag);
 fail_0:
 	dma->dma_map = NULL;
@@ -2104,7 +2095,6 @@ em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
 {
 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
-	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
 	bus_dma_tag_destroy(dma->dma_tag);
 }
 
@@ -2145,7 +2135,7 @@ em_setup_transmit_structures(struct adapter * adapter)
 	 * Setup DMA descriptor areas.
 	 */
 	if (bus_dma_tag_create(NULL,                    /* parent */
-                               PAGE_SIZE, 0,            /* alignment, bounds */
+                               1, 0,                    /* alignment, bounds */
                                BUS_SPACE_MAXADDR,       /* lowaddr */
                                BUS_SPACE_MAXADDR,       /* highaddr */
                                NULL, NULL,              /* filter, filterarg */
@@ -2523,7 +2513,7 @@ em_allocate_receive_structures(struct adapter * adapter)
 		sizeof(struct em_buffer) * adapter->num_rx_desc);
 
 	error = bus_dma_tag_create(NULL,                /* parent */
-                               PAGE_SIZE, 0,            /* alignment, bounds */
+                               1, 0,                    /* alignment, bounds */
                                BUS_SPACE_MAXADDR,       /* lowaddr */
                                BUS_SPACE_MAXADDR,       /* highaddr */
                                NULL, NULL,              /* filter, filterarg */
```
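
The bus_dmamap_create()/bus_dmamap_destroy() removals above follow from how the busdma(9) KPI pairs its allocation calls: bus_dmamem_alloc() returns a DMA map along with the memory, and bus_dmamem_free() releases that map again. Below is a hedged sketch of that pairing; the `example_dma` structure and function names are illustrative, not the driver's em_dma_alloc code.

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <machine/bus.h>

struct example_dma {
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	caddr_t		vaddr;
};

/*
 * Sketch only: bus_dmamem_alloc() fills in the map itself, so no separate
 * bus_dmamap_create() step is needed, and bus_dmamem_free() releases that
 * map, so no bus_dmamap_destroy() is needed either.
 */
static int
example_dma_get(struct example_dma *d, bus_size_t size)
{
	int error;

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL,
	    &d->tag);
	if (error != 0)
		return (error);
	error = bus_dmamem_alloc(d->tag, (void **)&d->vaddr, BUS_DMA_NOWAIT,
	    &d->map);
	if (error != 0)
		bus_dma_tag_destroy(d->tag);
	return (error);
}

static void
example_dma_put(struct example_dma *d)
{
	bus_dmamem_free(d->tag, d->vaddr, d->map);
	bus_dma_tag_destroy(d->tag);
}
```

Mapping the memory for the device (bus_dmamap_load()/bus_dmamap_unload()) is omitted from this sketch; the driver still performs those steps, as the unload calls in the diff's error and free paths show.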