author    royger <royger@FreeBSD.org>    2016-04-15 09:21:50 +0000
committer royger <royger@FreeBSD.org>    2016-04-15 09:21:50 +0000
commit    0f955221ea625a5fdb37f2fa0b63d761f2103918 (patch)
tree      3c2361d5fd33b34dab73db05cdbe36914ab5a382 /sys/x86
parent    0e7cffb825b2a74071888588f71a2d3b79f5b703 (diff)
busdma/bounce: revert r292255
Revert r292255 because it can create bounced regions whose page offsets are not contiguous, and contiguous page offsets are required by USB devices. Another solution would be to always bounce the full buffer (even when only one page requires bouncing), but this seems overly complicated and unnecessary, and it would probably consume more bounce pages than the current code.

Reported by:	phk
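To make the page-offset problem concrete, here is a minimal standalone sketch (userland C, not the kernel code itself) of the per-page clamp this revert restores: each bounced segment is trimmed at the end of its page, so the copy can be placed at the same offset within its bounce page. The PAGE_SIZE/PAGE_MASK definitions, the min_ul() helper, and the addresses are illustrative assumptions, not kernel values.

/*
 * Standalone sketch of the segment-size clamp restored by this revert:
 * a bounced segment never crosses a page boundary, so the offset within
 * the page (curaddr & PAGE_MASK) of the bounced copy can match the
 * original buffer's. All values below are illustrative.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (PAGE_SIZE - 1)

static unsigned long
min_ul(unsigned long a, unsigned long b)
{
	return (a < b ? a : b);
}

int
main(void)
{
	unsigned long curaddr = 0x12345ff0UL; /* hypothetical physical address */
	unsigned long buflen = 8192UL;        /* hypothetical buffer length */

	while (buflen != 0) {
		/*
		 * Clamp the segment at the end of the current page; a
		 * bounce page can then hold it at the same page offset.
		 */
		unsigned long sgsize = min_ul(buflen,
		    PAGE_SIZE - (curaddr & PAGE_MASK));
		printf("bounce %lu bytes at %#lx (page offset %#lx)\n",
		    sgsize, curaddr, curaddr & PAGE_MASK);
		curaddr += sgsize;
		buflen -= sgsize;
	}
	return (0);
}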
Diffstat (limited to 'sys/x86')
-rw-r--r--    sys/x86/x86/busdma_bounce.c    56
1 file changed, 44 insertions, 12 deletions
diff --git a/sys/x86/x86/busdma_bounce.c b/sys/x86/x86/busdma_bounce.c
index 6e9aa70..78d04b9 100644
--- a/sys/x86/x86/busdma_bounce.c
+++ b/sys/x86/x86/busdma_bounce.c
@@ -476,7 +476,8 @@ _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
while (buflen != 0) {
sgsize = MIN(buflen, dmat->common.maxsegsz);
if (bus_dma_run_filter(&dmat->common, curaddr)) {
- sgsize = MIN(PAGE_SIZE, sgsize);
+ sgsize = MIN(sgsize,
+ PAGE_SIZE - (curaddr & PAGE_MASK));
map->pagesneeded++;
}
curaddr += sgsize;
@@ -516,7 +517,8 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
else
paddr = pmap_extract(pmap, vaddr);
if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
- sg_len = PAGE_SIZE;
+ sg_len = roundup2(sg_len,
+ dmat->common.alignment);
map->pagesneeded++;
}
vaddr += sg_len;
@@ -552,7 +554,9 @@ _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
max_sgsize = MIN(buflen, dmat->common.maxsegsz);
sg_len = MIN(sg_len, max_sgsize);
if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
- sg_len = MIN(PAGE_SIZE, max_sgsize);
+ sg_len = roundup2(sg_len,
+ dmat->common.alignment);
+ sg_len = MIN(sg_len, max_sgsize);
KASSERT((sg_len & (dmat->common.alignment - 1))
== 0, ("Segment size is not aligned"));
map->pagesneeded++;
@@ -648,7 +652,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
int *segp)
{
bus_size_t sgsize;
- bus_addr_t curaddr, nextaddr;
+ bus_addr_t curaddr;
int error;
if (map == NULL)
@@ -672,12 +676,9 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 &&
bus_dma_run_filter(&dmat->common, curaddr)) {
- nextaddr = 0;
- sgsize = MIN(PAGE_SIZE, sgsize);
- if ((curaddr & PAGE_MASK) + sgsize > PAGE_SIZE)
- nextaddr = roundup2(curaddr, PAGE_SIZE);
- curaddr = add_bounce_page(dmat, map, 0, curaddr,
- nextaddr, sgsize);
+ sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
+ curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
+ sgsize);
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
@@ -743,7 +744,8 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 &&
bus_dma_run_filter(&dmat->common, curaddr)) {
- sgsize = MIN(PAGE_SIZE, max_sgsize);
+ sgsize = roundup2(sgsize, dmat->common.alignment);
+ sgsize = MIN(sgsize, max_sgsize);
curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
sgsize);
} else {
@@ -772,6 +774,17 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
int error, page_index;
bus_size_t sgsize, max_sgsize;
+ if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /*
+ * If we have to keep the offset of each page this function
+ * is not suitable, switch back to bus_dmamap_load_ma_triv
+ * which is going to do the right thing in this case.
+ */
+ error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
+ flags, segs, segp);
+ return (error);
+ }
+
if (map == NULL)
map = &nobounce_dmamap;
@@ -798,7 +811,10 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 &&
bus_dma_run_filter(&dmat->common, paddr)) {
- sgsize = MIN(PAGE_SIZE, max_sgsize);
+ sgsize = roundup2(sgsize, dmat->common.alignment);
+ sgsize = MIN(sgsize, max_sgsize);
+ KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
+ ("Segment size is not aligned"));
/*
* Check if two pages of the user provided buffer
* are used.
@@ -1159,6 +1175,13 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bz->active_bpages++;
mtx_unlock(&bounce_lock);
+ if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /* Page offset needs to be preserved. */
+ bpage->vaddr |= addr1 & PAGE_MASK;
+ bpage->busaddr |= addr1 & PAGE_MASK;
+ KASSERT(addr2 == 0,
+ ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
+ }
bpage->datavaddr = vaddr;
bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
@@ -1178,6 +1201,15 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
bz = dmat->bounce_zone;
bpage->datavaddr = 0;
bpage->datacount = 0;
+ if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /*
+ * Reset the bounce page to start at offset 0. Other uses
+ * of this bounce page may need to store a full page of
+ * data and/or assume it starts on a page boundary.
+ */
+ bpage->vaddr &= ~PAGE_MASK;
+ bpage->busaddr &= ~PAGE_MASK;
+ }
mtx_lock(&bounce_lock);
STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
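For reference, the following is a hedged userland sketch of the preserve/reset pattern shown in the add_bounce_page()/free_bounce_page() hunks above for the BUS_DMA_KEEP_PG_OFFSET case; the struct layout, field names, and addresses are illustrative stand-ins, not the kernel's bounce_page structure.

/*
 * Sketch of offset handling on a bounce page: shift the page's
 * addresses to the client buffer's page offset while in use, then
 * reset to offset 0 on free so later users can assume a page-aligned,
 * full-sized bounce page. Values are illustrative.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (PAGE_SIZE - 1)

struct bounce_page {
	unsigned long vaddr;   /* KVA of the bounce page */
	unsigned long busaddr; /* bus address of the bounce page */
};

/* On allocation: adopt the client buffer's page offset (addr1). */
static void
preserve_offset(struct bounce_page *bp, unsigned long addr1)
{
	bp->vaddr |= addr1 & PAGE_MASK;
	bp->busaddr |= addr1 & PAGE_MASK;
}

/* On free: return the page to a page-aligned state. */
static void
reset_offset(struct bounce_page *bp)
{
	bp->vaddr &= ~PAGE_MASK;
	bp->busaddr &= ~PAGE_MASK;
}

int
main(void)
{
	struct bounce_page bp = { 0x10000UL, 0x20000UL }; /* page-aligned */

	preserve_offset(&bp, 0x12345ff0UL);
	printf("in use: vaddr=%#lx busaddr=%#lx\n", bp.vaddr, bp.busaddr);
	reset_offset(&bp);
	printf("freed:  vaddr=%#lx busaddr=%#lx\n", bp.vaddr, bp.busaddr);
	return (0);
}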