Diffstat (limited to 'sys/x86/x86/busdma_bounce.c')
 sys/x86/x86/busdma_bounce.c | 56 ++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 44 insertions(+), 12 deletions(-)
diff --git a/sys/x86/x86/busdma_bounce.c b/sys/x86/x86/busdma_bounce.c
index 6e9aa70..78d04b9 100644
--- a/sys/x86/x86/busdma_bounce.c
+++ b/sys/x86/x86/busdma_bounce.c
@@ -476,7 +476,8 @@ _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
while (buflen != 0) {
sgsize = MIN(buflen, dmat->common.maxsegsz);
if (bus_dma_run_filter(&dmat->common, curaddr)) {
- sgsize = MIN(PAGE_SIZE, sgsize);
+ sgsize = MIN(sgsize,
+ PAGE_SIZE - (curaddr & PAGE_MASK));
map->pagesneeded++;
}
curaddr += sgsize;
@@ -516,7 +517,8 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
else
paddr = pmap_extract(pmap, vaddr);
if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
- sg_len = PAGE_SIZE;
+ sg_len = roundup2(sg_len,
+ dmat->common.alignment);
map->pagesneeded++;
}
vaddr += sg_len;
@@ -552,7 +554,9 @@ _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
max_sgsize = MIN(buflen, dmat->common.maxsegsz);
sg_len = MIN(sg_len, max_sgsize);
if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
- sg_len = MIN(PAGE_SIZE, max_sgsize);
+ sg_len = roundup2(sg_len,
+ dmat->common.alignment);
+ sg_len = MIN(sg_len, max_sgsize);
KASSERT((sg_len & (dmat->common.alignment - 1))
== 0, ("Segment size is not aligned"));
map->pagesneeded++;
@@ -648,7 +652,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
int *segp)
{
bus_size_t sgsize;
- bus_addr_t curaddr, nextaddr;
+ bus_addr_t curaddr;
int error;
if (map == NULL)
@@ -672,12 +676,9 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 &&
bus_dma_run_filter(&dmat->common, curaddr)) {
- nextaddr = 0;
- sgsize = MIN(PAGE_SIZE, sgsize);
- if ((curaddr & PAGE_MASK) + sgsize > PAGE_SIZE)
- nextaddr = roundup2(curaddr, PAGE_SIZE);
- curaddr = add_bounce_page(dmat, map, 0, curaddr,
- nextaddr, sgsize);
+ sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
+ curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
+ sgsize);
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
@@ -743,7 +744,8 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 &&
bus_dma_run_filter(&dmat->common, curaddr)) {
- sgsize = MIN(PAGE_SIZE, max_sgsize);
+ sgsize = roundup2(sgsize, dmat->common.alignment);
+ sgsize = MIN(sgsize, max_sgsize);
curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
sgsize);
} else {
@@ -772,6 +774,17 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
int error, page_index;
bus_size_t sgsize, max_sgsize;
+ if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /*
+ * If we have to keep the offset of each page this function
+ * is not suitable, switch back to bus_dmamap_load_ma_triv
+ * which is going to do the right thing in this case.
+ */
+ error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
+ flags, segs, segp);
+ return (error);
+ }
+
if (map == NULL)
map = &nobounce_dmamap;
@@ -798,7 +811,10 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 &&
bus_dma_run_filter(&dmat->common, paddr)) {
- sgsize = MIN(PAGE_SIZE, max_sgsize);
+ sgsize = roundup2(sgsize, dmat->common.alignment);
+ sgsize = MIN(sgsize, max_sgsize);
+ KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
+ ("Segment size is not aligned"));
/*
* Check if two pages of the user provided buffer
* are used.
@@ -1159,6 +1175,13 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bz->active_bpages++;
mtx_unlock(&bounce_lock);
+ if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /* Page offset needs to be preserved. */
+ bpage->vaddr |= addr1 & PAGE_MASK;
+ bpage->busaddr |= addr1 & PAGE_MASK;
+ KASSERT(addr2 == 0,
+ ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
+ }
bpage->datavaddr = vaddr;
bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
@@ -1178,6 +1201,15 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
bz = dmat->bounce_zone;
bpage->datavaddr = 0;
bpage->datacount = 0;
+ if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+ /*
+ * Reset the bounce page to start at offset 0. Other uses
+ * of this bounce page may need to store a full page of
+ * data and/or assume it starts on a page boundary.
+ */
+ bpage->vaddr &= ~PAGE_MASK;
+ bpage->busaddr &= ~PAGE_MASK;
+ }
mtx_lock(&bounce_lock);
STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
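
A note on the recurring clamp in the load paths: (curaddr & PAGE_MASK) is the offset of curaddr within its page, so PAGE_SIZE - (curaddr & PAGE_MASK) is the number of bytes left in that page. Clamping sgsize to it keeps a bounced segment inside a single page, whereas the old MIN(PAGE_SIZE, sgsize) let a segment that started mid-page spill into the next one. A minimal userland sketch of the arithmetic, assuming 4 KiB pages (the defines stand in for the kernel macros):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL            /* assumption: 4 KiB x86 pages */
#define PAGE_MASK (PAGE_SIZE - 1UL)
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t curaddr = 0x12345f00UL; /* 0x100 bytes before the page end */
	uint64_t sgsize = 0x400UL;       /* caller asks for 1 KiB */

	/* Old clamp: 0x400 bytes, so 0xf00 + 0x400 crosses the boundary. */
	uint64_t old_len = MIN(PAGE_SIZE, sgsize);
	/* New clamp: 0x100 bytes, ending exactly at the page boundary. */
	uint64_t new_len = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));

	printf("old 0x%jx, new 0x%jx\n", (uintmax_t)old_len, (uintmax_t)new_len);
	return (0);
}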
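
Likewise, the bounced length is now grown with roundup2(sgsize, dmat->common.alignment) before being clamped to max_sgsize, so the segment handed to the bounce page honors the tag's alignment; the added KASSERTs verify the result is still aligned after the clamp. A small sketch, with roundup2() reproduced from FreeBSD's sys/param.h and a hypothetical 64-byte tag:

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

/* FreeBSD's roundup2(): round x up to a multiple of the power of two y. */
#define roundup2(x, y) (((x) + ((y) - 1)) & ~((y) - 1))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t alignment = 64;   /* assumption: tag with 64-byte alignment */
	uint64_t max_sgsize = 512; /* here itself a multiple of the alignment */
	uint64_t sgsize = 100;     /* raw segment length */

	sgsize = roundup2(sgsize, alignment); /* 100 -> 128 */
	sgsize = MIN(sgsize, max_sgsize);

	/* The invariant the new KASSERTs enforce. */
	assert((sgsize & (alignment - 1)) == 0);
	printf("aligned sgsize: %ju\n", (uintmax_t)sgsize);
	return (0);
}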
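
Finally, the BUS_DMA_KEEP_PG_OFFSET handling is plain mask arithmetic: add_bounce_page() ORs the data's intra-page offset (addr1 & PAGE_MASK) into the bounce page's vaddr and busaddr, so the bounced copy sits at the same page offset the device expects, and free_bounce_page() masks the offset back off so the next consumer gets a page-aligned bounce page again. The same arithmetic in isolation, with hypothetical addresses and 4 KiB pages assumed:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (PAGE_SIZE - 1UL)

int
main(void)
{
	uint64_t vaddr = 0xffffff8000aa0000UL; /* page-aligned bounce page */
	uint64_t addr1 = 0x12345abcUL;         /* client data being bounced */

	/* add_bounce_page(): carry the data's page offset into the bounce page. */
	vaddr |= addr1 & PAGE_MASK;
	printf("with offset: 0x%jx\n", (uintmax_t)vaddr);

	/* free_bounce_page(): reset to a page boundary for the next user. */
	vaddr &= ~PAGE_MASK;
	printf("reset:       0x%jx\n", (uintmax_t)vaddr);
	return (0);
}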