From c56d329e28cff2d7a1e77cafc2f9c7e986ba11cb Mon Sep 17 00:00:00 2001
From: Geert Uytterhoeven
Date: Sun, 22 May 2016 11:05:52 +0200
Subject: MAINTAINERS: Add file patterns for dma device tree bindings

Submitters of device tree binding documentation may forget to CC the
subsystem maintainer if this file pattern is missing.

Signed-off-by: Geert Uytterhoeven
Cc: Vinod Koul
Cc: dmaengine@vger.kernel.org
Signed-off-by: Vinod Koul
---
 MAINTAINERS | 1 +
 1 file changed, 1 insertion(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 7304d2e..f7bbae3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3773,6 +3773,7 @@
 Q:	https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:	Maintained
 F:	drivers/dma/
 F:	include/linux/dmaengine.h
+F:	Documentation/devicetree/bindings/dma/
 F:	Documentation/dmaengine/
 T:	git git://git.infradead.org/users/vkoul/slave-dma.git
--
cgit v1.1

From 4a9723e8df68cfce4048517ee32e37f78854b6fb Mon Sep 17 00:00:00 2001
From: Ludovic Desroches
Date: Thu, 12 May 2016 16:54:08 +0200
Subject: dmaengine: at_xdmac: align descriptors on 64 bits

Having descriptors aligned on 64 bits allows updating CNDA and CUBC in
an atomic way.

Signed-off-by: Ludovic Desroches
Fixes: e1f7c9eee707 ("dmaengine: at_xdmac: creation of the atmel eXtended DMA Controller driver")
Cc: stable@vger.kernel.org #v4.1 and later
Reviewed-by: Nicolas Ferre
Signed-off-by: Vinod Koul
---
 drivers/dma/at_xdmac.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 8e304b1..ba9b0b7 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -242,7 +242,7 @@ struct at_xdmac_lld {
 	u32	mbr_dus;	/* Destination Microblock Stride Register */
 };
 
-
+/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
 struct at_xdmac_desc {
 	struct at_xdmac_lld		lld;
 	enum dma_transfer_direction	direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
 	unsigned int			xfer_size;
 	struct list_head		descs_list;
 	struct list_head		xfer_node;
-};
+} __aligned(sizeof(u64));
 
 static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
 {
--
cgit v1.1
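
For readers unfamiliar with the attribute used above, here is a minimal stand-alone
sketch (illustrative names only, not code from the driver) of what forcing a structure
onto a 64-bit boundary guarantees: the first two 32-bit words end up in one naturally
aligned 64-bit slot, which is what makes updating them as a single unit possible.

#include <stdint.h>
#include <stddef.h>

/* Illustrative descriptor, not the driver's: aligning the whole structure
 * on 64 bits guarantees that the first two 32-bit words sit in one
 * naturally aligned 64-bit slot, so they can be updated as a single unit. */
struct demo_desc {
	uint32_t next_desc;	/* stand-in for a CNDA-style word */
	uint32_t ubc;		/* stand-in for a CUBC-style word */
	uint32_t cfg;
} __attribute__((aligned(sizeof(uint64_t))));

/* Compile-time checks of the layout the alignment is meant to guarantee. */
_Static_assert(_Alignof(struct demo_desc) == sizeof(uint64_t),
	       "descriptor must start on a 64-bit boundary");
_Static_assert(offsetof(struct demo_desc, ubc) == sizeof(uint32_t),
	       "the two words must be adjacent inside the aligned slot");

Building this with any C11 compiler is enough to check the layout; nothing here depends
on the at_xdmac driver itself.
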
From 53398f488821c2b5b15291e3debec6ad33f75d3d Mon Sep 17 00:00:00 2001
From: Ludovic Desroches
Date: Thu, 12 May 2016 16:54:09 +0200
Subject: dmaengine: at_xdmac: fix residue corruption

An unexpected value of CUBC can lead to a corrupted residue. A more
complex sequence is needed to detect an inaccurate value for NDA or CUBC.

Signed-off-by: Ludovic Desroches
Fixes: e1f7c9eee707 ("dmaengine: at_xdmac: creation of the atmel eXtended DMA Controller driver")
Cc: stable@vger.kernel.org #v4.1 and later
Reviewed-by: Nicolas Ferre
Signed-off-by: Vinod Koul
---
 drivers/dma/at_xdmac.c | 54 ++++++++++++++++++++++++++++++--------------------
 1 file changed, 32 insertions(+), 22 deletions(-)

diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index ba9b0b7..b02494e 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1400,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	u32			cur_nda, check_nda, cur_ubc, mask, value;
 	u8			dwidth = 0;
 	unsigned long		flags;
+	bool			initd;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
@@ -1435,34 +1436,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	}
 
 	/*
-	 * When processing the residue, we need to read two registers but we
-	 * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
-	 * we stand in the descriptor list and AT_XDMAC_CUBC is used
-	 * to know how many data are remaining for the current descriptor.
-	 * Since the dma channel is not paused to not loose data, between the
-	 * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
-	 * descriptor.
-	 * For that reason, after reading AT_XDMAC_CUBC, we check if we are
-	 * still using the same descriptor by reading a second time
-	 * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
-	 * read again AT_XDMAC_CUBC.
+	 * The easiest way to compute the residue would be to pause the DMA,
+	 * but doing so can lead to missed data, as some devices don't have a
+	 * FIFO.
+	 * We need to read several registers because:
+	 * - the DMA is running, so a descriptor change is possible while
+	 *   reading these registers
+	 * - when a block transfer is done, the CUBC register is set back to
+	 *   its initial value until the next descriptor is fetched. This
+	 *   value would corrupt the residue calculation, so we have to skip
+	 *   it.
+	 *
+	 *  INITD --------                    ------------
+	 *               |____________________|
+	 *        _______________________  _______________
+	 *  NDA        @desc2            \/    @desc3
+	 *        _______________________/\_______________
+	 *        __________  ___________  _______________
+	 *  CUBC       0    \/ MAX desc1 \/  MAX desc2
+	 *        __________/\___________/\_______________
+	 *
+	 * Since descriptors are aligned on 64 bits, we can assume that the
+	 * update of NDA and CUBC is atomic.
 	 * Memory barriers are used to ensure the read order of the registers.
-	 * A max number of retries is set because unlikely it can never ends if
-	 * we are transferring a lot of data with small buffers.
+	 * A max number of retries is set because, however unlikely, the loop could never end.
 	 */
-	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-	rmb();
-	cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
 	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
-		rmb();
 		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
-		if (likely(cur_nda == check_nda))
-			break;
-
-		cur_nda = check_nda;
+		rmb();
+		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
 		rmb();
 		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+		rmb();
+		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+		rmb();
+
+		if ((check_nda == cur_nda) && initd)
+			break;
 	}
 
 	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
--
cgit v1.1
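
The sampling loop introduced above is an instance of a more general pattern: take a
snapshot of several registers with read barriers in between, then re-read the first
register and only accept the snapshot if nothing moved underneath. A condensed sketch
of that pattern, with hypothetical accessors standing in for the driver's register
reads (not the driver's real functions), could look like this:

#include <stdbool.h>
#include <stdint.h>

#define SNAPSHOT_MAX_RETRIES 5

/* Hypothetical accessors standing in for at_xdmac_chan_read()/rmb(). */
uint32_t read_next_desc(void);  /* analogous to reading CNDA */
uint32_t read_remaining(void);  /* analogous to reading CUBC */
bool read_fetch_done(void);     /* analogous to testing CC.INITD */
void read_barrier(void);        /* analogous to rmb() */

/*
 * Take a consistent (descriptor, remaining-count) snapshot while the DMA
 * keeps running: sample the descriptor pointer, the fetch-done flag and the
 * counter, then re-read the pointer and only accept the sample if it did
 * not move and the descriptor fetch was complete.  Returns false if no
 * stable snapshot was obtained within the retry budget.
 */
static bool snapshot_residue(uint32_t *desc, uint32_t *remaining)
{
	for (int retry = 0; retry < SNAPSHOT_MAX_RETRIES; retry++) {
		uint32_t check = read_next_desc();
		read_barrier();
		bool fetch_done = read_fetch_done();
		read_barrier();
		uint32_t ubc = read_remaining();
		read_barrier();
		uint32_t cur = read_next_desc();
		read_barrier();

		if (check == cur && fetch_done) {
			*desc = cur;
			*remaining = ubc;
			return true;
		}
	}
	return false;
}

The bounded retry count plays the same role as AT_XDMAC_RESIDUE_MAX_RETRIES in the
patch: without it the loop could, however unlikely, spin forever on a busy channel.
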
From 9295c41d77ca93aac79cfca6fa09fa1ca5cab66f Mon Sep 17 00:00:00 2001
From: Ludovic Desroches
Date: Thu, 12 May 2016 16:54:10 +0200
Subject: dmaengine: at_xdmac: double FIFO flush needed to compute residue

Due to the way the CUBC register is updated, a double flush is needed to
compute an accurate residue. The first flush gets the data out of the
DMA FIFO; the second one ensures that we won't report data which is not
yet in memory.

Signed-off-by: Ludovic Desroches
Fixes: e1f7c9eee707 ("dmaengine: at_xdmac: creation of the atmel eXtended DMA Controller driver")
Cc: stable@vger.kernel.org #v4.1 and later
Reviewed-by: Nicolas Ferre
Signed-off-by: Vinod Koul
---
 drivers/dma/at_xdmac.c | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b02494e..75bd662 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1425,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	residue = desc->xfer_size;
 	/*
 	 * Flush FIFO: only relevant when the transfer is source peripheral
-	 * synchronized.
+	 * synchronized. The flush is needed before reading CUBC because data
+	 * in the FIFO is not reported by CUBC. Reporting a residue of the
+	 * transfer length while there is data in the FIFO can cause issues.
+	 * Use case: the Atmel USART has a timeout which fires when characters
+	 * have been received but no new one has arrived for a while. On
+	 * timeout, it requests the residue. If the data is still in the DMA
+	 * FIFO, we return a residue of the transfer length, meaning that no
+	 * data was received. If an application is waiting for this data, it
+	 * will hang, since we won't get another USART timeout without
+	 * receiving new data.
 	 */
 	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
 	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1481,6 +1490,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	}
 
 	/*
+	 * Flush FIFO: only relevant when the transfer is source peripheral
+	 * synchronized. Another flush is needed here because CUBC is updated
+	 * when the controller sends the data write command. Without it we
+	 * could report data not yet written to memory or to the device. The
+	 * FIFO flush ensures that the data has really been written.
+	 */
+	if ((desc->lld.mbr_cfg & mask) == value) {
+		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+			cpu_relax();
+	}
+
+	/*
 	 * Remove size of all microblocks already transferred and the current
 	 * one. Then add the remaining size to transfer of the current
 	 * microblock.
--
cgit v1.1
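
The flush step this patch inserts twice boils down to "request a software flush, poll a
completion flag, then use the counter". A rough sketch of that idea follows; the helper
names are placeholders rather than the driver's real register accessors, and the
sequence only mirrors the flush/read/flush/report idea described above:

#include <stdbool.h>
#include <stdint.h>

/* Placeholder helpers, not the driver's API. */
void request_fifo_flush(void);   /* analogous to writing AT_XDMAC_GSWF */
bool fifo_flush_done(void);      /* analogous to testing AT_XDMAC_CIS_FIS */
void relax(void);                /* analogous to cpu_relax() */
uint32_t read_remaining(void);   /* analogous to reading CUBC */

/*
 * Residue read bracketed by two flushes, mirroring the idea of the patch:
 * flush before reading the counter so FIFO-resident data is not counted as
 * still pending, and flush again before reporting so that data the
 * controller has already accounted for has really reached memory.
 */
static uint32_t residue_with_double_flush(void)
{
	request_fifo_flush();
	while (!fifo_flush_done())
		relax();

	uint32_t remaining = read_remaining();

	request_fifo_flush();
	while (!fifo_flush_done())
		relax();

	return remaining;
}
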
From 5156463588c3999b630d9ffc6061a54962f3c2d9 Mon Sep 17 00:00:00 2001
From: Stefan Roese
Date: Wed, 1 Jun 2016 12:43:32 +0200
Subject: dmaengine: mv_xor: Fix incorrect offset in dma_map_page()

Upon booting, I occasionally spotted some BUGs triggered by the internal
DMA test routine executed upon driver probing. They were detected by
SLUB_DEBUG ("Freechain corrupt" or "Redzone overwritten"). Tracking this
down located a problem in passing 0 as the offset to dma_map_page():
kmalloc, especially when used with SLUB_DEBUG, may return an address
that is not page aligned.

This patch fixes the issue by passing the correct offset to
dma_map_page().

Tested on a custom Armada XP board.

Signed-off-by: Stefan Roese
Cc: Thomas Petazzoni
Cc: Gregory CLEMENT
Cc: Marcin Wojtas
Signed-off-by: Vinod Koul
---
 drivers/dma/mv_xor.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 25d1dad..d0446a7 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -703,8 +703,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
 		goto free_resources;
 	}
 
-	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
-			       PAGE_SIZE, DMA_TO_DEVICE);
+	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
+			       (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+			       DMA_TO_DEVICE);
 	unmap->addr[0] = src_dma;
 
 	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
@@ -714,8 +715,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}
 	unmap->to_cnt = 1;
 
-	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
+			       (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+			       DMA_FROM_DEVICE);
 	unmap->addr[1] = dest_dma;
 
 	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
--
cgit v1.1
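
The fix above generalizes to any buffer obtained from kmalloc(): such a buffer is not
guaranteed to be page aligned, so the in-page offset must be passed to dma_map_page()
rather than 0. A minimal kernel-style sketch follows; the helper is illustrative and
not part of the patch, and offset_in_page() is simply equivalent to the open-coded
"(size_t)buf & ~PAGE_MASK" expression used in the diff:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Illustrative helper (not from the patch): map a kmalloc()'ed buffer for
 * DMA.  kmalloc() does not guarantee page alignment (notably with SLUB
 * debugging enabled), so the in-page offset must be passed to
 * dma_map_page() instead of 0.
 */
static dma_addr_t map_kmalloc_buf(struct device *dev, void *buf, size_t len,
				  enum dma_data_direction dir)
{
	return dma_map_page(dev, virt_to_page(buf), offset_in_page(buf),
			    len, dir);
}
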