From ebb7fe2100e8d181c7c3911801444965384ba147 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 2 Jan 2017 17:26:21 +0200 Subject: dmaengine: dw: pci: remove LPE Audio DMA ID The LPE Audio driver should take care of its DMA IPs by itself. Keeping an ID like this in dw_dma_pci.c is wrong anyway since that block has two DMA controllers under one ID (like an MFD device). That's also why I didn't include the LPE Audio ID for Intel Merrifield previously. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/pci.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index 0ae6c3b..7558a9c 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -95,9 +95,8 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = { }; static const struct pci_device_id dw_pci_id_table[] = { - /* Medfield */ + /* Medfield (GPDMA) */ { PCI_VDEVICE(INTEL, 0x0827) }, - { PCI_VDEVICE(INTEL, 0x0830) }, /* BayTrail */ { PCI_VDEVICE(INTEL, 0x0f06) }, -- cgit v1.1 From a46a7634017ae5da1adf352c5bd0c66a772f20b5 Mon Sep 17 00:00:00 2001 From: Jarkko Nikula Date: Tue, 17 Jan 2017 13:57:25 +0200 Subject: dmaengine: dw: Fix data corruption in large device to memory transfers When transferring more data than the maximum block size supported by the HW multiplied by the source width, the transfer is split into smaller chunks. Currently the code calculates the memory width, and thus the alignment, before splitting for both memory-to-device and device-to-memory transfers. For memory-to-device transfers this works fine since alignment is preserved through the splitting and the split blocks are still memory width aligned. However, in device-to-memory transfers the alignment breaks when the maximum block size multiplied by the register width doesn't have the same alignment as the buffer. For instance, transferring 4100 bytes (32-bit aligned) from an 8-bit register on a DW DMA that has a maximum block size of 4095 elements hits this case, and an attempt to do such a transfer caused data corruption. Fix this by calculating and setting the destination memory width after splitting, using the split block alignment and length.
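To make the arithmetic concrete, here is a minimal userspace sketch of the width calculation, assuming a 32-bit memory bus (data_width of 4 bytes) and the 4100-byte / 4095-element case from the description above; ffs() stands in for the kernel's __ffs() (it is 1-based, hence the -1), and the buffer address is a made-up aligned value.

/*
 * Standalone illustration (not part of the patch) of the destination width
 * chosen before vs. after splitting a device-to-memory transfer.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int mem = 0x1000;	/* 32-bit aligned destination buffer */
	unsigned int len = 4100;	/* total device-to-memory length */
	unsigned int data_width = 4;	/* assumed memory bus width in bytes */
	unsigned int reg_width = 0;	/* 8-bit device register: 1 << 0 */
	unsigned int block_size = 4095;	/* max elements per hardware block */

	/* First chunk is capped by the block size, in register-width units */
	unsigned int dlen = block_size << reg_width;	/* 4095 bytes */

	/* Old behaviour: destination width chosen once from the full length */
	unsigned int old_width = ffs(data_width | mem | len) - 1;
	/* Fixed behaviour: destination width chosen per chunk from dlen */
	unsigned int new_width = ffs(data_width | mem | dlen) - 1;

	printf("chunk of %u bytes: old dst width %u byte(s), fixed dst width %u byte(s)\n",
	       dlen, 1u << old_width, 1u << new_width);
	return 0;
}

With the single up-front calculation the 4095-byte chunk is programmed with a 4-byte destination width, which does not divide 4095; recalculating per chunk, as the fix below does, drops that chunk to byte-wide accesses.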
Signed-off-by: Jarkko Nikula Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index e5adf5d..45bb608 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -789,17 +789,13 @@ slave_sg_todev_fill_desc: lli_write(desc, sar, mem); lli_write(desc, dar, reg); - lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); if ((len >> mem_width) > dwc->block_size) { dlen = dwc->block_size << mem_width; - mem += dlen; - len -= dlen; } else { dlen = len; - len = 0; } - lli_write(desc, ctlhi, dlen >> mem_width); + lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); desc->len = dlen; if (!first) { @@ -809,6 +805,9 @@ slave_sg_todev_fill_desc: list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; + + mem += dlen; + len -= dlen; total_len += dlen; if (len) @@ -833,8 +832,6 @@ slave_sg_todev_fill_desc: mem = sg_dma_address(sg); len = sg_dma_len(sg); - mem_width = __ffs(data_width | mem | len); - slave_sg_fromdev_fill_desc: desc = dwc_desc_get(dwc); if (!desc) @@ -842,16 +839,14 @@ slave_sg_fromdev_fill_desc: lli_write(desc, sar, reg); lli_write(desc, dar, mem); - lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); if ((len >> reg_width) > dwc->block_size) { dlen = dwc->block_size << reg_width; - mem += dlen; - len -= dlen; } else { dlen = len; - len = 0; } lli_write(desc, ctlhi, dlen >> reg_width); + mem_width = __ffs(data_width | mem | dlen); + lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); desc->len = dlen; if (!first) { @@ -861,6 +856,9 @@ slave_sg_fromdev_fill_desc: list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; + + mem += dlen; + len -= dlen; total_len += dlen; if (len) -- cgit v1.1 From 08d62f58aa2587132a930afbe8664379b430e2dd Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 17 Jan 2017 13:57:26 +0200 Subject: dmaengine: dw: register IRQ and DMA pool with instance ID It is really useful not only for debugging to have an IRQ line and DMA pool labeled with driver and its instance ID. Do this for DesignWare DMA driver. All current users of this IP would be enhanced later on. 
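Purely for illustration, a tiny userspace sketch of the label format the patch below introduces; the instance IDs here are made up, in the driver they come from chip->id.

/* Sketch of the per-instance IRQ and DMA pool label format. */
#include <stdio.h>

int main(void)
{
	char name[20];	/* same size as the new dw_dma.name field */
	int id;

	for (id = 0; id < 2; id++) {
		snprintf(name, sizeof(name), "dw:dmac%d", id);
		printf("instance %d -> IRQ/pool label \"%s\"\n", id, name);
	}
	return 0;
}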
Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 7 +++++-- drivers/dma/dw/pci.c | 1 + drivers/dma/dw/platform.c | 1 + drivers/dma/dw/regs.h | 1 + include/linux/dma/dw.h | 2 ++ 5 files changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 45bb608..8751624 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1502,8 +1502,11 @@ int dw_dma_probe(struct dw_dma_chip *chip) /* Force dma off, just in case */ dw_dma_off(dw); + /* Device and instance ID for IRQ and DMA pool */ + snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id); + /* Create a pool of consistent memory blocks for hardware descriptors */ - dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, + dw->desc_pool = dmam_pool_create(dw->name, chip->dev, sizeof(struct dw_desc), 4, 0); if (!dw->desc_pool) { dev_err(chip->dev, "No memory for descriptors dma pool\n"); @@ -1514,7 +1517,7 @@ int dw_dma_probe(struct dw_dma_chip *chip) tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, - "dw_dmac", dw); + dw->name, dw); if (err) goto err_pdata; diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index 7558a9c..4719437 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -47,6 +47,7 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) return -ENOMEM; chip->dev = &pdev->dev; + chip->id = pdev->devfn; chip->regs = pcim_iomap_table(pdev)[0]; chip->irq = pdev->irq; chip->pdata = pdata; diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index b1655e4..c639c60 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -202,6 +202,7 @@ static int dw_probe(struct platform_device *pdev) pdata = dw_dma_parse_dt(pdev); chip->dev = dev; + chip->id = pdev->id; chip->pdata = pdata; chip->clk = devm_clk_get(chip->dev, "hclk"); diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 4e0128c..6087eba 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h @@ -270,6 +270,7 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) struct dw_dma { struct dma_device dma; + char name[20]; void __iomem *regs; struct dma_pool *desc_pool; struct tasklet_struct tasklet; diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h index ccfd0c3..b63b258 100644 --- a/include/linux/dma/dw.h +++ b/include/linux/dma/dw.h @@ -23,6 +23,7 @@ struct dw_dma; /** * struct dw_dma_chip - representation of DesignWare DMA controller hardware * @dev: struct device of the DMA controller + * @id: instance ID * @irq: irq line * @regs: memory mapped I/O space * @clk: hclk clock @@ -31,6 +32,7 @@ struct dw_dma; */ struct dw_dma_chip { struct device *dev; + int id; int irq; void __iomem *regs; struct clk *clk; -- cgit v1.1 From be242f4e2e551e55d1f76504a8adcb703e0127c3 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 17 Jan 2017 13:57:27 +0200 Subject: dmaengine: dw: replace convert_burst() with one liner Replace convert_burst() with one liner in place. The change simplifies further extension of the driver to cover new DMA controller hardware. 
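The mapping the one-liner performs (1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, with burst size 2 unsupported, as the comment in the patch below notes) can be checked with a small standalone program; fls() is open-coded here to mirror the kernel helper's 1-based most-significant-bit semantics.

/* Userspace check of the burst-size encoding used by dwc_config(). */
#include <stdio.h>

static unsigned int fls_emul(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;	/* position of the most significant set bit, 1-based */
	}
	return r;
}

int main(void)
{
	static const unsigned int bursts[] = { 1, 4, 8, 16 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int maxburst = bursts[i];
		/* The one-liner from the patch, with s = 2 for DesignWare */
		unsigned int enc = maxburst > 1 ? fls_emul(maxburst) - 2 : 0;

		printf("maxburst %2u -> CTL encoding %u\n", maxburst, enc);
	}
	return 0;
}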
Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 8751624..e3749ec 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -901,25 +901,18 @@ bool dw_dma_filter(struct dma_chan *chan, void *param) } EXPORT_SYMBOL_GPL(dw_dma_filter); -/* - * Fix sconfig's burst size according to dw_dmac. We need to convert them as: - * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. - * - * NOTE: burst size 2 is not supported by controller. - * - * This can be done by finding least significant bit set: n & (n - 1) - */ -static inline void convert_burst(u32 *maxburst) -{ - if (*maxburst > 1) - *maxburst = fls(*maxburst) - 2; - else - *maxburst = 0; -} - static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dma_slave_config *sc = &dwc->dma_sconfig; + /* + * Fix sconfig's burst size according to dw_dmac. We need to convert + * them as: + * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. + * + * NOTE: burst size 2 is not supported by DesignWare controller. + */ + u32 s = 2; /* Check if chan will be configured for slave transfers */ if (!is_slave_direction(sconfig->direction)) @@ -928,8 +921,8 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); dwc->direction = sconfig->direction; - convert_burst(&dwc->dma_sconfig.src_maxburst); - convert_burst(&dwc->dma_sconfig.dst_maxburst); + sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0; + sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0; return 0; } -- cgit v1.1 From f4aa3183c36d2f32695b0e7f0ce7296568729830 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 17 Jan 2017 13:57:28 +0200 Subject: dmaengine: dw: extract dwc_chan_pause() for future use iDMA 32-bit has a special handling of the FIFO during pause() / terminate_all(). Prepare code to implement that. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index e3749ec..4a558d5 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -927,22 +927,26 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) return 0; } -static int dwc_pause(struct dma_chan *chan) +static void dwc_chan_pause(struct dw_dma_chan *dwc) { - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - unsigned long flags; unsigned int count = 20; /* timeout iterations */ u32 cfglo; - spin_lock_irqsave(&dwc->lock, flags); - cfglo = channel_readl(dwc, CFG_LO); channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) udelay(2); set_bit(DW_DMA_IS_PAUSED, &dwc->flags); +} +static int dwc_pause(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&dwc->lock, flags); + dwc_chan_pause(dwc); spin_unlock_irqrestore(&dwc->lock, flags); return 0; -- cgit v1.1 From 2d248812aa14d5f9bc6fc11a222c722524f25159 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 17 Jan 2017 13:57:29 +0200 Subject: dmaengine: dw: introduce block2bytes() and bytes2block() The newly introduced helpers prepare driver to support new DMA controller hardware. 
While here, introduce DWC_CTLH_BLOCK_TS() macro as well. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 52 ++++++++++++++++++++++++++++++--------------------- drivers/dma/dw/regs.h | 6 ++++-- 2 files changed, 35 insertions(+), 23 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 4a558d5..0186d67 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -184,6 +184,27 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) cpu_relax(); } +static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes, + unsigned int width, size_t *len) +{ + u32 block; + + if ((bytes >> width) > dwc->block_size) { + block = dwc->block_size; + *len = block << width; + } else { + block = bytes >> width; + *len = bytes; + } + + return block; +} + +static size_t block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width) +{ + return DWC_CTLH_BLOCK_TS(block) << width; +} + /*----------------------------------------------------------------------*/ /* Perform single block transfer */ @@ -332,7 +353,7 @@ static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) u32 ctlhi = channel_readl(dwc, CTL_HI); u32 ctllo = channel_readl(dwc, CTL_LO); - return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7)); + return block2bytes(dwc, ctlhi, ctllo >> 4 & 7); } static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) @@ -692,10 +713,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | DWC_CTLL_FC_M2M; prev = first = NULL; - for (offset = 0; offset < len; offset += xfer_count << src_width) { - xfer_count = min_t(size_t, (len - offset) >> src_width, - dwc->block_size); - + for (offset = 0; offset < len; offset += xfer_count) { desc = dwc_desc_get(dwc); if (!desc) goto err_desc_get; @@ -703,8 +721,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, lli_write(desc, sar, src + offset); lli_write(desc, dar, dest + offset); lli_write(desc, ctllo, ctllo); - lli_write(desc, ctlhi, xfer_count); - desc->len = xfer_count << src_width; + lli_write(desc, ctlhi, bytes2block(dwc, len - offset, src_width, &xfer_count)); + desc->len = xfer_count; if (!first) { first = desc; @@ -775,7 +793,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; - u32 len, dlen, mem; + u32 len, mem; + size_t dlen; mem = sg_dma_address(sg); len = sg_dma_len(sg); @@ -789,12 +808,7 @@ slave_sg_todev_fill_desc: lli_write(desc, sar, mem); lli_write(desc, dar, reg); - if ((len >> mem_width) > dwc->block_size) { - dlen = dwc->block_size << mem_width; - } else { - dlen = len; - } - lli_write(desc, ctlhi, dlen >> mem_width); + lli_write(desc, ctlhi, bytes2block(dwc, len, mem_width, &dlen)); lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); desc->len = dlen; @@ -827,7 +841,8 @@ slave_sg_todev_fill_desc: for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; - u32 len, dlen, mem; + u32 len, mem; + size_t dlen; mem = sg_dma_address(sg); len = sg_dma_len(sg); @@ -839,12 +854,7 @@ slave_sg_fromdev_fill_desc: lli_write(desc, sar, reg); lli_write(desc, dar, mem); - if ((len >> reg_width) > dwc->block_size) { - dlen = dwc->block_size << reg_width; - } else { - dlen = len; - } - lli_write(desc, ctlhi, dlen >> reg_width); + lli_write(desc, ctlhi, bytes2block(dwc, len, reg_width, &dlen)); mem_width = __ffs(data_width | mem | dlen); lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); desc->len = dlen; diff 
--git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 6087eba..ba26d6b 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h @@ -9,6 +9,7 @@ * published by the Free Software Foundation. */ +#include #include #include @@ -170,8 +171,9 @@ enum dw_dma_msize { #define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */ /* Bitfields in CTL_HI */ -#define DWC_CTLH_DONE 0x00001000 -#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff +#define DWC_CTLH_BLOCK_TS_MASK GENMASK(11, 0) +#define DWC_CTLH_BLOCK_TS(x) ((x) & DWC_CTLH_BLOCK_TS_MASK) +#define DWC_CTLH_DONE (1 << 12) /* Bitfields in CFG_LO */ #define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */ -- cgit v1.1 From a9f4d1b8314396cc09301fa3ab954167ff81a46b Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 17 Jan 2017 13:57:30 +0200 Subject: dmaengine: dw: introduce register mappings for iDMA 32-bit The integrated DMA (iDMA 32-bit) is an Intel-designed DMA controller which mimics Synopsys DesignWare DMA. This patch appends the register mappings for the parts that differ slightly from the DesignWare hardware. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/regs.h | 52 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index ba26d6b..32a3287 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h @@ -3,6 +3,7 @@ * * Copyright (C) 2005-2007 Atmel Corporation * Copyright (C) 2010-2011 ST Microelectronics + * Copyright (C) 2016 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -13,6 +14,8 @@ #include #include +#include + #include "internal.h" #define DW_DMA_MAX_NR_REQUESTS 16 @@ -86,9 +89,9 @@ struct dw_dma_regs { DW_REG(ID); DW_REG(TEST); - /* reserved */ - DW_REG(__reserved0); - DW_REG(__reserved1); + /* iDMA 32-bit support */ + DW_REG(CLASS_PRIORITY0); + DW_REG(CLASS_PRIORITY1); /* optional encoded params, 0x3c8..0x3f7 */ u32 __reserved; @@ -100,6 +103,17 @@ struct dw_dma_regs { /* top-level parameters */ u32 DW_PARAMS; + + /* component ID */ + u32 COMP_TYPE; + u32 COMP_VERSION; + + /* iDMA 32-bit support */ + DW_REG(FIFO_PARTITION0); + DW_REG(FIFO_PARTITION1); + + DW_REG(SAI_ERR); + DW_REG(GLOBAL_CFG); }; /* @@ -216,6 +230,33 @@ enum dw_dma_msize { /* Bitfields in CFG */ #define DW_CFG_DMA_EN (1 << 0) +/* iDMA 32-bit support */ + +/* Bitfields in CTL_HI */ +#define IDMA32C_CTLH_BLOCK_TS_MASK GENMASK(16, 0) +#define IDMA32C_CTLH_BLOCK_TS(x) ((x) & IDMA32C_CTLH_BLOCK_TS_MASK) +#define IDMA32C_CTLH_DONE (1 << 17) + +/* Bitfields in CFG_LO */ +#define IDMA32C_CFGL_DST_BURST_ALIGN (1 << 0) /* dst burst align */ +#define IDMA32C_CFGL_SRC_BURST_ALIGN (1 << 1) /* src burst align */ +#define IDMA32C_CFGL_CH_DRAIN (1 << 10) /* drain FIFO */ +#define IDMA32C_CFGL_DST_OPT_BL (1 << 20) /* optimize dst burst length */ +#define IDMA32C_CFGL_SRC_OPT_BL (1 << 21) /* optimize src burst length */ + +/* Bitfields in CFG_HI */ +#define IDMA32C_CFGH_SRC_PER(x) ((x) << 0) +#define IDMA32C_CFGH_DST_PER(x) ((x) << 4) +#define IDMA32C_CFGH_RD_ISSUE_THD(x) ((x) << 8) +#define IDMA32C_CFGH_RW_ISSUE_THD(x) ((x) << 18) +#define IDMA32C_CFGH_SRC_PER_EXT(x) ((x) << 28) /* src peripheral extension */ +#define IDMA32C_CFGH_DST_PER_EXT(x) ((x) << 30) /* dst peripheral extension */ + +/* Bitfields in FIFO_PARTITION */ +#define IDMA32C_FP_PSIZE_CH0(x) ((x) << 0) +#define IDMA32C_FP_PSIZE_CH1(x) ((x) << 13)
+#define IDMA32C_FP_UPDATE (1 << 26) + enum dw_dmac_flags { DW_DMA_IS_CYCLIC = 0, DW_DMA_IS_SOFT_LLP = 1, @@ -296,6 +337,11 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) #define dma_writel(dw, name, val) \ dma_writel_native((val), &(__dw_regs(dw)->name)) +#define idma32_readq(dw, name) \ + hi_lo_readq(&(__dw_regs(dw)->name)) +#define idma32_writeq(dw, name, val) \ + hi_lo_writeq((val), &(__dw_regs(dw)->name)) + #define channel_set_bit(dw, reg, mask) \ dma_writel(dw, reg, ((mask) << 8) | (mask)) #define channel_clear_bit(dw, reg, mask) \ -- cgit v1.1 From 199244d69458770770890f8b5988a1b6cad701ad Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 17 Jan 2017 13:57:31 +0200 Subject: dmaengine: dw: add support of iDMA 32-bit hardware iDMA 32-bit is an Intel-designed DMA controller that behaves like Synopsys DesignWare DMA. This patch adds support for the new Intel hardware. Since iDMA 32-bit has no autoconfiguration, the platform code must provide platform data to dw_dma_probe(). By default the full FIFO (1024 bytes) is assigned to channel 0. Here we slice the FIFO into equal parts between the channels for the iDMA 32-bit case. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 101 +++++++++++++++++++++++++++++++++---- include/linux/platform_data/dma-dw.h | 2 + 2 files changed, 94 insertions(+), 9 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 0186d67..e500950 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -138,16 +138,32 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) dwc->descs_allocated--; } -static void dwc_initialize(struct dw_dma_chan *dwc) +static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc) +{ + u32 cfghi = 0; + u32 cfglo = 0; + + /* Set default burst alignment */ + cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN; + + /* Low 4 bits of the request lines */ + cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf); + cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf); + + /* Request line extension (2 bits) */ + cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3); + cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3); + + channel_writel(dwc, CFG_LO, cfglo); + channel_writel(dwc, CFG_HI, cfghi); +} + +static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc) { - struct dw_dma *dw = to_dw_dma(dwc->chan.device); u32 cfghi = DWC_CFGH_FIFO_MODE; u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); bool hs_polarity = dwc->dws.hs_polarity; - if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) - return; - cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id); cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id); @@ -156,6 +172,19 @@ static void dwc_initialize(struct dw_dma_chan *dwc) channel_writel(dwc, CFG_LO, cfglo); channel_writel(dwc, CFG_HI, cfghi); +} + +static void dwc_initialize(struct dw_dma_chan *dwc) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + + if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) + return; + + if (dw->pdata->is_idma32) + dwc_initialize_chan_idma32(dwc); + else + dwc_initialize_chan_dw(dwc); /* Enable interrupts */ channel_set_bit(dw, MASK.XFER, dwc->mask); @@ -187,8 +216,13 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes, unsigned int width, size_t *len) { + struct dw_dma *dw = to_dw_dma(dwc->chan.device); u32 block; + /* Always in bytes for iDMA 32-bit */ + if (dw->pdata->is_idma32) + width = 0; + if ((bytes >>
width) > dwc->block_size) { block = dwc->block_size; *len = block << width; @@ -202,6 +236,11 @@ static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes, static size_t block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width) { + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + + if (dw->pdata->is_idma32) + return IDMA32C_CTLH_BLOCK_TS(block); + return DWC_CTLH_BLOCK_TS(block) << width; } @@ -915,14 +954,16 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dma_slave_config *sc = &dwc->dma_sconfig; + struct dw_dma *dw = to_dw_dma(chan->device); /* * Fix sconfig's burst size according to dw_dmac. We need to convert * them as: * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. * * NOTE: burst size 2 is not supported by DesignWare controller. + * iDMA 32-bit supports it. */ - u32 s = 2; + u32 s = dw->pdata->is_idma32 ? 1 : 2; /* Check if chan will be configured for slave transfers */ if (!is_slave_direction(sconfig->direction)) @@ -937,12 +978,19 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) return 0; } -static void dwc_chan_pause(struct dw_dma_chan *dwc) +static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain) { + struct dw_dma *dw = to_dw_dma(dwc->chan.device); unsigned int count = 20; /* timeout iterations */ u32 cfglo; cfglo = channel_readl(dwc, CFG_LO); + if (dw->pdata->is_idma32) { + if (drain) + cfglo |= IDMA32C_CFGL_CH_DRAIN; + else + cfglo &= ~IDMA32C_CFGL_CH_DRAIN; + } channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) udelay(2); @@ -956,7 +1004,7 @@ static int dwc_pause(struct dma_chan *chan) unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); - dwc_chan_pause(dwc); + dwc_chan_pause(dwc, false); spin_unlock_irqrestore(&dwc->lock, flags); return 0; @@ -998,6 +1046,8 @@ static int dwc_terminate_all(struct dma_chan *chan) clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); + dwc_chan_pause(dwc, true); + dwc_chan_disable(dw, dwc); dwc_chan_resume(dwc); @@ -1090,6 +1140,32 @@ static void dwc_issue_pending(struct dma_chan *chan) /*----------------------------------------------------------------------*/ +/* + * Program FIFO size of channels. + * + * By default full FIFO (1024 bytes) is assigned to channel 0. Here we + * slice FIFO on equal parts between channels. 
+ */ +static void idma32_fifo_partition(struct dw_dma *dw) +{ + u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) | + IDMA32C_FP_UPDATE; + u64 fifo_partition = 0; + + if (!dw->pdata->is_idma32) + return; + + /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */ + fifo_partition |= value << 0; + + /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */ + fifo_partition |= value << 32; + + /* Program FIFO Partition registers - 128 bytes for each channel */ + idma32_writeq(dw, FIFO_PARTITION1, fifo_partition); + idma32_writeq(dw, FIFO_PARTITION0, fifo_partition); +} + static void dw_dma_off(struct dw_dma *dw) { unsigned int i; @@ -1509,8 +1585,13 @@ int dw_dma_probe(struct dw_dma_chip *chip) /* Force dma off, just in case */ dw_dma_off(dw); + idma32_fifo_partition(dw); + /* Device and instance ID for IRQ and DMA pool */ - snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id); + if (pdata->is_idma32) + snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", chip->id); + else + snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id); /* Create a pool of consistent memory blocks for hardware descriptors */ dw->desc_pool = dmam_pool_create(dw->name, chip->dev, @@ -1673,6 +1754,8 @@ int dw_dma_enable(struct dw_dma_chip *chip) { struct dw_dma *dw = chip->dw; + idma32_fifo_partition(dw); + dw_dma_on(dw); return 0; } diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index e69e415..896cb71 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -41,6 +41,7 @@ struct dw_dma_slave { * @is_private: The device channels should be marked as private and not for * by the general purpose DMA channel allocator. * @is_memcpy: The device channels do support memory-to-memory transfers. + * @is_idma32: The type of the DMA controller is iDMA32 * @chan_allocation_order: Allocate channels starting from 0 or 7 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. * @block_size: Maximum block size supported by the controller @@ -53,6 +54,7 @@ struct dw_dma_platform_data { unsigned int nr_channels; bool is_private; bool is_memcpy; + bool is_idma32; #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ unsigned char chan_allocation_order; -- cgit v1.1 From f7c799e950f96191a16f18606e43e6f861b2a361 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 17 Jan 2017 13:57:32 +0200 Subject: dmaengine: dw: we do support Merrifield SoC in PCI mode Intel Merrifield platform contains Intel integrated DMA (iDMA 32-bit) which has a slightly different register mapping, e.g. some bits in CTL_* and CFG_* channel registers, and has to use platform data since there is no autoconfiguration. The iDMA 32-bit specification is available in the publicly available documentation for Intel Braswell and BayTrail SoCs as LPE Audio DMA. 
Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/pci.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index 4719437..7778ed7 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -15,6 +15,18 @@ #include "internal.h" +static struct dw_dma_platform_data mrfld_pdata = { + .nr_channels = 8, + .is_private = true, + .is_memcpy = true, + .is_idma32 = true, + .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, + .chan_priority = CHAN_PRIORITY_ASCENDING, + .block_size = 131071, + .nr_masters = 1, + .data_width = {4}, +}; + static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { const struct dw_dma_platform_data *pdata = (void *)pid->driver_data; @@ -103,6 +115,9 @@ static const struct pci_device_id dw_pci_id_table[] = { { PCI_VDEVICE(INTEL, 0x0f06) }, { PCI_VDEVICE(INTEL, 0x0f40) }, + /* Merrifield iDMA 32-bit (GPDMA) */ + { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&mrfld_pdata }, + /* Braswell */ { PCI_VDEVICE(INTEL, 0x2286) }, { PCI_VDEVICE(INTEL, 0x22c0) }, -- cgit v1.1
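One value in mrfld_pdata above worth spelling out: block_size is 131071, which matches the largest value representable in the 17-bit IDMA32C_CTLH_BLOCK_TS field defined earlier, and per bytes2block() the iDMA 32-bit block is counted in bytes rather than elements. A quick standalone check of that relationship:

/* Sanity check: max block a 17-bit BLOCK_TS field (GENMASK(16, 0)) can hold. */
#include <stdio.h>

int main(void)
{
	unsigned int block_ts_mask = (1u << 17) - 1;	/* 0x1ffff */

	printf("largest iDMA 32-bit block: %u bytes\n", block_ts_mask);	/* 131071 */
	return 0;
}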