From db8196df4bb6f117caa163aa73b0f16fd62290bd Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Thu, 13 Oct 2011 22:34:23 +0530
Subject: dmaengine: move drivers to dma_transfer_direction

Fix up the usage of dma direction by introducing dma_transfer_direction;
this patch moves drivers/dma/* to use the new enum.

Cc: Jassi Brar
Cc: Russell King
Cc: Viresh Kumar
Cc: Linus Walleij
Cc: Nicolas Ferre
Cc: Mika Westerberg
Cc: H Hartley Sweeten
Cc: Li Yang
Cc: Zhang Wei
Cc: Sascha Hauer
Cc: Guennadi Liakhovetski
Cc: Shawn Guo
Cc: Yong Wang
Cc: Tomoya MORINAGA
Cc: Boojin Kim
Cc: Barry Song
Acked-by: Mika Westerberg
Acked-by: Linus Walleij
Acked-by: Viresh Kumar
Acked-by: Nicolas Ferre
Signed-off-by: Vinod Koul
---
 drivers/dma/imx-sdma.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'drivers/dma/imx-sdma.c')

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index eab1fe7..065de54 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -246,7 +246,7 @@ struct sdma_engine;
 struct sdma_channel {
 	struct sdma_engine		*sdma;
 	unsigned int			channel;
-	enum dma_data_direction		direction;
+	enum dma_transfer_direction	direction;
 	enum sdma_peripheral_type	peripheral_type;
 	unsigned int			event_id0;
 	unsigned int			event_id1;
@@ -649,7 +649,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 	int ret;
 
-	if (sdmac->direction == DMA_FROM_DEVICE) {
+	if (sdmac->direction == DMA_DEV_TO_MEM) {
 		load_address = sdmac->pc_from_device;
 	} else {
 		load_address = sdmac->pc_to_device;
@@ -910,7 +910,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 		struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned int sg_len, enum dma_transfer_direction direction,
 		unsigned long flags)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -1007,7 +1007,7 @@ err_out:
 
 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-		size_t period_len, enum dma_data_direction direction)
+		size_t period_len, enum dma_transfer_direction direction)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
@@ -1092,7 +1092,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		sdma_disable_channel(sdmac);
 		return 0;
 	case DMA_SLAVE_CONFIG:
-		if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
 			sdmac->per_address = dmaengine_cfg->src_addr;
 			sdmac->watermark_level = dmaengine_cfg->src_maxburst;
 			sdmac->word_size = dmaengine_cfg->src_addr_width;
--
cgit v1.1
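For readers tracking the conversion, the old mapping-API values and the new slave-transfer values correspond as sketched below. The helper is purely illustrative (it is not part of any patch here) and assumes only the enum definitions from <linux/dma-direction.h> and <linux/dmaengine.h>; the DMA mapping API itself keeps using enum dma_data_direction, only the dmaengine slave and cyclic prep calls move to enum dma_transfer_direction.

#include <linux/dma-direction.h>
#include <linux/dmaengine.h>

/* Illustration only: how the old direction values line up with the new enum. */
static enum dma_transfer_direction
to_transfer_direction(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:		/* memory -> peripheral */
		return DMA_MEM_TO_DEV;
	case DMA_FROM_DEVICE:		/* peripheral -> memory */
		return DMA_DEV_TO_MEM;
	default:			/* no slave-transfer equivalent */
		return DMA_TRANS_NONE;
	}
}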
From e69664336d6400cc1685716ee5f04ef74e85703e Mon Sep 17 00:00:00 2001
From: Huang Shijie
Date: Fri, 18 Nov 2011 16:38:02 +0800
Subject: IMX/DMA: set the DMA direction in sdma_control()

Set the right DMA direction in sdma_control(); otherwise we get the
wrong log output when DYNAMIC_DEBUG is enabled.

Signed-off-by: Huang Shijie
Signed-off-by: Vinod Koul
---
 drivers/dma/imx-sdma.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/dma/imx-sdma.c')

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 2e03571..d9e5933 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1102,6 +1102,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
 			sdmac->word_size = dmaengine_cfg->dst_addr_width;
 		}
+		sdmac->direction = dmaengine_cfg->direction;
 		return sdma_config_channel(sdmac);
 	default:
 		return -ENOSYS;
--
cgit v1.1
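To see where the direction recorded by the patch above comes from, here is a rough client-side sketch using the generic dmaengine helpers; the function name, FIFO address and burst size are invented for the example.

#include <linux/dmaengine.h>

/* Illustrative client code: configure a channel for peripheral-to-memory RX. */
static int example_setup_rx_channel(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,	/* peripheral -> memory */
		.src_addr	= fifo_addr,		/* RX FIFO bus address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 4,
	};

	/*
	 * This ends up in sdma_control(chan, DMA_SLAVE_CONFIG, ...), which
	 * after the patch above also latches cfg.direction into
	 * sdmac->direction.
	 */
	return dmaengine_slave_config(chan, &cfg);
}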
From ab59a510c6ad6b3add5125df64843be754782de6 Mon Sep 17 00:00:00 2001
From: Huang Shijie
Date: Fri, 2 Dec 2011 10:16:25 +0800
Subject: IMX/SDMA: save the real count for one DMA transaction

When the SDMA is used by the UART driver (such as on imx6q), the
following situation can occur: assume the RX DMA buffer is set to 64
bytes. The RX DMA buffer has received some data, but is not yet full.
If IDDMAEN (UCR4[6]) is enabled, the SDMA controller then receives an
aging DMA request. In this case the UART driver needs to know the
count of bytes actually received, so it can push them to the upper
layer.

Add two new fields to struct sdma_channel, and update the `residue`
reported by sdma_tx_status().

Signed-off-by: Huang Shijie
Signed-off-by: Vinod Koul
---
 drivers/dma/imx-sdma.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'drivers/dma/imx-sdma.c')

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d9e5933..f59fd8f 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -268,6 +268,8 @@ struct sdma_channel {
 	struct dma_async_tx_descriptor	desc;
 	dma_cookie_t			last_completed;
 	enum dma_status			status;
+	unsigned int			chn_count;
+	unsigned int			chn_real_count;
 };
 
 #define IMX_DMA_SG_LOOP		(1 << 0)
@@ -503,6 +505,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 	struct sdma_buffer_descriptor *bd;
 	int i, error = 0;
 
+	sdmac->chn_real_count = 0;
 	/*
 	 * non loop mode. Iterate over all descriptors, collect
 	 * errors and call callback function
@@ -512,6 +515,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 
 		if (bd->mode.status & (BD_DONE | BD_RROR))
 			error = -EIO;
+		sdmac->chn_real_count += bd->mode.count;
 	}
 
 	if (error)
@@ -519,9 +523,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 	else
 		sdmac->status = DMA_SUCCESS;
 
+	sdmac->last_completed = sdmac->desc.cookie;
 	if (sdmac->desc.callback)
 		sdmac->desc.callback(sdmac->desc.callback_param);
-	sdmac->last_completed = sdmac->desc.cookie;
 }
 
 static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
@@ -941,6 +945,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 		goto err_out;
 	}
 
+	sdmac->chn_count = 0;
 	for_each_sg(sgl, sg, sg_len, i) {
 		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
 		int param;
@@ -957,6 +962,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 		}
 
 		bd->mode.count = count;
+		sdmac->chn_count += count;
 
 		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
 			ret = -EINVAL;
@@ -1120,7 +1126,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 
 	last_used = chan->cookie;
 
-	dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+	dma_set_tx_state(txstate, sdmac->last_completed, last_used,
+			sdmac->chn_count - sdmac->chn_real_count);
 
 	return sdmac->status;
 }
--
cgit v1.1

From f69f2e264f6388df6d3cae45dd67ddfd52aaa14b Mon Sep 17 00:00:00 2001
From: Haitao Zhang
Date: Sun, 1 Jan 2012 11:30:06 +0800
Subject: dma/imx-sdma: save irq flags when using spin_lock in sdma_tx_submit

mx53_loco: fix deadlock report from sdma_tx_submit() during boot

BugLink: http://bugs.launchpad.net/bugs/878701

Adjust to use spin_lock_irqsave()/spin_unlock_irqrestore(), so that it
is safe when called from interrupt context.

Signed-off-by: Haitao Zhang
Signed-off-by: Eric Miao
Signed-off-by: Richard Zhao
Acked-by: Sascha Hauer
Signed-off-by: Vinod Koul
---
 drivers/dma/imx-sdma.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'drivers/dma/imx-sdma.c')

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f59fd8f..a8af379 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -836,17 +836,18 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 
 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
+	unsigned long flags;
 	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	dma_cookie_t cookie;
 
-	spin_lock_irq(&sdmac->lock);
+	spin_lock_irqsave(&sdmac->lock, flags);
 
 	cookie = sdma_assign_cookie(sdmac);
 
 	sdma_enable_channel(sdma, sdmac->channel);
 
-	spin_unlock_irq(&sdmac->lock);
+	spin_unlock_irqrestore(&sdmac->lock, flags);
 
 	return cookie;
 }
--
cgit v1.1
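Tying the "save the real count" patch above to its intended consumer: a UART-style client could read back how many bytes actually arrived roughly as sketched below. The function and variable names are invented for the example; only dmaengine_tx_status() and struct dma_tx_state are taken from the dmaengine API.

#include <linux/dmaengine.h>

/*
 * Illustration only: compute the bytes actually received in a partially
 * filled RX DMA buffer, using the residue now reported by sdma_tx_status().
 */
static size_t example_rx_bytes_received(struct dma_chan *chan,
					dma_cookie_t rx_cookie,
					size_t rx_buf_len)
{
	struct dma_tx_state state;

	dmaengine_tx_status(chan, rx_cookie, &state);

	/* residue == chn_count - chn_real_count, i.e. bytes still missing */
	return rx_buf_len - state.residue;
}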