Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig            5
-rw-r--r--  drivers/dma/amba-pl08x.c       1
-rw-r--r--  drivers/dma/at_hdmac.c         4
-rw-r--r--  drivers/dma/dmaengine.c       14
-rw-r--r--  drivers/dma/imx-dma.c          9
-rw-r--r--  drivers/dma/ioat/dma.c        16
-rw-r--r--  drivers/dma/ioat/dma.h         6
-rw-r--r--  drivers/dma/ioat/dma_v2.c     12
-rw-r--r--  drivers/dma/ioat/dma_v2.h      4
-rw-r--r--  drivers/dma/ioat/dma_v3.c     49
-rw-r--r--  drivers/dma/iop-adma.c         4
-rw-r--r--  drivers/dma/mxs-dma.c         10
-rw-r--r--  drivers/dma/pl330.c           25
-rw-r--r--  drivers/dma/ste_dma40.c      323
-rw-r--r--  drivers/dma/ste_dma40_ll.h     2
15 files changed, 308 insertions(+), 176 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index cf9da36..ef378b5 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -91,11 +91,10 @@ config DW_DMAC
config AT_HDMAC
tristate "Atmel AHB DMA support"
- depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
+ depends on ARCH_AT91
select DMA_ENGINE
help
- Support the Atmel AHB DMA controller. This can be integrated in
- chips such as the Atmel AT91SAM9RL.
+ Support the Atmel AHB DMA controller.
config FSL_DMA
tristate "Freescale Elo and Elo Plus DMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index c301a8e..3d704ab 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1429,6 +1429,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* signal
*/
release_phy_channel(plchan);
+ plchan->phychan_hold = 0;
}
/* Dequeue jobs and free LLIs */
if (plchan->at) {
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7aa58d2..445fdf8 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -221,10 +221,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
vdbg_dump_regs(atchan);
- /* clear any pending interrupt */
- while (dma_readl(atdma, EBCISR))
- cpu_relax();
-
channel_writel(atchan, SADDR, 0);
channel_writel(atchan, DADDR, 0);
channel_writel(atchan, CTRLA, 0);
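The rationale the surrounding code suggests for dropping this poll: EBCISR is a clear-on-read status register covering the whole controller, so draining it while starting one channel can discard a completion belonging to another channel (an illustrative sketch of the race, not from the patch):

	/* chan0 completes              -> its EBCISR status bit is set
	 * atc_dostart() on chan1 reads EBCISR to "clear pending irqs"
	 *                              -> chan0's bit is read-cleared too,
	 *                                 and its interrupt is lost */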
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 767bcc3..2397f6f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -332,6 +332,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
}
EXPORT_SYMBOL(dma_find_channel);
+/**
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+ struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+ if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+ return NULL;
+
+ return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
/**
* dma_issue_pending_all - flush all pending operations across all channels
*/
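For context, the consumer of this helper is the network receive path; roughly, tcp_recvmsg() of this era picks up an offload channel like this (a caller-side sketch, not part of this patch; locking and error handling omitted):

	struct tcp_sock *tp = tcp_sk(sk);
	int ret;

	/* take a MEMCPY channel only if it accepts byte-aligned transfers */
	tp->ucopy.dma_chan = net_dma_find_channel();
	if (tp->ucopy.dma_chan)
		/* offload the copy to user space to the DMA engine */
		ret = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, skb,
						  offset, msg->msg_iov, used,
						  tp->ucopy.pinned_list);
	else
		/* fall back to an ordinary CPU copy */
		ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, used);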
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a45b5d2..bb787d8 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -571,11 +571,14 @@ static void imxdma_tasklet(unsigned long data)
if (desc->desc.callback)
desc->desc.callback(desc->desc.callback_param);
- dma_cookie_complete(&desc->desc);
-
- /* If we are dealing with a cyclic descriptor keep it on ld_active */
+ /* If we are dealing with a cyclic descriptor, keep it on ld_active
+ * and don't mark the descriptor as complete.
+ * Only in the non-cyclic case is it marked as complete.
+ */
if (imxdma_chan_is_doing_cyclic(imxdmac))
goto out;
+ else
+ dma_cookie_complete(&desc->desc);
/* Free 2D slot if it was an interleaved transfer */
if (imxdmac->enabled_2d) {
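The reason the cyclic case must skip dma_cookie_complete(): a cyclic (e.g. audio) transfer runs until it is explicitly stopped, so its cookie has to keep reporting in-progress. Sketch of the status path this protects (illustrative):

	/* dma_cookie_status() compares the cookie against the channel's
	 * last completed cookie; completing a cyclic cookie would make
	 * it report DMA_SUCCESS for a transfer that never ends */
	status = dma_cookie_status(chan, cookie, txstate);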
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 31493d8..73b2b65 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -546,9 +546,9 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
PCI_DMA_TODEVICE, flags, 0);
}
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
u64 completion;
completion = *chan->completion;
@@ -569,7 +569,7 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
}
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
- unsigned long *phys_complete)
+ dma_addr_t *phys_complete)
{
*phys_complete = ioat_get_current_completion(chan);
if (*phys_complete == chan->last_completion)
@@ -580,14 +580,14 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
return true;
}
-static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
struct list_head *_desc, *n;
struct dma_async_tx_descriptor *tx;
- dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
- __func__, phys_complete);
+ dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
+ __func__, (unsigned long long) phys_complete);
list_for_each_safe(_desc, n, &ioat->used_desc) {
struct ioat_desc_sw *desc;
@@ -652,7 +652,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
prefetch(chan->completion);
@@ -698,7 +698,7 @@ static void ioat1_timer_event(unsigned long data)
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
spin_unlock_bh(&ioat->desc_lock);
} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
spin_lock_bh(&ioat->desc_lock);
/* if we haven't made progress and we have already
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c7888bc..5e8fe01 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -88,7 +88,7 @@ struct ioatdma_device {
struct ioat_chan_common {
struct dma_chan common;
void __iomem *reg_base;
- unsigned long last_completion;
+ dma_addr_t last_completion;
spinlock_t cleanup_lock;
unsigned long state;
#define IOAT_COMPLETION_PENDING 0
@@ -310,7 +310,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device);
void __devexit ioat_dma_remove(struct ioatdma_device *device);
struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
void __iomem *iobase);
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
@@ -318,7 +318,7 @@ enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
- unsigned long *phys_complete);
+ dma_addr_t *phys_complete);
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
extern const struct sysfs_ops ioat_sysfs_ops;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index e8e110f..8689576 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -128,7 +128,7 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
spin_unlock_bh(&ioat->prep_lock);
}
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
struct dma_async_tx_descriptor *tx;
@@ -179,7 +179,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
spin_lock_bh(&chan->cleanup_lock);
if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -260,7 +260,7 @@ int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -275,7 +275,7 @@ void ioat2_timer_event(unsigned long data)
struct ioat_chan_common *chan = &ioat->base;
if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
u64 status;
status = ioat_chansts(chan);
@@ -572,9 +572,9 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
*/
struct ioat_chan_common *chan = &ioat->base;
struct dma_chan *c = &chan->common;
- const u16 curr_size = ioat2_ring_size(ioat);
+ const u32 curr_size = ioat2_ring_size(ioat);
const u16 active = ioat2_ring_active(ioat);
- const u16 new_size = 1 << order;
+ const u32 new_size = 1 << order;
struct ioat_ring_ent **ring;
u16 i;
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index a2c413b..be2a55b 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -74,7 +74,7 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
return container_of(chan, struct ioat2_dma_chan, base);
}
-static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
{
return 1 << ioat->alloc_order;
}
@@ -91,7 +91,7 @@ static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
}
-static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
{
return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
}
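The u16 -> u32 widening above matters because the ring order can reach 16 (the driver's maximum allocation order), and 1 << 16 does not fit in a u16; the old helpers silently returned 0 for a full-size ring. A minimal illustration:

	u16 old_size = 1 << 16;	/* truncates to 0: every ring space/pending
				   calculation built on it goes wrong */
	u32 new_size = 1 << 16;	/* 65536, as intended */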
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 2c4476c..f7f1dc6 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -257,7 +257,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
* The difference from the dma_v2.c __cleanup() is that this routine
* handles extended descriptors and dma-unmapping raid operations.
*/
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
struct ioat_ring_ent *desc;
@@ -314,7 +314,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
spin_lock_bh(&chan->cleanup_lock);
if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -333,7 +333,7 @@ static void ioat3_cleanup_event(unsigned long data)
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -348,7 +348,7 @@ static void ioat3_timer_event(unsigned long data)
struct ioat_chan_common *chan = &ioat->base;
if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
u64 status;
status = ioat_chansts(chan);
@@ -1149,6 +1149,44 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+ return true;
+ default:
+ return false;
+ }
+}
+
int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
@@ -1169,6 +1207,9 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
+ if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+ dma->copy_align = 6;
+
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
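copy_align is a power-of-two shift, so the value 6 advertises a 64-byte alignment requirement for memcpy offload on the JSF/SNB parts. Clients probe it through is_dma_copy_aligned(), which is also why net_dma_find_channel() above filters out channels that cannot do byte-aligned copies. A client-side sketch (illustrative; src_off, dst_off and len are hypothetical):

	/* true only if both offsets and the length are multiples of
	   1 << dma->copy_align (64 bytes for these devices) */
	if (is_dma_copy_aligned(dma, src_off, dst_off, len))
		tx = dma->device_prep_dma_memcpy(chan, dst, src, len, flags);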
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index da6c4c2..79e3eba 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1252,8 +1252,8 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
/* address conversion buffers (dma_map / page_address) */
void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
- dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
- dma_addr_t pq_dest[2];
+ dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
+ dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
int i;
struct dma_async_tx_descriptor *tx;
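The apparent point of this change: the two P/Q destinations now live in the tail of pq_src[], so the cleanup path that walks pq_src[] unmaps sources and destinations in one pass instead of leaving the destinations mapped. Roughly (a sketch of the resulting unmap loop, not the literal test code; direction as in the surrounding test):

	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
		dma_unmap_page(dev, pq_src[i], PAGE_SIZE, DMA_TO_DEVICE);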
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index c81ef7e..655d4ce6 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -201,10 +201,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
- struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
-
- mxs_dma_enable_chan(mxs_chan);
-
return dma_cookie_assign(tx);
}
@@ -558,9 +554,9 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
static void mxs_dma_issue_pending(struct dma_chan *chan)
{
- /*
- * Nothing to do. We only have a single descriptor.
- */
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+
+ mxs_dma_enable_chan(mxs_chan);
}
static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
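Moving the channel enable out of tx_submit() and into issue_pending() brings mxs-dma in line with the dmaengine contract: submit only assigns a cookie and queues work, and nothing may touch the hardware until the client calls dma_async_issue_pending(). The canonical client sequence this restores (sketch):

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, dir, flags);
	cookie = dmaengine_submit(desc);	/* queue only; no HW activity */
	dma_async_issue_pending(chan);		/* the transfer starts here */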
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 282caf1..2ee6e23 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2225,12 +2225,9 @@ static inline void free_desc_list(struct list_head *list)
{
struct dma_pl330_dmac *pdmac;
struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch;
+ struct dma_pl330_chan *pch = NULL;
unsigned long flags;
- if (list_empty(list))
- return;
-
/* Finish off the work list */
list_for_each_entry(desc, list, node) {
dma_async_tx_callback callback;
@@ -2247,6 +2244,10 @@ static inline void free_desc_list(struct list_head *list)
desc->pchan = NULL;
}
+ /* pch will be unset if list was empty */
+ if (!pch)
+ return;
+
pdmac = pch->dmac;
spin_lock_irqsave(&pdmac->pool_lock, flags);
@@ -2257,12 +2258,9 @@ static inline void free_desc_list(struct list_head *list)
static inline void handle_cyclic_desc_list(struct list_head *list)
{
struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch;
+ struct dma_pl330_chan *pch = NULL;
unsigned long flags;
- if (list_empty(list))
- return;
-
list_for_each_entry(desc, list, node) {
dma_async_tx_callback callback;
@@ -2274,6 +2272,10 @@ static inline void handle_cyclic_desc_list(struct list_head *list)
callback(desc->txd.callback_param);
}
+ /* pch will be unset if list was empty */
+ if (!pch)
+ return;
+
spin_lock_irqsave(&pch->lock, flags);
list_splice_tail_init(list, &pch->work_list);
spin_unlock_irqrestore(&pch->lock, flags);
@@ -2926,8 +2928,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
- num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
- (u8)pi->pcfg.num_chan);
+ if (pdat)
+ num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
+ else
+ num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
+
pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
for (i = 0; i < num_chan; i++) {
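The rewrite also sidesteps max()'s strict type check, which is what forced the old (u8) casts; a count above 255 would have wrapped silently. max_t(int, ...) compares at full width instead (illustration with a hypothetical out-of-range count):

	num_chan = max((u8)300, (u8)8);	/* (u8)300 wraps to 44 */
	num_chan = max_t(int, 300, 8);	/* 300, as intended */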
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index bdd41d4..2ed1ac3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -18,6 +18,7 @@
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
+#include <linux/regulator/consumer.h>
#include <plat/ste_dma40.h>
@@ -69,6 +70,22 @@ enum d40_command {
};
/*
+ * enum d40_events - The different Event Enables for the event lines.
+ *
+ * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
+ * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
+ * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
+ * @D40_ROUND_EVENTLINE: Status check for event line.
+ */
+
+enum d40_events {
+ D40_DEACTIVATE_EVENTLINE = 0,
+ D40_ACTIVATE_EVENTLINE = 1,
+ D40_SUSPEND_REQ_EVENTLINE = 2,
+ D40_ROUND_EVENTLINE = 3
+};
+
+/*
* These are the registers that have to be saved and later restored
* when the DMA hw is powered off.
* TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
@@ -870,8 +887,8 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
}
#endif
-static int d40_channel_execute_command(struct d40_chan *d40c,
- enum d40_command command)
+static int __d40_execute_command_phy(struct d40_chan *d40c,
+ enum d40_command command)
{
u32 status;
int i;
@@ -880,6 +897,12 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
unsigned long flags;
u32 wmask;
+ if (command == D40_DMA_STOP) {
+ ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
+ if (ret)
+ return ret;
+ }
+
spin_lock_irqsave(&d40c->base->execmd_lock, flags);
if (d40c->phy_chan->num % 2 == 0)
@@ -973,67 +996,109 @@ static void d40_term_all(struct d40_chan *d40c)
}
d40c->pending_tx = 0;
- d40c->busy = false;
}
-static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
- u32 event, int reg)
+static void __d40_config_set_event(struct d40_chan *d40c,
+ enum d40_events event_type, u32 event,
+ int reg)
{
void __iomem *addr = chan_base(d40c) + reg;
int tries;
+ u32 status;
+
+ switch (event_type) {
+
+ case D40_DEACTIVATE_EVENTLINE:
- if (!enable) {
writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
| ~D40_EVENTLINE_MASK(event), addr);
- return;
- }
+ break;
+
+ case D40_SUSPEND_REQ_EVENTLINE:
+ status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
+ D40_EVENTLINE_POS(event);
+
+ if (status == D40_DEACTIVATE_EVENTLINE ||
+ status == D40_SUSPEND_REQ_EVENTLINE)
+ break;
+ writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
+ | ~D40_EVENTLINE_MASK(event), addr);
+
+ for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
+
+ status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
+ D40_EVENTLINE_POS(event);
+
+ cpu_relax();
+ /*
+ * Reduce the number of bus accesses while
+ * waiting for the DMA to suspend.
+ */
+ udelay(3);
+
+ if (status == D40_DEACTIVATE_EVENTLINE)
+ break;
+ }
+
+ if (tries == D40_SUSPEND_MAX_IT) {
+ chan_err(d40c,
+ "unable to stop the event_line chl %d (log: %d)"
+ "status %x\n", d40c->phy_chan->num,
+ d40c->log_num, status);
+ }
+ break;
+
+ case D40_ACTIVATE_EVENTLINE:
/*
* The hardware sometimes doesn't register the enable when src and dst
* event lines are active on the same logical channel. Retry to ensure
* it does. Usually only one retry is sufficient.
*/
- tries = 100;
- while (--tries) {
- writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
- | ~D40_EVENTLINE_MASK(event), addr);
+ tries = 100;
+ while (--tries) {
+ writel((D40_ACTIVATE_EVENTLINE <<
+ D40_EVENTLINE_POS(event)) |
+ ~D40_EVENTLINE_MASK(event), addr);
- if (readl(addr) & D40_EVENTLINE_MASK(event))
- break;
- }
+ if (readl(addr) & D40_EVENTLINE_MASK(event))
+ break;
+ }
- if (tries != 99)
- dev_dbg(chan2dev(d40c),
- "[%s] workaround enable S%cLNK (%d tries)\n",
- __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
- 100 - tries);
+ if (tries != 99)
+ dev_dbg(chan2dev(d40c),
+ "[%s] workaround enable S%cLNK (%d tries)\n",
+ __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
+ 100 - tries);
- WARN_ON(!tries);
-}
+ WARN_ON(!tries);
+ break;
-static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
-{
- unsigned long flags;
+ case D40_ROUND_EVENTLINE:
+ BUG();
+ break;
- spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+ }
+}
+static void d40_config_set_event(struct d40_chan *d40c,
+ enum d40_events event_type)
+{
/* Enable event line connected to device (or memcpy) */
if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
(d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
- __d40_config_set_event(d40c, do_enable, event,
+ __d40_config_set_event(d40c, event_type, event,
D40_CHAN_REG_SSLNK);
}
if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
- __d40_config_set_event(d40c, do_enable, event,
+ __d40_config_set_event(d40c, event_type, event,
D40_CHAN_REG_SDLNK);
}
-
- spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
@@ -1047,6 +1112,64 @@ static u32 d40_chan_has_events(struct d40_chan *d40c)
return val;
}
+static int
+__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
+{
+ unsigned long flags;
+ int ret = 0;
+ u32 active_status;
+ void __iomem *active_reg;
+
+ if (d40c->phy_chan->num % 2 == 0)
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+ else
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+
+ spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+
+ switch (command) {
+ case D40_DMA_STOP:
+ case D40_DMA_SUSPEND_REQ:
+
+ active_status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+
+ if (active_status == D40_DMA_RUN)
+ d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
+ else
+ d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
+
+ if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
+ ret = __d40_execute_command_phy(d40c, command);
+
+ break;
+
+ case D40_DMA_RUN:
+
+ d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
+ ret = __d40_execute_command_phy(d40c, command);
+ break;
+
+ case D40_DMA_SUSPENDED:
+ BUG();
+ break;
+ }
+
+ spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
+ return ret;
+}
+
+static int d40_channel_execute_command(struct d40_chan *d40c,
+ enum d40_command command)
+{
+ if (chan_is_logical(d40c))
+ return __d40_execute_command_log(d40c, command);
+ else
+ return __d40_execute_command_phy(d40c, command);
+}
+
static u32 d40_get_prmo(struct d40_chan *d40c)
{
static const unsigned int phy_map[] = {
@@ -1149,15 +1272,7 @@ static int d40_pause(struct d40_chan *d40c)
spin_lock_irqsave(&d40c->lock, flags);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
- if (res == 0) {
- if (chan_is_logical(d40c)) {
- d40_config_set_event(d40c, false);
- /* Resume the other logical channels if any */
- if (d40_chan_has_events(d40c))
- res = d40_channel_execute_command(d40c,
- D40_DMA_RUN);
- }
- }
+
pm_runtime_mark_last_busy(d40c->base->dev);
pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1174,45 +1289,17 @@ static int d40_resume(struct d40_chan *d40c)
spin_lock_irqsave(&d40c->lock, flags);
pm_runtime_get_sync(d40c->base->dev);
- if (d40c->base->rev == 0)
- if (chan_is_logical(d40c)) {
- res = d40_channel_execute_command(d40c,
- D40_DMA_SUSPEND_REQ);
- goto no_suspend;
- }
/* If bytes left to transfer or linked tx resume job */
- if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
-
- if (chan_is_logical(d40c))
- d40_config_set_event(d40c, true);
-
+ if (d40_residue(d40c) || d40_tx_is_linked(d40c))
res = d40_channel_execute_command(d40c, D40_DMA_RUN);
- }
-no_suspend:
pm_runtime_mark_last_busy(d40c->base->dev);
pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
-static int d40_terminate_all(struct d40_chan *chan)
-{
- unsigned long flags;
- int ret = 0;
-
- ret = d40_pause(chan);
- if (!ret && chan_is_physical(chan))
- ret = d40_channel_execute_command(chan, D40_DMA_STOP);
-
- spin_lock_irqsave(&chan->lock, flags);
- d40_term_all(chan);
- spin_unlock_irqrestore(&chan->lock, flags);
-
- return ret;
-}
-
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct d40_chan *d40c = container_of(tx->chan,
@@ -1232,20 +1319,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
static int d40_start(struct d40_chan *d40c)
{
- if (d40c->base->rev == 0) {
- int err;
-
- if (chan_is_logical(d40c)) {
- err = d40_channel_execute_command(d40c,
- D40_DMA_SUSPEND_REQ);
- if (err)
- return err;
- }
- }
-
- if (chan_is_logical(d40c))
- d40_config_set_event(d40c, true);
-
return d40_channel_execute_command(d40c, D40_DMA_RUN);
}
@@ -1258,10 +1331,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
d40d = d40_first_queued(d40c);
if (d40d != NULL) {
- if (!d40c->busy)
+ if (!d40c->busy) {
d40c->busy = true;
-
- pm_runtime_get_sync(d40c->base->dev);
+ pm_runtime_get_sync(d40c->base->dev);
+ }
/* Remove from queue */
d40_desc_remove(d40d);
@@ -1388,8 +1461,8 @@ static void dma_tasklet(unsigned long data)
return;
- err:
- /* Rescue manoeuvre if receiving double interrupts */
+err:
+ /* Rescue manoeuvre if receiving double interrupts */
if (d40c->pending_tx > 0)
d40c->pending_tx--;
spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1770,7 +1843,6 @@ static int d40_config_memcpy(struct d40_chan *d40c)
return 0;
}
-
static int d40_free_dma(struct d40_chan *d40c)
{
@@ -1806,43 +1878,18 @@ static int d40_free_dma(struct d40_chan *d40c)
}
pm_runtime_get_sync(d40c->base->dev);
- res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ res = d40_channel_execute_command(d40c, D40_DMA_STOP);
if (res) {
- chan_err(d40c, "suspend failed\n");
+ chan_err(d40c, "stop failed\n");
goto out;
}
- if (chan_is_logical(d40c)) {
- /* Release logical channel, deactivate the event line */
+ d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
- d40_config_set_event(d40c, false);
+ if (chan_is_logical(d40c))
d40c->base->lookup_log_chans[d40c->log_num] = NULL;
-
- /*
- * Check if there are more logical allocation
- * on this phy channel.
- */
- if (!d40_alloc_mask_free(phy, is_src, event)) {
- /* Resume the other logical channels if any */
- if (d40_chan_has_events(d40c)) {
- res = d40_channel_execute_command(d40c,
- D40_DMA_RUN);
- if (res)
- chan_err(d40c,
- "Executing RUN command\n");
- }
- goto out;
- }
- } else {
- (void) d40_alloc_mask_free(phy, is_src, 0);
- }
-
- /* Release physical channel */
- res = d40_channel_execute_command(d40c, D40_DMA_STOP);
- if (res) {
- chan_err(d40c, "Failed to stop channel\n");
- goto out;
- }
+ else
+ d40c->base->lookup_phy_chans[phy->num] = NULL;
if (d40c->busy) {
pm_runtime_mark_last_busy(d40c->base->dev);
@@ -1852,7 +1899,6 @@ static int d40_free_dma(struct d40_chan *d40c)
d40c->busy = false;
d40c->phy_chan = NULL;
d40c->configured = false;
- d40c->base->lookup_phy_chans[phy->num] = NULL;
out:
pm_runtime_mark_last_busy(d40c->base->dev);
@@ -2070,7 +2116,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
if (sg_next(&sg_src[sg_len - 1]) == sg_src)
desc->cyclic = true;
- if (direction != DMA_NONE) {
+ if (direction != DMA_TRANS_NONE) {
dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
if (direction == DMA_DEV_TO_MEM)
@@ -2371,6 +2417,31 @@ static void d40_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&d40c->lock, flags);
}
+static void d40_terminate_all(struct dma_chan *chan)
+{
+ unsigned long flags;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ int ret;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ pm_runtime_get_sync(d40c->base->dev);
+ ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
+ if (ret)
+ chan_err(d40c, "Failed to stop channel\n");
+
+ d40_term_all(d40c);
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ if (d40c->busy) {
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ }
+ d40c->busy = false;
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
struct stedma40_half_channel_info *info,
@@ -2551,7 +2622,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
- return d40_terminate_all(d40c);
+ d40_terminate_all(chan);
+ return 0;
case DMA_PAUSE:
return d40_pause(d40c);
case DMA_RESUME:
@@ -2908,6 +2980,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
rev, res->start);
+ if (rev < 2) {
+ d40_err(&pdev->dev, "hardware revision: %d is not supported",
+ rev);
+ goto failure;
+ }
+
plat_data = pdev->dev.platform_data;
/* Count the number of logical channels in use */
@@ -2998,6 +3076,7 @@ failure:
if (base) {
kfree(base->lcla_pool.alloc_map);
+ kfree(base->reg_val_backup_chan);
kfree(base->lookup_log_chans);
kfree(base->lookup_phy_chans);
kfree(base->phy_res);
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 8d3d490..51e8e53 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -62,8 +62,6 @@
#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
/* Link register */
-#define D40_DEACTIVATE_EVENTLINE 0x0
-#define D40_ACTIVATE_EVENTLINE 0x1
#define D40_EVENTLINE_POS(i) (2 * i)
#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i))
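For reference, the two defines removed here were absorbed into the d40_events enum added in ste_dma40.c above. The encoding works because each event line occupies a 2-bit field in the SxLNK link registers, so all four states fit under D40_EVENTLINE_MASK(). A sketch of reading one line's state with these macros:

	u32 pos   = D40_EVENTLINE_POS(event);	/* 2 * event */
	u32 state = (readl(addr) & D40_EVENTLINE_MASK(event)) >> pos;
	/* state is one of the four d40_events values, e.g.
	 * D40_SUSPEND_REQ_EVENTLINE while a suspend is pending */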