path: root/drivers/dma/mv_xor.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-07 20:39:25 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-07 20:39:25 -0400
commit     d0cd84817c745655428dbfdb1e3f754230b46bef (patch)
tree       a7b6f422f6ac50f506ffa7a66f8e83387f90f212 /drivers/dma/mv_xor.c
parent     bdf428feb225229b1d4715b45bbdad4a934cd89c (diff)
parent     3f334078567245429540e6461c81c749fce87f70 (diff)
Merge tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine updates from Dan Williams:
 "Even though this has fixes marked for -stable, given the size and the
  needed conflict resolutions this is 3.18-rc1/merge-window material.

  These patches have been languishing in my tree for a long while. The
  fact that I do not have the time to do proper/prompt maintenance of
  this tree is a primary factor in the decision to step down as
  dmaengine maintainer. That and the fact that the bulk of drivers/dma/
  activity is going through Vinod these days.

  The net_dma removal has not been in -next. It has developed simple
  conflicts against mainline and net-next (for-3.18).

  Continuing thanks to Vinod for staying on top of drivers/dma/.

  Summary:

  1/ Step down as dmaengine maintainer see commit 08223d80df38
     "dmaengine maintainer update"

  2/ Removal of net_dma, as it has been marked 'broken' since 3.13
     (commit 77873803363c "net_dma: mark broken"), without reports of
     performance regression.

  3/ Miscellaneous fixes"

* tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  net: make tcp_cleanup_rbuf private
  net_dma: revert 'copied_early'
  net_dma: simple removal
  dmaengine maintainer update
  dmatest: prevent memory leakage on error path in thread
  ioat: Use time_before_jiffies()
  dmaengine: fix xor sources continuation
  dma: mv_xor: Rename __mv_xor_slot_cleanup() to mv_xor_slot_cleanup()
  dma: mv_xor: Remove all callers of mv_xor_slot_cleanup()
  dma: mv_xor: Remove unneeded mv_xor_clean_completed_slots() call
  ioat: Use pci_enable_msix_exact() instead of pci_enable_msix()
  drivers: dma: Include appropriate header file in dca.c
  drivers: dma: Mark functions as static in dma_v3.c
  dma: mv_xor: Add DMA API error checks
  ioat/dca: Use dev_is_pci() to check whether it is pci device
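Among the patches pulled here, "dma: mv_xor: Add DMA API error checks" makes the driver's self-tests verify every mapping with dma_mapping_error() before the address is handed to the engine. A minimal sketch of that generic DMA API pattern follows (illustrative only, not code from this commit; "dev" and "page" are placeholder names):

	/* Illustrative sketch, not part of the patch: check a DMA mapping
	 * before using it, and unmap it when the transfer is done. */
	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;	/* mapping failed, never pass it to hardware */
	/* ... program the XOR engine with "dma" ... */
	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);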
Diffstat (limited to 'drivers/dma/mv_xor.c')
-rw-r--r--  drivers/dma/mv_xor.c | 80
1 file changed, 63 insertions(+), 17 deletions(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 394cbc5..7938272 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -310,7 +310,8 @@ mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
return 0;
}
-static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+/* This function must be called with the mv_xor_chan spinlock held */
+static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
struct mv_xor_desc_slot *iter, *_iter;
dma_cookie_t cookie = 0;
@@ -366,18 +367,13 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
mv_chan->dmachan.completed_cookie = cookie;
}
-static void
-mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
-{
- spin_lock_bh(&mv_chan->lock);
- __mv_xor_slot_cleanup(mv_chan);
- spin_unlock_bh(&mv_chan->lock);
-}
-
static void mv_xor_tasklet(unsigned long data)
{
struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
+
+ spin_lock_bh(&chan->lock);
mv_xor_slot_cleanup(chan);
+ spin_unlock_bh(&chan->lock);
}
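The hunks above rename __mv_xor_slot_cleanup() to mv_xor_slot_cleanup(), drop the old locking wrapper, and move the locking into the callers, so the helper now documents that it must run with the channel spinlock held. A minimal sketch of the resulting calling convention (illustrative only, not part of the patch):

	/* Illustrative only: every caller now takes the channel lock
	 * itself before invoking the cleanup helper, as the tasklet
	 * above does. */
	spin_lock_bh(&mv_chan->lock);
	mv_xor_slot_cleanup(mv_chan);	/* requires mv_chan->lock held */
	spin_unlock_bh(&mv_chan->lock);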
static struct mv_xor_desc_slot *
@@ -656,9 +652,10 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
struct mv_xor_desc_slot *iter, *_iter;
int in_use_descs = 0;
+ spin_lock_bh(&mv_chan->lock);
+
mv_xor_slot_cleanup(mv_chan);
- spin_lock_bh(&mv_chan->lock);
list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
chain_node) {
in_use_descs++;
@@ -700,11 +697,12 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE) {
- mv_xor_clean_completed_slots(mv_chan);
+ if (ret == DMA_COMPLETE)
return ret;
- }
+
+ spin_lock_bh(&mv_chan->lock);
mv_xor_slot_cleanup(mv_chan);
+ spin_unlock_bh(&mv_chan->lock);
return dma_cookie_status(chan, cookie, txstate);
}
@@ -782,7 +780,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
- int i;
+ int i, ret;
void *src, *dest;
dma_addr_t src_dma, dest_dma;
struct dma_chan *dma_chan;
@@ -819,19 +817,44 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
PAGE_SIZE, DMA_TO_DEVICE);
- unmap->to_cnt = 1;
unmap->addr[0] = src_dma;
+ ret = dma_mapping_error(dma_chan->device->dev, src_dma);
+ if (ret) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+ unmap->to_cnt = 1;
+
dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
PAGE_SIZE, DMA_FROM_DEVICE);
- unmap->from_cnt = 1;
unmap->addr[1] = dest_dma;
+ ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
+ if (ret) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+ unmap->from_cnt = 1;
unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
PAGE_SIZE, 0);
+ if (!tx) {
+ dev_err(dma_chan->device->dev,
+ "Self-test cannot prepare operation, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
cookie = mv_xor_tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(dma_chan->device->dev,
+ "Self-test submit error, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
mv_xor_issue_pending(dma_chan);
async_tx_ack(tx);
msleep(1);
@@ -866,7 +889,7 @@ out:
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
- int i, src_idx;
+ int i, src_idx, ret;
struct page *dest;
struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
@@ -929,19 +952,42 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
0, PAGE_SIZE, DMA_TO_DEVICE);
dma_srcs[i] = unmap->addr[i];
+ ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
+ if (ret) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
unmap->to_cnt++;
}
unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
DMA_FROM_DEVICE);
dest_dma = unmap->addr[src_count];
+ ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
+ if (ret) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
unmap->from_cnt = 1;
unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
src_count, PAGE_SIZE, 0);
+ if (!tx) {
+ dev_err(dma_chan->device->dev,
+ "Self-test cannot prepare operation, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
cookie = mv_xor_tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(dma_chan->device->dev,
+ "Self-test submit error, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
mv_xor_issue_pending(dma_chan);
async_tx_ack(tx);
msleep(8);