Diffstat (limited to 'crypto/async_tx')
-rw-r--r--  crypto/async_tx/async_memcpy.c | 2
-rw-r--r--  crypto/async_tx/async_memset.c | 2
-rw-r--r--  crypto/async_tx/async_pq.c     | 6
-rw-r--r--  crypto/async_tx/async_xor.c    | 5
4 files changed, 9 insertions, 6 deletions
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index b38cbb3..0ec1fb6 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -50,7 +50,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (device) {
+	if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		dma_addr_t dma_dest, dma_src;
 		unsigned long dma_prep_flags = 0;
 
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index a374784..58e4a87 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -47,7 +47,7 @@ async_memset(struct page *dest, int val, unsigned int offset, size_t len,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (device) {
+	if (device && is_dma_fill_aligned(device, offset, 0, len)) {
 		dma_addr_t dma_dest;
 		unsigned long dma_prep_flags = 0;
 
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index a25e290..b88db6d 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -211,7 +211,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 
 	if (dma_src && device &&
 	    (src_cnt <= dma_maxpq(device, 0) ||
-	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) {
+	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
+	    is_dma_pq_aligned(device, offset, 0, len)) {
 		/* run the p+q asynchronously */
 		pr_debug("%s: (async) disks: %d len: %zu\n",
 			 __func__, disks, len);
@@ -274,7 +275,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) blocks;
 
-	if (dma_src && device && disks <= dma_maxpq(device, 0)) {
+	if (dma_src && device && disks <= dma_maxpq(device, 0) &&
+	    is_dma_pq_aligned(device, offset, 0, len)) {
 		struct device *dev = device->dev;
 		dma_addr_t *pq = &dma_src[disks-2];
 		int i;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index db27987..b459a90 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -193,7 +193,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) src_list;
 
-	if (dma_src && chan) {
+	if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
 		/* run the xor asynchronously */
 		pr_debug("%s (async): len: %zu\n", __func__, len);
 
@@ -265,7 +265,8 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) src_list;
 
-	if (dma_src && device && src_cnt <= device->max_xor) {
+	if (dma_src && device && src_cnt <= device->max_xor &&
+	    is_dma_xor_aligned(device, offset, 0, len)) {
 		unsigned long dma_prep_flags = 0;
 		int i;
 
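Each hunk above gates the DMA fast path on an is_dma_*_aligned() check, so requests that do not meet the engine's alignment constraints fall back to the synchronous software path. Below is a minimal, self-contained sketch of the kind of test these helpers perform, assuming the dma_device advertises its required alignment as a power-of-two shift (as dmaengine.h does for copy/xor/pq/fill); the struct and function names here are illustrative, not the exact kernel definitions.

/*
 * Sketch: an operation qualifies for hardware offload only if both
 * offsets and the length are multiples of the alignment the engine
 * requires. An alignment shift of 0 means "no constraint".
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct example_dma_device {
	unsigned char copy_align;	/* required alignment, as a shift */
};

static bool example_check_align(unsigned char align, size_t off1,
				size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;			/* engine accepts any alignment */
	mask = ((size_t)1 << align) - 1;
	return (mask & (off1 | off2 | len)) == 0;
}

int main(void)
{
	struct example_dma_device dev = { .copy_align = 6 };	/* 64-byte alignment */

	/* aligned offsets and length: eligible for the DMA fast path */
	printf("%d\n", example_check_align(dev.copy_align, 0, 128, 4096));	/* 1 */
	/* misaligned length: the async_tx call would run synchronously instead */
	printf("%d\n", example_check_align(dev.copy_align, 0, 128, 4100));	/* 0 */
	return 0;
}

The point of folding the check into the existing if-conditions, rather than failing the submission later, is that the fallback is free: when the condition is false the async_tx routines already take the synchronous memcpy/memset/xor/gen_syndrome path.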