author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 10:25:58 -0800
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 10:25:58 -0800
commit | 27d189c02ba25851973c8582e419c0bded9f7e5b (patch) |
tree | be142d664bc4e3cec7ab2878a243343f46e897ee /drivers |
parent | a1703154200c390ab03c10224c586e815d3e31e8 (diff) |
parent | 55db8387a5e8d07407f0b7c6b2526417a2bc6243 (diff) |
download | op-kernel-dev-27d189c02ba25851973c8582e419c0bded9f7e5b.zip | op-kernel-dev-27d189c02ba25851973c8582e419c0bded9f7e5b.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (46 commits)
hwrng: via_rng - Fix memory scribbling on some CPUs
crypto: padlock - Move padlock.h into include/crypto
hwrng: via_rng - Fix asm constraints
crypto: n2 - use __devexit not __exit in n2_unregister_algs
crypto: mark crypto workqueues CPU_INTENSIVE
crypto: mv_cesa - dont return PTR_ERR() of wrong pointer
crypto: ripemd - Set module author and update email address
crypto: omap-sham - backlog handling fix
crypto: gf128mul - Remove experimental tag
crypto: af_alg - fix af_alg memory_allocated data type
crypto: aesni-intel - Fixed build with binutils 2.16
crypto: af_alg - Make sure sk_security is initialized on accept()ed sockets
net: Add missing lockdep class names for af_alg
include: Install linux/if_alg.h for user-space crypto API
crypto: omap-aes - checkpatch --file warning fixes
crypto: omap-aes - initialize aes module once per request
crypto: omap-aes - unnecessary code removed
crypto: omap-aes - error handling implementation improved
crypto: omap-aes - redundant locking is removed
crypto: omap-aes - DMA initialization fixes for OMAP off mode
...
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/char/hw_random/via-rng.c | 10
-rw-r--r-- | drivers/crypto/mv_cesa.c | 2
-rw-r--r-- | drivers/crypto/n2_core.c | 2
-rw-r--r-- | drivers/crypto/omap-aes.c | 260
-rw-r--r-- | drivers/crypto/omap-sham.c | 374
-rw-r--r-- | drivers/crypto/padlock-aes.c | 2
-rw-r--r-- | drivers/crypto/padlock-sha.c | 8
-rw-r--r-- | drivers/crypto/padlock.h | 23
8 files changed, 361 insertions, 320 deletions
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 794aacb..d0387a8 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -24,6 +24,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <crypto/padlock.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/hw_random.h>
@@ -34,7 +35,6 @@
 #include <asm/i387.h>
 
 
-#define PFX KBUILD_MODNAME ": "
 
 
 enum {
@@ -81,8 +81,7 @@ static inline u32 xstore(u32 *addr, u32 edx_in)
 
     ts_state = irq_ts_save();
     asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
-        :"=m"(*addr), "=a"(eax_out)
-        :"D"(addr), "d"(edx_in));
+        : "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
     irq_ts_restore(ts_state);
 
     return eax_out;
@@ -90,8 +89,10 @@ static inline u32 xstore(u32 *addr, u32 edx_in)
 
 static int via_rng_data_present(struct hwrng *rng, int wait)
 {
+    char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+        ((aligned(STACK_ALIGN)));
+    u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
     u32 bytes_out;
-    u32 *via_rng_datum = (u32 *)(&rng->priv);
     int i;
 
     /* We choose the recommended 1-byte-per-instruction RNG rate,
@@ -115,6 +116,7 @@ static int via_rng_data_present(struct hwrng *rng, int wait)
             break;
         udelay(10);
     }
+    rng->priv = *via_rng_datum;
     return bytes_out ? 1 : 0;
 }
 
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 7d279e5..c99305a 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -857,7 +857,7 @@ static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
             printk(KERN_WARNING MV_CESA
                    "Base driver '%s' could not be loaded!\n",
                    base_hash_name);
-            err = PTR_ERR(fallback_tfm);
+            err = PTR_ERR(base_hash);
             goto err_bad_base;
         }
     }
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 76141262..80dc094 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1542,7 +1542,7 @@ out:
     return err;
 }
 
-static void __exit n2_unregister_algs(void)
+static void __devexit n2_unregister_algs(void)
 {
     mutex_lock(&spu_lock);
     if (!--algs_registered)
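The mv_cesa hunk above fixes a classic bug class: calling PTR_ERR() on a pointer other than the one that actually failed, so the reported error code is garbage. Below is a standalone userspace sketch of that pattern; ERR_PTR/PTR_ERR/IS_ERR are re-implemented locally purely for illustration (in the kernel they come from <linux/err.h>), and the variable names only mirror the hunk:

```c
#include <stdio.h>

/* Minimal userspace stand-ins for the kernel's <linux/err.h> helpers. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *fallback_tfm = "a valid, earlier allocation";
	void *base_hash = ERR_PTR(-2);	/* this allocation failed */
	long err;

	if (IS_ERR(base_hash)) {
		/* buggy: decodes an unrelated, valid pointer */
		err = PTR_ERR(fallback_tfm);
		printf("wrong:   err = %ld\n", err);

		/* fixed (as in the hunk): decode the pointer that failed */
		err = PTR_ERR(base_hash);
		printf("correct: err = %ld\n", err);
	}
	return 0;
}
```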
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 799ca51..add2a1a 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -74,11 +74,9 @@
 #define FLAGS_CBC       BIT(1)
 #define FLAGS_GIV       BIT(2)
 
-#define FLAGS_NEW_KEY   BIT(4)
-#define FLAGS_NEW_IV    BIT(5)
-#define FLAGS_INIT      BIT(6)
-#define FLAGS_FAST      BIT(7)
-#define FLAGS_BUSY      8
+#define FLAGS_INIT      BIT(4)
+#define FLAGS_FAST      BIT(5)
+#define FLAGS_BUSY      BIT(6)
 
 struct omap_aes_ctx {
     struct omap_aes_dev *dd;
@@ -98,19 +96,18 @@ struct omap_aes_reqctx {
 struct omap_aes_dev {
     struct list_head    list;
     unsigned long       phys_base;
-    void __iomem *io_base;
+    void __iomem        *io_base;
     struct clk          *iclk;
     struct omap_aes_ctx *ctx;
     struct device       *dev;
     unsigned long       flags;
+    int                 err;
 
-    u32                 *iv;
-    u32                 ctrl;
+    spinlock_t          lock;
+    struct crypto_queue queue;
 
-    spinlock_t          lock;
-    struct crypto_queue queue;
-
-    struct tasklet_struct   task;
+    struct tasklet_struct   done_task;
+    struct tasklet_struct   queue_task;
 
     struct ablkcipher_request   *req;
     size_t              total;
@@ -179,9 +176,13 @@ static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
 
 static int omap_aes_hw_init(struct omap_aes_dev *dd)
 {
-    int err = 0;
-
+    /*
+     * clocks are enabled when request starts and disabled when finished.
+     * It may be long delays between requests.
+     * Device might go to off mode to save power.
+     */
     clk_enable(dd->iclk);
+
     if (!(dd->flags & FLAGS_INIT)) {
         /* is it necessary to reset before every operation? */
         omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
@@ -193,39 +194,26 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
         __asm__ __volatile__("nop");
         __asm__ __volatile__("nop");
 
-        err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
-                AES_REG_SYSSTATUS_RESETDONE);
-        if (!err)
-            dd->flags |= FLAGS_INIT;
-    }
+        if (omap_aes_wait(dd, AES_REG_SYSSTATUS,
+                AES_REG_SYSSTATUS_RESETDONE))
+            return -ETIMEDOUT;
 
-    return err;
-}
+        dd->flags |= FLAGS_INIT;
+        dd->err = 0;
+    }
 
-static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
-{
-    clk_disable(dd->iclk);
+    return 0;
 }
 
-static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
+static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 {
     unsigned int key32;
-    int i;
+    int i, err;
     u32 val, mask;
 
-    val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
-    if (dd->flags & FLAGS_CBC)
-        val |= AES_REG_CTRL_CBC;
-    if (dd->flags & FLAGS_ENCRYPT)
-        val |= AES_REG_CTRL_DIRECTION;
-
-    if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
-        !(dd->ctx->flags & FLAGS_NEW_KEY))
-        goto out;
-
-    /* only need to write control registers for new settings */
-
-    dd->ctrl = val;
+    err = omap_aes_hw_init(dd);
+    if (err)
+        return err;
 
     val = 0;
     if (dd->dma_lch_out >= 0)
@@ -237,30 +225,43 @@ static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
 
     omap_aes_write_mask(dd, AES_REG_MASK, val, mask);
 
-    pr_debug("Set key\n");
     key32 = dd->ctx->keylen / sizeof(u32);
-    /* set a key */
+
+    /* it seems a key should always be set even if it has not changed */
     for (i = 0; i < key32; i++) {
         omap_aes_write(dd, AES_REG_KEY(i),
             __le32_to_cpu(dd->ctx->key[i]));
     }
-    dd->ctx->flags &= ~FLAGS_NEW_KEY;
 
-    if (dd->flags & FLAGS_NEW_IV) {
-        pr_debug("Set IV\n");
-        omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4);
-        dd->flags &= ~FLAGS_NEW_IV;
-    }
+    if ((dd->flags & FLAGS_CBC) && dd->req->info)
+        omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);
+
+    val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
+    if (dd->flags & FLAGS_CBC)
+        val |= AES_REG_CTRL_CBC;
+    if (dd->flags & FLAGS_ENCRYPT)
+        val |= AES_REG_CTRL_DIRECTION;
 
     mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
             AES_REG_CTRL_KEY_SIZE;
 
-    omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask);
+    omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);
 
-out:
-    /* start DMA or disable idle mode */
-    omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
-                AES_REG_MASK_START);
+    /* IN */
+    omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
+                 dd->phys_base + AES_REG_DATA, 0, 4);
+
+    omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+    omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+
+    /* OUT */
+    omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
+                dd->phys_base + AES_REG_DATA, 0, 4);
+
+    omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+    omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+
+    return 0;
 }
 
 static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
@@ -288,8 +289,16 @@ static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
 {
     struct omap_aes_dev *dd = data;
 
-    if (lch == dd->dma_lch_out)
-        tasklet_schedule(&dd->task);
+    if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+        pr_err("omap-aes DMA error status: 0x%hx\n", ch_status);
+        dd->err = -EIO;
+        dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+    } else if (lch == dd->dma_lch_in) {
+        return;
+    }
+
+    /* dma_lch_out - completed */
+    tasklet_schedule(&dd->done_task);
 }
 
 static int omap_aes_dma_init(struct omap_aes_dev *dd)
@@ -339,18 +348,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
         goto err_dma_out;
     }
 
-    omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
-                 dd->phys_base + AES_REG_DATA, 0, 4);
-
-    omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-    omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-
-    omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
-                dd->phys_base + AES_REG_DATA, 0, 4);
-
-    omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-    omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-
     return 0;
 
 err_dma_out:
@@ -406,6 +403,11 @@ static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
         if (!count)
             return off;
 
+        /*
+         * buflen and total are AES_BLOCK_SIZE size aligned,
+         * so count should be also aligned
+         */
+
         sg_copy_buf(buf + off, *sg, *offset, count, out);
 
         off += count;
@@ -461,7 +463,9 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
     omap_start_dma(dd->dma_lch_in);
     omap_start_dma(dd->dma_lch_out);
 
-    omap_aes_write_ctrl(dd);
+    /* start DMA or disable idle mode */
+    omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
+                AES_REG_MASK_START);
 
     return 0;
 }
@@ -488,8 +492,10 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
         count = min(dd->total, sg_dma_len(dd->in_sg));
         count = min(count, sg_dma_len(dd->out_sg));
 
-        if (count != dd->total)
+        if (count != dd->total) {
+            pr_err("request length != buffer length\n");
             return -EINVAL;
+        }
 
         pr_debug("fast\n");
 
@@ -525,23 +531,25 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
 
     dd->total -= count;
 
-    err = omap_aes_hw_init(dd);
-
     err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);
+    if (err) {
+        dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+        dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+    }
 
     return err;
 }
 
 static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 {
-    struct omap_aes_ctx *ctx;
+    struct ablkcipher_request *req = dd->req;
 
     pr_debug("err: %d\n", err);
 
-    ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));
+    clk_disable(dd->iclk);
+    dd->flags &= ~FLAGS_BUSY;
 
-    if (!dd->total)
-        dd->req->base.complete(&dd->req->base, err);
+    req->base.complete(&req->base, err);
 }
 
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -553,8 +561,6 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 
     omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
 
-    omap_aes_hw_cleanup(dd);
-
     omap_stop_dma(dd->dma_lch_in);
     omap_stop_dma(dd->dma_lch_out);
 
@@ -574,40 +580,39 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
         }
     }
 
-    if (err || !dd->total)
-        omap_aes_finish_req(dd, err);
-
     return err;
 }
 
-static int omap_aes_handle_req(struct omap_aes_dev *dd)
+static int omap_aes_handle_queue(struct omap_aes_dev *dd,
+                 struct ablkcipher_request *req)
 {
     struct crypto_async_request *async_req, *backlog;
     struct omap_aes_ctx *ctx;
    struct omap_aes_reqctx *rctx;
-    struct ablkcipher_request *req;
     unsigned long flags;
-
-    if (dd->total)
-        goto start;
+    int err, ret = 0;
 
     spin_lock_irqsave(&dd->lock, flags);
+    if (req)
+        ret = ablkcipher_enqueue_request(&dd->queue, req);
+    if (dd->flags & FLAGS_BUSY) {
+        spin_unlock_irqrestore(&dd->lock, flags);
+        return ret;
+    }
     backlog = crypto_get_backlog(&dd->queue);
     async_req = crypto_dequeue_request(&dd->queue);
-    if (!async_req)
-        clear_bit(FLAGS_BUSY, &dd->flags);
+    if (async_req)
+        dd->flags |= FLAGS_BUSY;
     spin_unlock_irqrestore(&dd->lock, flags);
 
     if (!async_req)
-        return 0;
+        return ret;
 
     if (backlog)
         backlog->complete(backlog, -EINPROGRESS);
 
     req = ablkcipher_request_cast(async_req);
 
-    pr_debug("get new req\n");
-
     /* assign new request to device */
     dd->req = req;
     dd->total = req->nbytes;
@@ -621,27 +626,22 @@ static int omap_aes_handle_req(struct omap_aes_dev *dd)
     rctx->mode &= FLAGS_MODE_MASK;
     dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 
-    dd->iv = req->info;
-    if ((dd->flags & FLAGS_CBC) && dd->iv)
-        dd->flags |= FLAGS_NEW_IV;
-    else
-        dd->flags &= ~FLAGS_NEW_IV;
-
+    dd->ctx = ctx;
     ctx->dd = dd;
-    if (dd->ctx != ctx) {
-        /* assign new context to device */
-        dd->ctx = ctx;
-        ctx->flags |= FLAGS_NEW_KEY;
-    }
 
-    if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
-        pr_err("request size is not exact amount of AES blocks\n");
+    err = omap_aes_write_ctrl(dd);
+    if (!err)
+        err = omap_aes_crypt_dma_start(dd);
+    if (err) {
+        /* aes_task will not finish it, so do it here */
+        omap_aes_finish_req(dd, err);
+        tasklet_schedule(&dd->queue_task);
+    }
 
-start:
-    return omap_aes_crypt_dma_start(dd);
+    return ret; /* return ret, which is enqueue return value */
 }
 
-static void omap_aes_task(unsigned long data)
+static void omap_aes_done_task(unsigned long data)
 {
     struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
     int err;
@@ -650,40 +650,50 @@ static void omap_aes_task(unsigned long data)
 
     err = omap_aes_crypt_dma_stop(dd);
 
-    err = omap_aes_handle_req(dd);
+    err = dd->err ? : err;
+
+    if (dd->total && !err) {
+        err = omap_aes_crypt_dma_start(dd);
+        if (!err)
+            return; /* DMA started. Not fininishing. */
+    }
+
+    omap_aes_finish_req(dd, err);
+    omap_aes_handle_queue(dd, NULL);
 
     pr_debug("exit\n");
 }
 
+static void omap_aes_queue_task(unsigned long data)
+{
+    struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+
+    omap_aes_handle_queue(dd, NULL);
+}
+
 static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
     struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
             crypto_ablkcipher_reqtfm(req));
     struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
     struct omap_aes_dev *dd;
-    unsigned long flags;
-    int err;
 
     pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
           !!(mode & FLAGS_ENCRYPT),
           !!(mode & FLAGS_CBC));
 
+    if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+        pr_err("request size is not exact amount of AES blocks\n");
+        return -EINVAL;
+    }
+
     dd = omap_aes_find_dev(ctx);
     if (!dd)
         return -ENODEV;
 
     rctx->mode = mode;
 
-    spin_lock_irqsave(&dd->lock, flags);
-    err = ablkcipher_enqueue_request(&dd->queue, req);
-    spin_unlock_irqrestore(&dd->lock, flags);
-
-    if (!test_and_set_bit(FLAGS_BUSY, &dd->flags))
-        omap_aes_handle_req(dd);
-
-    pr_debug("exit\n");
-
-    return err;
+    return omap_aes_handle_queue(dd, req);
 }
 
 /* ********************** ALG API ************************************ */
@@ -701,7 +711,6 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 
     memcpy(ctx->key, key, keylen);
     ctx->keylen = keylen;
-    ctx->flags |= FLAGS_NEW_KEY;
 
     return 0;
 }
@@ -750,7 +759,7 @@ static struct crypto_alg algs[] = {
     .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
     .cra_blocksize  = AES_BLOCK_SIZE,
     .cra_ctxsize    = sizeof(struct omap_aes_ctx),
-    .cra_alignmask  = 0,
+    .cra_alignmask  = 0,
     .cra_type       = &crypto_ablkcipher_type,
     .cra_module     = THIS_MODULE,
     .cra_init       = omap_aes_cra_init,
@@ -770,7 +779,7 @@ static struct crypto_alg algs[] = {
     .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
     .cra_blocksize  = AES_BLOCK_SIZE,
     .cra_ctxsize    = sizeof(struct omap_aes_ctx),
-    .cra_alignmask  = 0,
+    .cra_alignmask  = 0,
     .cra_type       = &crypto_ablkcipher_type,
     .cra_module     = THIS_MODULE,
     .cra_init       = omap_aes_cra_init,
@@ -849,7 +858,8 @@ static int omap_aes_probe(struct platform_device *pdev)
          (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
     clk_disable(dd->iclk);
 
-    tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd);
+    tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
+    tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
 
     err = omap_aes_dma_init(dd);
     if (err)
@@ -876,7 +886,8 @@ err_algs:
         crypto_unregister_alg(&algs[j]);
     omap_aes_dma_cleanup(dd);
 err_dma:
-    tasklet_kill(&dd->task);
+    tasklet_kill(&dd->done_task);
+    tasklet_kill(&dd->queue_task);
     iounmap(dd->io_base);
 err_io:
     clk_put(dd->iclk);
@@ -903,7 +914,8 @@ static int omap_aes_remove(struct platform_device *pdev)
     for (i = 0; i < ARRAY_SIZE(algs); i++)
         crypto_unregister_alg(&algs[i]);
 
-    tasklet_kill(&dd->task);
+    tasklet_kill(&dd->done_task);
+    tasklet_kill(&dd->queue_task);
     omap_aes_dma_cleanup(dd);
     iounmap(dd->io_base);
     clk_put(dd->iclk);
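The omap-aes rework above replaces the old FLAGS_BUSY test_and_set_bit dance with a single dispatch point, omap_aes_handle_queue(), where enqueueing and the busy check happen under one spinlock, and splits the tasklet into done_task/queue_task. A minimal single-threaded userspace sketch of that dispatch pattern follows; the names are illustrative only, and the real driver's crypto_queue and spinlock are elided:

```c
#include <stdio.h>
#include <stddef.h>

struct request {
	struct request *next;
	int id;
};

struct engine {
	struct request *head, *tail;
	int busy;
};

static void enqueue(struct engine *e, struct request *req)
{
	req->next = NULL;
	if (e->tail)
		e->tail->next = req;
	else
		e->head = req;
	e->tail = req;
}

static struct request *dequeue(struct engine *e)
{
	struct request *req = e->head;

	if (req) {
		e->head = req->next;
		if (!e->head)
			e->tail = NULL;
	}
	return req;
}

/* Mirrors omap_aes_handle_queue(): enqueue, then start only if idle. */
static void handle_queue(struct engine *e, struct request *req)
{
	/* in the driver, everything up to here runs under dd->lock */
	if (req)
		enqueue(e, req);
	if (e->busy)
		return;	/* the done tasklet will dispatch it later */
	req = dequeue(e);
	if (req) {
		e->busy = 1;
		printf("starting request %d\n", req->id);
	}
}

/* Mirrors the done tasklet: finish, drop busy, dispatch the next one. */
static void done_task(struct engine *e)
{
	e->busy = 0;
	handle_queue(e, NULL);
}

int main(void)
{
	struct engine e = { 0 };
	struct request r1 = { .id = 1 }, r2 = { .id = 2 };

	handle_queue(&e, &r1);	/* starts immediately */
	handle_queue(&e, &r2);	/* queued: engine busy */
	done_task(&e);		/* r1 done -> r2 starts */
	done_task(&e);		/* r2 done -> queue empty */
	return 0;
}
```

The point of folding the enqueue into handle_queue() is that a request can no longer slip in between the busy check and the queue insertion, which is exactly the race the old split code permitted.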
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index a081c7c..2e71123 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -72,10 +72,9 @@
 
 #define DEFAULT_TIMEOUT_INTERVAL    HZ
 
-#define FLAGS_FIRST         0x0001
 #define FLAGS_FINUP         0x0002
 #define FLAGS_FINAL         0x0004
-#define FLAGS_FAST          0x0008
+#define FLAGS_SG            0x0008
 #define FLAGS_SHA1          0x0010
 #define FLAGS_DMA_ACTIVE    0x0020
 #define FLAGS_OUTPUT_READY  0x0040
@@ -83,13 +82,17 @@
 #define FLAGS_INIT          0x0100
 #define FLAGS_CPU           0x0200
 #define FLAGS_HMAC          0x0400
-
-/* 3rd byte */
-#define FLAGS_BUSY          16
+#define FLAGS_ERROR         0x0800
+#define FLAGS_BUSY          0x1000
 
 #define OP_UPDATE   1
 #define OP_FINAL    2
 
+#define OMAP_ALIGN_MASK     (sizeof(u32)-1)
+#define OMAP_ALIGNED        __attribute__((aligned(sizeof(u32))))
+
+#define BUFLEN      PAGE_SIZE
+
 struct omap_sham_dev;
 
 struct omap_sham_reqctx {
@@ -97,8 +100,8 @@ struct omap_sham_reqctx {
     unsigned long   flags;
     unsigned long   op;
 
+    u8              digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
     size_t          digcnt;
-    u8              *buffer;
     size_t          bufcnt;
     size_t          buflen;
     dma_addr_t      dma_addr;
@@ -107,6 +110,8 @@ struct omap_sham_reqctx {
     struct scatterlist  *sg;
     unsigned int    offset; /* offset in current sg */
     unsigned int    total;  /* total request */
+
+    u8              buffer[0] OMAP_ALIGNED;
 };
 
 struct omap_sham_hmac_ctx {
@@ -136,6 +141,7 @@ struct omap_sham_dev {
     int             irq;
     struct clk      *iclk;
     spinlock_t      lock;
+    int             err;
     int             dma;
     int             dma_lch;
     struct tasklet_struct   done_task;
@@ -194,53 +200,68 @@ static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
 static void omap_sham_copy_hash(struct ahash_request *req, int out)
 {
     struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+    u32 *hash = (u32 *)ctx->digest;
+    int i;
+
+    /* MD5 is almost unused. So copy sha1 size to reduce code */
+    for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+        if (out)
+            hash[i] = omap_sham_read(ctx->dd,
+                        SHA_REG_DIGEST(i));
+        else
+            omap_sham_write(ctx->dd,
+                    SHA_REG_DIGEST(i), hash[i]);
+    }
+}
+
+static void omap_sham_copy_ready_hash(struct ahash_request *req)
+{
+    struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+    u32 *in = (u32 *)ctx->digest;
     u32 *hash = (u32 *)req->result;
     int i;
 
+    if (!hash)
+        return;
+
     if (likely(ctx->flags & FLAGS_SHA1)) {
         /* SHA1 results are in big endian */
         for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
-            if (out)
-                hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
-                            SHA_REG_DIGEST(i)));
-            else
-                omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
-                        cpu_to_be32(hash[i]));
+            hash[i] = be32_to_cpu(in[i]);
     } else {
         /* MD5 results are in little endian */
         for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
-            if (out)
-                hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
-                            SHA_REG_DIGEST(i)));
-            else
-                omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
-                        cpu_to_le32(hash[i]));
+            hash[i] = le32_to_cpu(in[i]);
     }
 }
 
-static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
-                 int final, int dma)
+static int omap_sham_hw_init(struct omap_sham_dev *dd)
 {
-    struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-    u32 val = length << 5, mask;
+    clk_enable(dd->iclk);
 
-    if (unlikely(!ctx->digcnt)) {
+    if (!(dd->flags & FLAGS_INIT)) {
+        omap_sham_write_mask(dd, SHA_REG_MASK,
+            SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
 
-        clk_enable(dd->iclk);
+        if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+                    SHA_REG_SYSSTATUS_RESETDONE))
+            return -ETIMEDOUT;
 
-        if (!(dd->flags & FLAGS_INIT)) {
-            omap_sham_write_mask(dd, SHA_REG_MASK,
-                SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+        dd->flags |= FLAGS_INIT;
+        dd->err = 0;
+    }
 
-            if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
-                        SHA_REG_SYSSTATUS_RESETDONE))
-                return -ETIMEDOUT;
+    return 0;
+}
 
-            dd->flags |= FLAGS_INIT;
-        }
-    } else {
+static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+                 int final, int dma)
+{
+    struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+    u32 val = length << 5, mask;
+
+    if (likely(ctx->digcnt))
         omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
-    }
 
     omap_sham_write_mask(dd, SHA_REG_MASK,
         SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
@@ -260,29 +281,26 @@ static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
          SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
 
     omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
-
-    return 0;
 }
 
 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
                   size_t length, int final)
 {
     struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-    int err, count, len32;
+    int count, len32;
     const u32 *buffer = (const u32 *)buf;
 
     dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
                         ctx->digcnt, length, final);
 
-    err = omap_sham_write_ctrl(dd, length, final, 0);
-    if (err)
-        return err;
+    omap_sham_write_ctrl(dd, length, final, 0);
+
+    /* should be non-zero before next lines to disable clocks later */
+    ctx->digcnt += length;
 
     if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
         return -ETIMEDOUT;
 
-    ctx->digcnt += length;
-
     if (final)
         ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
 
@@ -298,16 +316,11 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
                   size_t length, int final)
 {
     struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-    int err, len32;
+    int len32;
 
     dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
                         ctx->digcnt, length, final);
 
-    /* flush cache entries related to our page */
-    if (dma_addr == ctx->dma_addr)
-        dma_sync_single_for_device(dd->dev, dma_addr, length,
-                       DMA_TO_DEVICE);
-
     len32 = DIV_ROUND_UP(length, sizeof(u32));
 
     omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
@@ -317,9 +330,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
     omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
                 dma_addr, 0, 0);
 
-    err = omap_sham_write_ctrl(dd, length, final, 1);
-    if (err)
-        return err;
+    omap_sham_write_ctrl(dd, length, final, 1);
 
     ctx->digcnt += length;
 
@@ -371,15 +382,29 @@ static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
     return 0;
 }
 
+static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
+                    struct omap_sham_reqctx *ctx,
+                    size_t length, int final)
+{
+    ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+                       DMA_TO_DEVICE);
+    if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+        dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+        return -EINVAL;
+    }
+
+    ctx->flags &= ~FLAGS_SG;
+
+    /* next call does not fail... so no unmap in the case of error */
+    return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
+}
+
 static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 {
     struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
     unsigned int final;
     size_t count;
 
-    if (!ctx->total)
-        return 0;
-
     omap_sham_append_sg(ctx);
 
     final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
@@ -390,30 +415,68 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
     if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
         count = ctx->bufcnt;
         ctx->bufcnt = 0;
-        return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+        return omap_sham_xmit_dma_map(dd, ctx, count, final);
     }
 
     return 0;
 }
 
-static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+/* Start address alignment */
+#define SG_AA(sg)   (IS_ALIGNED(sg->offset, sizeof(u32)))
+/* SHA1 block size alignment */
+#define SG_SA(sg)   (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
+
+static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 {
     struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-    unsigned int length;
+    unsigned int length, final, tail;
+    struct scatterlist *sg;
 
-    ctx->flags |= FLAGS_FAST;
+    if (!ctx->total)
+        return 0;
+
+    if (ctx->bufcnt || ctx->offset)
+        return omap_sham_update_dma_slow(dd);
+
+    dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
+            ctx->digcnt, ctx->bufcnt, ctx->total);
+
+    sg = ctx->sg;
 
-    length = min(ctx->total, sg_dma_len(ctx->sg));
-    ctx->total = length;
+    if (!SG_AA(sg))
+        return omap_sham_update_dma_slow(dd);
+
+    if (!sg_is_last(sg) && !SG_SA(sg))
+        /* size is not SHA1_BLOCK_SIZE aligned */
+        return omap_sham_update_dma_slow(dd);
+
+    length = min(ctx->total, sg->length);
+
+    if (sg_is_last(sg)) {
+        if (!(ctx->flags & FLAGS_FINUP)) {
+            /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
+            tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
+            /* without finup() we need one block to close hash */
+            if (!tail)
+                tail = SHA1_MD5_BLOCK_SIZE;
+            length -= tail;
+        }
+    }
 
     if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
         dev_err(dd->dev, "dma_map_sg error\n");
         return -EINVAL;
     }
 
+    ctx->flags |= FLAGS_SG;
+
     ctx->total -= length;
+    ctx->offset = length; /* offset where to start slow */
 
-    return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+    final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+
+    /* next call does not fail... so no unmap in the case of error */
+    return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
 }
 
 static int omap_sham_update_cpu(struct omap_sham_dev *dd)
@@ -433,8 +496,17 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
     struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 
     omap_stop_dma(dd->dma_lch);
 
-    if (ctx->flags & FLAGS_FAST)
+    if (ctx->flags & FLAGS_SG) {
         dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+        if (ctx->sg->length == ctx->offset) {
+            ctx->sg = sg_next(ctx->sg);
+            if (ctx->sg)
+                ctx->offset = 0;
+        }
+    } else {
+        dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+                 DMA_TO_DEVICE);
+    }
 
     return 0;
 }
@@ -454,14 +526,7 @@ static void omap_sham_cleanup(struct ahash_request *req)
     spin_unlock_irqrestore(&dd->lock, flags);
 
     if (ctx->digcnt)
-        clk_disable(dd->iclk);
-
-    if (ctx->dma_addr)
-        dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
-                 DMA_TO_DEVICE);
-
-    if (ctx->buffer)
-        free_page((unsigned long)ctx->buffer);
+        omap_sham_copy_ready_hash(req);
 
     dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
 }
@@ -489,8 +554,6 @@ static int omap_sham_init(struct ahash_request *req)
 
     ctx->flags = 0;
 
-    ctx->flags |= FLAGS_FIRST;
-
     dev_dbg(dd->dev, "init: digest size: %d\n",
         crypto_ahash_digestsize(tfm));
 
@@ -499,21 +562,7 @@ static int omap_sham_init(struct ahash_request *req)
 
     ctx->bufcnt = 0;
     ctx->digcnt = 0;
-
-    ctx->buflen = PAGE_SIZE;
-    ctx->buffer = (void *)__get_free_page(
-                (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-                GFP_KERNEL : GFP_ATOMIC);
-    if (!ctx->buffer)
-        return -ENOMEM;
-
-    ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
-                    DMA_TO_DEVICE);
-    if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
-        dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
-        free_page((unsigned long)ctx->buffer);
-        return -EINVAL;
-    }
+    ctx->buflen = BUFLEN;
 
     if (tctx->flags & FLAGS_HMAC) {
         struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -538,10 +587,8 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
 
     if (ctx->flags & FLAGS_CPU)
         err = omap_sham_update_cpu(dd);
-    else if (ctx->flags & FLAGS_FAST)
-        err = omap_sham_update_dma_fast(dd);
     else
-        err = omap_sham_update_dma_slow(dd);
+        err = omap_sham_update_dma_start(dd);
 
     /* wait for dma completion before can take more data */
     dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
@@ -560,15 +607,12 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
         use_dma = 0;
 
     if (use_dma)
-        err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+        err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
     else
        err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
 
     ctx->bufcnt = 0;
 
-    if (err != -EINPROGRESS)
-        omap_sham_cleanup(req);
-
     dev_dbg(dd->dev, "final_req: err: %d\n", err);
 
     return err;
@@ -576,6 +620,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
 
 static int omap_sham_finish_req_hmac(struct ahash_request *req)
 {
+    struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
     struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
     struct omap_sham_hmac_ctx *bctx = tctx->base;
     int bs = crypto_shash_blocksize(bctx->shash);
@@ -590,48 +635,56 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req)
 
     return crypto_shash_init(&desc.shash) ?:
            crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
-           crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+           crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest);
 }
 
 static void omap_sham_finish_req(struct ahash_request *req, int err)
 {
     struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+    struct omap_sham_dev *dd = ctx->dd;
 
     if (!err) {
         omap_sham_copy_hash(ctx->dd->req, 1);
         if (ctx->flags & FLAGS_HMAC)
             err = omap_sham_finish_req_hmac(req);
+    } else {
+        ctx->flags |= FLAGS_ERROR;
     }
 
-    if (ctx->flags & FLAGS_FINAL)
+    if ((ctx->flags & FLAGS_FINAL) || err)
         omap_sham_cleanup(req);
 
-    clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+    clk_disable(dd->iclk);
+    dd->flags &= ~FLAGS_BUSY;
 
     if (req->base.complete)
         req->base.complete(&req->base, err);
 }
 
-static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+static int omap_sham_handle_queue(struct omap_sham_dev *dd,
+                  struct ahash_request *req)
 {
     struct crypto_async_request *async_req, *backlog;
     struct omap_sham_reqctx *ctx;
-    struct ahash_request *req, *prev_req;
+    struct ahash_request *prev_req;
     unsigned long flags;
-    int err = 0;
-
-    if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
-        return 0;
+    int err = 0, ret = 0;
 
     spin_lock_irqsave(&dd->lock, flags);
+    if (req)
+        ret = ahash_enqueue_request(&dd->queue, req);
+    if (dd->flags & FLAGS_BUSY) {
+        spin_unlock_irqrestore(&dd->lock, flags);
+        return ret;
+    }
     backlog = crypto_get_backlog(&dd->queue);
     async_req = crypto_dequeue_request(&dd->queue);
-    if (!async_req)
-        clear_bit(FLAGS_BUSY, &dd->flags);
+    if (async_req)
+        dd->flags |= FLAGS_BUSY;
     spin_unlock_irqrestore(&dd->lock, flags);
 
     if (!async_req)
-        return 0;
+        return ret;
 
     if (backlog)
         backlog->complete(backlog, -EINPROGRESS);
@@ -646,7 +699,22 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
     dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
                         ctx->op, req->nbytes);
 
-    if (req != prev_req && ctx->digcnt)
+    err = omap_sham_hw_init(dd);
+    if (err)
+        goto err1;
+
+    omap_set_dma_dest_params(dd->dma_lch, 0,
+            OMAP_DMA_AMODE_CONSTANT,
+            dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+    omap_set_dma_dest_burst_mode(dd->dma_lch,
+            OMAP_DMA_DATA_BURST_16);
+
+    omap_set_dma_src_burst_mode(dd->dma_lch,
+            OMAP_DMA_DATA_BURST_4);
+
+    if (ctx->digcnt)
         /* request has changed - restore hash */
         omap_sham_copy_hash(req, 0);
 
@@ -658,7 +726,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
     } else if (ctx->op == OP_FINAL) {
         err = omap_sham_final_req(dd);
     }
-
+err1:
     if (err != -EINPROGRESS) {
         /* done_task will not finish it, so do it here */
         omap_sham_finish_req(req, err);
@@ -667,7 +735,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd)
 
     dev_dbg(dd->dev, "exit, err: %d\n", err);
 
-    return err;
+    return ret;
 }
 
 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
@@ -675,18 +743,10 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
     struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
     struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
     struct omap_sham_dev *dd = tctx->dd;
-    unsigned long flags;
-    int err;
 
     ctx->op = op;
 
-    spin_lock_irqsave(&dd->lock, flags);
-    err = ahash_enqueue_request(&dd->queue, req);
-    spin_unlock_irqrestore(&dd->lock, flags);
-
-    omap_sham_handle_queue(dd);
-
-    return err;
+    return omap_sham_handle_queue(dd, req);
 }
 
 static int omap_sham_update(struct ahash_request *req)
@@ -709,21 +769,13 @@ static int omap_sham_update(struct ahash_request *req)
              */
            omap_sham_append_sg(ctx);
             return 0;
-        } else if (ctx->bufcnt + ctx->total <= 64) {
+        } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
+            /*
+             * faster to use CPU for short transfers
+             */
             ctx->flags |= FLAGS_CPU;
-        } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
-            /* may be can use faster functions */
-            int aligned = IS_ALIGNED((u32)ctx->sg->offset,
-                        sizeof(u32));
-
-            if (aligned && (ctx->flags & FLAGS_FIRST))
-                /* digest: first and final */
-                ctx->flags |= FLAGS_FAST;
-
-            ctx->flags &= ~FLAGS_FIRST;
         }
-    } else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
-        /* if not finaup -> not fast */
+    } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
         omap_sham_append_sg(ctx);
         return 0;
     }
@@ -761,12 +813,14 @@ static int omap_sham_final(struct ahash_request *req)
 
     ctx->flags |= FLAGS_FINUP;
 
-    /* OMAP HW accel works only with buffers >= 9 */
-    /* HMAC is always >= 9 because of ipad */
-    if ((ctx->digcnt + ctx->bufcnt) < 9)
-        err = omap_sham_final_shash(req);
-    else if (ctx->bufcnt)
-        return omap_sham_enqueue(req, OP_FINAL);
+    if (!(ctx->flags & FLAGS_ERROR)) {
+        /* OMAP HW accel works only with buffers >= 9 */
+        /* HMAC is always >= 9 because of ipad */
+        if ((ctx->digcnt + ctx->bufcnt) < 9)
+            err = omap_sham_final_shash(req);
+        else if (ctx->bufcnt)
+            return omap_sham_enqueue(req, OP_FINAL);
+    }
 
     omap_sham_cleanup(req);
 
@@ -836,6 +890,8 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
     struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
     const char *alg_name = crypto_tfm_alg_name(tfm);
 
+    pr_info("enter\n");
+
     /* Allocate a fallback and abort if it failed. */
     tctx->fallback = crypto_alloc_shash(alg_name, 0,
                         CRYPTO_ALG_NEED_FALLBACK);
@@ -846,7 +902,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
     }
 
     crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-                 sizeof(struct omap_sham_reqctx));
+                 sizeof(struct omap_sham_reqctx) + BUFLEN);
 
     if (alg_base) {
         struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -932,7 +988,7 @@ static struct ahash_alg algs[] = {
                         CRYPTO_ALG_NEED_FALLBACK,
         .cra_blocksize  = SHA1_BLOCK_SIZE,
         .cra_ctxsize    = sizeof(struct omap_sham_ctx),
-        .cra_alignmask  = 0,
+        .cra_alignmask  = OMAP_ALIGN_MASK,
         .cra_module     = THIS_MODULE,
         .cra_init       = omap_sham_cra_init,
         .cra_exit       = omap_sham_cra_exit,
@@ -956,7 +1012,7 @@ static struct ahash_alg algs[] = {
         .cra_blocksize  = SHA1_BLOCK_SIZE,
         .cra_ctxsize    = sizeof(struct omap_sham_ctx) +
                     sizeof(struct omap_sham_hmac_ctx),
-        .cra_alignmask  = 0,
+        .cra_alignmask  = OMAP_ALIGN_MASK,
         .cra_module     = THIS_MODULE,
         .cra_init       = omap_sham_cra_sha1_init,
         .cra_exit       = omap_sham_cra_exit,
@@ -980,7 +1036,7 @@ static struct ahash_alg algs[] = {
         .cra_blocksize  = SHA1_BLOCK_SIZE,
         .cra_ctxsize    = sizeof(struct omap_sham_ctx) +
                     sizeof(struct omap_sham_hmac_ctx),
-        .cra_alignmask  = 0,
+        .cra_alignmask  = OMAP_ALIGN_MASK,
         .cra_module     = THIS_MODULE,
         .cra_init       = omap_sham_cra_md5_init,
         .cra_exit       = omap_sham_cra_exit,
@@ -993,7 +1049,7 @@ static void omap_sham_done_task(unsigned long data)
     struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
     struct ahash_request *req = dd->req;
     struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-    int ready = 1;
+    int ready = 0, err = 0;
 
     if (ctx->flags & FLAGS_OUTPUT_READY) {
         ctx->flags &= ~FLAGS_OUTPUT_READY;
@@ -1003,15 +1059,18 @@ static void omap_sham_done_task(unsigned long data)
     if (dd->flags & FLAGS_DMA_ACTIVE) {
         dd->flags &= ~FLAGS_DMA_ACTIVE;
         omap_sham_update_dma_stop(dd);
-        omap_sham_update_dma_slow(dd);
+        if (!dd->err)
+            err = omap_sham_update_dma_start(dd);
     }
 
-    if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
-        dev_dbg(dd->dev, "update done\n");
+    err = dd->err ? : err;
+
+    if (err != -EINPROGRESS && (ready || err)) {
+        dev_dbg(dd->dev, "update done: err: %d\n", err);
         /* finish curent request */
-        omap_sham_finish_req(req, 0);
+        omap_sham_finish_req(req, err);
         /* start new request */
-        omap_sham_handle_queue(dd);
+        omap_sham_handle_queue(dd, NULL);
     }
 }
 
@@ -1019,7 +1078,7 @@ static void omap_sham_queue_task(unsigned long data)
 {
     struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
 
-    omap_sham_handle_queue(dd);
+    omap_sham_handle_queue(dd, NULL);
 }
 
 static irqreturn_t omap_sham_irq(int irq, void *dev_id)
@@ -1041,6 +1100,7 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
     omap_sham_read(dd, SHA_REG_CTRL);
 
     ctx->flags |= FLAGS_OUTPUT_READY;
+    dd->err = 0;
     tasklet_schedule(&dd->done_task);
 
     return IRQ_HANDLED;
@@ -1050,8 +1110,13 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
 {
     struct omap_sham_dev *dd = data;
 
-    if (likely(lch == dd->dma_lch))
-        tasklet_schedule(&dd->done_task);
+    if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+        pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
+        dd->err = -EIO;
+        dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+    }
+
+    tasklet_schedule(&dd->done_task);
 }
 
 static int omap_sham_dma_init(struct omap_sham_dev *dd)
@@ -1066,15 +1131,6 @@ static int omap_sham_dma_init(struct omap_sham_dev *dd)
         dev_err(dd->dev, "Unable to request DMA channel\n");
         return err;
     }
-    omap_set_dma_dest_params(dd->dma_lch, 0,
-            OMAP_DMA_AMODE_CONSTANT,
-            dd->phys_base + SHA_REG_DIN(0), 0, 16);
-
-    omap_set_dma_dest_burst_mode(dd->dma_lch,
-            OMAP_DMA_DATA_BURST_16);
-
-    omap_set_dma_src_burst_mode(dd->dma_lch,
-            OMAP_DMA_DATA_BURST_4);
 
     return 0;
 }
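Rather than allocating and DMA-mapping a page per request, the omap-sham rework above carries its bounce buffer as a u32-aligned flexible tail of the request context, sized by passing sizeof(struct omap_sham_reqctx) + BUFLEN to crypto_ahash_set_reqsize(). A standalone sketch of that layout under stated assumptions: the names are illustrative, and the kernel's buffer[0] idiom is spelled as a C99 flexible array member here:

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define BUFLEN 4096	/* the driver uses PAGE_SIZE */
#define OMAP_ALIGNED __attribute__((aligned(sizeof(uint32_t))))

/* Illustrative stand-in for struct omap_sham_reqctx. */
struct sham_reqctx {
	unsigned long flags;
	uint8_t digest[20] OMAP_ALIGNED;	/* SHA1_DIGEST_SIZE */
	size_t bufcnt;
	size_t buflen;
	uint8_t buffer[] OMAP_ALIGNED;		/* flexible tail */
};

int main(void)
{
	/* one allocation covers the context plus its bounce buffer */
	struct sham_reqctx *ctx = calloc(1, sizeof(*ctx) + BUFLEN);

	if (!ctx)
		return 1;
	ctx->buflen = BUFLEN;
	printf("ctx %p, buffer %p (u32-aligned: %s)\n",
	       (void *)ctx, (void *)ctx->buffer,
	       ((uintptr_t)ctx->buffer % sizeof(uint32_t)) ? "no" : "yes");
	free(ctx);
	return 0;
}
```

The payoff visible in the diff is that omap_sham_init() loses its __get_free_page()/dma_map_single() error paths, and the mapping moves to omap_sham_xmit_dma_map(), right before the data is actually sent.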
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 8a515ba..db33d30 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -9,6 +9,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/padlock.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -21,7 +22,6 @@
 #include <asm/byteorder.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
-#include "padlock.h"
 
 /*
  * Number of data blocks actually fetched for each xcrypt insn.
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index d3a27e0..adf075b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -13,6 +13,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/padlock.h>
 #include <crypto/sha.h>
 #include <linux/err.h>
 #include <linux/module.h>
@@ -22,13 +23,6 @@
 #include <linux/kernel.h>
 #include <linux/scatterlist.h>
 #include <asm/i387.h>
-#include "padlock.h"
-
-#ifdef CONFIG_64BIT
-#define STACK_ALIGN 16
-#else
-#define STACK_ALIGN 4
-#endif
 
 struct padlock_sha_desc {
     struct shash_desc fallback;
diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h
deleted file mode 100644
index b728e45..0000000
--- a/drivers/crypto/padlock.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Driver for VIA PadLock
- *
- * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#ifndef _CRYPTO_PADLOCK_H
-#define _CRYPTO_PADLOCK_H
-
-#define PADLOCK_ALIGNMENT 16
-
-#define PFX "padlock: "
-
-#define PADLOCK_CRA_PRIORITY 300
-#define PADLOCK_COMPOSITE_PRIORITY 400
-
-#endif /* _CRYPTO_PADLOCK_H */
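Moving padlock.h to include/crypto makes PADLOCK_ALIGNMENT and STACK_ALIGN visible to via-rng, which is what lets via_rng_data_present() in the first hunk over-allocate a stack buffer and align the xstore destination inside it instead of scribbling over rng->priv. A userspace sketch of the same trick, with PTR_ALIGN re-implemented locally for illustration (in the kernel it comes from <linux/kernel.h>):

```c
#include <stdio.h>
#include <stdint.h>

#define PADLOCK_ALIGNMENT 16
#define STACK_ALIGN 4	/* 32-bit kernels; 16 under CONFIG_64BIT */

/* local stand-ins for the kernel's alignment helpers */
#define ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define PTR_ALIGN(p, a)		((void *)ALIGN_MASK((uintptr_t)(p), \
						    (uintptr_t)(a) - 1))

int main(void)
{
	/*
	 * Worst case the compiler only guarantees STACK_ALIGN, so the
	 * buffer is padded by (PADLOCK_ALIGNMENT - STACK_ALIGN) bytes to
	 * ensure a 16-byte-aligned, 16-byte-long window fits inside it.
	 */
	char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN]
		__attribute__((aligned(STACK_ALIGN)));
	uint32_t *datum = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	printf("buf=%p datum=%p aligned=%s\n", (void *)buf, (void *)datum,
	       ((uintptr_t)datum % PADLOCK_ALIGNMENT) ? "no" : "yes");
	return 0;
}
```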