author    | Paul Mundt <lethal@linux-sh.org> | 2010-05-24 08:52:55 +0900
committer | Paul Mundt <lethal@linux-sh.org> | 2010-05-24 08:52:55 +0900
commit    | 1f782fee18b39b9ad438ebbd82c2915a16c879ee (patch)
tree      | f292930065e6c860714c134790ab8882680ac739 /crypto
parent    | 8eda2f21ed9c936a54fd7bc16cbfa5ee656635c2 (diff)
parent    | f4b87dee923342505e1ddba8d34ce9de33e75050 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/Kconfig             |   2
-rw-r--r-- | crypto/ablkcipher.c        | 277
-rw-r--r-- | crypto/algapi.c            |   2
-rw-r--r-- | crypto/async_tx/async_tx.c |  46
-rw-r--r-- | crypto/authenc.c           |   8
-rw-r--r-- | crypto/internal.h          |   2
-rw-r--r-- | crypto/pcrypt.c            |  11
-rw-r--r-- | crypto/scatterwalk.c       |   2
-rw-r--r-- | crypto/shash.c             |   2
-rw-r--r-- | crypto/tcrypt.c            | 343
-rw-r--r-- | crypto/tcrypt.h            |  29
-rw-r--r-- | crypto/testmgr.c           |  66
-rw-r--r-- | crypto/testmgr.h           |  64
-rw-r--r-- | crypto/vmac.c              |  75
14 files changed, 821 insertions, 108 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 403857a..9d9434f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -28,7 +28,7 @@ config CRYPTO_FIPS
 	  This options enables the fips boot option which is
 	  required if you want to system to operate in a FIPS 200
 	  certification. You should say no unless you know what
-	  this is. Note that CRYPTO_ANSI_CPRNG is requred if this
+	  this is. Note that CRYPTO_ANSI_CPRNG is required if this
 	  option is selected
 
 config CRYPTO_ALGAPI
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index fe980da..98a6610 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -24,10 +24,287 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 
+#include <crypto/scatterwalk.h>
+
 #include "internal.h"
 
 static const char *skcipher_default_geniv __read_mostly;
 
+struct ablkcipher_buffer {
+	struct list_head	entry;
+	struct scatter_walk	dst;
+	unsigned int		len;
+	void			*data;
+};
+
+enum {
+	ABLKCIPHER_WALK_SLOW = 1 << 0,
+};
+
+static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
+{
+	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
+}
+
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+	struct ablkcipher_buffer *p, *tmp;
+
+	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
+		ablkcipher_buffer_write(p);
+		list_del(&p->entry);
+		kfree(p);
+	}
+}
+EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
+
+static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
+					  struct ablkcipher_buffer *p)
+{
+	p->dst = walk->out;
+	list_add_tail(&p->entry, &walk->buffers);
+}
+
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
+static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
+{
+	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+	return max(start, end_page);
+}
+
+static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+						unsigned int bsize)
+{
+	unsigned int n = bsize;
+
+	for (;;) {
+		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
+
+		if (len_this_page > n)
+			len_this_page = n;
+		scatterwalk_advance(&walk->out, n);
+		if (n == len_this_page)
+			break;
+		n -= len_this_page;
+		scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
+	}
+
+	return bsize;
+}
+
+static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
+						unsigned int n)
+{
+	scatterwalk_advance(&walk->in, n);
+	scatterwalk_advance(&walk->out, n);
+
+	return n;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+				struct ablkcipher_walk *walk);
+
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk, int err)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned int nbytes = 0;
+
+	if (likely(err >= 0)) {
+		unsigned int n = walk->nbytes - err;
+
+		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
+			n = ablkcipher_done_fast(walk, n);
+		else if (WARN_ON(err)) {
+			err = -EINVAL;
+			goto err;
+		} else
+			n = ablkcipher_done_slow(walk, n);
+
+		nbytes = walk->total - n;
+		err = 0;
+	}
+
+	scatterwalk_done(&walk->in, 0, nbytes);
+	scatterwalk_done(&walk->out, 1, nbytes);
+
+err:
+	walk->total = nbytes;
+	walk->nbytes = nbytes;
+
+	if (nbytes) {
+		crypto_yield(req->base.flags);
+		return ablkcipher_walk_next(req, walk);
+	}
+
+	if (walk->iv != req->info)
+		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
+	if (walk->iv_buffer)
+		kfree(walk->iv_buffer);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
+
+static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
+				       struct ablkcipher_walk *walk,
+				       unsigned int bsize,
+				       unsigned int alignmask,
+				       void **src_p, void **dst_p)
+{
+	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
+	struct ablkcipher_buffer *p;
+	void *src, *dst, *base;
+	unsigned int n;
+
+	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
+	n += (aligned_bsize * 3 - (alignmask + 1) +
+	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
+
+	p = kmalloc(n, GFP_ATOMIC);
+	if (!p)
+		ablkcipher_walk_done(req, walk, -ENOMEM);
+
+	base = p + 1;
+
+	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
+	src = dst = ablkcipher_get_spot(dst, bsize);
+
+	p->len = bsize;
+	p->data = dst;
+
+	scatterwalk_copychunks(src, &walk->in, bsize, 0);
+
+	ablkcipher_queue_write(walk, p);
+
+	walk->nbytes = bsize;
+	walk->flags |= ABLKCIPHER_WALK_SLOW;
+
+	*src_p = src;
+	*dst_p = dst;
+
+	return 0;
+}
+
+static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
+				     struct crypto_tfm *tfm,
+				     unsigned int alignmask)
+{
+	unsigned bs = walk->blocksize;
+	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
+	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+			    (alignmask + 1);
+	u8 *iv;
+
+	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
+	if (!walk->iv_buffer)
+		return -ENOMEM;
+
+	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
+	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+	iv = ablkcipher_get_spot(iv, ivsize);
+
+	walk->iv = memcpy(iv, walk->iv, ivsize);
+	return 0;
+}
+
+static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
+				       struct ablkcipher_walk *walk)
+{
+	walk->src.page = scatterwalk_page(&walk->in);
+	walk->src.offset = offset_in_page(walk->in.offset);
+	walk->dst.page = scatterwalk_page(&walk->out);
+	walk->dst.offset = offset_in_page(walk->out.offset);
+
+	return 0;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+				struct ablkcipher_walk *walk)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned int alignmask, bsize, n;
+	void *src, *dst;
+	int err;
+
+	alignmask = crypto_tfm_alg_alignmask(tfm);
+	n = walk->total;
+	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
+		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+		return ablkcipher_walk_done(req, walk, -EINVAL);
+	}
+
+	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
+	src = dst = NULL;
+
+	bsize = min(walk->blocksize, n);
+	n = scatterwalk_clamp(&walk->in, n);
+	n = scatterwalk_clamp(&walk->out, n);
+
+	if (n < bsize ||
+	    !scatterwalk_aligned(&walk->in, alignmask) ||
+	    !scatterwalk_aligned(&walk->out, alignmask)) {
+		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
+					   &src, &dst);
+		goto set_phys_lowmem;
+	}
+
+	walk->nbytes = n;
+
+	return ablkcipher_next_fast(req, walk);
+
+set_phys_lowmem:
+	if (err >= 0) {
+		walk->src.page = virt_to_page(src);
+		walk->dst.page = virt_to_page(dst);
+		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
+		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
+	}
+
+	return err;
+}
+
+static int ablkcipher_walk_first(struct ablkcipher_request *req,
+				 struct ablkcipher_walk *walk)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned int alignmask;
+
+	alignmask = crypto_tfm_alg_alignmask(tfm);
+	if (WARN_ON_ONCE(in_irq()))
+		return -EDEADLK;
+
+	walk->nbytes = walk->total;
+	if
(unlikely(!walk->total)) + return 0; + + walk->iv_buffer = NULL; + walk->iv = req->info; + if (unlikely(((unsigned long)walk->iv & alignmask))) { + int err = ablkcipher_copy_iv(walk, tfm, alignmask); + if (err) + return err; + } + + scatterwalk_start(&walk->in, walk->in.sg); + scatterwalk_start(&walk->out, walk->out.sg); + + return ablkcipher_walk_next(req, walk); +} + +int ablkcipher_walk_phys(struct ablkcipher_request *req, + struct ablkcipher_walk *walk) +{ + walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm); + return ablkcipher_walk_first(req, walk); +} +EXPORT_SYMBOL_GPL(ablkcipher_walk_phys); + static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { diff --git a/crypto/algapi.c b/crypto/algapi.c index 76fae27..c3cf1a6 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -544,7 +544,7 @@ int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, { int err = -EINVAL; - if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset) + if ((alg->cra_flags ^ frontend->type) & frontend->maskset) goto out; spawn->frontend = frontend; diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index f9cdf04..7f2c00a 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -81,18 +81,13 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, struct dma_device *device = chan->device; struct dma_async_tx_descriptor *intr_tx = (void *) ~0; - #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH - BUG(); - #endif - /* first check to see if we can still append to depend_tx */ - spin_lock_bh(&depend_tx->lock); - if (depend_tx->parent && depend_tx->chan == tx->chan) { - tx->parent = depend_tx; - depend_tx->next = tx; + txd_lock(depend_tx); + if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) { + txd_chain(depend_tx, tx); intr_tx = NULL; } - spin_unlock_bh(&depend_tx->lock); + txd_unlock(depend_tx); /* attached dependency, flush the parent channel */ if (!intr_tx) { @@ -111,24 +106,22 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, if (intr_tx) { intr_tx->callback = NULL; intr_tx->callback_param = NULL; - tx->parent = intr_tx; - /* safe to set ->next outside the lock since we know we are + /* safe to chain outside the lock since we know we are * not submitted yet */ - intr_tx->next = tx; + txd_chain(intr_tx, tx); /* check if we need to append */ - spin_lock_bh(&depend_tx->lock); - if (depend_tx->parent) { - intr_tx->parent = depend_tx; - depend_tx->next = intr_tx; + txd_lock(depend_tx); + if (txd_parent(depend_tx)) { + txd_chain(depend_tx, intr_tx); async_tx_ack(intr_tx); intr_tx = NULL; } - spin_unlock_bh(&depend_tx->lock); + txd_unlock(depend_tx); if (intr_tx) { - intr_tx->parent = NULL; + txd_clear_parent(intr_tx); intr_tx->tx_submit(intr_tx); async_tx_ack(intr_tx); } @@ -176,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, * 2/ dependencies are 1:1 i.e. 
two transactions can * not depend on the same parent */ - BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next || - tx->parent); + BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) || + txd_parent(tx)); /* the lock prevents async_tx_run_dependencies from missing * the setting of ->next when ->parent != NULL */ - spin_lock_bh(&depend_tx->lock); - if (depend_tx->parent) { + txd_lock(depend_tx); + if (txd_parent(depend_tx)) { /* we have a parent so we can not submit directly * if we are staying on the same channel: append * else: channel switch */ if (depend_tx->chan == chan) { - tx->parent = depend_tx; - depend_tx->next = tx; + txd_chain(depend_tx, tx); s = ASYNC_TX_SUBMITTED; } else s = ASYNC_TX_CHANNEL_SWITCH; @@ -203,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, else s = ASYNC_TX_CHANNEL_SWITCH; } - spin_unlock_bh(&depend_tx->lock); + txd_unlock(depend_tx); switch (s) { case ASYNC_TX_SUBMITTED: @@ -212,12 +204,12 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, async_tx_channel_switch(depend_tx, tx); break; case ASYNC_TX_DIRECT_SUBMIT: - tx->parent = NULL; + txd_clear_parent(tx); tx->tx_submit(tx); break; } } else { - tx->parent = NULL; + txd_clear_parent(tx); tx->tx_submit(tx); } diff --git a/crypto/authenc.c b/crypto/authenc.c index 05eb32e..b9884ee 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -181,6 +181,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); + unsigned int cryptlen = req->cryptlen; if (err) goto out; @@ -196,6 +197,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, goto out; authsize = crypto_aead_authsize(authenc); + cryptlen -= authsize; ihash = ahreq->result + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); @@ -209,7 +211,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, ablkcipher_request_set_callback(abreq, aead_request_flags(req), req->base.complete, req->base.data); ablkcipher_request_set_crypt(abreq, req->src, req->dst, - req->cryptlen, req->iv); + cryptlen, req->iv); err = crypto_ablkcipher_decrypt(abreq); @@ -228,11 +230,13 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq, struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); + unsigned int cryptlen = req->cryptlen; if (err) goto out; authsize = crypto_aead_authsize(authenc); + cryptlen -= authsize; ihash = ahreq->result + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); @@ -246,7 +250,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq, ablkcipher_request_set_callback(abreq, aead_request_flags(req), req->base.complete, req->base.data); ablkcipher_request_set_crypt(abreq, req->src, req->dst, - req->cryptlen, req->iv); + cryptlen, req->iv); err = crypto_ablkcipher_decrypt(abreq); diff --git a/crypto/internal.h b/crypto/internal.h index 2d22636..d4384b0 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -6,7 +6,7 @@ * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the 
Free - * Software Foundation; either version 2 of the License, or (at your option) + * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 8020124..247178c 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -315,16 +315,13 @@ out_free_inst: goto out; } -static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb) +static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb, + u32 type, u32 mask) { struct crypto_instance *inst; struct crypto_alg *alg; - struct crypto_attr_type *algt; - - algt = crypto_get_attr_type(tb); - alg = crypto_get_attr_alg(tb, algt->type, - (algt->mask & CRYPTO_ALG_TYPE_MASK)); + alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK)); if (IS_ERR(alg)) return ERR_CAST(alg); @@ -365,7 +362,7 @@ static struct crypto_instance *pcrypt_alloc(struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: - return pcrypt_alloc_aead(tb); + return pcrypt_alloc_aead(tb, algt->type, algt->mask); } return ERR_PTR(-EINVAL); diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 3de89a4..41e529a 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -68,7 +68,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, void scatterwalk_done(struct scatter_walk *walk, int out, int more) { - if (!offset_in_page(walk->offset) || !more) + if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more) scatterwalk_pagedone(walk, out, more); } EXPORT_SYMBOL_GPL(scatterwalk_done); diff --git a/crypto/shash.c b/crypto/shash.c index 91f7b9d..22fd943 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -37,7 +37,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, u8 *buffer, *alignbuffer; int err; - absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1)); + absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); buffer = kmalloc(absize, GFP_KERNEL); if (!buffer) return -ENOMEM; diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index a351599..3ca68f9 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -394,6 +394,17 @@ out: return 0; } +static void test_hash_sg_init(struct scatterlist *sg) +{ + int i; + + sg_init_table(sg, TVMEMSIZE); + for (i = 0; i < TVMEMSIZE; i++) { + sg_set_buf(sg + i, tvmem[i], PAGE_SIZE); + memset(tvmem[i], 0xff, PAGE_SIZE); + } +} + static void test_hash_speed(const char *algo, unsigned int sec, struct hash_speed *speed) { @@ -423,12 +434,7 @@ static void test_hash_speed(const char *algo, unsigned int sec, goto out; } - sg_init_table(sg, TVMEMSIZE); - for (i = 0; i < TVMEMSIZE; i++) { - sg_set_buf(sg + i, tvmem[i], PAGE_SIZE); - memset(tvmem[i], 0xff, PAGE_SIZE); - } - + test_hash_sg_init(sg); for (i = 0; speed[i].blen != 0; i++) { if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { printk(KERN_ERR @@ -437,6 +443,9 @@ static void test_hash_speed(const char *algo, unsigned int sec, goto out; } + if (speed[i].klen) + crypto_hash_setkey(tfm, tvmem[0], speed[i].klen); + printk(KERN_INFO "test%3u " "(%5u byte blocks,%5u bytes per update,%4u updates): ", i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); @@ -458,6 +467,250 @@ out: crypto_free_hash(tfm); } +struct tcrypt_result { + struct completion completion; + int err; +}; + +static void tcrypt_complete(struct crypto_async_request *req, int err) +{ + struct tcrypt_result *res = req->data; + + if (err == -EINPROGRESS) + return; + + res->err = err; + complete(&res->completion); +} + 
+static inline int do_one_ahash_op(struct ahash_request *req, int ret) +{ + if (ret == -EINPROGRESS || ret == -EBUSY) { + struct tcrypt_result *tr = req->base.data; + + ret = wait_for_completion_interruptible(&tr->completion); + if (!ret) + ret = tr->err; + INIT_COMPLETION(tr->completion); + } + return ret; +} + +static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, + char *out, int sec) +{ + unsigned long start, end; + int bcount; + int ret; + + for (start = jiffies, end = start + sec * HZ, bcount = 0; + time_before(jiffies, end); bcount++) { + ret = do_one_ahash_op(req, crypto_ahash_digest(req)); + if (ret) + return ret; + } + + printk("%6u opers/sec, %9lu bytes/sec\n", + bcount / sec, ((long)bcount * blen) / sec); + + return 0; +} + +static int test_ahash_jiffies(struct ahash_request *req, int blen, + int plen, char *out, int sec) +{ + unsigned long start, end; + int bcount, pcount; + int ret; + + if (plen == blen) + return test_ahash_jiffies_digest(req, blen, out, sec); + + for (start = jiffies, end = start + sec * HZ, bcount = 0; + time_before(jiffies, end); bcount++) { + ret = crypto_ahash_init(req); + if (ret) + return ret; + for (pcount = 0; pcount < blen; pcount += plen) { + ret = do_one_ahash_op(req, crypto_ahash_update(req)); + if (ret) + return ret; + } + /* we assume there is enough space in 'out' for the result */ + ret = do_one_ahash_op(req, crypto_ahash_final(req)); + if (ret) + return ret; + } + + pr_cont("%6u opers/sec, %9lu bytes/sec\n", + bcount / sec, ((long)bcount * blen) / sec); + + return 0; +} + +static int test_ahash_cycles_digest(struct ahash_request *req, int blen, + char *out) +{ + unsigned long cycles = 0; + int ret, i; + + /* Warm-up run. */ + for (i = 0; i < 4; i++) { + ret = do_one_ahash_op(req, crypto_ahash_digest(req)); + if (ret) + goto out; + } + + /* The real thing. */ + for (i = 0; i < 8; i++) { + cycles_t start, end; + + start = get_cycles(); + + ret = do_one_ahash_op(req, crypto_ahash_digest(req)); + if (ret) + goto out; + + end = get_cycles(); + + cycles += end - start; + } + +out: + if (ret) + return ret; + + pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", + cycles / 8, cycles / (8 * blen)); + + return 0; +} + +static int test_ahash_cycles(struct ahash_request *req, int blen, + int plen, char *out) +{ + unsigned long cycles = 0; + int i, pcount, ret; + + if (plen == blen) + return test_ahash_cycles_digest(req, blen, out); + + /* Warm-up run. */ + for (i = 0; i < 4; i++) { + ret = crypto_ahash_init(req); + if (ret) + goto out; + for (pcount = 0; pcount < blen; pcount += plen) { + ret = do_one_ahash_op(req, crypto_ahash_update(req)); + if (ret) + goto out; + } + ret = do_one_ahash_op(req, crypto_ahash_final(req)); + if (ret) + goto out; + } + + /* The real thing. 
*/ + for (i = 0; i < 8; i++) { + cycles_t start, end; + + start = get_cycles(); + + ret = crypto_ahash_init(req); + if (ret) + goto out; + for (pcount = 0; pcount < blen; pcount += plen) { + ret = do_one_ahash_op(req, crypto_ahash_update(req)); + if (ret) + goto out; + } + ret = do_one_ahash_op(req, crypto_ahash_final(req)); + if (ret) + goto out; + + end = get_cycles(); + + cycles += end - start; + } + +out: + if (ret) + return ret; + + pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", + cycles / 8, cycles / (8 * blen)); + + return 0; +} + +static void test_ahash_speed(const char *algo, unsigned int sec, + struct hash_speed *speed) +{ + struct scatterlist sg[TVMEMSIZE]; + struct tcrypt_result tresult; + struct ahash_request *req; + struct crypto_ahash *tfm; + static char output[1024]; + int i, ret; + + printk(KERN_INFO "\ntesting speed of async %s\n", algo); + + tfm = crypto_alloc_ahash(algo, 0, 0); + if (IS_ERR(tfm)) { + pr_err("failed to load transform for %s: %ld\n", + algo, PTR_ERR(tfm)); + return; + } + + if (crypto_ahash_digestsize(tfm) > sizeof(output)) { + pr_err("digestsize(%u) > outputbuffer(%zu)\n", + crypto_ahash_digestsize(tfm), sizeof(output)); + goto out; + } + + test_hash_sg_init(sg); + req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req) { + pr_err("ahash request allocation failure\n"); + goto out; + } + + init_completion(&tresult.completion); + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + tcrypt_complete, &tresult); + + for (i = 0; speed[i].blen != 0; i++) { + if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { + pr_err("template (%u) too big for tvmem (%lu)\n", + speed[i].blen, TVMEMSIZE * PAGE_SIZE); + break; + } + + pr_info("test%3u " + "(%5u byte blocks,%5u bytes per update,%4u updates): ", + i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); + + ahash_request_set_crypt(req, sg, output, speed[i].plen); + + if (sec) + ret = test_ahash_jiffies(req, speed[i].blen, + speed[i].plen, output, sec); + else + ret = test_ahash_cycles(req, speed[i].blen, + speed[i].plen, output); + + if (ret) { + pr_err("hashing failed ret=%d\n", ret); + break; + } + } + + ahash_request_free(req); + +out: + crypto_free_ahash(tfm); +} + static void test_available(void) { char **name = check; @@ -881,9 +1134,87 @@ static int do_test(int m) test_hash_speed("rmd320", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; + case 318: + test_hash_speed("ghash-generic", sec, hash_speed_template_16); + if (mode > 300 && mode < 400) break; + case 399: break; + case 400: + /* fall through */ + + case 401: + test_ahash_speed("md4", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 402: + test_ahash_speed("md5", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 403: + test_ahash_speed("sha1", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 404: + test_ahash_speed("sha256", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 405: + test_ahash_speed("sha384", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 406: + test_ahash_speed("sha512", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 407: + test_ahash_speed("wp256", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 408: + test_ahash_speed("wp384", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 409: + test_ahash_speed("wp512", sec, 
generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 410: + test_ahash_speed("tgr128", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 411: + test_ahash_speed("tgr160", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 412: + test_ahash_speed("tgr192", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 413: + test_ahash_speed("sha224", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 414: + test_ahash_speed("rmd128", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 415: + test_ahash_speed("rmd160", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 416: + test_ahash_speed("rmd256", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 417: + test_ahash_speed("rmd320", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + + case 499: + break; + case 1000: test_available(); break; diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 966bbfa..10cb925 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -25,6 +25,7 @@ struct cipher_speed_template { struct hash_speed { unsigned int blen; /* buffer length */ unsigned int plen; /* per-update length */ + unsigned int klen; /* key length */ }; /* @@ -83,4 +84,32 @@ static struct hash_speed generic_hash_speed_template[] = { { .blen = 0, .plen = 0, } }; +static struct hash_speed hash_speed_template_16[] = { + { .blen = 16, .plen = 16, .klen = 16, }, + { .blen = 64, .plen = 16, .klen = 16, }, + { .blen = 64, .plen = 64, .klen = 16, }, + { .blen = 256, .plen = 16, .klen = 16, }, + { .blen = 256, .plen = 64, .klen = 16, }, + { .blen = 256, .plen = 256, .klen = 16, }, + { .blen = 1024, .plen = 16, .klen = 16, }, + { .blen = 1024, .plen = 256, .klen = 16, }, + { .blen = 1024, .plen = 1024, .klen = 16, }, + { .blen = 2048, .plen = 16, .klen = 16, }, + { .blen = 2048, .plen = 256, .klen = 16, }, + { .blen = 2048, .plen = 1024, .klen = 16, }, + { .blen = 2048, .plen = 2048, .klen = 16, }, + { .blen = 4096, .plen = 16, .klen = 16, }, + { .blen = 4096, .plen = 256, .klen = 16, }, + { .blen = 4096, .plen = 1024, .klen = 16, }, + { .blen = 4096, .plen = 4096, .klen = 16, }, + { .blen = 8192, .plen = 16, .klen = 16, }, + { .blen = 8192, .plen = 256, .klen = 16, }, + { .blen = 8192, .plen = 1024, .klen = 16, }, + { .blen = 8192, .plen = 4096, .klen = 16, }, + { .blen = 8192, .plen = 8192, .klen = 16, }, + + /* End marker */ + { .blen = 0, .plen = 0, .klen = 0, } +}; + #endif /* _CRYPTO_TCRYPT_H */ diff --git a/crypto/testmgr.c b/crypto/testmgr.c index c494d76..5c8aaa0 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -153,8 +153,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) free_page((unsigned long)buf[i]); } +static int do_one_async_hash_op(struct ahash_request *req, + struct tcrypt_result *tr, + int ret) +{ + if (ret == -EINPROGRESS || ret == -EBUSY) { + ret = wait_for_completion_interruptible(&tr->completion); + if (!ret) + ret = tr->err; + INIT_COMPLETION(tr->completion); + } + return ret; +} + static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, - unsigned int tcount) + unsigned int tcount, bool use_digest) { const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); unsigned int i, j, k, temp; @@ -206,23 +219,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, } ahash_request_set_crypt(req, sg, result, 
template[i].psize); - ret = crypto_ahash_digest(req); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - ret = wait_for_completion_interruptible( - &tresult.completion); - if (!ret && !(ret = tresult.err)) { - INIT_COMPLETION(tresult.completion); - break; + if (use_digest) { + ret = do_one_async_hash_op(req, &tresult, + crypto_ahash_digest(req)); + if (ret) { + pr_err("alg: hash: digest failed on test %d " + "for %s: ret=%d\n", j, algo, -ret); + goto out; + } + } else { + ret = do_one_async_hash_op(req, &tresult, + crypto_ahash_init(req)); + if (ret) { + pr_err("alt: hash: init failed on test %d " + "for %s: ret=%d\n", j, algo, -ret); + goto out; + } + ret = do_one_async_hash_op(req, &tresult, + crypto_ahash_update(req)); + if (ret) { + pr_err("alt: hash: update failed on test %d " + "for %s: ret=%d\n", j, algo, -ret); + goto out; + } + ret = do_one_async_hash_op(req, &tresult, + crypto_ahash_final(req)); + if (ret) { + pr_err("alt: hash: final failed on test %d " + "for %s: ret=%d\n", j, algo, -ret); + goto out; } - /* fall through */ - default: - printk(KERN_ERR "alg: hash: digest failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; } if (memcmp(result, template[i].digest, @@ -1402,7 +1428,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, return PTR_ERR(tfm); } - err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count); + err = test_hash(tfm, desc->suite.hash.vecs, + desc->suite.hash.count, true); + if (!err) + err = test_hash(tfm, desc->suite.hash.vecs, + desc->suite.hash.count, false); crypto_free_ahash(tfm); return err; diff --git a/crypto/testmgr.h b/crypto/testmgr.h index fb76517..74e3537 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -1669,17 +1669,73 @@ static struct hash_testvec aes_xcbc128_tv_template[] = { } }; -#define VMAC_AES_TEST_VECTORS 1 -static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01', +#define VMAC_AES_TEST_VECTORS 8 +static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02', '\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03',}; +static char vmac_string2[128] = {'a', 'b', 'c',}; +static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', + }; + static struct hash_testvec aes_vmac128_tv_template[] = { { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = NULL, + .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54", + .psize = 0, + .ksize = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = vmac_string1, + .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1", + .psize = 128, + .ksize = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = vmac_string2, + .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d", + .psize = 128, + .ksize = 16, + }, { .key = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .plaintext = vmac_string, - .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7", + .plaintext = vmac_string3, + .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19", + .psize = 128, + .ksize = 16, + }, { + .key = "abcdefghijklmnop", + .plaintext = NULL, + .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84", + .psize = 0, + .ksize = 16, + }, { + .key = 
"abcdefghijklmnop", + .plaintext = vmac_string1, + .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2", + .psize = 128, + .ksize = 16, + }, { + .key = "abcdefghijklmnop", + .plaintext = vmac_string2, + .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf", + .psize = 128, + .ksize = 16, + }, { + .key = "abcdefghijklmnop", + .plaintext = vmac_string3, + .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4", .psize = 128, .ksize = 16, }, diff --git a/crypto/vmac.c b/crypto/vmac.c index 0a9468e..0999274 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -43,6 +43,8 @@ const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ +#define pe64_to_cpup le64_to_cpup /* Prefer little endian */ + #ifdef __LITTLE_ENDIAN #define INDEX_HIGH 1 #define INDEX_LOW 0 @@ -110,8 +112,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ int i; u64 th, tl; \ rh = rl = 0; \ for (i = 0; i < nw; i += 2) { \ - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ - le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ + pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ ADD128(rh, rl, th, tl); \ } \ } while (0) @@ -121,11 +123,11 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ int i; u64 th, tl; \ rh1 = rl1 = rh = rl = 0; \ for (i = 0; i < nw; i += 2) { \ - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ - le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ + pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ - le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ + pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ ADD128(rh1, rl1, th, tl); \ } \ } while (0) @@ -136,17 +138,17 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ int i; u64 th, tl; \ rh = rl = 0; \ for (i = 0; i < nw; i += 8) { \ - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ - le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ + pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ - le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ + pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ - le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ + pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ - le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ + pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ ADD128(rh, rl, th, tl); \ } \ } while (0) @@ -156,29 +158,29 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ int i; u64 th, tl; \ rh1 = rl1 = rh = rl = 0; \ for (i = 0; i < nw; i += 8) { \ - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ - le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \ + pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ - le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \ + pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \ ADD128(rh1, rl1, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ - le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \ + 
pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \ - le64_to_cpup((mp)+i+3)+(kp)[i+5]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \ + pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \ ADD128(rh1, rl1, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ - le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \ + pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \ - le64_to_cpup((mp)+i+5)+(kp)[i+7]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \ + pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \ ADD128(rh1, rl1, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ - le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \ + pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \ ADD128(rh, rl, th, tl); \ - MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \ - le64_to_cpup((mp)+i+7)+(kp)[i+9]); \ + MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \ + pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \ ADD128(rh1, rl1, th, tl); \ } \ } while (0) @@ -216,8 +218,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ int i; \ rh = rl = t = 0; \ for (i = 0; i < nw; i += 2) { \ - t1 = le64_to_cpup(mp+i) + kp[i]; \ - t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \ + t1 = pe64_to_cpup(mp+i) + kp[i]; \ + t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \ m2 = MUL32(t1 >> 32, t2); \ m1 = MUL32(t1, t2 >> 32); \ ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \ @@ -322,8 +324,7 @@ static void vhash_abort(struct vmac_ctx *ctx) ctx->first_block_processed = 0; } -static u64 l3hash(u64 p1, u64 p2, - u64 k1, u64 k2, u64 len) +static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) { u64 rh, rl, t, z = 0; @@ -474,7 +475,7 @@ static u64 vmac(unsigned char m[], unsigned int mbytes, } p = be64_to_cpup(out_p + i); h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); - return p + h; + return le64_to_cpu(p + h); } static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) @@ -549,10 +550,6 @@ static int vmac_setkey(struct crypto_shash *parent, static int vmac_init(struct shash_desc *pdesc) { - struct crypto_shash *parent = pdesc->tfm; - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); - - memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); return 0; } |
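
For reference, the ablkcipher walk interface added to crypto/ablkcipher.c above (ablkcipher_walk_phys() / ablkcipher_walk_done()) is consumed by drivers as a loop over physical-page chunks. The sketch below is illustrative only and is not part of this merge; it assumes the ablkcipher_walk_init() and ablkcipher_walk_complete() helpers from include/crypto/algapi.h (added by the same patch series but outside this crypto/-limited diffstat), and example_encrypt() plus the hardware-programming step are hypothetical placeholders.

```c
/*
 * Minimal sketch (not from this merge) of a driver-side loop over the new
 * ablkcipher walk API.  ablkcipher_walk_init()/ablkcipher_walk_complete()
 * are assumed to come from <crypto/algapi.h>; example_encrypt() and the
 * "program the hardware" step are hypothetical.
 */
#include <linux/crypto.h>
#include <crypto/algapi.h>

static int example_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int bsize = crypto_ablkcipher_blocksize(tfm);
	struct ablkcipher_walk walk;
	unsigned int nbytes;
	int err;

	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, &walk);

	while (!err && (nbytes = walk.nbytes) > 0) {
		/* walk.src.page/offset and walk.dst.page/offset describe one
		 * contiguous physical chunk; handle the full blocks in it. */
		unsigned int n = nbytes - (nbytes % bsize);

		/* ... program the hardware for n bytes here ... */

		/* tell the walker how many bytes of this chunk remain */
		err = ablkcipher_walk_done(req, &walk, nbytes - n);
	}

	/* flush any bounce buffers queued for misaligned chunks */
	ablkcipher_walk_complete(&walk);
	return err;
}
```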