From 23e353c8a681cc30d42fbd4f2c2be85c44fe209b Mon Sep 17 00:00:00 2001 From: Joy Latten Date: Tue, 23 Oct 2007 08:50:32 +0800 Subject: [CRYPTO] ctr: Add CTR (Counter) block cipher mode This patch implements CTR mode for IPsec. It is based on RFC 3686. Please note: 1. CTR turns a block cipher into a stream cipher. Encryption is done in blocks; however, the last block may be a partial block. A "counter block" is encrypted, creating a keystream that is XORed with the plaintext. The counter portion of the counter block is incremented after each block of plaintext is encrypted. Decryption is performed in the same manner. 2. The CTR counter block is composed of nonce + IV + counter. The size of the counter block is equivalent to the blocksize of the cipher: sizeof(nonce) + sizeof(IV) + sizeof(counter) = blocksize The CTR template requires the name of the cipher algorithm, the size of the nonce, and the size of the IV: ctr(cipher,sizeof_nonce,sizeof_iv) So for example, ctr(aes,4,8) specifies that the counter block will be composed of 4 bytes from a nonce, 8 bytes from the IV, and 4 bytes for the counter, since AES has a blocksize of 16 bytes. 3. The counter portion of the counter block is stored in big endian for conformance to RFC 3686. Signed-off-by: Joy Latten Signed-off-by: Herbert Xu --- crypto/Kconfig | 9 ++ crypto/Makefile | 1 + crypto/ctr.c | 369 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/tcrypt.c | 8 ++ crypto/tcrypt.h | 185 ++++++++++++++++++++++++++++ 5 files changed, 572 insertions(+) create mode 100644 crypto/ctr.c diff --git a/crypto/Kconfig b/crypto/Kconfig index 083d2e1..1f32071 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -195,6 +195,15 @@ config CRYPTO_XTS key size 256, 384 or 512 bits. This implementation currently can't handle a sectorsize which is not a multiple of 16 bytes. +config CRYPTO_CTR + tristate "CTR support" + select CRYPTO_BLKCIPHER + select CRYPTO_MANAGER + default m + help + CTR: Counter mode + This block cipher algorithm is required for IPsec. + config CRYPTO_CRYPTD tristate "Software async crypto daemon" select CRYPTO_ABLKCIPHER diff --git a/crypto/Makefile b/crypto/Makefile index 43c2a0d..1f87db2 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_CRYPTO_CBC) += cbc.o obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o obj-$(CONFIG_CRYPTO_LRW) += lrw.o obj-$(CONFIG_CRYPTO_XTS) += xts.o +obj-$(CONFIG_CRYPTO_CTR) += ctr.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o diff --git a/crypto/ctr.c b/crypto/ctr.c new file mode 100644 index 0000000..810d5ec --- /dev/null +++ b/crypto/ctr.c @@ -0,0 +1,369 @@ +/* + * CTR: Counter mode + * + * (C) Copyright IBM Corp. 2007 - Joy Latten + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version.
+ * + */ + +#include <crypto/algapi.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> + +struct ctr_instance_ctx { + struct crypto_spawn alg; + unsigned int noncesize; + unsigned int ivsize; +}; + +struct crypto_ctr_ctx { + struct crypto_cipher *child; + u8 *nonce; +}; + +static inline void __ctr_inc_byte(u8 *a, unsigned int size) +{ + u8 *b = (a + size); + u8 c; + + for (; size; size--) { + c = *--b + 1; + *b = c; + if (c) + break; + } +} + +static void ctr_inc_quad(u8 *a, unsigned int size) +{ + __be32 *b = (__be32 *)(a + size); + u32 c; + + for (; size >= 4; size -= 4) { + c = be32_to_cpu(*--b) + 1; + *b = cpu_to_be32(c); + if (c) + return; + } + + __ctr_inc_byte(a, size); +} + +static void xor_byte(u8 *a, const u8 *b, unsigned int bs) +{ + for (; bs; bs--) + *a++ ^= *b++; +} + +static void xor_quad(u8 *dst, const u8 *src, unsigned int bs) +{ + u32 *a = (u32 *)dst; + u32 *b = (u32 *)src; + + for (; bs >= 4; bs -= 4) + *a++ ^= *b++; + + xor_byte((u8 *)a, (u8 *)b, bs); +} + +static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, + unsigned int keylen) +{ + struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent); + struct crypto_cipher *child = ctx->child; + struct ctr_instance_ctx *ictx = + crypto_instance_ctx(crypto_tfm_alg_instance(parent)); + unsigned int noncelen = ictx->noncesize; + int err = 0; + + /* the nonce is stored in bytes at end of key */ + if (keylen < noncelen) + return -EINVAL; + + memcpy(ctx->nonce, key + (keylen - noncelen), noncelen); + + keylen -= noncelen; + + crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_cipher_setkey(child, key, keylen); + crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); + + return err; +} + +static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, + struct crypto_cipher *tfm, u8 *ctrblk, + unsigned int countersize) +{ + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = + crypto_cipher_alg(tfm)->cia_encrypt; + unsigned int bsize = crypto_cipher_blocksize(tfm); + unsigned long alignmask = crypto_cipher_alignmask(tfm); + u8 ks[bsize + alignmask]; + u8 *keystream = (u8 *)ALIGN((unsigned long)ks, alignmask + 1); + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + unsigned int nbytes = walk->nbytes; + + do { + /* create keystream */ + fn(crypto_cipher_tfm(tfm), keystream, ctrblk); + xor_quad(keystream, src, min(nbytes, bsize)); + + /* copy result into dst */ + memcpy(dst, keystream, min(nbytes, bsize)); + + /* increment counter in counterblock */ + ctr_inc_quad(ctrblk + (bsize - countersize), countersize); + + if (nbytes < bsize) + break; + + src += bsize; + dst += bsize; + nbytes -= bsize; + + } while (nbytes); + + return 0; +} + +static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk, + struct crypto_cipher *tfm, u8 *ctrblk, + unsigned int countersize) +{ + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = + crypto_cipher_alg(tfm)->cia_encrypt; + unsigned int bsize = crypto_cipher_blocksize(tfm); + unsigned long alignmask = crypto_cipher_alignmask(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 ks[bsize + alignmask]; + u8 *keystream = (u8 *)ALIGN((unsigned long)ks, alignmask + 1); + + do { + /* create keystream */ + fn(crypto_cipher_tfm(tfm), keystream, ctrblk); + xor_quad(src, keystream, min(nbytes, bsize)); + + /* increment counter in counterblock */ + ctr_inc_quad(ctrblk + (bsize - countersize), countersize); + + if
(nbytes < bsize) + break; + + src += bsize; + nbytes -= bsize; + + } while (nbytes); + + return 0; +} + +static int crypto_ctr_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + struct crypto_blkcipher *tfm = desc->tfm; + struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm); + struct crypto_cipher *child = ctx->child; + unsigned int bsize = crypto_cipher_blocksize(child); + struct ctr_instance_ctx *ictx = + crypto_instance_ctx(crypto_tfm_alg_instance(&tfm->base)); + unsigned long alignmask = crypto_cipher_alignmask(child); + u8 cblk[bsize + alignmask]; + u8 *counterblk = (u8 *)ALIGN((unsigned long)cblk, alignmask + 1); + unsigned int countersize; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt_block(desc, &walk, bsize); + + /* set up counter block */ + memset(counterblk, 0 , bsize); + memcpy(counterblk, ctx->nonce, ictx->noncesize); + memcpy(counterblk + ictx->noncesize, walk.iv, ictx->ivsize); + + /* initialize counter portion of counter block */ + countersize = bsize - ictx->noncesize - ictx->ivsize; + ctr_inc_quad(counterblk + (bsize - countersize), countersize); + + while (walk.nbytes) { + if (walk.src.virt.addr == walk.dst.virt.addr) + nbytes = crypto_ctr_crypt_inplace(&walk, child, + counterblk, + countersize); + else + nbytes = crypto_ctr_crypt_segment(&walk, child, + counterblk, + countersize); + + err = blkcipher_walk_done(desc, &walk, nbytes); + } + return err; +} + +static int crypto_ctr_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct ctr_instance_ctx *ictx = crypto_instance_ctx(inst); + struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_cipher *cipher; + + ctx->nonce = kzalloc(ictx->noncesize, GFP_KERNEL); + if (!ctx->nonce) + return -ENOMEM; + + cipher = crypto_spawn_cipher(&ictx->alg); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->child = cipher; + + return 0; +} + +static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + + kfree(ctx->nonce); + crypto_free_cipher(ctx->child); +} + +static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) +{ + struct crypto_instance *inst; + struct crypto_alg *alg; + struct ctr_instance_ctx *ictx; + unsigned int noncesize; + unsigned int ivsize; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); + + alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); + if (IS_ERR(alg)) + return ERR_PTR(PTR_ERR(alg)); + + err = crypto_attr_u32(tb[2], &noncesize); + if (err) + goto out_put_alg; + + err = crypto_attr_u32(tb[3], &ivsize); + if (err) + goto out_put_alg; + + /* verify size of nonce + iv + counter */ + err = -EINVAL; + if ((noncesize + ivsize) >= alg->cra_blocksize) + goto out_put_alg; + + inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); + err = -ENOMEM; + if (!inst) + goto out_put_alg; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, + "ctr(%s,%u,%u)", alg->cra_name, noncesize, + ivsize) >= CRYPTO_MAX_ALG_NAME) { + goto err_free_inst; + } + + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "ctr(%s,%u,%u)", alg->cra_driver_name, noncesize, + ivsize) >= CRYPTO_MAX_ALG_NAME) { + goto err_free_inst; + } + + ictx = crypto_instance_ctx(inst); + ictx->noncesize = noncesize; + ictx->ivsize = ivsize; + + err = crypto_init_spawn(&ictx->alg, 
alg, inst, + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + if (err) + goto err_free_inst; + + err = 0; + inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; + inst->alg.cra_priority = alg->cra_priority; + inst->alg.cra_blocksize = 1; + inst->alg.cra_alignmask = 3; + inst->alg.cra_type = &crypto_blkcipher_type; + + inst->alg.cra_blkcipher.ivsize = ivsize; + inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize + + noncesize; + inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize + + noncesize; + + inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx); + + inst->alg.cra_init = crypto_ctr_init_tfm; + inst->alg.cra_exit = crypto_ctr_exit_tfm; + + inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey; + inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; + inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; + +err_free_inst: + if (err) + kfree(inst); + +out_put_alg: + crypto_mod_put(alg); + + if (err) + inst = ERR_PTR(err); + + return inst; +} + +static void crypto_ctr_free(struct crypto_instance *inst) +{ + struct ctr_instance_ctx *ictx = crypto_instance_ctx(inst); + + crypto_drop_spawn(&ictx->alg); + kfree(inst); +} + +static struct crypto_template crypto_ctr_tmpl = { + .name = "ctr", + .alloc = crypto_ctr_alloc, + .free = crypto_ctr_free, + .module = THIS_MODULE, +}; + +static int __init crypto_ctr_module_init(void) +{ + return crypto_register_template(&crypto_ctr_tmpl); +} + +static void __exit crypto_ctr_module_exit(void) +{ + crypto_unregister_template(&crypto_ctr_tmpl); +} + +module_init(crypto_ctr_module_init); +module_exit(crypto_ctr_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CTR Counter block mode"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 24141fb..640cbca 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -969,6 +969,10 @@ static void do_test(void) AES_XTS_ENC_TEST_VECTORS); test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template, AES_XTS_DEC_TEST_VECTORS); + test_cipher("ctr(aes,4,8)", ENCRYPT, aes_ctr_enc_tv_template, + AES_CTR_ENC_TEST_VECTORS); + test_cipher("ctr(aes,4,8)", DECRYPT, aes_ctr_dec_tv_template, + AES_CTR_DEC_TEST_VECTORS); //CAST5 test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template, @@ -1156,6 +1160,10 @@ static void do_test(void) AES_XTS_ENC_TEST_VECTORS); test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template, AES_XTS_DEC_TEST_VECTORS); + test_cipher("ctr(aes,4,8)", ENCRYPT, aes_ctr_enc_tv_template, + AES_CTR_ENC_TEST_VECTORS); + test_cipher("ctr(aes,4,8)", DECRYPT, aes_ctr_dec_tv_template, + AES_CTR_DEC_TEST_VECTORS); break; case 11: diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index ec86138..f7f9b23 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -2146,6 +2146,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = { #define AES_LRW_DEC_TEST_VECTORS 8 #define AES_XTS_ENC_TEST_VECTORS 4 #define AES_XTS_DEC_TEST_VECTORS 4 +#define AES_CTR_ENC_TEST_VECTORS 6 +#define AES_CTR_DEC_TEST_VECTORS 6 static struct cipher_testvec aes_enc_tv_template[] = { { /* From FIPS-197 */ @@ -3180,6 +3182,189 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = { } }; + +static struct cipher_testvec aes_ctr_enc_tv_template[] = { + { /* From RFC 3686 */ + .key = { 0xae, 0x68, 0x52, 0xf8, 0x12, 0x10, 0x67, 0xcc, + 0x4b, 0xf7, 0xa5, 0x76, 0x55, 0x77, 0xf3, 0x9e, + 0x00, 0x00, 0x00, 0x30 }, + .klen = 20, + .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { "Single block msg" }, + .ilen = 16, + .result = { 0xe4, 0x09, 0x5d, 0x4f, 0xb7, 0xa7, 0xb3, 0x79, + 0x2d, 0x61, 0x75, 0xa3, 0x26, 
0x13, 0x11, 0xb8 }, + .rlen = 16, + }, { + .key = { 0x7e, 0x24, 0x06, 0x78, 0x17, 0xfa, 0xe0, 0xd7, + 0x43, 0xd6, 0xce, 0x1f, 0x32, 0x53, 0x91, 0x63, + 0x00, 0x6c, 0xb6, 0xdb }, + .klen = 20, + .iv = { 0xc0, 0x54, 0x3b, 0x59, 0xda, 0x48, 0xd9, 0x0b }, + .input = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, + .ilen = 32, + .result = { 0x51, 0x04, 0xa1, 0x06, 0x16, 0x8a, 0x72, 0xd9, + 0x79, 0x0d, 0x41, 0xee, 0x8e, 0xda, 0xd3, 0x88, + 0xeb, 0x2e, 0x1e, 0xfc, 0x46, 0xda, 0x57, 0xc8, + 0xfc, 0xe6, 0x30, 0xdf, 0x91, 0x41, 0xbe, 0x28 }, + .rlen = 32, + }, { + .key = { 0x16, 0xaf, 0x5b, 0x14, 0x5f, 0xc9, 0xf5, 0x79, + 0xc1, 0x75, 0xf9, 0x3e, 0x3b, 0xfb, 0x0e, 0xed, + 0x86, 0x3d, 0x06, 0xcc, 0xfd, 0xb7, 0x85, 0x15, + 0x00, 0x00, 0x00, 0x48 }, + .klen = 28, + .iv = { 0x36, 0x73, 0x3c, 0x14, 0x7d, 0x6d, 0x93, 0xcb }, + .input = { "Single block msg" }, + .ilen = 16, + .result = { 0x4b, 0x55, 0x38, 0x4f, 0xe2, 0x59, 0xc9, 0xc8, + 0x4e, 0x79, 0x35, 0xa0, 0x03, 0xcb, 0xe9, 0x28 }, + .rlen = 16, + }, { + .key = { 0x7c, 0x5c, 0xb2, 0x40, 0x1b, 0x3d, 0xc3, 0x3c, + 0x19, 0xe7, 0x34, 0x08, 0x19, 0xe0, 0xf6, 0x9c, + 0x67, 0x8c, 0x3d, 0xb8, 0xe6, 0xf6, 0xa9, 0x1a, + 0x00, 0x96, 0xb0, 0x3b }, + .klen = 28, + .iv = { 0x02, 0x0c, 0x6e, 0xad, 0xc2, 0xcb, 0x50, 0x0d }, + .input = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, + .ilen = 32, + .result = { 0x45, 0x32, 0x43, 0xfc, 0x60, 0x9b, 0x23, 0x32, + 0x7e, 0xdf, 0xaa, 0xfa, 0x71, 0x31, 0xcd, 0x9f, + 0x84, 0x90, 0x70, 0x1c, 0x5a, 0xd4, 0xa7, 0x9c, + 0xfc, 0x1f, 0xe0, 0xff, 0x42, 0xf4, 0xfb, 0x00 }, + .rlen = 32, + }, { + .key = { 0x77, 0x6b, 0xef, 0xf2, 0x85, 0x1d, 0xb0, 0x6f, + 0x4c, 0x8a, 0x05, 0x42, 0xc8, 0x69, 0x6f, 0x6c, + 0x6a, 0x81, 0xaf, 0x1e, 0xec, 0x96, 0xb4, 0xd3, + 0x7f, 0xc1, 0xd6, 0x89, 0xe6, 0xc1, 0xc1, 0x04, + 0x00, 0x00, 0x00, 0x60 }, + .klen = 36, + .iv = { 0xdb, 0x56, 0x72, 0xc9, 0x7a, 0xa8, 0xf0, 0xb2 }, + .input = { "Single block msg" }, + .ilen = 16, + .result = { 0x14, 0x5a, 0xd0, 0x1d, 0xbf, 0x82, 0x4e, 0xc7, + 0x56, 0x08, 0x63, 0xdc, 0x71, 0xe3, 0xe0, 0xc0 }, + .rlen = 16, + }, { + .key = { 0xf6, 0xd6, 0x6d, 0x6b, 0xd5, 0x2d, 0x59, 0xbb, + 0x07, 0x96, 0x36, 0x58, 0x79, 0xef, 0xf8, 0x86, + 0xc6, 0x6d, 0xd5, 0x1a, 0x5b, 0x6a, 0x99, 0x74, + 0x4b, 0x50, 0x59, 0x0c, 0x87, 0xa2, 0x38, 0x84, + 0x00, 0xfa, 0xac, 0x24 }, + .klen = 36, + .iv = { 0xc1, 0x58, 0x5e, 0xf1, 0x5a, 0x43, 0xd8, 0x75 }, + .input = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, + .ilen = 32, + .result = { 0xf0, 0x5e, 0x23, 0x1b, 0x38, 0x94, 0x61, 0x2c, + 0x49, 0xee, 0x00, 0x0b, 0x80, 0x4e, 0xb2, 0xa9, + 0xb8, 0x30, 0x6b, 0x50, 0x8f, 0x83, 0x9d, 0x6a, + 0x55, 0x30, 0x83, 0x1d, 0x93, 0x44, 0xaf, 0x1c }, + .rlen = 32, + }, +}; + +static struct cipher_testvec aes_ctr_dec_tv_template[] = { + { /* From RFC 3686 */ + .key = { 0xae, 0x68, 0x52, 0xf8, 0x12, 0x10, 0x67, 0xcc, + 0x4b, 0xf7, 0xa5, 0x76, 0x55, 0x77, 0xf3, 0x9e, + 0x00, 0x00, 0x00, 0x30 }, + .klen = 20, + .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { 0xe4, 0x09, 0x5d, 0x4f, 0xb7, 0xa7, 0xb3, 0x79, + 0x2d, 0x61, 0x75, 0xa3, 0x26, 0x13, 0x11, 
0xb8 }, + .ilen = 16, + .result = { "Single block msg" }, + .rlen = 16, + }, { + .key = { 0x7e, 0x24, 0x06, 0x78, 0x17, 0xfa, 0xe0, 0xd7, + 0x43, 0xd6, 0xce, 0x1f, 0x32, 0x53, 0x91, 0x63, + 0x00, 0x6c, 0xb6, 0xdb }, + .klen = 20, + .iv = { 0xc0, 0x54, 0x3b, 0x59, 0xda, 0x48, 0xd9, 0x0b }, + .input = { 0x51, 0x04, 0xa1, 0x06, 0x16, 0x8a, 0x72, 0xd9, + 0x79, 0x0d, 0x41, 0xee, 0x8e, 0xda, 0xd3, 0x88, + 0xeb, 0x2e, 0x1e, 0xfc, 0x46, 0xda, 0x57, 0xc8, + 0xfc, 0xe6, 0x30, 0xdf, 0x91, 0x41, 0xbe, 0x28 }, + .ilen = 32, + .result = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, + .rlen = 32, + }, { + .key = { 0x16, 0xaf, 0x5b, 0x14, 0x5f, 0xc9, 0xf5, 0x79, + 0xc1, 0x75, 0xf9, 0x3e, 0x3b, 0xfb, 0x0e, 0xed, + 0x86, 0x3d, 0x06, 0xcc, 0xfd, 0xb7, 0x85, 0x15, + 0x00, 0x00, 0x00, 0x48 }, + .klen = 28, + .iv = { 0x36, 0x73, 0x3c, 0x14, 0x7d, 0x6d, 0x93, 0xcb }, + .input = { 0x4b, 0x55, 0x38, 0x4f, 0xe2, 0x59, 0xc9, 0xc8, + 0x4e, 0x79, 0x35, 0xa0, 0x03, 0xcb, 0xe9, 0x28 }, + .ilen = 16, + .result = { "Single block msg" }, + .rlen = 16, + }, { + .key = { 0x7c, 0x5c, 0xb2, 0x40, 0x1b, 0x3d, 0xc3, 0x3c, + 0x19, 0xe7, 0x34, 0x08, 0x19, 0xe0, 0xf6, 0x9c, + 0x67, 0x8c, 0x3d, 0xb8, 0xe6, 0xf6, 0xa9, 0x1a, + 0x00, 0x96, 0xb0, 0x3b }, + .klen = 28, + .iv = { 0x02, 0x0c, 0x6e, 0xad, 0xc2, 0xcb, 0x50, 0x0d }, + .input = { 0x45, 0x32, 0x43, 0xfc, 0x60, 0x9b, 0x23, 0x32, + 0x7e, 0xdf, 0xaa, 0xfa, 0x71, 0x31, 0xcd, 0x9f, + 0x84, 0x90, 0x70, 0x1c, 0x5a, 0xd4, 0xa7, 0x9c, + 0xfc, 0x1f, 0xe0, 0xff, 0x42, 0xf4, 0xfb, 0x00 }, + .ilen = 32, + .result = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, + .rlen = 32, + }, { + .key = { 0x77, 0x6b, 0xef, 0xf2, 0x85, 0x1d, 0xb0, 0x6f, + 0x4c, 0x8a, 0x05, 0x42, 0xc8, 0x69, 0x6f, 0x6c, + 0x6a, 0x81, 0xaf, 0x1e, 0xec, 0x96, 0xb4, 0xd3, + 0x7f, 0xc1, 0xd6, 0x89, 0xe6, 0xc1, 0xc1, 0x04, + 0x00, 0x00, 0x00, 0x60 }, + .klen = 36, + .iv = { 0xdb, 0x56, 0x72, 0xc9, 0x7a, 0xa8, 0xf0, 0xb2 }, + .input = { 0x14, 0x5a, 0xd0, 0x1d, 0xbf, 0x82, 0x4e, 0xc7, + 0x56, 0x08, 0x63, 0xdc, 0x71, 0xe3, 0xe0, 0xc0 }, + .ilen = 16, + .result = { "Single block msg" }, + .rlen = 16, + }, { + .key = { 0xf6, 0xd6, 0x6d, 0x6b, 0xd5, 0x2d, 0x59, 0xbb, + 0x07, 0x96, 0x36, 0x58, 0x79, 0xef, 0xf8, 0x86, + 0xc6, 0x6d, 0xd5, 0x1a, 0x5b, 0x6a, 0x99, 0x74, + 0x4b, 0x50, 0x59, 0x0c, 0x87, 0xa2, 0x38, 0x84, + 0x00, 0xfa, 0xac, 0x24 }, + .klen = 36, + .iv = { 0xc1, 0x58, 0x5e, 0xf1, 0x5a, 0x43, 0xd8, 0x75 }, + .input = { 0xf0, 0x5e, 0x23, 0x1b, 0x38, 0x94, 0x61, 0x2c, + 0x49, 0xee, 0x00, 0x0b, 0x80, 0x4e, 0xb2, 0xa9, + 0xb8, 0x30, 0x6b, 0x50, 0x8f, 0x83, 0x9d, 0x6a, + 0x55, 0x30, 0x83, 0x1d, 0x93, 0x44, 0xaf, 0x1c }, + .ilen = 32, + .result = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, + .rlen = 32, + }, +}; + /* Cast5 test vectors from RFC 2144 */ #define CAST5_ENC_TEST_VECTORS 3 #define CAST5_DEC_TEST_VECTORS 3 -- cgit v1.1 From f7d0561ea1dadec5462846520b1f4fb304294fd5 Mon Sep 17 00:00:00 2001 From: Evgeniy Polyakov Date: Fri, 26 Oct 2007 21:31:14 +0800 Subject: [CRYPTO] hifn_795x: HIFN 795x driver This is a driver for HIFN 795x crypto accelerator chips. 
It passed all tests for AES, DES and DES3_EDE except the weak-key test for DES, since the hardware cannot detect weak keys. Signed-off-by: Evgeniy Polyakov Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 10 + drivers/crypto/Makefile | 1 + drivers/crypto/hifn_795x.c | 2619 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 2630 insertions(+) create mode 100644 drivers/crypto/hifn_795x.c diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 5fd6688..1ef7697 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -83,4 +83,14 @@ config ZCRYPT_MONOLITHIC that contains all parts of the crypto device driver (ap bus, request router and all the card drivers). +config CRYPTO_DEV_HIFN_795X + tristate "Driver HIFN 795x crypto accelerator chips" + select CRYPTO_ALGAPI + select CRYPTO_ABLKCIPHER + select CRYPTO_BLKCIPHER + help + This option allows you to have support for HIFN 795x crypto adapters. + + + endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index d070030..c0327f0 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o +obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c new file mode 100644 index 0000000..e3376f2 --- /dev/null +++ b/drivers/crypto/hifn_795x.c @@ -0,0 +1,2619 @@ +/* + * 2007+ Copyright (c) Evgeniy Polyakov + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#undef dprintk + +#define HIFN_TEST +//#define HIFN_DEBUG + +#ifdef HIFN_DEBUG +#define dprintk(f, a...) printk(f, ##a) +#else +#define dprintk(f, a...)
do {} while (0) +#endif + +static atomic_t hifn_dev_number; + +#define ACRYPTO_OP_DECRYPT 0 +#define ACRYPTO_OP_ENCRYPT 1 +#define ACRYPTO_OP_HMAC 2 +#define ACRYPTO_OP_RNG 3 + +#define ACRYPTO_MODE_ECB 0 +#define ACRYPTO_MODE_CBC 1 +#define ACRYPTO_MODE_CFB 2 +#define ACRYPTO_MODE_OFB 3 + +#define ACRYPTO_TYPE_AES_128 0 +#define ACRYPTO_TYPE_AES_192 1 +#define ACRYPTO_TYPE_AES_256 2 +#define ACRYPTO_TYPE_3DES 3 +#define ACRYPTO_TYPE_DES 4 + +#define PCI_VENDOR_ID_HIFN 0x13A3 +#define PCI_DEVICE_ID_HIFN_7955 0x0020 +#define PCI_DEVICE_ID_HIFN_7956 0x001d + +/* I/O region sizes */ + +#define HIFN_BAR0_SIZE 0x1000 +#define HIFN_BAR1_SIZE 0x2000 +#define HIFN_BAR2_SIZE 0x8000 + +/* DMA registers */ + +#define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */ +#define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */ +#define HIFN_DMA_RRA 0x2C /* DMA Result Ring Address */ +#define HIFN_DMA_DDRA 0x3C /* DMA Destination Data Ring Address */ +#define HIFN_DMA_STCTL 0x40 /* DMA Status and Control */ +#define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */ +#define HIFN_DMA_CFG1 0x48 /* DMA Configuration #1 */ +#define HIFN_DMA_CFG2 0x6C /* DMA Configuration #2 */ +#define HIFN_CHIP_ID 0x98 /* Chip ID */ + +/* + * Processing Unit Registers (offset from BASEREG0) + */ +#define HIFN_0_PUDATA 0x00 /* Processing Unit Data */ +#define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */ +#define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */ +#define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */ +#define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */ +#define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */ +#define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */ +#define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */ +#define HIFN_0_SPACESIZE 0x20 /* Register space size */ + +/* Processing Unit Control Register (HIFN_0_PUCTRL) */ +#define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */ +#define HIFN_PUCTRL_STOP 0x0008 /* stop pu */ +#define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */ +#define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */ +#define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */ + +/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */ +#define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */ +#define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */ +#define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ +#define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ +#define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */ +#define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */ +#define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */ +#define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */ +#define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */ +#define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */ + +/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */ +#define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */ +#define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */ +#define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */ +#define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */ +#define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */ +#define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */ +#define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */ +#define HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */ +#define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */ +#define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */ +#define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */
+#define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */ +#define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */ +#define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */ +#define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */ +#define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */ +#define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */ +#define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */ +#define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */ +#define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */ +#define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */ +#define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */ +#define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */ + +/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */ +#define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */ +#define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */ +#define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ +#define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ +#define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */ +#define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */ +#define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */ +#define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */ +#define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */ +#define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */ + +/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */ +#define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */ +#define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */ +#define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ +#define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ +#define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */ +#define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */ +#define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */ +#define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */ +#define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */ +#define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */ +#define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */ +#define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */ +#define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */ +#define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */ +#define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */ +#define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */ +#define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */ + +/* FIFO Status Register (HIFN_0_FIFOSTAT) */ +#define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */ +#define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */ + +/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */ +#define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as 1 */ + +/* + * DMA Interface Registers (offset from BASEREG1) + */ +#define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */ +#define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */ +#define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */ +#define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */ +#define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */ +#define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */ +#define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */ +#define HIFN_1_PLL 0x4c /* 795x: PLL config */ +#define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */ 
+#define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */ +#define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng data */ +#define HIFN_1_7811_RNGSTS 0x6c /* 7811: rng status */ +#define HIFN_1_7811_MIPSRST 0x94 /* 7811: MIPS reset */ +#define HIFN_1_REVID 0x98 /* Revision ID */ +#define HIFN_1_UNLOCK_SECRET1 0xf4 +#define HIFN_1_UNLOCK_SECRET2 0xfc +#define HIFN_1_PUB_RESET 0x204 /* Public/RNG Reset */ +#define HIFN_1_PUB_BASE 0x300 /* Public Base Address */ +#define HIFN_1_PUB_OPLEN 0x304 /* Public Operand Length */ +#define HIFN_1_PUB_OP 0x308 /* Public Operand */ +#define HIFN_1_PUB_STATUS 0x30c /* Public Status */ +#define HIFN_1_PUB_IEN 0x310 /* Public Interrupt enable */ +#define HIFN_1_RNG_CONFIG 0x314 /* RNG config */ +#define HIFN_1_RNG_DATA 0x318 /* RNG data */ +#define HIFN_1_PUB_MEM 0x400 /* start of Public key memory */ +#define HIFN_1_PUB_MEMEND 0xbff /* end of Public key memory */ + +/* DMA Status and Control Register (HIFN_1_DMA_CSR) */ +#define HIFN_DMACSR_D_CTRLMASK 0xc0000000 /* Destination Ring Control */ +#define HIFN_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */ +#define HIFN_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */ +#define HIFN_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. Control: enable */ +#define HIFN_DMACSR_D_ABORT 0x20000000 /* Destination Ring PCI Abort */ +#define HIFN_DMACSR_D_DONE 0x10000000 /* Destination Ring Done */ +#define HIFN_DMACSR_D_LAST 0x08000000 /* Destination Ring Last */ +#define HIFN_DMACSR_D_WAIT 0x04000000 /* Destination Ring Waiting */ +#define HIFN_DMACSR_D_OVER 0x02000000 /* Destination Ring Overflow */ +#define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */ +#define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */ +#define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */ +#define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */ +#define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */ +#define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */ +#define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */ +#define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */ +#define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */ +#define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */ +#define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */ +#define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */ +#define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */ +#define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */ +#define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */ +#define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */ +#define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */ +#define HIFN_DMACSR_ILLW 0x00000200 /* Illegal write (7811 only) */ +#define HIFN_DMACSR_ILLR 0x00000100 /* Illegal read (7811 only) */ +#define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */ +#define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */ +#define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */ +#define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */ +#define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */ +#define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */ +#define HIFN_DMACSR_C_LAST 0x00000008 /* Command Ring Last */ +#define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */ +#define HIFN_DMACSR_PUBDONE 0x00000002 /* Public op done (7951 only) */ +#define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ
*/ + +/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */ +#define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCI Abort */ +#define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */ +#define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */ +#define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */ +#define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */ +#define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */ +#define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */ +#define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */ +#define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */ +#define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */ +#define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */ +#define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */ +#define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */ +#define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */ +#define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */ +#define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */ +#define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */ +#define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */ +#define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */ +#define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */ +#define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */ +#define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */ + +/* DMA Configuration Register (HIFN_1_DMA_CNFG) */ +#define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */ +#define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */ +#define HIFN_DMACNFG_UNLOCK 0x00000800 +#define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */ +#define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */ +#define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */ +#define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */ +#define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */ + +#define HIFN_PLL_7956 0x00001d18 /* 7956 PLL config value */ + +/* Public key reset register (HIFN_1_PUB_RESET) */ +#define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */ + +/* Public base address register (HIFN_1_PUB_BASE) */ +#define HIFN_PUBBASE_ADDR 0x00003fff /* base address */ + +/* Public operand length register (HIFN_1_PUB_OPLEN) */ +#define HIFN_PUBOPLEN_MOD_M 0x0000007f /* modulus length mask */ +#define HIFN_PUBOPLEN_MOD_S 0 /* modulus length shift */ +#define HIFN_PUBOPLEN_EXP_M 0x0003ff80 /* exponent length mask */ +#define HIFN_PUBOPLEN_EXP_S 7 /* exponent length shift */ +#define HIFN_PUBOPLEN_RED_M 0x003c0000 /* reducend length mask */ +#define HIFN_PUBOPLEN_RED_S 18 /* reducend length shift */ + +/* Public operation register (HIFN_1_PUB_OP) */ +#define HIFN_PUBOP_AOFFSET_M 0x0000007f /* A offset mask */ +#define HIFN_PUBOP_AOFFSET_S 0 /* A offset shift */ +#define HIFN_PUBOP_BOFFSET_M 0x00000f80 /* B offset mask */ +#define HIFN_PUBOP_BOFFSET_S 7 /* B offset shift */ +#define HIFN_PUBOP_MOFFSET_M 0x0003f000 /* M offset mask */ +#define HIFN_PUBOP_MOFFSET_S 12 /* M offset shift */ +#define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */ +#define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */ +#define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */ +#define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */ +#define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */ +#define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */ +#define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */
+#define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */ +#define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */ +#define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */ +#define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */ +#define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */ +#define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular RED */ +#define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular EXP */ + +/* Public status register (HIFN_1_PUB_STATUS) */ +#define HIFN_PUBSTS_DONE 0x00000001 /* operation done */ +#define HIFN_PUBSTS_CARRY 0x00000002 /* carry */ + +/* Public interrupt enable register (HIFN_1_PUB_IEN) */ +#define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */ + +/* Random number generator config register (HIFN_1_RNG_CONFIG) */ +#define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */ + +#define HIFN_NAMESIZE 32 +#define HIFN_MAX_RESULT_ORDER 5 + +#define HIFN_D_CMD_RSIZE 24*4 +#define HIFN_D_SRC_RSIZE 80*4 +#define HIFN_D_DST_RSIZE 80*4 +#define HIFN_D_RES_RSIZE 24*4 + +#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-5 + +#define AES_MIN_KEY_SIZE 16 +#define AES_MAX_KEY_SIZE 32 + +#define HIFN_DES_KEY_LENGTH 8 +#define HIFN_3DES_KEY_LENGTH 24 +#define HIFN_MAX_CRYPT_KEY_LENGTH AES_MAX_KEY_SIZE +#define HIFN_IV_LENGTH 8 +#define HIFN_AES_IV_LENGTH 16 +#define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH + +#define HIFN_MAC_KEY_LENGTH 64 +#define HIFN_MD5_LENGTH 16 +#define HIFN_SHA1_LENGTH 20 +#define HIFN_MAC_TRUNC_LENGTH 12 + +#define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260) +#define HIFN_MAX_RESULT (8 + 4 + 4 + 20 + 4) +#define HIFN_USED_RESULT 12 + +struct hifn_desc +{ + volatile u32 l; + volatile u32 p; +}; + +struct hifn_dma { + struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1]; + struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1]; + struct hifn_desc dstr[HIFN_D_DST_RSIZE+1]; + struct hifn_desc resr[HIFN_D_RES_RSIZE+1]; + + u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND]; + u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT]; + + u64 test_src, test_dst; + + /* + * Our current positions for insertion and removal from the descriptor + * rings. 
+ */ + volatile int cmdi, srci, dsti, resi; + volatile int cmdu, srcu, dstu, resu; + int cmdk, srck, dstk, resk; +}; + +#define HIFN_FLAG_CMD_BUSY (1<<0) +#define HIFN_FLAG_SRC_BUSY (1<<1) +#define HIFN_FLAG_DST_BUSY (1<<2) +#define HIFN_FLAG_RES_BUSY (1<<3) +#define HIFN_FLAG_OLD_KEY (1<<4) + +#define HIFN_DEFAULT_ACTIVE_NUM 5 + +struct hifn_device +{ + char name[HIFN_NAMESIZE]; + + int irq; + + struct pci_dev *pdev; + void __iomem *bar[3]; + + unsigned long result_mem; + dma_addr_t dst; + + void *desc_virt; + dma_addr_t desc_dma; + + u32 dmareg; + + void *sa[HIFN_D_RES_RSIZE]; + + spinlock_t lock; + + void *priv; + + u32 flags; + int active, started; + struct delayed_work work; + unsigned long reset; + unsigned long success; + unsigned long prev_success; + + u8 snum; + + struct crypto_queue queue; + struct list_head alg_list; +}; + +#define HIFN_D_LENGTH 0x0000ffff +#define HIFN_D_NOINVALID 0x01000000 +#define HIFN_D_MASKDONEIRQ 0x02000000 +#define HIFN_D_DESTOVER 0x04000000 +#define HIFN_D_OVER 0x08000000 +#define HIFN_D_LAST 0x20000000 +#define HIFN_D_JUMP 0x40000000 +#define HIFN_D_VALID 0x80000000 + +struct hifn_base_command +{ + volatile u16 masks; + volatile u16 session_num; + volatile u16 total_source_count; + volatile u16 total_dest_count; +}; + +#define HIFN_BASE_CMD_COMP 0x0100 /* enable compression engine */ +#define HIFN_BASE_CMD_PAD 0x0200 /* enable padding engine */ +#define HIFN_BASE_CMD_MAC 0x0400 /* enable MAC engine */ +#define HIFN_BASE_CMD_CRYPT 0x0800 /* enable crypt engine */ +#define HIFN_BASE_CMD_DECODE 0x2000 +#define HIFN_BASE_CMD_SRCLEN_M 0xc000 +#define HIFN_BASE_CMD_SRCLEN_S 14 +#define HIFN_BASE_CMD_DSTLEN_M 0x3000 +#define HIFN_BASE_CMD_DSTLEN_S 12 +#define HIFN_BASE_CMD_LENMASK_HI 0x30000 +#define HIFN_BASE_CMD_LENMASK_LO 0x0ffff + +/* + * Structure to help build up the command data structure. + */ +struct hifn_crypt_command +{ + volatile u16 masks; + volatile u16 header_skip; + volatile u16 source_count; + volatile u16 reserved; +}; + +#define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */ +#define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */ +#define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */ +#define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */ +#define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */ +#define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */ +#define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */ +#define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */ +#define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */ +#define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */ +#define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */ +#define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */ +#define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */ +#define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */ +#define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */ +#define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */ +#define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */ +#define HIFN_CRYPT_CMD_SRCLEN_M 0xc000 +#define HIFN_CRYPT_CMD_SRCLEN_S 14 + +/* + * Structure to help build up the command data structure. 
+ */ +struct hifn_mac_command +{ + volatile u16 masks; + volatile u16 header_skip; + volatile u16 source_count; + volatile u16 reserved; +}; + +#define HIFN_MAC_CMD_ALG_MASK 0x0001 +#define HIFN_MAC_CMD_ALG_SHA1 0x0000 +#define HIFN_MAC_CMD_ALG_MD5 0x0001 +#define HIFN_MAC_CMD_MODE_MASK 0x000c +#define HIFN_MAC_CMD_MODE_HMAC 0x0000 +#define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004 +#define HIFN_MAC_CMD_MODE_HASH 0x0008 +#define HIFN_MAC_CMD_MODE_FULL 0x0004 +#define HIFN_MAC_CMD_TRUNC 0x0010 +#define HIFN_MAC_CMD_RESULT 0x0020 +#define HIFN_MAC_CMD_APPEND 0x0040 +#define HIFN_MAC_CMD_SRCLEN_M 0xc000 +#define HIFN_MAC_CMD_SRCLEN_S 14 + +/* + * MAC POS IPsec initiates authentication after encryption on encodes + * and before decryption on decodes. + */ +#define HIFN_MAC_CMD_POS_IPSEC 0x0200 +#define HIFN_MAC_CMD_NEW_KEY 0x0800 + +struct hifn_comp_command +{ + volatile u16 masks; + volatile u16 header_skip; + volatile u16 source_count; + volatile u16 reserved; +}; + +#define HIFN_COMP_CMD_SRCLEN_M 0xc000 +#define HIFN_COMP_CMD_SRCLEN_S 14 +#define HIFN_COMP_CMD_ONE 0x0100 /* must be one */ +#define HIFN_COMP_CMD_CLEARHIST 0x0010 /* clear history */ +#define HIFN_COMP_CMD_UPDATEHIST 0x0008 /* update history */ +#define HIFN_COMP_CMD_LZS_STRIP0 0x0004 /* LZS: strip zero */ +#define HIFN_COMP_CMD_MPPC_RESTART 0x0004 /* MPPC: restart */ +#define HIFN_COMP_CMD_ALG_MASK 0x0001 /* compression mode: */ +#define HIFN_COMP_CMD_ALG_MPPC 0x0001 /* MPPC */ +#define HIFN_COMP_CMD_ALG_LZS 0x0000 /* LZS */ + +struct hifn_base_result +{ + volatile u16 flags; + volatile u16 session; + volatile u16 src_cnt; /* 15:0 of source count */ + volatile u16 dst_cnt; /* 15:0 of dest count */ +}; + +#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */ +#define HIFN_BASE_RES_SRCLEN_M 0xc000 /* 17:16 of source count */ +#define HIFN_BASE_RES_SRCLEN_S 14 +#define HIFN_BASE_RES_DSTLEN_M 0x3000 /* 17:16 of dest count */ +#define HIFN_BASE_RES_DSTLEN_S 12 + +struct hifn_comp_result +{ + volatile u16 flags; + volatile u16 crc; +}; + +#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */ +#define HIFN_COMP_RES_LCB_S 8 +#define HIFN_COMP_RES_RESTART 0x0004 /* MPPC: restart */ +#define HIFN_COMP_RES_ENDMARKER 0x0002 /* LZS: end marker seen */ +#define HIFN_COMP_RES_SRC_NOTZERO 0x0001 /* source expired */ + +struct hifn_mac_result +{ + volatile u16 flags; + volatile u16 reserved; + /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */ +}; + +#define HIFN_MAC_RES_MISCOMPARE 0x0002 /* compare failed */ +#define HIFN_MAC_RES_SRC_NOTZERO 0x0001 /* source expired */ + +struct hifn_crypt_result +{ + volatile u16 flags; + volatile u16 reserved; +}; + +#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */ + +#ifndef HIFN_POLL_FREQUENCY +#define HIFN_POLL_FREQUENCY 0x1 +#endif + +#ifndef HIFN_POLL_SCALAR +#define HIFN_POLL_SCALAR 0x0 +#endif + +#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */ +#define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */ + +struct hifn_crypto_alg +{ + struct list_head entry; + struct crypto_alg alg; + struct hifn_device *dev; +}; + +#define ASYNC_SCATTERLIST_CACHE 16 + +#define ASYNC_FLAGS_MISALIGNED (1<<0) + +struct ablkcipher_walk +{ + struct scatterlist cache[ASYNC_SCATTERLIST_CACHE]; + u32 flags; + int num; +}; + +struct hifn_context +{ + u8 key[HIFN_MAX_CRYPT_KEY_LENGTH], *iv; + struct hifn_device *dev; + unsigned int keysize, ivsize; + u8 op, type, mode, unused; + struct ablkcipher_walk walk; + atomic_t sg_num; +}; + +#define crypto_alg_to_hifn(alg) 
container_of(alg, struct hifn_crypto_alg, alg) + +static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg) +{ + u32 ret; + + ret = readl((char *)(dev->bar[0]) + reg); + + return ret; +} + +static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg) +{ + u32 ret; + + ret = readl((char *)(dev->bar[1]) + reg); + + return ret; +} + +static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val) +{ + writel(val, (char *)(dev->bar[0]) + reg); +} + +static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val) +{ + writel(val, (char *)(dev->bar[1]) + reg); +} + +static void hifn_wait_puc(struct hifn_device *dev) +{ + int i; + u32 ret; + + for (i=10000; i > 0; --i) { + ret = hifn_read_0(dev, HIFN_0_PUCTRL); + if (!(ret & HIFN_PUCTRL_RESET)) + break; + + udelay(1); + } + + if (!i) + dprintk("%s: Failed to reset PUC unit.\n", dev->name); +} + +static void hifn_reset_puc(struct hifn_device *dev) +{ + hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); + hifn_wait_puc(dev); +} + +static void hifn_stop_device(struct hifn_device *dev) +{ + hifn_write_1(dev, HIFN_1_DMA_CSR, + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS | + HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS); + hifn_write_0(dev, HIFN_0_PUIER, 0); + hifn_write_1(dev, HIFN_1_DMA_IER, 0); +} + +static void hifn_reset_dma(struct hifn_device *dev, int full) +{ + hifn_stop_device(dev); + + /* + * Setting poll frequency and others to 0. + */ + hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); + mdelay(1); + + /* + * Reset DMA. + */ + if (full) { + hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); + mdelay(1); + } else { + hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE | + HIFN_DMACNFG_MSTRESET); + hifn_reset_puc(dev); + } + + hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); + + hifn_reset_puc(dev); +} + +static u32 hifn_next_signature(u_int32_t a, u_int cnt) +{ + int i; + u32 v; + + for (i = 0; i < cnt; i++) { + + /* get the parity */ + v = a & 0x80080125; + v ^= v >> 16; + v ^= v >> 8; + v ^= v >> 4; + v ^= v >> 2; + v ^= v >> 1; + + a = (v & 1) ^ (a << 1); + } + + return a; +} + +static struct pci2id { + u_short pci_vendor; + u_short pci_prod; + char card_id[13]; +} pci2id[] = { + { + PCI_VENDOR_ID_HIFN, + PCI_DEVICE_ID_HIFN_7955, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00 } + }, + { + PCI_VENDOR_ID_HIFN, + PCI_DEVICE_ID_HIFN_7956, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00 } + } +}; + +static int hifn_init_pubrng(struct hifn_device *dev) +{ + int i; + + hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) | + HIFN_PUBRST_RESET); + + for (i=100; i > 0; --i) { + mdelay(1); + + if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0) + break; + } + + if (!i) + dprintk("Chip %s: Failed to initialise public key engine.\n", + dev->name); + else { + hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); + dev->dmareg |= HIFN_DMAIER_PUBDONE; + hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); + + dprintk("Chip %s: Public key engine has been successfully " + "initialised.\n", dev->name); + } + + /* + * Enable RNG engine.
+ */ + + hifn_write_1(dev, HIFN_1_RNG_CONFIG, + hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); + dprintk("Chip %s: RNG engine has been successfully initialised.\n", + dev->name); + + return 0; +} + +static int hifn_enable_crypto(struct hifn_device *dev) +{ + u32 dmacfg, addr; + char *offtbl = NULL; + int i; + + for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) { + if (pci2id[i].pci_vendor == dev->pdev->vendor && + pci2id[i].pci_prod == dev->pdev->device) { + offtbl = pci2id[i].card_id; + break; + } + } + + if (offtbl == NULL) { + dprintk("Chip %s: Unknown card!\n", dev->name); + return -ENODEV; + } + + dmacfg = hifn_read_1(dev, HIFN_1_DMA_CNFG); + + hifn_write_1(dev, HIFN_1_DMA_CNFG, + HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET | + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); + mdelay(1); + addr = hifn_read_1(dev, HIFN_1_UNLOCK_SECRET1); + mdelay(1); + hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0); + mdelay(1); + + for (i=0; i<12; ++i) { + addr = hifn_next_signature(addr, offtbl[i] + 0x101); + hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr); + + mdelay(1); + } + hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg); + + dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev)); + + return 0; +} + +static void hifn_init_dma(struct hifn_device *dev) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + u32 dptr = dev->desc_dma; + int i; + + for (i=0; i<HIFN_D_CMD_RSIZE; ++i) + dma->cmdr[i].p = __cpu_to_le32(dptr + + offsetof(struct hifn_dma, command_bufs[i][0])); + for (i=0; i<HIFN_D_RES_RSIZE; ++i) + dma->resr[i].p = __cpu_to_le32(dptr + + offsetof(struct hifn_dma, result_bufs[i][0])); + + /* + * Setup LAST descriptors. + */ + dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr + + offsetof(struct hifn_dma, cmdr[0])); + dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr + + offsetof(struct hifn_dma, srcr[0])); + dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr + + offsetof(struct hifn_dma, dstr[0])); + dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr + + offsetof(struct hifn_dma, resr[0])); + + dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; + dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; + dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; +} + +static void hifn_init_registers(struct hifn_device *dev) +{ + u32 dptr = dev->desc_dma; + + /* Initialization magic...
*/ + hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); + hifn_write_0(dev, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD); + hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); + + /* write all 4 ring address registers */ + hifn_write_1(dev, HIFN_1_DMA_CRAR, __cpu_to_le32(dptr + + offsetof(struct hifn_dma, cmdr[0]))); + hifn_write_1(dev, HIFN_1_DMA_SRAR, __cpu_to_le32(dptr + + offsetof(struct hifn_dma, srcr[0]))); + hifn_write_1(dev, HIFN_1_DMA_DRAR, __cpu_to_le32(dptr + + offsetof(struct hifn_dma, dstr[0]))); + hifn_write_1(dev, HIFN_1_DMA_RRAR, __cpu_to_le32(dptr + + offsetof(struct hifn_dma, resr[0]))); + + mdelay(2); +#if 0 + hifn_write_1(dev, HIFN_1_DMA_CSR, + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS | + HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS | + HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST | + HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER | + HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST | + HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER | + HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST | + HIFN_DMACSR_S_WAIT | + HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST | + HIFN_DMACSR_C_WAIT | + HIFN_DMACSR_ENGINE | + HIFN_DMACSR_PUBDONE); +#else + hifn_write_1(dev, HIFN_1_DMA_CSR, + HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | + HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA | + HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST | + HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER | + HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST | + HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER | + HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST | + HIFN_DMACSR_S_WAIT | + HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST | + HIFN_DMACSR_C_WAIT | + HIFN_DMACSR_ENGINE | + HIFN_DMACSR_PUBDONE); +#endif + hifn_read_1(dev, HIFN_1_DMA_CSR); + + dev->dmareg |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT | + HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER | + HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT | + HIFN_DMAIER_ENGINE; + dev->dmareg &= ~HIFN_DMAIER_C_WAIT; + + hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); + hifn_read_1(dev, HIFN_1_DMA_IER); +#if 0 + hifn_write_0(dev, HIFN_0_PUCNFG, HIFN_PUCNFG_ENCCNFG | + HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES | + HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 | + HIFN_PUCNFG_DRAM); +#else + hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342); +#endif + hifn_write_1(dev, HIFN_1_PLL, HIFN_PLL_7956); + + hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); + hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST | + ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) | + ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL)); +} + +static int hifn_setup_base_command(struct hifn_device *dev, u8 *buf, + unsigned dlen, unsigned slen, u16 mask, u8 snum) +{ + struct hifn_base_command *base_cmd; + u8 *buf_pos = buf; + + base_cmd = (struct hifn_base_command *)buf_pos; + base_cmd->masks = __cpu_to_le16(mask); + base_cmd->total_source_count = + __cpu_to_le16(slen & HIFN_BASE_CMD_LENMASK_LO); + base_cmd->total_dest_count = + __cpu_to_le16(dlen & HIFN_BASE_CMD_LENMASK_LO); + + dlen >>= 16; + slen >>= 16; + base_cmd->session_num = __cpu_to_le16(snum | + ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) | + ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M)); + + return sizeof(struct hifn_base_command); +} + +static int hifn_setup_crypto_command(struct hifn_device *dev, + u8 *buf, unsigned 
dlen, unsigned slen, + u8 *key, int keylen, u8 *iv, int ivsize, u16 mode) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + struct hifn_crypt_command *cry_cmd; + u8 *buf_pos = buf; + u16 cmd_len; + + cry_cmd = (struct hifn_crypt_command *)buf_pos; + + cry_cmd->source_count = __cpu_to_le16(dlen & 0xffff); + dlen >>= 16; + cry_cmd->masks = __cpu_to_le16(mode | + ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & + HIFN_CRYPT_CMD_SRCLEN_M)); + cry_cmd->header_skip = 0; + cry_cmd->reserved = 0; + + buf_pos += sizeof(struct hifn_crypt_command); + + dma->cmdu++; + if (dma->cmdu > 1) { + dev->dmareg |= HIFN_DMAIER_C_WAIT; + hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); + } + + if (keylen) { + memcpy(buf_pos, key, keylen); + buf_pos += keylen; + } + if (ivsize) { + memcpy(buf_pos, iv, ivsize); + buf_pos += ivsize; + } + + cmd_len = buf_pos - buf; + + return cmd_len; +} + +static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page, + unsigned int offset, unsigned int size) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + int idx; + dma_addr_t addr; + + addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE); + + idx = dma->srci; + + dma->srcr[idx].p = __cpu_to_le32(addr); + dma->srcr[idx].l = __cpu_to_le32(size) | HIFN_D_VALID | + HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST; + + if (++idx == HIFN_D_SRC_RSIZE) { + dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID | + HIFN_D_JUMP | + HIFN_D_MASKDONEIRQ | HIFN_D_LAST); + idx = 0; + } + + dma->srci = idx; + dma->srcu++; + + if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) { + hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA); + dev->flags |= HIFN_FLAG_SRC_BUSY; + } + + return size; +} + +static void hifn_setup_res_desc(struct hifn_device *dev) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + + dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT | + HIFN_D_VALID | HIFN_D_LAST); + /* + * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID | + * HIFN_D_LAST | HIFN_D_NOINVALID); + */ + + if (++dma->resi == HIFN_D_RES_RSIZE) { + dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID | + HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST); + dma->resi = 0; + } + + dma->resu++; + + if (!(dev->flags & HIFN_FLAG_RES_BUSY)) { + hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA); + dev->flags |= HIFN_FLAG_RES_BUSY; + } +} + +static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page, + unsigned offset, unsigned size) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + int idx; + dma_addr_t addr; + + addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE); + + idx = dma->dsti; + dma->dstr[idx].p = __cpu_to_le32(addr); + dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | + HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST); + + if (++idx == HIFN_D_DST_RSIZE) { + dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID | + HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | + HIFN_D_LAST | HIFN_D_NOINVALID); + idx = 0; + } + dma->dsti = idx; + dma->dstu++; + + if (!(dev->flags & HIFN_FLAG_DST_BUSY)) { + hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA); + dev->flags |= HIFN_FLAG_DST_BUSY; + } +} + +static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff, + struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv, + struct hifn_context *ctx) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + int cmd_len, sa_idx; + u8 *buf, *buf_pos; + u16 mask; + + dprintk("%s: spage: %p, soffset: %u, 
dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n", + dev->name, spage, soff, dpage, doff, nbytes, priv, ctx); + + sa_idx = dma->resi; + + hifn_setup_src_desc(dev, spage, soff, nbytes); + + buf_pos = buf = dma->command_bufs[dma->cmdi]; + + mask = 0; + switch (ctx->op) { + case ACRYPTO_OP_DECRYPT: + mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE; + break; + case ACRYPTO_OP_ENCRYPT: + mask = HIFN_BASE_CMD_CRYPT; + break; + case ACRYPTO_OP_HMAC: + mask = HIFN_BASE_CMD_MAC; + break; + default: + goto err_out; + } + + buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes, + nbytes, mask, dev->snum); + + if (ctx->op == ACRYPTO_OP_ENCRYPT || ctx->op == ACRYPTO_OP_DECRYPT) { + u16 md = 0; + + if (ctx->keysize) + md |= HIFN_CRYPT_CMD_NEW_KEY; + if (ctx->iv && ctx->mode != ACRYPTO_MODE_ECB) + md |= HIFN_CRYPT_CMD_NEW_IV; + + switch (ctx->mode) { + case ACRYPTO_MODE_ECB: + md |= HIFN_CRYPT_CMD_MODE_ECB; + break; + case ACRYPTO_MODE_CBC: + md |= HIFN_CRYPT_CMD_MODE_CBC; + break; + case ACRYPTO_MODE_CFB: + md |= HIFN_CRYPT_CMD_MODE_CFB; + break; + case ACRYPTO_MODE_OFB: + md |= HIFN_CRYPT_CMD_MODE_OFB; + break; + default: + goto err_out; + } + + switch (ctx->type) { + case ACRYPTO_TYPE_AES_128: + if (ctx->keysize != 16) + goto err_out; + md |= HIFN_CRYPT_CMD_KSZ_128 | + HIFN_CRYPT_CMD_ALG_AES; + break; + case ACRYPTO_TYPE_AES_192: + if (ctx->keysize != 24) + goto err_out; + md |= HIFN_CRYPT_CMD_KSZ_192 | + HIFN_CRYPT_CMD_ALG_AES; + break; + case ACRYPTO_TYPE_AES_256: + if (ctx->keysize != 32) + goto err_out; + md |= HIFN_CRYPT_CMD_KSZ_256 | + HIFN_CRYPT_CMD_ALG_AES; + break; + case ACRYPTO_TYPE_3DES: + if (ctx->keysize != 24) + goto err_out; + md |= HIFN_CRYPT_CMD_ALG_3DES; + break; + case ACRYPTO_TYPE_DES: + if (ctx->keysize != 8) + goto err_out; + md |= HIFN_CRYPT_CMD_ALG_DES; + break; + default: + goto err_out; + } + + buf_pos += hifn_setup_crypto_command(dev, buf_pos, + nbytes, nbytes, ctx->key, ctx->keysize, + ctx->iv, ctx->ivsize, md); + } + + dev->sa[sa_idx] = priv; + + cmd_len = buf_pos - buf; + dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID | + HIFN_D_LAST | HIFN_D_MASKDONEIRQ); + + if (++dma->cmdi == HIFN_D_CMD_RSIZE) { + dma->cmdr[dma->cmdi].l = __cpu_to_le32(HIFN_MAX_COMMAND | + HIFN_D_VALID | HIFN_D_LAST | + HIFN_D_MASKDONEIRQ | HIFN_D_JUMP); + dma->cmdi = 0; + } else + dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID); + + if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) { + hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); + dev->flags |= HIFN_FLAG_CMD_BUSY; + } + + hifn_setup_dst_desc(dev, dpage, doff, nbytes); + hifn_setup_res_desc(dev); + + return 0; + +err_out: + return -EINVAL; +} + +static int ablkcipher_walk_init(struct ablkcipher_walk *w, + int num, gfp_t gfp_flags) +{ + int i; + + num = min(ASYNC_SCATTERLIST_CACHE, num); + sg_init_table(w->cache, num); + + w->num = 0; + for (i=0; icache[i]; + + sg_set_page(s, page, PAGE_SIZE, 0); + w->num++; + } + + return i; +} + +static void ablkcipher_walk_exit(struct ablkcipher_walk *w) +{ + int i; + + for (i=0; inum; ++i) { + struct scatterlist *s = &w->cache[i]; + + __free_page(sg_page(s)); + + s->length = 0; + } + + w->num = 0; +} + +static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist *src, + unsigned int size, unsigned int *nbytesp) +{ + unsigned int copy, drest = *drestp, nbytes = *nbytesp; + int idx = 0; + void *saddr; + + if (drest < size || size > nbytes) + return -EINVAL; + + while (size) { + copy = min(drest, src->length); + + saddr = kmap_atomic(sg_page(src), 
KM_SOFTIRQ1); + memcpy(daddr, saddr + src->offset, copy); + kunmap_atomic(saddr, KM_SOFTIRQ1); + + size -= copy; + drest -= copy; + nbytes -= copy; + daddr += copy; + + dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n", + __func__, copy, size, drest, nbytes); + + src++; + idx++; + } + + *nbytesp = nbytes; + *drestp = drest; + + return idx; +} + +static int ablkcipher_walk(struct ablkcipher_request *req, + struct ablkcipher_walk *w) +{ + unsigned blocksize = + crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req)); + unsigned alignmask = + crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req)); + struct scatterlist *src, *dst, *t; + void *daddr; + unsigned int nbytes = req->nbytes, offset, copy, diff; + int idx, tidx, err; + + tidx = idx = 0; + offset = 0; + while (nbytes) { + if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED)) + return -EINVAL; + + src = &req->src[idx]; + dst = &req->dst[idx]; + + dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, " + "blocksize: %u, nbytes: %u.\n", + __func__, src->length, dst->length, src->offset, + dst->offset, offset, blocksize, nbytes); + + if (src->length & (blocksize - 1) || + src->offset & (alignmask - 1) || + dst->length & (blocksize - 1) || + dst->offset & (alignmask - 1) || + offset) { + unsigned slen = src->length - offset; + unsigned dlen = PAGE_SIZE; + + t = &w->cache[idx]; + + daddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0); + err = ablkcipher_add(daddr, &dlen, src, slen, &nbytes); + if (err < 0) + goto err_out_unmap; + + idx += err; + + copy = slen & ~(blocksize - 1); + diff = slen & (blocksize - 1); + + if (dlen < nbytes) { + /* + * Destination page does not have enough space + * to put there additional blocksized chunk, + * so we mark that page as containing only + * blocksize aligned chunks: + * t->length = (slen & ~(blocksize - 1)); + * and increase number of bytes to be processed + * in next chunk: + * nbytes += diff; + */ + nbytes += diff; + + /* + * Temporary of course... + * Kick author if you will catch this one. 
+ */ + printk(KERN_ERR "%s: dlen: %u, nbytes: %u," + "slen: %u, offset: %u.\n", + __func__, dlen, nbytes, slen, offset); + printk(KERN_ERR "%s: please contact author to fix this " + "issue, generally you should not catch " + "this path under any condition but who " + "knows how did you use crypto code.\n" + "Thank you.\n", __func__); + BUG(); + } else { + copy += diff + nbytes; + + src = &req->src[idx]; + + err = ablkcipher_add(daddr + slen, &dlen, src, nbytes, &nbytes); + if (err < 0) + goto err_out_unmap; + + idx += err; + } + + t->length = copy; + t->offset = offset; + + kunmap_atomic(daddr, KM_SOFTIRQ0); + } else { + nbytes -= src->length; + idx++; + } + + tidx++; + } + + return tidx; + +err_out_unmap: + kunmap_atomic(daddr, KM_SOFTIRQ0); + return err; +} + +static int hifn_setup_session(struct ablkcipher_request *req) +{ + struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); + struct hifn_device *dev = ctx->dev; + struct page *spage, *dpage; + unsigned long soff, doff, flags; + unsigned int nbytes = req->nbytes, idx = 0, len; + int err = -EINVAL, sg_num; + struct scatterlist *src, *dst, *t; + unsigned blocksize = + crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req)); + unsigned alignmask = + crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req)); + + if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB) + goto err_out_exit; + + ctx->walk.flags = 0; + + while (nbytes) { + src = &req->src[idx]; + dst = &req->dst[idx]; + + if (src->length & (blocksize - 1) || + src->offset & (alignmask - 1) || + dst->length & (blocksize - 1) || + dst->offset & (alignmask - 1)) { + ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED; + } + + nbytes -= src->length; + idx++; + } + + if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { + err = ablkcipher_walk_init(&ctx->walk, idx, GFP_ATOMIC); + if (err < 0) + return err; + } + + nbytes = req->nbytes; + idx = 0; + + sg_num = ablkcipher_walk(req, &ctx->walk); + + atomic_set(&ctx->sg_num, sg_num); + + spin_lock_irqsave(&dev->lock, flags); + if (dev->started + sg_num > HIFN_QUEUE_LENGTH) { + err = -EAGAIN; + goto err_out; + } + + dev->snum++; + dev->started += sg_num; + + while (nbytes) { + src = &req->src[idx]; + dst = &req->dst[idx]; + t = &ctx->walk.cache[idx]; + + if (t->length) { + spage = dpage = sg_page(t); + soff = doff = 0; + len = t->length; + } else { + spage = sg_page(src); + soff = src->offset; + + dpage = sg_page(dst); + doff = dst->offset; + + len = dst->length; + } + + idx++; + + err = hifn_setup_dma(dev, spage, soff, dpage, doff, nbytes, + req, ctx); + if (err) + goto err_out; + + nbytes -= len; + } + + dev->active = HIFN_DEFAULT_ACTIVE_NUM; + spin_unlock_irqrestore(&dev->lock, flags); + + return 0; + +err_out: + spin_unlock_irqrestore(&dev->lock, flags); +err_out_exit: + if (err && printk_ratelimit()) + dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, " + "type: %u, err: %d.\n", + dev->name, ctx->iv, ctx->ivsize, + ctx->key, ctx->keysize, + ctx->mode, ctx->op, ctx->type, err); + + return err; +} + +static int hifn_test(struct hifn_device *dev, int encdec, u8 snum) +{ + int n, err; + u8 src[16]; + struct hifn_context ctx; + u8 fips_aes_ecb_from_zero[16] = { + 0x66, 0xE9, 0x4B, 0xD4, + 0xEF, 0x8A, 0x2C, 0x3B, + 0x88, 0x4C, 0xFA, 0x59, + 0xCA, 0x34, 0x2B, 0x2E}; + + memset(src, 0, sizeof(src)); + memset(ctx.key, 0, sizeof(ctx.key)); + + ctx.dev = dev; + ctx.keysize = 16; + ctx.ivsize = 0; + ctx.iv = NULL; + ctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT; + ctx.mode = ACRYPTO_MODE_ECB; + ctx.type = ACRYPTO_TYPE_AES_128; 
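+	/*
+	 * FIPS-197 known answer: encrypting a single all-zero block with
+	 * an all-zero AES-128 key must yield 66 e9 4b d4 ef 8a 2c 3b
+	 * 88 4c fa 59 ca 34 2b 2e, i.e. fips_aes_ecb_from_zero[] above.
+	 */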
+ atomic_set(&ctx.sg_num, 1); + + err = hifn_setup_dma(dev, + virt_to_page(src), offset_in_page(src), + virt_to_page(src), offset_in_page(src), + sizeof(src), NULL, &ctx); + if (err) + goto err_out; + + msleep(200); + + dprintk("%s: decoded: ", dev->name); + for (n=0; nname); + for (n=0; nname); + return 0; + } + +err_out: + printk(KERN_INFO "%s: AES 128 ECB test has been failed.\n", dev->name); + return -1; +} + +static int hifn_start_device(struct hifn_device *dev) +{ + int err; + + hifn_reset_dma(dev, 1); + + err = hifn_enable_crypto(dev); + if (err) + return err; + + hifn_reset_puc(dev); + + hifn_init_dma(dev); + + hifn_init_registers(dev); + + hifn_init_pubrng(dev); + + return 0; +} + +static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset, + struct scatterlist *dst, unsigned int size, unsigned int *nbytesp) +{ + unsigned int srest = *srestp, nbytes = *nbytesp, copy; + void *daddr; + int idx = 0; + + if (srest < size || size > nbytes) + return -EINVAL; + + while (size) { + + copy = min(dst->length, srest); + + daddr = kmap_atomic(sg_page(dst), KM_IRQ0); + memcpy(daddr + dst->offset + offset, saddr, copy); + kunmap_atomic(daddr, KM_IRQ0); + + nbytes -= copy; + size -= copy; + srest -= copy; + saddr += copy; + offset = 0; + + dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n", + __func__, copy, size, srest, nbytes); + + dst++; + idx++; + } + + *nbytesp = nbytes; + *srestp = srest; + + return idx; +} + +static void hifn_process_ready(struct ablkcipher_request *req, int error) +{ + struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); + struct hifn_device *dev; + + dprintk("%s: req: %p, ctx: %p.\n", __func__, req, ctx); + + dev = ctx->dev; + dprintk("%s: req: %p, started: %d, sg_num: %d.\n", + __func__, req, dev->started, atomic_read(&ctx->sg_num)); + + if (--dev->started < 0) + BUG(); + + if (atomic_dec_and_test(&ctx->sg_num)) { + unsigned int nbytes = req->nbytes; + int idx = 0, err; + struct scatterlist *dst, *t; + void *saddr; + + if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { + while (nbytes) { + t = &ctx->walk.cache[idx]; + dst = &req->dst[idx]; + + dprintk("\n%s: sg_page(t): %p, t->length: %u, " + "sg_page(dst): %p, dst->length: %u, " + "nbytes: %u.\n", + __func__, sg_page(t), t->length, + sg_page(dst), dst->length, nbytes); + + if (!t->length) { + nbytes -= dst->length; + idx++; + continue; + } + + saddr = kmap_atomic(sg_page(t), KM_IRQ1); + + err = ablkcipher_get(saddr, &t->length, t->offset, + dst, nbytes, &nbytes); + if (err < 0) { + kunmap_atomic(saddr, KM_IRQ1); + break; + } + + idx += err; + kunmap_atomic(saddr, KM_IRQ1); + } + + ablkcipher_walk_exit(&ctx->walk); + } + + req->base.complete(&req->base, error); + } +} + +static void hifn_check_for_completion(struct hifn_device *dev, int error) +{ + int i; + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + + for (i=0; iresr[i]; + + if (!(d->l & __cpu_to_le32(HIFN_D_VALID)) && dev->sa[i]) { + dev->success++; + dev->reset = 0; + hifn_process_ready(dev->sa[i], error); + dev->sa[i] = NULL; + } + + if (d->l & __cpu_to_le32(HIFN_D_DESTOVER | HIFN_D_OVER)) + if (printk_ratelimit()) + printk("%s: overflow detected [d: %u, o: %u] " + "at %d resr: l: %08x, p: %08x.\n", + dev->name, + !!(d->l & __cpu_to_le32(HIFN_D_DESTOVER)), + !!(d->l & __cpu_to_le32(HIFN_D_OVER)), + i, d->l, d->p); + } +} + +static void hifn_clear_rings(struct hifn_device *dev) +{ + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + int i, u; + + dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " + 
"k: %d.%d.%d.%d.\n", + dev->name, + dma->cmdi, dma->srci, dma->dsti, dma->resi, + dma->cmdu, dma->srcu, dma->dstu, dma->resu, + dma->cmdk, dma->srck, dma->dstk, dma->resk); + + i = dma->resk; u = dma->resu; + while (u != 0) { + if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID)) + break; + + if (i != HIFN_D_RES_RSIZE) + u--; + + if (++i == (HIFN_D_RES_RSIZE + 1)) + i = 0; + } + dma->resk = i; dma->resu = u; + + i = dma->srck; u = dma->srcu; + while (u != 0) { + if (i == HIFN_D_SRC_RSIZE) + i = 0; + if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID)) + break; + i++, u--; + } + dma->srck = i; dma->srcu = u; + + i = dma->cmdk; u = dma->cmdu; + while (u != 0) { + if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID)) + break; + if (i != HIFN_D_CMD_RSIZE) + u--; + if (++i == (HIFN_D_CMD_RSIZE + 1)) + i = 0; + } + dma->cmdk = i; dma->cmdu = u; + + i = dma->dstk; u = dma->dstu; + while (u != 0) { + if (i == HIFN_D_DST_RSIZE) + i = 0; + if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID)) + break; + i++, u--; + } + dma->dstk = i; dma->dstu = u; + + dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " + "k: %d.%d.%d.%d.\n", + dev->name, + dma->cmdi, dma->srci, dma->dsti, dma->resi, + dma->cmdu, dma->srcu, dma->dstu, dma->resu, + dma->cmdk, dma->srck, dma->dstk, dma->resk); +} + +static void hifn_work(struct work_struct *work) +{ + struct delayed_work *dw = container_of(work, struct delayed_work, work); + struct hifn_device *dev = container_of(dw, struct hifn_device, work); + unsigned long flags; + int reset = 0; + u32 r = 0; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->active == 0) { + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + + if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) { + dev->flags &= ~HIFN_FLAG_CMD_BUSY; + r |= HIFN_DMACSR_C_CTRL_DIS; + } + if (dma->srcu == 0 && (dev->flags & HIFN_FLAG_SRC_BUSY)) { + dev->flags &= ~HIFN_FLAG_SRC_BUSY; + r |= HIFN_DMACSR_S_CTRL_DIS; + } + if (dma->dstu == 0 && (dev->flags & HIFN_FLAG_DST_BUSY)) { + dev->flags &= ~HIFN_FLAG_DST_BUSY; + r |= HIFN_DMACSR_D_CTRL_DIS; + } + if (dma->resu == 0 && (dev->flags & HIFN_FLAG_RES_BUSY)) { + dev->flags &= ~HIFN_FLAG_RES_BUSY; + r |= HIFN_DMACSR_R_CTRL_DIS; + } + if (r) + hifn_write_1(dev, HIFN_1_DMA_CSR, r); + } else + dev->active--; + + if (dev->prev_success == dev->success && dev->started) + reset = 1; + dev->prev_success = dev->success; + spin_unlock_irqrestore(&dev->lock, flags); + + if (reset) { + dprintk("%s: r: %08x, active: %d, started: %d, " + "success: %lu: reset: %d.\n", + dev->name, r, dev->active, dev->started, + dev->success, reset); + + if (++dev->reset >= 5) { + dprintk("%s: really hard reset.\n", dev->name); + hifn_reset_dma(dev, 1); + hifn_stop_device(dev); + hifn_start_device(dev); + dev->reset = 0; + } + + spin_lock_irqsave(&dev->lock, flags); + hifn_check_for_completion(dev, -EBUSY); + hifn_clear_rings(dev); + dev->started = 0; + spin_unlock_irqrestore(&dev->lock, flags); + } + + schedule_delayed_work(&dev->work, HZ); +} + +static irqreturn_t hifn_interrupt(int irq, void *data) +{ + struct hifn_device *dev = (struct hifn_device *)data; + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + u32 dmacsr, restart; + + dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR); + + dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], " + "i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n", + dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi, + dma->cmdu, dma->srcu, dma->dstu, dma->resu, + dma->cmdi, dma->srci, dma->dsti, dma->resi); + + if ((dmacsr & dev->dmareg) == 0) + 
return IRQ_NONE; + + hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & dev->dmareg); + + if (dmacsr & HIFN_DMACSR_ENGINE) + hifn_write_0(dev, HIFN_0_PUISR, hifn_read_0(dev, HIFN_0_PUISR)); + if (dmacsr & HIFN_DMACSR_PUBDONE) + hifn_write_1(dev, HIFN_1_PUB_STATUS, + hifn_read_1(dev, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); + + restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER); + if (restart) { + u32 puisr = hifn_read_0(dev, HIFN_0_PUISR); + + if (printk_ratelimit()) + printk("%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n", + dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER), + !!(dmacsr & HIFN_DMACSR_D_OVER), + puisr, !!(puisr & HIFN_PUISR_DSTOVER)); + if (!!(puisr & HIFN_PUISR_DSTOVER)) + hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); + hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER | + HIFN_DMACSR_D_OVER)); + } + + restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | + HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); + if (restart) { + if (printk_ratelimit()) + printk("%s: abort: c: %d, s: %d, d: %d, r: %d.\n", + dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT), + !!(dmacsr & HIFN_DMACSR_S_ABORT), + !!(dmacsr & HIFN_DMACSR_D_ABORT), + !!(dmacsr & HIFN_DMACSR_R_ABORT)); + hifn_reset_dma(dev, 1); + hifn_init_dma(dev); + hifn_init_registers(dev); + } + + if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) { + dprintk("%s: wait on command.\n", dev->name); + dev->dmareg &= ~(HIFN_DMAIER_C_WAIT); + hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); + } + + hifn_check_for_completion(dev, 0); + hifn_clear_rings(dev); + + return IRQ_HANDLED; +} + +static void hifn_flush(struct hifn_device *dev) +{ + unsigned long flags; + struct crypto_async_request *async_req; + struct hifn_context *ctx; + struct ablkcipher_request *req; + struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; + int i; + + spin_lock_irqsave(&dev->lock, flags); + for (i=0; iresr[i]; + + if (dev->sa[i]) { + hifn_process_ready(dev->sa[i], + (d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0); + } + } + + while ((async_req = crypto_dequeue_request(&dev->queue))) { + ctx = crypto_tfm_ctx(async_req->tfm); + req = container_of(async_req, struct ablkcipher_request, base); + + hifn_process_ready(req, -ENODEV); + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct hifn_context *ctx = crypto_tfm_ctx(tfm); + struct hifn_device *dev = ctx->dev; + + if (len > HIFN_MAX_CRYPT_KEY_LENGTH) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -1; + } + + dev->flags &= ~HIFN_FLAG_OLD_KEY; + + memcpy(ctx->key, key, len); + ctx->keysize = len; + + return 0; +} + +static int hifn_handle_req(struct ablkcipher_request *req) +{ + struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); + struct hifn_device *dev = ctx->dev; + int err = -EAGAIN; + + if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH) + err = hifn_setup_session(req); + + if (err == -EAGAIN) { + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + err = ablkcipher_enqueue_request(&dev->queue, req); + spin_unlock_irqrestore(&dev->lock, flags); + } + + return err; +} + +static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op, + u8 type, u8 mode) +{ + struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); + unsigned ivsize; + + ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); + + if (req->info && mode != 
ACRYPTO_MODE_ECB) { + if (type == ACRYPTO_TYPE_AES_128) + ivsize = HIFN_AES_IV_LENGTH; + else if (type == ACRYPTO_TYPE_DES) + ivsize = HIFN_DES_KEY_LENGTH; + else if (type == ACRYPTO_TYPE_3DES) + ivsize = HIFN_3DES_KEY_LENGTH; + } + + if (ctx->keysize != 16 && type == ACRYPTO_TYPE_AES_128) { + if (ctx->keysize == 24) + type = ACRYPTO_TYPE_AES_192; + else if (ctx->keysize == 32) + type = ACRYPTO_TYPE_AES_256; + } + + ctx->op = op; + ctx->mode = mode; + ctx->type = type; + ctx->iv = req->info; + ctx->ivsize = ivsize; + + /* + * HEAVY TODO: needs to kick Herbert XU to write documentation. + * HEAVY TODO: needs to kick Herbert XU to write documentation. + * HEAVY TODO: needs to kick Herbert XU to write documentation. + */ + + return hifn_handle_req(req); +} + +static int hifn_process_queue(struct hifn_device *dev) +{ + struct crypto_async_request *async_req; + struct hifn_context *ctx; + struct ablkcipher_request *req; + unsigned long flags; + int err = 0; + + while (dev->started < HIFN_QUEUE_LENGTH) { + spin_lock_irqsave(&dev->lock, flags); + async_req = crypto_dequeue_request(&dev->queue); + spin_unlock_irqrestore(&dev->lock, flags); + + if (!async_req) + break; + + ctx = crypto_tfm_ctx(async_req->tfm); + req = container_of(async_req, struct ablkcipher_request, base); + + err = hifn_handle_req(req); + if (err) + break; + } + + return err; +} + +static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op, + u8 type, u8 mode) +{ + int err; + struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); + struct hifn_device *dev = ctx->dev; + + err = hifn_setup_crypto_req(req, op, type, mode); + if (err) + return err; + + if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen) + err = hifn_process_queue(dev); + + return err; +} + +/* + * AES ecryption functions. + */ +static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB); +} +static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC); +} +static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB); +} +static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB); +} + +/* + * AES decryption functions. + */ +static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB); +} +static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC); +} +static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB); +} +static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB); +} + +/* + * DES ecryption functions. 
+ */ +static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB); +} +static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC); +} +static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB); +} +static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB); +} + +/* + * DES decryption functions. + */ +static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB); +} +static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC); +} +static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB); +} +static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB); +} + +/* + * 3DES ecryption functions. + */ +static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB); +} +static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC); +} +static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB); +} +static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB); +} + +/* + * 3DES decryption functions. + */ +static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB); +} +static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC); +} +static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB); +} +static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req) +{ + return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, + ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB); +} + +struct hifn_alg_template +{ + char name[CRYPTO_MAX_ALG_NAME]; + char drv_name[CRYPTO_MAX_ALG_NAME]; + unsigned int bsize; + struct ablkcipher_alg ablkcipher; +}; + +static struct hifn_alg_template hifn_alg_templates[] = { + /* + * 3DES ECB, CBC, CFB and OFB modes. 
+ */ + { + .name = "cfb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_3DES_KEY_LENGTH, + .max_keysize = HIFN_3DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_3des_cfb, + .decrypt = hifn_decrypt_3des_cfb, + }, + }, + { + .name = "ofb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_3DES_KEY_LENGTH, + .max_keysize = HIFN_3DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_3des_ofb, + .decrypt = hifn_decrypt_3des_ofb, + }, + }, + { + .name = "cbc(des3_ede)", .drv_name = "hifn-3des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_3DES_KEY_LENGTH, + .max_keysize = HIFN_3DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_3des_cbc, + .decrypt = hifn_decrypt_3des_cbc, + }, + }, + { + .name = "ecb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_3DES_KEY_LENGTH, + .max_keysize = HIFN_3DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_3des_ecb, + .decrypt = hifn_decrypt_3des_ecb, + }, + }, + + /* + * DES ECB, CBC, CFB and OFB modes. + */ + { + .name = "cfb(des)", .drv_name = "hifn-des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_DES_KEY_LENGTH, + .max_keysize = HIFN_DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_des_cfb, + .decrypt = hifn_decrypt_des_cfb, + }, + }, + { + .name = "ofb(des)", .drv_name = "hifn-des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_DES_KEY_LENGTH, + .max_keysize = HIFN_DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_des_ofb, + .decrypt = hifn_decrypt_des_ofb, + }, + }, + { + .name = "cbc(des)", .drv_name = "hifn-des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_DES_KEY_LENGTH, + .max_keysize = HIFN_DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_des_cbc, + .decrypt = hifn_decrypt_des_cbc, + }, + }, + { + .name = "ecb(des)", .drv_name = "hifn-des", .bsize = 8, + .ablkcipher = { + .min_keysize = HIFN_DES_KEY_LENGTH, + .max_keysize = HIFN_DES_KEY_LENGTH, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_des_ecb, + .decrypt = hifn_decrypt_des_ecb, + }, + }, + + /* + * AES ECB, CBC, CFB and OFB modes. 
+ */ + { + .name = "ecb(aes)", .drv_name = "hifn-aes", .bsize = 16, + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_aes_ecb, + .decrypt = hifn_decrypt_aes_ecb, + }, + }, + { + .name = "cbc(aes)", .drv_name = "hifn-aes", .bsize = 16, + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_aes_cbc, + .decrypt = hifn_decrypt_aes_cbc, + }, + }, + { + .name = "cfb(aes)", .drv_name = "hifn-aes", .bsize = 16, + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_aes_cfb, + .decrypt = hifn_decrypt_aes_cfb, + }, + }, + { + .name = "ofb(aes)", .drv_name = "hifn-aes", .bsize = 16, + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = hifn_setkey, + .encrypt = hifn_encrypt_aes_ofb, + .decrypt = hifn_decrypt_aes_ofb, + }, + }, +}; + +static int hifn_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg); + struct hifn_context *ctx = crypto_tfm_ctx(tfm); + + ctx->dev = ha->dev; + + return 0; +} + +static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t) +{ + struct hifn_crypto_alg *alg; + int err; + + alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL); + if (!alg) + return -ENOMEM; + + snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name); + snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", t->drv_name); + + alg->alg.cra_priority = 300; + alg->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC; + alg->alg.cra_blocksize = t->bsize; + alg->alg.cra_ctxsize = sizeof(struct hifn_context); + alg->alg.cra_alignmask = 15; + if (t->bsize == 8) + alg->alg.cra_alignmask = 3; + alg->alg.cra_type = &crypto_ablkcipher_type; + alg->alg.cra_module = THIS_MODULE; + alg->alg.cra_u.ablkcipher = t->ablkcipher; + alg->alg.cra_init = hifn_cra_init; + + alg->dev = dev; + + list_add_tail(&alg->entry, &dev->alg_list); + + err = crypto_register_alg(&alg->alg); + if (err) { + list_del(&alg->entry); + kfree(alg); + } + + return err; +} + +static void hifn_unregister_alg(struct hifn_device *dev) +{ + struct hifn_crypto_alg *a, *n; + + list_for_each_entry_safe(a, n, &dev->alg_list, entry) { + list_del(&a->entry); + crypto_unregister_alg(&a->alg); + kfree(a); + } +} + +static int hifn_register_alg(struct hifn_device *dev) +{ + int i, err; + + for (i=0; ialg_list); + + snprintf(dev->name, sizeof(dev->name), "%s", name); + spin_lock_init(&dev->lock); + + for (i=0; i<3; ++i) { + unsigned long addr, size; + + addr = pci_resource_start(pdev, i); + size = pci_resource_len(pdev, i); + + dev->bar[i] = ioremap_nocache(addr, size); + if (!dev->bar[i]) + goto err_out_unmap_bars; + } + + dev->result_mem = __get_free_pages(GFP_KERNEL, HIFN_MAX_RESULT_ORDER); + if (!dev->result_mem) { + dprintk("Failed to allocate %d pages for result_mem.\n", + HIFN_MAX_RESULT_ORDER); + goto err_out_unmap_bars; + } + memset((void *)dev->result_mem, 0, PAGE_SIZE*(1<dst = pci_map_single(pdev, (void *)dev->result_mem, + PAGE_SIZE << HIFN_MAX_RESULT_ORDER, PCI_DMA_FROMDEVICE); + + dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma), + &dev->desc_dma); + if (!dev->desc_virt) { + dprintk("Failed to allocate descriptor rings.\n"); + goto err_out_free_result_pages; + } + memset(dev->desc_virt, 0, sizeof(struct 
hifn_dma)); + + dev->pdev = pdev; + dev->irq = pdev->irq; + + for (i=0; isa[i] = NULL; + + pci_set_drvdata(pdev, dev); + + crypto_init_queue(&dev->queue, 1); + + err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev); + if (err) { + dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err); + dev->irq = 0; + goto err_out_free_desc; + } + + err = hifn_start_device(dev); + if (err) + goto err_out_free_irq; + + err = hifn_test(dev, 1, 0); + if (err) + goto err_out_stop_device; + + err = hifn_register_alg(dev); + if (err) + goto err_out_stop_device; + + INIT_DELAYED_WORK(&dev->work, hifn_work); + schedule_delayed_work(&dev->work, HZ); + + dprintk("HIFN crypto accelerator card at %s has been " + "successfully registered as %s.\n", + pci_name(pdev), dev->name); + + return 0; + +err_out_stop_device: + hifn_reset_dma(dev, 1); + hifn_stop_device(dev); +err_out_free_irq: + free_irq(dev->irq, dev->name); +err_out_free_desc: + pci_free_consistent(pdev, sizeof(struct hifn_dma), + dev->desc_virt, dev->desc_dma); + +err_out_free_result_pages: + pci_unmap_single(pdev, dev->dst, PAGE_SIZE << HIFN_MAX_RESULT_ORDER, + PCI_DMA_FROMDEVICE); + free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER); + +err_out_unmap_bars: + for (i=0; i<3; ++i) + if (dev->bar[i]) + iounmap(dev->bar[i]); + +err_out_free_regions: + pci_release_regions(pdev); + +err_out_disable_pci_device: + pci_disable_device(pdev); + + return err; +} + +static void hifn_remove(struct pci_dev *pdev) +{ + int i; + struct hifn_device *dev; + + dev = pci_get_drvdata(pdev); + + if (dev) { + cancel_delayed_work(&dev->work); + flush_scheduled_work(); + + hifn_unregister_alg(dev); + hifn_reset_dma(dev, 1); + hifn_stop_device(dev); + + free_irq(dev->irq, dev->name); + + hifn_flush(dev); + + pci_free_consistent(pdev, sizeof(struct hifn_dma), + dev->desc_virt, dev->desc_dma); + pci_unmap_single(pdev, dev->dst, + PAGE_SIZE << HIFN_MAX_RESULT_ORDER, + PCI_DMA_FROMDEVICE); + free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER); + for (i=0; i<3; ++i) + if (dev->bar[i]) + iounmap(dev->bar[i]); + + kfree(dev); + } + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_device_id hifn_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7955) }, + { PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7956) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, hifn_pci_tbl); + +static struct pci_driver hifn_pci_driver = { + .name = "hifn795x", + .id_table = hifn_pci_tbl, + .probe = hifn_probe, + .remove = __devexit_p(hifn_remove), +}; + +static int __devinit hifn_init(void) +{ + int err; + + err = pci_register_driver(&hifn_pci_driver); + if (err < 0) { + dprintk("Failed to register PCI driver for %s device.\n", + hifn_pci_driver.name); + return -ENODEV; + } + + printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip " + "has been successfully registered.\n"); + + return 0; +} + +static void __devexit hifn_fini(void) +{ + pci_unregister_driver(&hifn_pci_driver); + + printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip " + "has been successfully unregistered.\n"); +} + +module_init(hifn_init); +module_exit(hifn_fini); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Evgeniy Polyakov "); +MODULE_DESCRIPTION("Driver for HIFN 795x crypto accelerator chip."); -- cgit v1.1 From 16d004a2eda7be2c6a2de63eca2ad3c6b57307b3 Mon Sep 17 00:00:00 2001 From: Evgeniy Polyakov Date: Thu, 11 Oct 2007 19:48:58 +0800 Subject: [CRYPTO] des: Create header file for common macros This patch creates include/crypto/des.h for common macros 
shared between DES implementations. Signed-off-by: Evgeniy Polyakov Signed-off-by: Herbert Xu --- crypto/des_generic.c | 8 +------- include/crypto/des.h | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 7 deletions(-) create mode 100644 include/crypto/des.h diff --git a/crypto/des_generic.c b/crypto/des_generic.c index 59966d1..f75eafe 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -20,13 +20,7 @@ #include #include -#define DES_KEY_SIZE 8 -#define DES_EXPKEY_WORDS 32 -#define DES_BLOCK_SIZE 8 - -#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE) -#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) -#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE +#include #define ROL(x, r) ((x) = rol32((x), (r))) #define ROR(x, r) ((x) = ror32((x), (r))) diff --git a/include/crypto/des.h b/include/crypto/des.h new file mode 100644 index 0000000..2971c63 --- /dev/null +++ b/include/crypto/des.h @@ -0,0 +1,19 @@ +/* + * DES & Triple DES EDE Cipher Algorithms. + */ + +#ifndef __CRYPTO_DES_H +#define __CRYPTO_DES_H + +#define DES_KEY_SIZE 8 +#define DES_EXPKEY_WORDS 32 +#define DES_BLOCK_SIZE 8 + +#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE) +#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) +#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE + + +extern unsigned long des_ekey(u32 *pe, const u8 *k); + +#endif /* __CRYPTO_DES_H */ -- cgit v1.1 From c3041f9c93e31159f4e321abea7c1549d271e6a7 Mon Sep 17 00:00:00 2001 From: Evgeniy Polyakov Date: Thu, 11 Oct 2007 19:58:16 +0800 Subject: [CRYPTO] hifn_795x: Detect weak keys HIFN driver update to use DES weak key checks (exported in this patch). Signed-off-by: Evgeniy Polyakov Signed-off-by: Herbert Xu --- crypto/des_generic.c | 9 +++++---- drivers/crypto/Kconfig | 2 +- drivers/crypto/hifn_795x.c | 11 +++++++++++ 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/crypto/des_generic.c b/crypto/des_generic.c index f75eafe..355ecb7 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -628,7 +628,7 @@ static const u32 S8[64] = { * Choice 1 has operated on the key. * */ -static unsigned long ekey(u32 *pe, const u8 *k) +unsigned long des_ekey(u32 *pe, const u8 *k) { /* K&R: long is at least 32 bits */ unsigned long a, b, c, d, w; @@ -703,6 +703,7 @@ static unsigned long ekey(u32 *pe, const u8 *k) /* Zero if weak key */ return w; } +EXPORT_SYMBOL_GPL(des_ekey); /* * Decryption key expansion @@ -786,7 +787,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key, int ret; /* Expand to tmp */ - ret = ekey(tmp, key); + ret = des_ekey(tmp, key); if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; @@ -873,9 +874,9 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, return -EINVAL; } - ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; + des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; - ekey(expkey, key); + des_ekey(expkey, key); return 0; } diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 1ef7697..c2de135 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -85,9 +85,9 @@ config ZCRYPT_MONOLITHIC config CRYPTO_DEV_HIFN_795X tristate "Driver HIFN 795x crypto accelerator chips" + select CRYPTO_DES select CRYPTO_ALGAPI select CRYPTO_ABLKCIPHER - select CRYPTO_BLKCIPHER help This option allows you to have support for HIFN 795x crypto adapters. 
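The check is easy to reuse outside hifn. A minimal sketch of rejecting weak single-DES keys with the newly exported des_ekey(), where hifn_weak_des_check() is a hypothetical helper for illustration only (the real hunk lands in hifn_setkey() below):

static int hifn_weak_des_check(struct crypto_tfm *tfm, const u8 *key)
{
	u32 tmp[DES_EXPKEY_WORDS];

	/* des_ekey() returns 0 for a weak key */
	if (des_ekey(tmp, key) == 0 &&
	    (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	return 0;
}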
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index e3376f2..391c20a 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -29,6 +29,7 @@ #include #include +#include #include @@ -1924,6 +1925,16 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key, return -1; } + if (len == HIFN_DES_KEY_LENGTH) { + u32 tmp[DES_EXPKEY_WORDS]; + int ret = des_ekey(tmp, key); + + if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + } + dev->flags &= ~HIFN_FLAG_OLD_KEY; memcpy(ctx->key, key, len); -- cgit v1.1 From f1901f1fc710ec0fc482a7c98ee4552874139f39 Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Mon, 15 Oct 2007 22:09:47 +0800 Subject: [CRYPTO] geode: remove alias alias isn't required because the module provides PCI ids. Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- drivers/crypto/geode-aes.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 711e246..fa4c990 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -472,7 +472,6 @@ geode_aes_exit(void) MODULE_AUTHOR("Advanced Micro Devices, Inc."); MODULE_DESCRIPTION("Geode LX Hardware AES driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("aes"); module_init(geode_aes_init); module_exit(geode_aes_exit); -- cgit v1.1 From 89e12654312dddbbdbf17b5adc95b22cb672f947 Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Wed, 17 Oct 2007 23:18:57 +0800 Subject: [CRYPTO] aes: Move common defines into a header file This three defines are used in all AES related hardware. Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- arch/s390/crypto/aes_s390.c | 7 +------ arch/x86/crypto/aes_32.c | 4 +--- arch/x86/crypto/aes_64.c | 6 +----- crypto/aes_generic.c | 6 +----- drivers/crypto/geode-aes.c | 1 + drivers/crypto/padlock-aes.c | 4 +--- include/crypto/aes.h | 15 +++++++++++++++ 7 files changed, 21 insertions(+), 22 deletions(-) create mode 100644 include/crypto/aes.h diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 5126696..812511b 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -16,17 +16,12 @@ * */ +#include #include #include #include #include "crypt_s390.h" -#define AES_MIN_KEY_SIZE 16 -#define AES_MAX_KEY_SIZE 32 - -/* data block size for all key lengths */ -#define AES_BLOCK_SIZE 16 - #define AES_KEYLEN_128 1 #define AES_KEYLEN_192 2 #define AES_KEYLEN_256 4 diff --git a/arch/x86/crypto/aes_32.c b/arch/x86/crypto/aes_32.c index 49aad93..9b0ab50 100644 --- a/arch/x86/crypto/aes_32.c +++ b/arch/x86/crypto/aes_32.c @@ -38,6 +38,7 @@ */ #include +#include #include #include #include @@ -48,9 +49,6 @@ asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); -#define AES_MIN_KEY_SIZE 16 -#define AES_MAX_KEY_SIZE 32 -#define AES_BLOCK_SIZE 16 #define AES_KS_LENGTH 4 * AES_BLOCK_SIZE #define RC_LENGTH 29 diff --git a/arch/x86/crypto/aes_64.c b/arch/x86/crypto/aes_64.c index 5cdb13e..0b38a4c 100644 --- a/arch/x86/crypto/aes_64.c +++ b/arch/x86/crypto/aes_64.c @@ -54,6 +54,7 @@ */ #include +#include #include #include #include @@ -61,11 +62,6 @@ #include #include -#define AES_MIN_KEY_SIZE 16 -#define AES_MAX_KEY_SIZE 32 - -#define AES_BLOCK_SIZE 16 - /* * #define byte(x, nr) ((unsigned char)((x) >> (nr*8))) */ diff --git a/crypto/aes_generic.c 
b/crypto/aes_generic.c index 9401dca..6683260 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -52,6 +52,7 @@ s/RIJNDAEL(d_key)/D_KEY/g */ +#include #include #include #include @@ -59,11 +60,6 @@ #include #include -#define AES_MIN_KEY_SIZE 16 -#define AES_MAX_KEY_SIZE 32 - -#define AES_BLOCK_SIZE 16 - /* * #define byte(x, nr) ((unsigned char)((x) >> (nr*8))) */ diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index fa4c990..5008a1c 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 5f7e718..c33334a 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -44,6 +44,7 @@ */ #include +#include #include #include #include @@ -53,9 +54,6 @@ #include #include "padlock.h" -#define AES_MIN_KEY_SIZE 16 /* in uint8_t units */ -#define AES_MAX_KEY_SIZE 32 /* ditto */ -#define AES_BLOCK_SIZE 16 /* ditto */ #define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */ #define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t)) diff --git a/include/crypto/aes.h b/include/crypto/aes.h new file mode 100644 index 0000000..9ff842f --- /dev/null +++ b/include/crypto/aes.h @@ -0,0 +1,15 @@ +/* + * Common values for AES algorithms + */ + +#ifndef _CRYPTO_AES_H +#define _CRYPTO_AES_H + +#define AES_MIN_KEY_SIZE 16 +#define AES_MAX_KEY_SIZE 32 +#define AES_KEYSIZE_128 16 +#define AES_KEYSIZE_192 24 +#define AES_KEYSIZE_256 32 +#define AES_BLOCK_SIZE 16 + +#endif -- cgit v1.1 From 2d506d4fa1df18aa9505820722f834426edc907f Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Sun, 21 Oct 2007 16:04:23 +0800 Subject: [CRYPTO] geode: use consistent IV copy It is enough if the IV is copied before and after the while loop. With DM-Crypt it seems not to be required to save the IV after encryption, because a new one is used in the request (dunno about other users). It is not safe to load the IV within the while loop and not save it afterwards, because we will end up with the wrong IV if the request consists of more than one page.
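Condensed, the flow the patch establishes looks like this (a sketch of the resulting geode_cbc_encrypt()/geode_cbc_decrypt() shape, not the literal code; op->iv carries the chaining value, which the engine updates on every geode_aes_crypt() call):

	err = blkcipher_walk_virt(desc, &walk);
	memcpy(op->iv, walk.iv, AES_IV_LENGTH);		/* load the IV once */
	while ((nbytes = walk.nbytes)) {
		/* ... set up op->src, op->dst, op->len ... */
		ret = geode_aes_crypt(op);		/* IV chains via op->iv */
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	memcpy(walk.iv, op->iv, AES_IV_LENGTH);		/* store the final IV once */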
Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- drivers/crypto/geode-aes.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 5008a1c..6c04f13 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -226,6 +226,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc, blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); + memcpy(op->iv, walk.iv, AES_IV_LENGTH); while((nbytes = walk.nbytes)) { op->src = walk.src.virt.addr, @@ -234,16 +235,13 @@ geode_cbc_decrypt(struct blkcipher_desc *desc, op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE); op->dir = AES_DIR_DECRYPT; - memcpy(op->iv, walk.iv, AES_IV_LENGTH); - ret = geode_aes_crypt(op); - memcpy(walk.iv, op->iv, AES_IV_LENGTH); nbytes -= ret; - err = blkcipher_walk_done(desc, &walk, nbytes); } + memcpy(walk.iv, op->iv, AES_IV_LENGTH); return err; } @@ -258,6 +256,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc, blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); + memcpy(op->iv, walk.iv, AES_IV_LENGTH); while((nbytes = walk.nbytes)) { op->src = walk.src.virt.addr, @@ -266,13 +265,12 @@ geode_cbc_encrypt(struct blkcipher_desc *desc, op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE); op->dir = AES_DIR_ENCRYPT; - memcpy(op->iv, walk.iv, AES_IV_LENGTH); - ret = geode_aes_crypt(op); nbytes -= ret; err = blkcipher_walk_done(desc, &walk, nbytes); } + memcpy(walk.iv, op->iv, AES_IV_LENGTH); return err; } -- cgit v1.1 From 1f4e4773761d0aa622411469b54d6570005a66b1 Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Sun, 21 Oct 2007 16:18:12 +0800 Subject: [CRYPTO] geode: relax in busy loop and care about return value The code waits in a busy loop until the hardware finishes the encryption or decryption process. This wants a cpu_relax() :) The busy loop finishes either if the encryption is done or if the counter is zero. If the latter is true then the hardware failed. Since this should not happen, leave with a BUG(). Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- drivers/crypto/geode-aes.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 6c04f13..6c6a31b 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -88,9 +88,10 @@ do_crypt(void *src, void *dst, int len, u32 flags) /* Start the operation */ iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG); - do + do { status = ioread32(_iobase + AES_INTR_REG); - while(!(status & AES_INTRA_PENDING) && --counter); + cpu_relax(); + } while(!(status & AES_INTRA_PENDING) && --counter); /* Clear the event */ iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG); @@ -102,6 +103,7 @@ geode_aes_crypt(struct geode_aes_op *op) { u32 flags = 0; unsigned long iflags; + int ret; if (op->len == 0) return 0; @@ -130,7 +132,8 @@ geode_aes_crypt(struct geode_aes_op *op) _writefield(AES_WRITEKEY0_REG, op->key); } - do_crypt(op->src, op->dst, op->len, flags); + ret = do_crypt(op->src, op->dst, op->len, flags); + BUG_ON(ret); if (op->mode == AES_MODE_CBC) _readfield(AES_WRITEIV0_REG, op->iv); -- cgit v1.1 From b7a30da61adc5f252ee97b2a4f3fc23c9d06a08a Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Sun, 21 Oct 2007 16:21:25 +0800 Subject: [CRYPTO] geode: move defines into a headerfile This patch moves macros in geode-aes.c into geode-aes.h.
Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- drivers/crypto/geode-aes.c | 32 -------------------------------- drivers/crypto/geode-aes.h | 36 ++++++++++++++++++++++++++++++++++-- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 6c6a31b..181d42c 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -20,38 +20,6 @@ #include "geode-aes.h" -/* Register definitions */ - -#define AES_CTRLA_REG 0x0000 - -#define AES_CTRL_START 0x01 -#define AES_CTRL_DECRYPT 0x00 -#define AES_CTRL_ENCRYPT 0x02 -#define AES_CTRL_WRKEY 0x04 -#define AES_CTRL_DCA 0x08 -#define AES_CTRL_SCA 0x10 -#define AES_CTRL_CBC 0x20 - -#define AES_INTR_REG 0x0008 - -#define AES_INTRA_PENDING (1 << 16) -#define AES_INTRB_PENDING (1 << 17) - -#define AES_INTR_PENDING (AES_INTRA_PENDING | AES_INTRB_PENDING) -#define AES_INTR_MASK 0x07 - -#define AES_SOURCEA_REG 0x0010 -#define AES_DSTA_REG 0x0014 -#define AES_LENA_REG 0x0018 -#define AES_WRITEKEY0_REG 0x0030 -#define AES_WRITEIV0_REG 0x0040 - -/* A very large counter that is used to gracefully bail out of an - * operation in case of trouble - */ - -#define AES_OP_TIMEOUT 0x50000 - /* Static structures */ static void __iomem * _iobase; diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h index f479686..2f1d559 100644 --- a/drivers/crypto/geode-aes.h +++ b/drivers/crypto/geode-aes.h @@ -9,9 +9,9 @@ #ifndef _GEODE_AES_H_ #define _GEODE_AES_H_ -#define AES_KEY_LENGTH 16 +/* driver logic flags */ #define AES_IV_LENGTH 16 - +#define AES_KEY_LENGTH 16 #define AES_MIN_BLOCK_SIZE 16 #define AES_MODE_ECB 0 @@ -22,6 +22,38 @@ #define AES_FLAGS_HIDDENKEY (1 << 0) +/* Register definitions */ + +#define AES_CTRLA_REG 0x0000 + +#define AES_CTRL_START 0x01 +#define AES_CTRL_DECRYPT 0x00 +#define AES_CTRL_ENCRYPT 0x02 +#define AES_CTRL_WRKEY 0x04 +#define AES_CTRL_DCA 0x08 +#define AES_CTRL_SCA 0x10 +#define AES_CTRL_CBC 0x20 + +#define AES_INTR_REG 0x0008 + +#define AES_INTRA_PENDING (1 << 16) +#define AES_INTRB_PENDING (1 << 17) + +#define AES_INTR_PENDING (AES_INTRA_PENDING | AES_INTRB_PENDING) +#define AES_INTR_MASK 0x07 + +#define AES_SOURCEA_REG 0x0010 +#define AES_DSTA_REG 0x0014 +#define AES_LENA_REG 0x0018 +#define AES_WRITEKEY0_REG 0x0030 +#define AES_WRITEIV0_REG 0x0040 + +/* A very large counter that is used to gracefully bail out of an + * operation in case of trouble + */ + +#define AES_OP_TIMEOUT 0x50000 + struct geode_aes_op { void *src; -- cgit v1.1 From e2b21b5002a2bf21ca73c7448309a7288a984ddf Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Fri, 26 Oct 2007 16:22:57 +0800 Subject: [CRYPTO] twofish: Do not unroll big stuff in twofish key setup Currently twofish cipher key setup code has unrolled loops - approximately 70-100 instructions are repeated 40 times. As a result, twofish module is the biggest module in crypto/*. Unrolling produces x2.5 more code (+18k on i386), and speeds up key setup by 7%: unrolled: twofish_setkey/sec: 41128 loop: twofish_setkey/sec: 38148 CALC_K256: ~100 insns each CALC_K192: ~90 insns CALC_K: ~70 insns Attached patch removes this unrolling. $ size */twofish_common.o text data bss dec hex filename 37920 0 0 37920 9420 crypto.org/twofish_common.o 13209 0 0 13209 3399 crypto/twofish_common.o Run tested (modprobe tcrypt reports ok). Please apply. 
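The conversion is mechanical because of the comment the patch deletes below: the literal constants are "indices of subkeys, preprocessed through q0 and q1", i.e. each pair is (q0[n], q1[n]) from the existing Twofish permutation tables for consecutive n. A hypothetical spot-check, not part of the patch, for the first pair:

	/* i == 0 of the new loop must expand to the first deleted call,
	 * CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3): */
	WARN_ON(q0[0] != 0xA9 || q1[0] != 0x75 ||
		q0[1] != 0x67 || q1[1] != 0xF3);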
Signed-off-by: Denys Vlasenko Signed-off-by: Herbert Xu --- crypto/twofish_common.c | 96 ++++++++++++++++--------------------------------- 1 file changed, 30 insertions(+), 66 deletions(-) diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c index b4b9c0c..0af216c 100644 --- a/crypto/twofish_common.c +++ b/crypto/twofish_common.c @@ -655,84 +655,48 @@ int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len) CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); } - /* Calculate whitening and round subkeys. The constants are - * indices of subkeys, preprocessed through q0 and q1. */ - CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3); - CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); - CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B); - CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8); - CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3); - CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B); - CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D); - CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B); - CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32); - CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD); - CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71); - CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); - CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F); - CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B); - CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA); - CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F); - CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); - CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B); - CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00); - CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D); + /* CALC_K256/CALC_K192/CALC_K loops were unrolled. + * Unrolling produced x2.5 more code (+18k on i386), + * and speeded up key setup by 7%: + * unrolled: twofish_setkey/sec: 41128 + * loop: twofish_setkey/sec: 38148 + * CALC_K256: ~100 insns each + * CALC_K192: ~90 insns + * CALC_K: ~70 insns + */ + /* Calculate whitening and round subkeys */ + for ( i = 0; i < 8; i += 2 ) { + CALC_K256 (w, i, q0[i], q1[i], q0[i+1], q1[i+1]); + } + for ( i = 0; i < 32; i += 2 ) { + CALC_K256 (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]); + } } else if (key_len == 24) { /* 192-bit key */ /* Compute the S-boxes. */ for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); } - /* Calculate whitening and round subkeys. The constants are - * indices of subkeys, preprocessed through q0 and q1. */ - CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3); - CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); - CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B); - CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8); - CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3); - CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B); - CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D); - CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B); - CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32); - CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD); - CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71); - CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); - CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F); - CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B); - CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA); - CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F); - CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); - CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B); - CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00); - CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D); + /* Calculate whitening and round subkeys */ + for ( i = 0; i < 8; i += 2 ) { + CALC_K192 (w, i, q0[i], q1[i], q0[i+1], q1[i+1]); + } + for ( i = 0; i < 32; i += 2 ) { + CALC_K192 (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]); + } } else { /* 128-bit key */ /* Compute the S-boxes. 
*/ for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); } - /* Calculate whitening and round subkeys. The constants are - * indices of subkeys, preprocessed through q0 and q1. */ - CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3); - CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); - CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B); - CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8); - CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3); - CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B); - CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D); - CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B); - CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32); - CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD); - CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71); - CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); - CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F); - CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B); - CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA); - CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F); - CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); - CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B); - CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00); - CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D); + /* Calculate whitening and round subkeys */ + for ( i = 0; i < 8; i += 2 ) { + CALC_K (w, i, q0[i], q1[i], q0[i+1], q1[i+1]); + } + for ( i = 0; i < 32; i += 2 ) { + CALC_K (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]); + } } return 0; -- cgit v1.1 From 1721a81256ed7f7bfdd1a721f3a6c9c85efeac53 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Tue, 6 Nov 2007 22:01:20 +0800 Subject: [CRYPTO] camellia: Code cleanup Move code blocks around so that related pieces are closer together: e.g. CAMELLIA_ROUNDSM macro does not need to be separated from the rest of the code by huge array of constants. Remove unused macros (COPY4WORD, SWAP4WORD, XOR4WORD[2]) Drop SUBL(), SUBR() macros which only obscure things. Same for CAMELLIA_SP1110() macro and KEY_TABLE_TYPE typedef. Remove useless comments: /* encryption */ -- well it's obvious enough already! void camellia_encrypt128(...) Combine swap with copying at the beginning/end of encrypt/decrypt. 
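"Combine swap with copying" means the former copy pass (COPY4WORD) followed by a swap pass (SWAP4WORD) becomes a single byte-swapping load. A sketch of the idea, assuming the usual big-endian block layout (illustrative, not the literal patch code):

	const __be32 *src = (const __be32 *)in;
	u32 tmp[4];

	tmp[0] = be32_to_cpu(src[0]);	/* copy and swap in one step */
	tmp[1] = be32_to_cpu(src[1]);
	tmp[2] = be32_to_cpu(src[2]);
	tmp[3] = be32_to_cpu(src[3]);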
Signed-off-by: Denys Vlasenko Acked-by: Noriaki TAKAMIYA Signed-off-by: Herbert Xu --- crypto/camellia.c | 966 ++++++++++++++++++++++++------------------------------ 1 file changed, 426 insertions(+), 540 deletions(-) diff --git a/crypto/camellia.c b/crypto/camellia.c index 6877ecf..aaae60e 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c @@ -36,176 +36,6 @@ #include #include - -#define CAMELLIA_MIN_KEY_SIZE 16 -#define CAMELLIA_MAX_KEY_SIZE 32 -#define CAMELLIA_BLOCK_SIZE 16 -#define CAMELLIA_TABLE_BYTE_LEN 272 -#define CAMELLIA_TABLE_WORD_LEN (CAMELLIA_TABLE_BYTE_LEN / 4) - -typedef u32 KEY_TABLE_TYPE[CAMELLIA_TABLE_WORD_LEN]; - - -/* key constants */ - -#define CAMELLIA_SIGMA1L (0xA09E667FL) -#define CAMELLIA_SIGMA1R (0x3BCC908BL) -#define CAMELLIA_SIGMA2L (0xB67AE858L) -#define CAMELLIA_SIGMA2R (0x4CAA73B2L) -#define CAMELLIA_SIGMA3L (0xC6EF372FL) -#define CAMELLIA_SIGMA3R (0xE94F82BEL) -#define CAMELLIA_SIGMA4L (0x54FF53A5L) -#define CAMELLIA_SIGMA4R (0xF1D36F1CL) -#define CAMELLIA_SIGMA5L (0x10E527FAL) -#define CAMELLIA_SIGMA5R (0xDE682D1DL) -#define CAMELLIA_SIGMA6L (0xB05688C2L) -#define CAMELLIA_SIGMA6R (0xB3E6C1FDL) - -struct camellia_ctx { - int key_length; - KEY_TABLE_TYPE key_table; -}; - - -/* - * macros - */ - - -# define GETU32(pt) (((u32)(pt)[0] << 24) \ - ^ ((u32)(pt)[1] << 16) \ - ^ ((u32)(pt)[2] << 8) \ - ^ ((u32)(pt)[3])) - -#define COPY4WORD(dst, src) \ - do { \ - (dst)[0]=(src)[0]; \ - (dst)[1]=(src)[1]; \ - (dst)[2]=(src)[2]; \ - (dst)[3]=(src)[3]; \ - }while(0) - -#define SWAP4WORD(word) \ - do { \ - CAMELLIA_SWAP4((word)[0]); \ - CAMELLIA_SWAP4((word)[1]); \ - CAMELLIA_SWAP4((word)[2]); \ - CAMELLIA_SWAP4((word)[3]); \ - }while(0) - -#define XOR4WORD(a, b)/* a = a ^ b */ \ - do { \ - (a)[0]^=(b)[0]; \ - (a)[1]^=(b)[1]; \ - (a)[2]^=(b)[2]; \ - (a)[3]^=(b)[3]; \ - }while(0) - -#define XOR4WORD2(a, b, c)/* a = b ^ c */ \ - do { \ - (a)[0]=(b)[0]^(c)[0]; \ - (a)[1]=(b)[1]^(c)[1]; \ - (a)[2]=(b)[2]^(c)[2]; \ - (a)[3]=(b)[3]^(c)[3]; \ - }while(0) - -#define CAMELLIA_SUBKEY_L(INDEX) (subkey[(INDEX)*2]) -#define CAMELLIA_SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1]) - -/* rotation right shift 1byte */ -#define CAMELLIA_RR8(x) (((x) >> 8) + ((x) << 24)) -/* rotation left shift 1bit */ -#define CAMELLIA_RL1(x) (((x) << 1) + ((x) >> 31)) -/* rotation left shift 1byte */ -#define CAMELLIA_RL8(x) (((x) << 8) + ((x) >> 24)) - -#define CAMELLIA_ROLDQ(ll, lr, rl, rr, w0, w1, bits) \ - do { \ - w0 = ll; \ - ll = (ll << bits) + (lr >> (32 - bits)); \ - lr = (lr << bits) + (rl >> (32 - bits)); \ - rl = (rl << bits) + (rr >> (32 - bits)); \ - rr = (rr << bits) + (w0 >> (32 - bits)); \ - } while(0) - -#define CAMELLIA_ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \ - do { \ - w0 = ll; \ - w1 = lr; \ - ll = (lr << (bits - 32)) + (rl >> (64 - bits)); \ - lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \ - rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \ - rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \ - } while(0) - -#define CAMELLIA_SP1110(INDEX) (camellia_sp1110[(INDEX)]) -#define CAMELLIA_SP0222(INDEX) (camellia_sp0222[(INDEX)]) -#define CAMELLIA_SP3033(INDEX) (camellia_sp3033[(INDEX)]) -#define CAMELLIA_SP4404(INDEX) (camellia_sp4404[(INDEX)]) - -#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \ - do { \ - il = xl ^ kl; \ - ir = xr ^ kr; \ - t0 = il >> 16; \ - t1 = ir >> 16; \ - yl = CAMELLIA_SP1110(ir & 0xff) \ - ^ CAMELLIA_SP0222((t1 >> 8) & 0xff) \ - ^ CAMELLIA_SP3033(t1 & 0xff) \ - ^ CAMELLIA_SP4404((ir >> 8) & 0xff); \ - yr = CAMELLIA_SP1110((t0 >> 8) & 0xff) \ - ^ 
CAMELLIA_SP0222(t0 & 0xff) \ - ^ CAMELLIA_SP3033((il >> 8) & 0xff) \ - ^ CAMELLIA_SP4404(il & 0xff); \ - yl ^= yr; \ - yr = CAMELLIA_RR8(yr); \ - yr ^= yl; \ - } while(0) - - -/* - * for speed up - * - */ -#define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) \ - do { \ - t0 = kll; \ - t2 = krr; \ - t0 &= ll; \ - t2 |= rr; \ - rl ^= t2; \ - lr ^= CAMELLIA_RL1(t0); \ - t3 = krl; \ - t1 = klr; \ - t3 &= rl; \ - t1 |= lr; \ - ll ^= t1; \ - rr ^= CAMELLIA_RL1(t3); \ - } while(0) - -#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \ - do { \ - ir = CAMELLIA_SP1110(xr & 0xff); \ - il = CAMELLIA_SP1110((xl>>24) & 0xff); \ - ir ^= CAMELLIA_SP0222((xr>>24) & 0xff); \ - il ^= CAMELLIA_SP0222((xl>>16) & 0xff); \ - ir ^= CAMELLIA_SP3033((xr>>16) & 0xff); \ - il ^= CAMELLIA_SP3033((xl>>8) & 0xff); \ - ir ^= CAMELLIA_SP4404((xr>>8) & 0xff); \ - il ^= CAMELLIA_SP4404(xl & 0xff); \ - il ^= kl; \ - ir ^= il ^ kr; \ - yl ^= ir; \ - yr ^= CAMELLIA_RR8(il) ^ ir; \ - } while(0) - -/** - * Stuff related to the Camellia key schedule - */ -#define SUBL(x) subL[(x)] -#define SUBR(x) subR[(x)] - - static const u32 camellia_sp1110[256] = { 0x70707000,0x82828200,0x2c2c2c00,0xececec00, 0xb3b3b300,0x27272700,0xc0c0c000,0xe5e5e500, @@ -475,6 +305,122 @@ static const u32 camellia_sp4404[256] = { }; +#define CAMELLIA_MIN_KEY_SIZE 16 +#define CAMELLIA_MAX_KEY_SIZE 32 +#define CAMELLIA_BLOCK_SIZE 16 +#define CAMELLIA_TABLE_BYTE_LEN 272 + + +/* key constants */ + +#define CAMELLIA_SIGMA1L (0xA09E667FL) +#define CAMELLIA_SIGMA1R (0x3BCC908BL) +#define CAMELLIA_SIGMA2L (0xB67AE858L) +#define CAMELLIA_SIGMA2R (0x4CAA73B2L) +#define CAMELLIA_SIGMA3L (0xC6EF372FL) +#define CAMELLIA_SIGMA3R (0xE94F82BEL) +#define CAMELLIA_SIGMA4L (0x54FF53A5L) +#define CAMELLIA_SIGMA4R (0xF1D36F1CL) +#define CAMELLIA_SIGMA5L (0x10E527FAL) +#define CAMELLIA_SIGMA5R (0xDE682D1DL) +#define CAMELLIA_SIGMA6L (0xB05688C2L) +#define CAMELLIA_SIGMA6R (0xB3E6C1FDL) + +/* + * macros + */ + +# define GETU32(pt) (((u32)(pt)[0] << 24) \ + ^ ((u32)(pt)[1] << 16) \ + ^ ((u32)(pt)[2] << 8) \ + ^ ((u32)(pt)[3])) + +/* rotation right shift 1byte */ +#define CAMELLIA_RR8(x) (((x) >> 8) + ((x) << 24)) +/* rotation left shift 1bit */ +#define CAMELLIA_RL1(x) (((x) << 1) + ((x) >> 31)) +/* rotation left shift 1byte */ +#define CAMELLIA_RL8(x) (((x) << 8) + ((x) >> 24)) + +#define CAMELLIA_ROLDQ(ll, lr, rl, rr, w0, w1, bits) \ + do { \ + w0 = ll; \ + ll = (ll << bits) + (lr >> (32 - bits)); \ + lr = (lr << bits) + (rl >> (32 - bits)); \ + rl = (rl << bits) + (rr >> (32 - bits)); \ + rr = (rr << bits) + (w0 >> (32 - bits)); \ + } while(0) + +#define CAMELLIA_ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \ + do { \ + w0 = ll; \ + w1 = lr; \ + ll = (lr << (bits - 32)) + (rl >> (64 - bits)); \ + lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \ + rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \ + rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \ + } while(0) + + +#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \ + do { \ + il = xl ^ kl; \ + ir = xr ^ kr; \ + t0 = il >> 16; \ + t1 = ir >> 16; \ + yl = camellia_sp1110[ir & 0xff] \ + ^ camellia_sp0222[(t1 >> 8) & 0xff] \ + ^ camellia_sp3033[t1 & 0xff] \ + ^ camellia_sp4404[(ir >> 8) & 0xff]; \ + yr = camellia_sp1110[(t0 >> 8) & 0xff] \ + ^ camellia_sp0222[t0 & 0xff] \ + ^ camellia_sp3033[(il >> 8) & 0xff] \ + ^ camellia_sp4404[il & 0xff]; \ + yl ^= yr; \ + yr = CAMELLIA_RR8(yr); \ + yr ^= yl; \ + } while(0) + + +/* + * for speed up + * + */ +#define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, 
krl, krr, t0, t1, t2, t3) \ + do { \ + t0 = kll; \ + t2 = krr; \ + t0 &= ll; \ + t2 |= rr; \ + rl ^= t2; \ + lr ^= CAMELLIA_RL1(t0); \ + t3 = krl; \ + t1 = klr; \ + t3 &= rl; \ + t1 |= lr; \ + ll ^= t1; \ + rr ^= CAMELLIA_RL1(t3); \ + } while(0) + +#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \ + do { \ + ir = camellia_sp1110[xr & 0xff]; \ + il = camellia_sp1110[(xl>>24) & 0xff]; \ + ir ^= camellia_sp0222[(xr>>24) & 0xff]; \ + il ^= camellia_sp0222[(xl>>16) & 0xff]; \ + ir ^= camellia_sp3033[(xr>>16) & 0xff]; \ + il ^= camellia_sp3033[(xl>>8) & 0xff]; \ + ir ^= camellia_sp4404[(xr>>8) & 0xff]; \ + il ^= camellia_sp4404[xl & 0xff]; \ + il ^= kl; \ + ir ^= il ^ kr; \ + yl ^= ir; \ + yr ^= CAMELLIA_RR8(il) ^ ir; \ + } while(0) + + +#define CAMELLIA_SUBKEY_L(INDEX) (subkey[(INDEX)*2]) +#define CAMELLIA_SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1]) static void camellia_setup128(const unsigned char *key, u32 *subkey) { @@ -495,47 +441,47 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) * generate KL dependent subkeys */ /* kw1 */ - SUBL(0) = kll; SUBR(0) = klr; + subL[0] = kll; subR[0] = klr; /* kw2 */ - SUBL(1) = krl; SUBR(1) = krr; + subL[1] = krl; subR[1] = krr; /* rotation left shift 15bit */ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k3 */ - SUBL(4) = kll; SUBR(4) = klr; + subL[4] = kll; subR[4] = klr; /* k4 */ - SUBL(5) = krl; SUBR(5) = krr; + subL[5] = krl; subR[5] = krr; /* rotation left shift 15+30bit */ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30); /* k7 */ - SUBL(10) = kll; SUBR(10) = klr; + subL[10] = kll; subR[10] = klr; /* k8 */ - SUBL(11) = krl; SUBR(11) = krr; + subL[11] = krl; subR[11] = krr; /* rotation left shift 15+30+15bit */ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k10 */ - SUBL(13) = krl; SUBR(13) = krr; + subL[13] = krl; subR[13] = krr; /* rotation left shift 15+30+15+17 bit */ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* kl3 */ - SUBL(16) = kll; SUBR(16) = klr; + subL[16] = kll; subR[16] = klr; /* kl4 */ - SUBL(17) = krl; SUBR(17) = krr; + subL[17] = krl; subR[17] = krr; /* rotation left shift 15+30+15+17+17 bit */ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* k13 */ - SUBL(18) = kll; SUBR(18) = klr; + subL[18] = kll; subR[18] = klr; /* k14 */ - SUBL(19) = krl; SUBR(19) = krr; + subL[19] = krl; subR[19] = krr; /* rotation left shift 15+30+15+17+17+17 bit */ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* k17 */ - SUBL(22) = kll; SUBR(22) = klr; + subL[22] = kll; subR[22] = klr; /* k18 */ - SUBL(23) = krl; SUBR(23) = krr; + subL[23] = krl; subR[23] = krr; /* generate KA */ - kll = SUBL(0); klr = SUBR(0); - krl = SUBL(1); krr = SUBR(1); + kll = subL[0]; klr = subR[0]; + krl = subL[1]; krr = subR[1]; CAMELLIA_F(kll, klr, CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R, w0, w1, il, ir, t0, t1); @@ -555,152 +501,150 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) /* generate KA dependent subkeys */ /* k1, k2 */ - SUBL(2) = kll; SUBR(2) = klr; - SUBL(3) = krl; SUBR(3) = krr; + subL[2] = kll; subR[2] = klr; + subL[3] = krl; subR[3] = krr; CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k5,k6 */ - SUBL(6) = kll; SUBR(6) = klr; - SUBL(7) = krl; SUBR(7) = krr; + subL[6] = kll; subR[6] = klr; + subL[7] = krl; subR[7] = krr; CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* kl1, kl2 */ - SUBL(8) = kll; SUBR(8) = klr; - SUBL(9) = krl; SUBR(9) = krr; + subL[8] = kll; subR[8] = klr; + subL[9] = krl; subR[9] = krr; CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k9 */ - SUBL(12) = kll; SUBR(12) = 
klr; + subL[12] = kll; subR[12] = klr; CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k11, k12 */ - SUBL(14) = kll; SUBR(14) = klr; - SUBL(15) = krl; SUBR(15) = krr; + subL[14] = kll; subR[14] = klr; + subL[15] = krl; subR[15] = krr; CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34); /* k15, k16 */ - SUBL(20) = kll; SUBR(20) = klr; - SUBL(21) = krl; SUBR(21) = krr; + subL[20] = kll; subR[20] = klr; + subL[21] = krl; subR[21] = krr; CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* kw3, kw4 */ - SUBL(24) = kll; SUBR(24) = klr; - SUBL(25) = krl; SUBR(25) = krr; - + subL[24] = kll; subR[24] = klr; + subL[25] = krl; subR[25] = krr; /* absorb kw2 to other subkeys */ /* round 2 */ - SUBL(3) ^= SUBL(1); SUBR(3) ^= SUBR(1); + subL[3] ^= subL[1]; subR[3] ^= subR[1]; /* round 4 */ - SUBL(5) ^= SUBL(1); SUBR(5) ^= SUBR(1); + subL[5] ^= subL[1]; subR[5] ^= subR[1]; /* round 6 */ - SUBL(7) ^= SUBL(1); SUBR(7) ^= SUBR(1); - SUBL(1) ^= SUBR(1) & ~SUBR(9); - dw = SUBL(1) & SUBL(9), - SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */ + subL[7] ^= subL[1]; subR[7] ^= subR[1]; + subL[1] ^= subR[1] & ~subR[9]; + dw = subL[1] & subL[9], + subR[1] ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */ /* round 8 */ - SUBL(11) ^= SUBL(1); SUBR(11) ^= SUBR(1); + subL[11] ^= subL[1]; subR[11] ^= subR[1]; /* round 10 */ - SUBL(13) ^= SUBL(1); SUBR(13) ^= SUBR(1); + subL[13] ^= subL[1]; subR[13] ^= subR[1]; /* round 12 */ - SUBL(15) ^= SUBL(1); SUBR(15) ^= SUBR(1); - SUBL(1) ^= SUBR(1) & ~SUBR(17); - dw = SUBL(1) & SUBL(17), - SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */ + subL[15] ^= subL[1]; subR[15] ^= subR[1]; + subL[1] ^= subR[1] & ~subR[17]; + dw = subL[1] & subL[17], + subR[1] ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */ /* round 14 */ - SUBL(19) ^= SUBL(1); SUBR(19) ^= SUBR(1); + subL[19] ^= subL[1]; subR[19] ^= subR[1]; /* round 16 */ - SUBL(21) ^= SUBL(1); SUBR(21) ^= SUBR(1); + subL[21] ^= subL[1]; subR[21] ^= subR[1]; /* round 18 */ - SUBL(23) ^= SUBL(1); SUBR(23) ^= SUBR(1); + subL[23] ^= subL[1]; subR[23] ^= subR[1]; /* kw3 */ - SUBL(24) ^= SUBL(1); SUBR(24) ^= SUBR(1); + subL[24] ^= subL[1]; subR[24] ^= subR[1]; /* absorb kw4 to other subkeys */ - kw4l = SUBL(25); kw4r = SUBR(25); + kw4l = subL[25]; kw4r = subR[25]; /* round 17 */ - SUBL(22) ^= kw4l; SUBR(22) ^= kw4r; + subL[22] ^= kw4l; subR[22] ^= kw4r; /* round 15 */ - SUBL(20) ^= kw4l; SUBR(20) ^= kw4r; + subL[20] ^= kw4l; subR[20] ^= kw4r; /* round 13 */ - SUBL(18) ^= kw4l; SUBR(18) ^= kw4r; - kw4l ^= kw4r & ~SUBR(16); - dw = kw4l & SUBL(16), + subL[18] ^= kw4l; subR[18] ^= kw4r; + kw4l ^= kw4r & ~subR[16]; + dw = kw4l & subL[16], kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl3) */ /* round 11 */ - SUBL(14) ^= kw4l; SUBR(14) ^= kw4r; + subL[14] ^= kw4l; subR[14] ^= kw4r; /* round 9 */ - SUBL(12) ^= kw4l; SUBR(12) ^= kw4r; + subL[12] ^= kw4l; subR[12] ^= kw4r; /* round 7 */ - SUBL(10) ^= kw4l; SUBR(10) ^= kw4r; - kw4l ^= kw4r & ~SUBR(8); - dw = kw4l & SUBL(8), + subL[10] ^= kw4l; subR[10] ^= kw4r; + kw4l ^= kw4r & ~subR[8]; + dw = kw4l & subL[8], kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl1) */ /* round 5 */ - SUBL(6) ^= kw4l; SUBR(6) ^= kw4r; + subL[6] ^= kw4l; subR[6] ^= kw4r; /* round 3 */ - SUBL(4) ^= kw4l; SUBR(4) ^= kw4r; + subL[4] ^= kw4l; subR[4] ^= kw4r; /* round 1 */ - SUBL(2) ^= kw4l; SUBR(2) ^= kw4r; + subL[2] ^= kw4l; subR[2] ^= kw4r; /* kw1 */ - SUBL(0) ^= kw4l; SUBR(0) ^= kw4r; - + subL[0] ^= kw4l; subR[0] ^= kw4r; /* key XOR is end of F-function */ - CAMELLIA_SUBKEY_L(0) = SUBL(0) ^ SUBL(2);/* kw1 
*/ - CAMELLIA_SUBKEY_R(0) = SUBR(0) ^ SUBR(2); - CAMELLIA_SUBKEY_L(2) = SUBL(3); /* round 1 */ - CAMELLIA_SUBKEY_R(2) = SUBR(3); - CAMELLIA_SUBKEY_L(3) = SUBL(2) ^ SUBL(4); /* round 2 */ - CAMELLIA_SUBKEY_R(3) = SUBR(2) ^ SUBR(4); - CAMELLIA_SUBKEY_L(4) = SUBL(3) ^ SUBL(5); /* round 3 */ - CAMELLIA_SUBKEY_R(4) = SUBR(3) ^ SUBR(5); - CAMELLIA_SUBKEY_L(5) = SUBL(4) ^ SUBL(6); /* round 4 */ - CAMELLIA_SUBKEY_R(5) = SUBR(4) ^ SUBR(6); - CAMELLIA_SUBKEY_L(6) = SUBL(5) ^ SUBL(7); /* round 5 */ - CAMELLIA_SUBKEY_R(6) = SUBR(5) ^ SUBR(7); - tl = SUBL(10) ^ (SUBR(10) & ~SUBR(8)); - dw = tl & SUBL(8), /* FL(kl1) */ - tr = SUBR(10) ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(7) = SUBL(6) ^ tl; /* round 6 */ - CAMELLIA_SUBKEY_R(7) = SUBR(6) ^ tr; - CAMELLIA_SUBKEY_L(8) = SUBL(8); /* FL(kl1) */ - CAMELLIA_SUBKEY_R(8) = SUBR(8); - CAMELLIA_SUBKEY_L(9) = SUBL(9); /* FLinv(kl2) */ - CAMELLIA_SUBKEY_R(9) = SUBR(9); - tl = SUBL(7) ^ (SUBR(7) & ~SUBR(9)); - dw = tl & SUBL(9), /* FLinv(kl2) */ - tr = SUBR(7) ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(10) = tl ^ SUBL(11); /* round 7 */ - CAMELLIA_SUBKEY_R(10) = tr ^ SUBR(11); - CAMELLIA_SUBKEY_L(11) = SUBL(10) ^ SUBL(12); /* round 8 */ - CAMELLIA_SUBKEY_R(11) = SUBR(10) ^ SUBR(12); - CAMELLIA_SUBKEY_L(12) = SUBL(11) ^ SUBL(13); /* round 9 */ - CAMELLIA_SUBKEY_R(12) = SUBR(11) ^ SUBR(13); - CAMELLIA_SUBKEY_L(13) = SUBL(12) ^ SUBL(14); /* round 10 */ - CAMELLIA_SUBKEY_R(13) = SUBR(12) ^ SUBR(14); - CAMELLIA_SUBKEY_L(14) = SUBL(13) ^ SUBL(15); /* round 11 */ - CAMELLIA_SUBKEY_R(14) = SUBR(13) ^ SUBR(15); - tl = SUBL(18) ^ (SUBR(18) & ~SUBR(16)); - dw = tl & SUBL(16), /* FL(kl3) */ - tr = SUBR(18) ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(15) = SUBL(14) ^ tl; /* round 12 */ - CAMELLIA_SUBKEY_R(15) = SUBR(14) ^ tr; - CAMELLIA_SUBKEY_L(16) = SUBL(16); /* FL(kl3) */ - CAMELLIA_SUBKEY_R(16) = SUBR(16); - CAMELLIA_SUBKEY_L(17) = SUBL(17); /* FLinv(kl4) */ - CAMELLIA_SUBKEY_R(17) = SUBR(17); - tl = SUBL(15) ^ (SUBR(15) & ~SUBR(17)); - dw = tl & SUBL(17), /* FLinv(kl4) */ - tr = SUBR(15) ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(18) = tl ^ SUBL(19); /* round 13 */ - CAMELLIA_SUBKEY_R(18) = tr ^ SUBR(19); - CAMELLIA_SUBKEY_L(19) = SUBL(18) ^ SUBL(20); /* round 14 */ - CAMELLIA_SUBKEY_R(19) = SUBR(18) ^ SUBR(20); - CAMELLIA_SUBKEY_L(20) = SUBL(19) ^ SUBL(21); /* round 15 */ - CAMELLIA_SUBKEY_R(20) = SUBR(19) ^ SUBR(21); - CAMELLIA_SUBKEY_L(21) = SUBL(20) ^ SUBL(22); /* round 16 */ - CAMELLIA_SUBKEY_R(21) = SUBR(20) ^ SUBR(22); - CAMELLIA_SUBKEY_L(22) = SUBL(21) ^ SUBL(23); /* round 17 */ - CAMELLIA_SUBKEY_R(22) = SUBR(21) ^ SUBR(23); - CAMELLIA_SUBKEY_L(23) = SUBL(22); /* round 18 */ - CAMELLIA_SUBKEY_R(23) = SUBR(22); - CAMELLIA_SUBKEY_L(24) = SUBL(24) ^ SUBL(23); /* kw3 */ - CAMELLIA_SUBKEY_R(24) = SUBR(24) ^ SUBR(23); + CAMELLIA_SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */ + CAMELLIA_SUBKEY_R(0) = subR[0] ^ subR[2]; + CAMELLIA_SUBKEY_L(2) = subL[3]; /* round 1 */ + CAMELLIA_SUBKEY_R(2) = subR[3]; + CAMELLIA_SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */ + CAMELLIA_SUBKEY_R(3) = subR[2] ^ subR[4]; + CAMELLIA_SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */ + CAMELLIA_SUBKEY_R(4) = subR[3] ^ subR[5]; + CAMELLIA_SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */ + CAMELLIA_SUBKEY_R(5) = subR[4] ^ subR[6]; + CAMELLIA_SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */ + CAMELLIA_SUBKEY_R(6) = subR[5] ^ subR[7]; + tl = subL[10] ^ (subR[10] & ~subR[8]); + dw = tl & subL[8], /* FL(kl1) */ + tr = subR[10] ^ CAMELLIA_RL1(dw); + CAMELLIA_SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ + CAMELLIA_SUBKEY_R(7) = 
subR[6] ^ tr; + CAMELLIA_SUBKEY_L(8) = subL[8]; /* FL(kl1) */ + CAMELLIA_SUBKEY_R(8) = subR[8]; + CAMELLIA_SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */ + CAMELLIA_SUBKEY_R(9) = subR[9]; + tl = subL[7] ^ (subR[7] & ~subR[9]); + dw = tl & subL[9], /* FLinv(kl2) */ + tr = subR[7] ^ CAMELLIA_RL1(dw); + CAMELLIA_SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ + CAMELLIA_SUBKEY_R(10) = tr ^ subR[11]; + CAMELLIA_SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ + CAMELLIA_SUBKEY_R(11) = subR[10] ^ subR[12]; + CAMELLIA_SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */ + CAMELLIA_SUBKEY_R(12) = subR[11] ^ subR[13]; + CAMELLIA_SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */ + CAMELLIA_SUBKEY_R(13) = subR[12] ^ subR[14]; + CAMELLIA_SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */ + CAMELLIA_SUBKEY_R(14) = subR[13] ^ subR[15]; + tl = subL[18] ^ (subR[18] & ~subR[16]); + dw = tl & subL[16], /* FL(kl3) */ + tr = subR[18] ^ CAMELLIA_RL1(dw); + CAMELLIA_SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ + CAMELLIA_SUBKEY_R(15) = subR[14] ^ tr; + CAMELLIA_SUBKEY_L(16) = subL[16]; /* FL(kl3) */ + CAMELLIA_SUBKEY_R(16) = subR[16]; + CAMELLIA_SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */ + CAMELLIA_SUBKEY_R(17) = subR[17]; + tl = subL[15] ^ (subR[15] & ~subR[17]); + dw = tl & subL[17], /* FLinv(kl4) */ + tr = subR[15] ^ CAMELLIA_RL1(dw); + CAMELLIA_SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ + CAMELLIA_SUBKEY_R(18) = tr ^ subR[19]; + CAMELLIA_SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ + CAMELLIA_SUBKEY_R(19) = subR[18] ^ subR[20]; + CAMELLIA_SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */ + CAMELLIA_SUBKEY_R(20) = subR[19] ^ subR[21]; + CAMELLIA_SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */ + CAMELLIA_SUBKEY_R(21) = subR[20] ^ subR[22]; + CAMELLIA_SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */ + CAMELLIA_SUBKEY_R(22) = subR[21] ^ subR[23]; + CAMELLIA_SUBKEY_L(23) = subL[22]; /* round 18 */ + CAMELLIA_SUBKEY_R(23) = subR[22]; + CAMELLIA_SUBKEY_L(24) = subL[24] ^ subL[23]; /* kw3 */ + CAMELLIA_SUBKEY_R(24) = subR[24] ^ subR[23]; /* apply the inverse of the last half of P-function */ dw = CAMELLIA_SUBKEY_L(2) ^ CAMELLIA_SUBKEY_R(2), @@ -775,11 +719,8 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) dw = CAMELLIA_RL8(dw);/* round 18 */ CAMELLIA_SUBKEY_R(23) = CAMELLIA_SUBKEY_L(23) ^ dw, CAMELLIA_SUBKEY_L(23) = dw; - - return; } - static void camellia_setup256(const unsigned char *key, u32 *subkey) { u32 kll,klr,krl,krr; /* left half of key */ @@ -805,56 +746,56 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) /* generate KL dependent subkeys */ /* kw1 */ - SUBL(0) = kll; SUBR(0) = klr; + subL[0] = kll; subR[0] = klr; /* kw2 */ - SUBL(1) = krl; SUBR(1) = krr; + subL[1] = krl; subR[1] = krr; CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 45); /* k9 */ - SUBL(12) = kll; SUBR(12) = klr; + subL[12] = kll; subR[12] = klr; /* k10 */ - SUBL(13) = krl; SUBR(13) = krr; + subL[13] = krl; subR[13] = krr; CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* kl3 */ - SUBL(16) = kll; SUBR(16) = klr; + subL[16] = kll; subR[16] = klr; /* kl4 */ - SUBL(17) = krl; SUBR(17) = krr; + subL[17] = krl; subR[17] = krr; CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* k17 */ - SUBL(22) = kll; SUBR(22) = klr; + subL[22] = kll; subR[22] = klr; /* k18 */ - SUBL(23) = krl; SUBR(23) = krr; + subL[23] = krl; subR[23] = krr; CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34); /* k23 */ - SUBL(30) = kll; SUBR(30) = klr; + subL[30] = kll; subR[30] = klr; /* k24 */ - SUBL(31) = krl; SUBR(31) = krr; 
+ subL[31] = krl; subR[31] = krr; /* generate KR dependent subkeys */ CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15); /* k3 */ - SUBL(4) = krll; SUBR(4) = krlr; + subL[4] = krll; subR[4] = krlr; /* k4 */ - SUBL(5) = krrl; SUBR(5) = krrr; + subL[5] = krrl; subR[5] = krrr; CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15); /* kl1 */ - SUBL(8) = krll; SUBR(8) = krlr; + subL[8] = krll; subR[8] = krlr; /* kl2 */ - SUBL(9) = krrl; SUBR(9) = krrr; + subL[9] = krrl; subR[9] = krrr; CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30); /* k13 */ - SUBL(18) = krll; SUBR(18) = krlr; + subL[18] = krll; subR[18] = krlr; /* k14 */ - SUBL(19) = krrl; SUBR(19) = krrr; + subL[19] = krrl; subR[19] = krrr; CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34); /* k19 */ - SUBL(26) = krll; SUBR(26) = krlr; + subL[26] = krll; subR[26] = krlr; /* k20 */ - SUBL(27) = krrl; SUBR(27) = krrr; + subL[27] = krrl; subR[27] = krrr; CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34); /* generate KA */ - kll = SUBL(0) ^ krll; klr = SUBR(0) ^ krlr; - krl = SUBL(1) ^ krrl; krr = SUBR(1) ^ krrr; + kll = subL[0] ^ krll; klr = subR[0] ^ krlr; + krl = subL[1] ^ krrl; krr = subR[1] ^ krrr; CAMELLIA_F(kll, klr, CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R, w0, w1, il, ir, t0, t1); @@ -887,208 +828,207 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) /* generate KA dependent subkeys */ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k5 */ - SUBL(6) = kll; SUBR(6) = klr; + subL[6] = kll; subR[6] = klr; /* k6 */ - SUBL(7) = krl; SUBR(7) = krr; + subL[7] = krl; subR[7] = krr; CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30); /* k11 */ - SUBL(14) = kll; SUBR(14) = klr; + subL[14] = kll; subR[14] = klr; /* k12 */ - SUBL(15) = krl; SUBR(15) = krr; + subL[15] = krl; subR[15] = krr; /* rotation left shift 32bit */ /* kl5 */ - SUBL(24) = klr; SUBR(24) = krl; + subL[24] = klr; subR[24] = krl; /* kl6 */ - SUBL(25) = krr; SUBR(25) = kll; + subL[25] = krr; subR[25] = kll; /* rotation left shift 49 from k11,k12 -> k21,k22 */ CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 49); /* k21 */ - SUBL(28) = kll; SUBR(28) = klr; + subL[28] = kll; subR[28] = klr; /* k22 */ - SUBL(29) = krl; SUBR(29) = krr; + subL[29] = krl; subR[29] = krr; /* generate KB dependent subkeys */ /* k1 */ - SUBL(2) = krll; SUBR(2) = krlr; + subL[2] = krll; subR[2] = krlr; /* k2 */ - SUBL(3) = krrl; SUBR(3) = krrr; + subL[3] = krrl; subR[3] = krrr; CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30); /* k7 */ - SUBL(10) = krll; SUBR(10) = krlr; + subL[10] = krll; subR[10] = krlr; /* k8 */ - SUBL(11) = krrl; SUBR(11) = krrr; + subL[11] = krrl; subR[11] = krrr; CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30); /* k15 */ - SUBL(20) = krll; SUBR(20) = krlr; + subL[20] = krll; subR[20] = krlr; /* k16 */ - SUBL(21) = krrl; SUBR(21) = krrr; + subL[21] = krrl; subR[21] = krrr; CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 51); /* kw3 */ - SUBL(32) = krll; SUBR(32) = krlr; + subL[32] = krll; subR[32] = krlr; /* kw4 */ - SUBL(33) = krrl; SUBR(33) = krrr; + subL[33] = krrl; subR[33] = krrr; /* absorb kw2 to other subkeys */ /* round 2 */ - SUBL(3) ^= SUBL(1); SUBR(3) ^= SUBR(1); + subL[3] ^= subL[1]; subR[3] ^= subR[1]; /* round 4 */ - SUBL(5) ^= SUBL(1); SUBR(5) ^= SUBR(1); + subL[5] ^= subL[1]; subR[5] ^= subR[1]; /* round 6 */ - SUBL(7) ^= SUBL(1); SUBR(7) ^= SUBR(1); - SUBL(1) ^= SUBR(1) & ~SUBR(9); - dw = SUBL(1) & SUBL(9), - SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */ + subL[7] ^= subL[1]; subR[7] ^= subR[1]; + subL[1] ^= subR[1] & ~subR[9]; + 
dw = subL[1] & subL[9], + subR[1] ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */ /* round 8 */ - SUBL(11) ^= SUBL(1); SUBR(11) ^= SUBR(1); + subL[11] ^= subL[1]; subR[11] ^= subR[1]; /* round 10 */ - SUBL(13) ^= SUBL(1); SUBR(13) ^= SUBR(1); + subL[13] ^= subL[1]; subR[13] ^= subR[1]; /* round 12 */ - SUBL(15) ^= SUBL(1); SUBR(15) ^= SUBR(1); - SUBL(1) ^= SUBR(1) & ~SUBR(17); - dw = SUBL(1) & SUBL(17), - SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */ + subL[15] ^= subL[1]; subR[15] ^= subR[1]; + subL[1] ^= subR[1] & ~subR[17]; + dw = subL[1] & subL[17], + subR[1] ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */ /* round 14 */ - SUBL(19) ^= SUBL(1); SUBR(19) ^= SUBR(1); + subL[19] ^= subL[1]; subR[19] ^= subR[1]; /* round 16 */ - SUBL(21) ^= SUBL(1); SUBR(21) ^= SUBR(1); + subL[21] ^= subL[1]; subR[21] ^= subR[1]; /* round 18 */ - SUBL(23) ^= SUBL(1); SUBR(23) ^= SUBR(1); - SUBL(1) ^= SUBR(1) & ~SUBR(25); - dw = SUBL(1) & SUBL(25), - SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl6) */ + subL[23] ^= subL[1]; subR[23] ^= subR[1]; + subL[1] ^= subR[1] & ~subR[25]; + dw = subL[1] & subL[25], + subR[1] ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl6) */ /* round 20 */ - SUBL(27) ^= SUBL(1); SUBR(27) ^= SUBR(1); + subL[27] ^= subL[1]; subR[27] ^= subR[1]; /* round 22 */ - SUBL(29) ^= SUBL(1); SUBR(29) ^= SUBR(1); + subL[29] ^= subL[1]; subR[29] ^= subR[1]; /* round 24 */ - SUBL(31) ^= SUBL(1); SUBR(31) ^= SUBR(1); + subL[31] ^= subL[1]; subR[31] ^= subR[1]; /* kw3 */ - SUBL(32) ^= SUBL(1); SUBR(32) ^= SUBR(1); - + subL[32] ^= subL[1]; subR[32] ^= subR[1]; /* absorb kw4 to other subkeys */ - kw4l = SUBL(33); kw4r = SUBR(33); + kw4l = subL[33]; kw4r = subR[33]; /* round 23 */ - SUBL(30) ^= kw4l; SUBR(30) ^= kw4r; + subL[30] ^= kw4l; subR[30] ^= kw4r; /* round 21 */ - SUBL(28) ^= kw4l; SUBR(28) ^= kw4r; + subL[28] ^= kw4l; subR[28] ^= kw4r; /* round 19 */ - SUBL(26) ^= kw4l; SUBR(26) ^= kw4r; - kw4l ^= kw4r & ~SUBR(24); - dw = kw4l & SUBL(24), + subL[26] ^= kw4l; subR[26] ^= kw4r; + kw4l ^= kw4r & ~subR[24]; + dw = kw4l & subL[24], kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl5) */ /* round 17 */ - SUBL(22) ^= kw4l; SUBR(22) ^= kw4r; + subL[22] ^= kw4l; subR[22] ^= kw4r; /* round 15 */ - SUBL(20) ^= kw4l; SUBR(20) ^= kw4r; + subL[20] ^= kw4l; subR[20] ^= kw4r; /* round 13 */ - SUBL(18) ^= kw4l; SUBR(18) ^= kw4r; - kw4l ^= kw4r & ~SUBR(16); - dw = kw4l & SUBL(16), + subL[18] ^= kw4l; subR[18] ^= kw4r; + kw4l ^= kw4r & ~subR[16]; + dw = kw4l & subL[16], kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl3) */ /* round 11 */ - SUBL(14) ^= kw4l; SUBR(14) ^= kw4r; + subL[14] ^= kw4l; subR[14] ^= kw4r; /* round 9 */ - SUBL(12) ^= kw4l; SUBR(12) ^= kw4r; + subL[12] ^= kw4l; subR[12] ^= kw4r; /* round 7 */ - SUBL(10) ^= kw4l; SUBR(10) ^= kw4r; - kw4l ^= kw4r & ~SUBR(8); - dw = kw4l & SUBL(8), + subL[10] ^= kw4l; subR[10] ^= kw4r; + kw4l ^= kw4r & ~subR[8]; + dw = kw4l & subL[8], kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl1) */ /* round 5 */ - SUBL(6) ^= kw4l; SUBR(6) ^= kw4r; + subL[6] ^= kw4l; subR[6] ^= kw4r; /* round 3 */ - SUBL(4) ^= kw4l; SUBR(4) ^= kw4r; + subL[4] ^= kw4l; subR[4] ^= kw4r; /* round 1 */ - SUBL(2) ^= kw4l; SUBR(2) ^= kw4r; + subL[2] ^= kw4l; subR[2] ^= kw4r; /* kw1 */ - SUBL(0) ^= kw4l; SUBR(0) ^= kw4r; + subL[0] ^= kw4l; subR[0] ^= kw4r; /* key XOR is end of F-function */ - CAMELLIA_SUBKEY_L(0) = SUBL(0) ^ SUBL(2);/* kw1 */ - CAMELLIA_SUBKEY_R(0) = SUBR(0) ^ SUBR(2); - CAMELLIA_SUBKEY_L(2) = SUBL(3); /* round 1 */ - CAMELLIA_SUBKEY_R(2) = SUBR(3); - 
CAMELLIA_SUBKEY_L(3) = SUBL(2) ^ SUBL(4); /* round 2 */ - CAMELLIA_SUBKEY_R(3) = SUBR(2) ^ SUBR(4); - CAMELLIA_SUBKEY_L(4) = SUBL(3) ^ SUBL(5); /* round 3 */ - CAMELLIA_SUBKEY_R(4) = SUBR(3) ^ SUBR(5); - CAMELLIA_SUBKEY_L(5) = SUBL(4) ^ SUBL(6); /* round 4 */ - CAMELLIA_SUBKEY_R(5) = SUBR(4) ^ SUBR(6); - CAMELLIA_SUBKEY_L(6) = SUBL(5) ^ SUBL(7); /* round 5 */ - CAMELLIA_SUBKEY_R(6) = SUBR(5) ^ SUBR(7); - tl = SUBL(10) ^ (SUBR(10) & ~SUBR(8)); - dw = tl & SUBL(8), /* FL(kl1) */ - tr = SUBR(10) ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(7) = SUBL(6) ^ tl; /* round 6 */ - CAMELLIA_SUBKEY_R(7) = SUBR(6) ^ tr; - CAMELLIA_SUBKEY_L(8) = SUBL(8); /* FL(kl1) */ - CAMELLIA_SUBKEY_R(8) = SUBR(8); - CAMELLIA_SUBKEY_L(9) = SUBL(9); /* FLinv(kl2) */ - CAMELLIA_SUBKEY_R(9) = SUBR(9); - tl = SUBL(7) ^ (SUBR(7) & ~SUBR(9)); - dw = tl & SUBL(9), /* FLinv(kl2) */ - tr = SUBR(7) ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(10) = tl ^ SUBL(11); /* round 7 */ - CAMELLIA_SUBKEY_R(10) = tr ^ SUBR(11); - CAMELLIA_SUBKEY_L(11) = SUBL(10) ^ SUBL(12); /* round 8 */ - CAMELLIA_SUBKEY_R(11) = SUBR(10) ^ SUBR(12); - CAMELLIA_SUBKEY_L(12) = SUBL(11) ^ SUBL(13); /* round 9 */ - CAMELLIA_SUBKEY_R(12) = SUBR(11) ^ SUBR(13); - CAMELLIA_SUBKEY_L(13) = SUBL(12) ^ SUBL(14); /* round 10 */ - CAMELLIA_SUBKEY_R(13) = SUBR(12) ^ SUBR(14); - CAMELLIA_SUBKEY_L(14) = SUBL(13) ^ SUBL(15); /* round 11 */ - CAMELLIA_SUBKEY_R(14) = SUBR(13) ^ SUBR(15); - tl = SUBL(18) ^ (SUBR(18) & ~SUBR(16)); - dw = tl & SUBL(16), /* FL(kl3) */ - tr = SUBR(18) ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(15) = SUBL(14) ^ tl; /* round 12 */ - CAMELLIA_SUBKEY_R(15) = SUBR(14) ^ tr; - CAMELLIA_SUBKEY_L(16) = SUBL(16); /* FL(kl3) */ - CAMELLIA_SUBKEY_R(16) = SUBR(16); - CAMELLIA_SUBKEY_L(17) = SUBL(17); /* FLinv(kl4) */ - CAMELLIA_SUBKEY_R(17) = SUBR(17); - tl = SUBL(15) ^ (SUBR(15) & ~SUBR(17)); - dw = tl & SUBL(17), /* FLinv(kl4) */ - tr = SUBR(15) ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(18) = tl ^ SUBL(19); /* round 13 */ - CAMELLIA_SUBKEY_R(18) = tr ^ SUBR(19); - CAMELLIA_SUBKEY_L(19) = SUBL(18) ^ SUBL(20); /* round 14 */ - CAMELLIA_SUBKEY_R(19) = SUBR(18) ^ SUBR(20); - CAMELLIA_SUBKEY_L(20) = SUBL(19) ^ SUBL(21); /* round 15 */ - CAMELLIA_SUBKEY_R(20) = SUBR(19) ^ SUBR(21); - CAMELLIA_SUBKEY_L(21) = SUBL(20) ^ SUBL(22); /* round 16 */ - CAMELLIA_SUBKEY_R(21) = SUBR(20) ^ SUBR(22); - CAMELLIA_SUBKEY_L(22) = SUBL(21) ^ SUBL(23); /* round 17 */ - CAMELLIA_SUBKEY_R(22) = SUBR(21) ^ SUBR(23); - tl = SUBL(26) ^ (SUBR(26) - & ~SUBR(24)); - dw = tl & SUBL(24), /* FL(kl5) */ - tr = SUBR(26) ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(23) = SUBL(22) ^ tl; /* round 18 */ - CAMELLIA_SUBKEY_R(23) = SUBR(22) ^ tr; - CAMELLIA_SUBKEY_L(24) = SUBL(24); /* FL(kl5) */ - CAMELLIA_SUBKEY_R(24) = SUBR(24); - CAMELLIA_SUBKEY_L(25) = SUBL(25); /* FLinv(kl6) */ - CAMELLIA_SUBKEY_R(25) = SUBR(25); - tl = SUBL(23) ^ (SUBR(23) & - ~SUBR(25)); - dw = tl & SUBL(25), /* FLinv(kl6) */ - tr = SUBR(23) ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(26) = tl ^ SUBL(27); /* round 19 */ - CAMELLIA_SUBKEY_R(26) = tr ^ SUBR(27); - CAMELLIA_SUBKEY_L(27) = SUBL(26) ^ SUBL(28); /* round 20 */ - CAMELLIA_SUBKEY_R(27) = SUBR(26) ^ SUBR(28); - CAMELLIA_SUBKEY_L(28) = SUBL(27) ^ SUBL(29); /* round 21 */ - CAMELLIA_SUBKEY_R(28) = SUBR(27) ^ SUBR(29); - CAMELLIA_SUBKEY_L(29) = SUBL(28) ^ SUBL(30); /* round 22 */ - CAMELLIA_SUBKEY_R(29) = SUBR(28) ^ SUBR(30); - CAMELLIA_SUBKEY_L(30) = SUBL(29) ^ SUBL(31); /* round 23 */ - CAMELLIA_SUBKEY_R(30) = SUBR(29) ^ SUBR(31); - CAMELLIA_SUBKEY_L(31) = SUBL(30); /* round 24 */ - 
CAMELLIA_SUBKEY_R(31) = SUBR(30); - CAMELLIA_SUBKEY_L(32) = SUBL(32) ^ SUBL(31); /* kw3 */ - CAMELLIA_SUBKEY_R(32) = SUBR(32) ^ SUBR(31); + CAMELLIA_SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */ + CAMELLIA_SUBKEY_R(0) = subR[0] ^ subR[2]; + CAMELLIA_SUBKEY_L(2) = subL[3]; /* round 1 */ + CAMELLIA_SUBKEY_R(2) = subR[3]; + CAMELLIA_SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */ + CAMELLIA_SUBKEY_R(3) = subR[2] ^ subR[4]; + CAMELLIA_SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */ + CAMELLIA_SUBKEY_R(4) = subR[3] ^ subR[5]; + CAMELLIA_SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */ + CAMELLIA_SUBKEY_R(5) = subR[4] ^ subR[6]; + CAMELLIA_SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */ + CAMELLIA_SUBKEY_R(6) = subR[5] ^ subR[7]; + tl = subL[10] ^ (subR[10] & ~subR[8]); + dw = tl & subL[8], /* FL(kl1) */ + tr = subR[10] ^ CAMELLIA_RL1(dw); + CAMELLIA_SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ + CAMELLIA_SUBKEY_R(7) = subR[6] ^ tr; + CAMELLIA_SUBKEY_L(8) = subL[8]; /* FL(kl1) */ + CAMELLIA_SUBKEY_R(8) = subR[8]; + CAMELLIA_SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */ + CAMELLIA_SUBKEY_R(9) = subR[9]; + tl = subL[7] ^ (subR[7] & ~subR[9]); + dw = tl & subL[9], /* FLinv(kl2) */ + tr = subR[7] ^ CAMELLIA_RL1(dw); + CAMELLIA_SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ + CAMELLIA_SUBKEY_R(10) = tr ^ subR[11]; + CAMELLIA_SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ + CAMELLIA_SUBKEY_R(11) = subR[10] ^ subR[12]; + CAMELLIA_SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */ + CAMELLIA_SUBKEY_R(12) = subR[11] ^ subR[13]; + CAMELLIA_SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */ + CAMELLIA_SUBKEY_R(13) = subR[12] ^ subR[14]; + CAMELLIA_SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */ + CAMELLIA_SUBKEY_R(14) = subR[13] ^ subR[15]; + tl = subL[18] ^ (subR[18] & ~subR[16]); + dw = tl & subL[16], /* FL(kl3) */ + tr = subR[18] ^ CAMELLIA_RL1(dw); + CAMELLIA_SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ + CAMELLIA_SUBKEY_R(15) = subR[14] ^ tr; + CAMELLIA_SUBKEY_L(16) = subL[16]; /* FL(kl3) */ + CAMELLIA_SUBKEY_R(16) = subR[16]; + CAMELLIA_SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */ + CAMELLIA_SUBKEY_R(17) = subR[17]; + tl = subL[15] ^ (subR[15] & ~subR[17]); + dw = tl & subL[17], /* FLinv(kl4) */ + tr = subR[15] ^ CAMELLIA_RL1(dw); + CAMELLIA_SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ + CAMELLIA_SUBKEY_R(18) = tr ^ subR[19]; + CAMELLIA_SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ + CAMELLIA_SUBKEY_R(19) = subR[18] ^ subR[20]; + CAMELLIA_SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */ + CAMELLIA_SUBKEY_R(20) = subR[19] ^ subR[21]; + CAMELLIA_SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */ + CAMELLIA_SUBKEY_R(21) = subR[20] ^ subR[22]; + CAMELLIA_SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */ + CAMELLIA_SUBKEY_R(22) = subR[21] ^ subR[23]; + tl = subL[26] ^ (subR[26] + & ~subR[24]); + dw = tl & subL[24], /* FL(kl5) */ + tr = subR[26] ^ CAMELLIA_RL1(dw); + CAMELLIA_SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */ + CAMELLIA_SUBKEY_R(23) = subR[22] ^ tr; + CAMELLIA_SUBKEY_L(24) = subL[24]; /* FL(kl5) */ + CAMELLIA_SUBKEY_R(24) = subR[24]; + CAMELLIA_SUBKEY_L(25) = subL[25]; /* FLinv(kl6) */ + CAMELLIA_SUBKEY_R(25) = subR[25]; + tl = subL[23] ^ (subR[23] & + ~subR[25]); + dw = tl & subL[25], /* FLinv(kl6) */ + tr = subR[23] ^ CAMELLIA_RL1(dw); + CAMELLIA_SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */ + CAMELLIA_SUBKEY_R(26) = tr ^ subR[27]; + CAMELLIA_SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ + CAMELLIA_SUBKEY_R(27) = subR[26] ^ subR[28]; + CAMELLIA_SUBKEY_L(28) = subL[27] ^ subL[29]; /* round 21 */ + 
CAMELLIA_SUBKEY_R(28) = subR[27] ^ subR[29]; + CAMELLIA_SUBKEY_L(29) = subL[28] ^ subL[30]; /* round 22 */ + CAMELLIA_SUBKEY_R(29) = subR[28] ^ subR[30]; + CAMELLIA_SUBKEY_L(30) = subL[29] ^ subL[31]; /* round 23 */ + CAMELLIA_SUBKEY_R(30) = subR[29] ^ subR[31]; + CAMELLIA_SUBKEY_L(31) = subL[30]; /* round 24 */ + CAMELLIA_SUBKEY_R(31) = subR[30]; + CAMELLIA_SUBKEY_L(32) = subL[32] ^ subL[31]; /* kw3 */ + CAMELLIA_SUBKEY_R(32) = subR[32] ^ subR[31]; /* apply the inverse of the last half of P-function */ dw = CAMELLIA_SUBKEY_L(2) ^ CAMELLIA_SUBKEY_R(2), @@ -1187,8 +1127,6 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) dw = CAMELLIA_RL8(dw);/* round 24 */ CAMELLIA_SUBKEY_R(31) = CAMELLIA_SUBKEY_L(31) ^ dw, CAMELLIA_SUBKEY_L(31) = dw; - - return; } static void camellia_setup192(const unsigned char *key, u32 *subkey) @@ -1197,20 +1135,16 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) u32 krll, krlr, krrl,krrr; memcpy(kk, key, 24); - memcpy((unsigned char *)&krll, key+16,4); - memcpy((unsigned char *)&krlr, key+20,4); + memcpy((unsigned char *)&krll, key+16, 4); + memcpy((unsigned char *)&krlr, key+20, 4); krrl = ~krll; krrr = ~krlr; memcpy(kk+24, (unsigned char *)&krrl, 4); memcpy(kk+28, (unsigned char *)&krrr, 4); camellia_setup256(kk, subkey); - return; } -/** - * Stuff related to camellia encryption/decryption - */ static void camellia_encrypt128(const u32 *subkey, __be32 *io_text) { u32 il,ir,t0,t1; /* temporary valiables */ @@ -1222,11 +1156,11 @@ static void camellia_encrypt128(const u32 *subkey, __be32 *io_text) io[2] = be32_to_cpu(io_text[2]); io[3] = be32_to_cpu(io_text[3]); - /* pre whitening but absorb kw2*/ + /* pre whitening but absorb kw2 */ io[0] ^= CAMELLIA_SUBKEY_L(0); io[1] ^= CAMELLIA_SUBKEY_R(0); - /* main iteration */ + /* main iteration */ CAMELLIA_ROUNDSM(io[0],io[1], CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2), io[2],io[3],il,ir,t0,t1); @@ -1298,19 +1232,10 @@ static void camellia_encrypt128(const u32 *subkey, __be32 *io_text) io[2] ^= CAMELLIA_SUBKEY_L(24); io[3] ^= CAMELLIA_SUBKEY_R(24); - t0 = io[0]; - t1 = io[1]; - io[0] = io[2]; - io[1] = io[3]; - io[2] = t0; - io[3] = t1; - - io_text[0] = cpu_to_be32(io[0]); - io_text[1] = cpu_to_be32(io[1]); - io_text[2] = cpu_to_be32(io[2]); - io_text[3] = cpu_to_be32(io[3]); - - return; + io_text[0] = cpu_to_be32(io[2]); + io_text[1] = cpu_to_be32(io[3]); + io_text[2] = cpu_to_be32(io[0]); + io_text[3] = cpu_to_be32(io[1]); } static void camellia_decrypt128(const u32 *subkey, __be32 *io_text) @@ -1324,7 +1249,7 @@ static void camellia_decrypt128(const u32 *subkey, __be32 *io_text) io[2] = be32_to_cpu(io_text[2]); io[3] = be32_to_cpu(io_text[3]); - /* pre whitening but absorb kw2*/ + /* pre whitening but absorb kw2 */ io[0] ^= CAMELLIA_SUBKEY_L(24); io[1] ^= CAMELLIA_SUBKEY_R(24); @@ -1400,25 +1325,12 @@ static void camellia_decrypt128(const u32 *subkey, __be32 *io_text) io[2] ^= CAMELLIA_SUBKEY_L(0); io[3] ^= CAMELLIA_SUBKEY_R(0); - t0 = io[0]; - t1 = io[1]; - io[0] = io[2]; - io[1] = io[3]; - io[2] = t0; - io[3] = t1; - - io_text[0] = cpu_to_be32(io[0]); - io_text[1] = cpu_to_be32(io[1]); - io_text[2] = cpu_to_be32(io[2]); - io_text[3] = cpu_to_be32(io[3]); - - return; + io_text[0] = cpu_to_be32(io[2]); + io_text[1] = cpu_to_be32(io[3]); + io_text[2] = cpu_to_be32(io[0]); + io_text[3] = cpu_to_be32(io[1]); } - -/** - * stuff for 192 and 256bit encryption/decryption - */ static void camellia_encrypt256(const u32 *subkey, __be32 *io_text) { u32 il,ir,t0,t1; /* temporary valiables 
*/ @@ -1430,7 +1342,7 @@ static void camellia_encrypt256(const u32 *subkey, __be32 *io_text) io[2] = be32_to_cpu(io_text[2]); io[3] = be32_to_cpu(io_text[3]); - /* pre whitening but absorb kw2*/ + /* pre whitening but absorb kw2 */ io[0] ^= CAMELLIA_SUBKEY_L(0); io[1] ^= CAMELLIA_SUBKEY_R(0); @@ -1530,22 +1442,12 @@ static void camellia_encrypt256(const u32 *subkey, __be32 *io_text) io[2] ^= CAMELLIA_SUBKEY_L(32); io[3] ^= CAMELLIA_SUBKEY_R(32); - t0 = io[0]; - t1 = io[1]; - io[0] = io[2]; - io[1] = io[3]; - io[2] = t0; - io[3] = t1; - - io_text[0] = cpu_to_be32(io[0]); - io_text[1] = cpu_to_be32(io[1]); - io_text[2] = cpu_to_be32(io[2]); - io_text[3] = cpu_to_be32(io[3]); - - return; + io_text[0] = cpu_to_be32(io[2]); + io_text[1] = cpu_to_be32(io[3]); + io_text[2] = cpu_to_be32(io[0]); + io_text[3] = cpu_to_be32(io[1]); } - static void camellia_decrypt256(const u32 *subkey, __be32 *io_text) { u32 il,ir,t0,t1; /* temporary valiables */ @@ -1557,7 +1459,7 @@ static void camellia_decrypt256(const u32 *subkey, __be32 *io_text) io[2] = be32_to_cpu(io_text[2]); io[3] = be32_to_cpu(io_text[3]); - /* pre whitening but absorb kw2*/ + /* pre whitening but absorb kw2 */ io[0] ^= CAMELLIA_SUBKEY_L(32); io[1] ^= CAMELLIA_SUBKEY_R(32); @@ -1657,22 +1559,18 @@ static void camellia_decrypt256(const u32 *subkey, __be32 *io_text) io[2] ^= CAMELLIA_SUBKEY_L(0); io[3] ^= CAMELLIA_SUBKEY_R(0); - t0 = io[0]; - t1 = io[1]; - io[0] = io[2]; - io[1] = io[3]; - io[2] = t0; - io[3] = t1; - - io_text[0] = cpu_to_be32(io[0]); - io_text[1] = cpu_to_be32(io[1]); - io_text[2] = cpu_to_be32(io[2]); - io_text[3] = cpu_to_be32(io[3]); - - return; + io_text[0] = cpu_to_be32(io[2]); + io_text[1] = cpu_to_be32(io[3]); + io_text[2] = cpu_to_be32(io[0]); + io_text[3] = cpu_to_be32(io[1]); } +struct camellia_ctx { + int key_length; + u32 key_table[CAMELLIA_TABLE_BYTE_LEN / 4]; +}; + static int camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) @@ -1688,7 +1586,7 @@ camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key, cctx->key_length = key_len; - switch(key_len) { + switch (key_len) { case 16: camellia_setup128(key, cctx->key_table); break; @@ -1698,14 +1596,11 @@ camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key, case 32: camellia_setup256(key, cctx->key_table); break; - default: - break; } return 0; } - static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm); @@ -1725,14 +1620,11 @@ static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) case 32: camellia_encrypt256(cctx->key_table, tmp); break; - default: - break; } memcpy(dst, tmp, CAMELLIA_BLOCK_SIZE); } - static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm); @@ -1752,14 +1644,11 @@ static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) case 32: camellia_decrypt256(cctx->key_table, tmp); break; - default: - break; } memcpy(dst, tmp, CAMELLIA_BLOCK_SIZE); } - static struct crypto_alg camellia_alg = { .cra_name = "camellia", .cra_driver_name = "camellia-generic", @@ -1786,16 +1675,13 @@ static int __init camellia_init(void) return crypto_register_alg(&camellia_alg); } - static void __exit camellia_fini(void) { crypto_unregister_alg(&camellia_alg); } - module_init(camellia_init); module_exit(camellia_fini); - MODULE_DESCRIPTION("Camellia Cipher Algorithm"); MODULE_LICENSE("GPL"); -- cgit v1.1 From 3a5e5f8108fe440657e8041afd973d2fe72180bb 
Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Tue, 6 Nov 2007 22:05:36 +0800 Subject: [CRYPTO] camellia: Code cleanup Rename some macros to shorter names: CAMELLIA_RR8 -> ROR8, making it easier to understand that it is just a right rotation, nothing camellia-specific in it. CAMELLIA_SUBKEY_L() -> SUBKEY_L() - just shorter. Move be32 <-> cpu conversions out of en/decrypt128/256 and into camellia_en/decrypt - no reason to have that code duplicated twice. Signed-off-by: Denys Vlasenko Acked-by: Noriaki TAKAMIYA Signed-off-by: Herbert Xu --- crypto/camellia.c | 935 ++++++++++++++++++++++++------------------------------ 1 file changed, 418 insertions(+), 517 deletions(-) diff --git a/crypto/camellia.c b/crypto/camellia.c index aaae60e..ac372e4 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c @@ -336,13 +336,13 @@ static const u32 camellia_sp4404[256] = { ^ ((u32)(pt)[3])) /* rotation right shift 1byte */ -#define CAMELLIA_RR8(x) (((x) >> 8) + ((x) << 24)) +#define ROR8(x) (((x) >> 8) + ((x) << 24)) /* rotation left shift 1bit */ -#define CAMELLIA_RL1(x) (((x) << 1) + ((x) >> 31)) +#define ROL1(x) (((x) << 1) + ((x) >> 31)) /* rotation left shift 1byte */ -#define CAMELLIA_RL8(x) (((x) << 8) + ((x) >> 24)) +#define ROL8(x) (((x) << 8) + ((x) >> 24)) -#define CAMELLIA_ROLDQ(ll, lr, rl, rr, w0, w1, bits) \ +#define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \ do { \ w0 = ll; \ ll = (ll << bits) + (lr >> (32 - bits)); \ @@ -351,7 +351,7 @@ static const u32 camellia_sp4404[256] = { rr = (rr << bits) + (w0 >> (32 - bits)); \ } while(0) -#define CAMELLIA_ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \ +#define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \ do { \ w0 = ll; \ w1 = lr; \ @@ -377,7 +377,7 @@ static const u32 camellia_sp4404[256] = { ^ camellia_sp3033[(il >> 8) & 0xff] \ ^ camellia_sp4404[il & 0xff]; \ yl ^= yr; \ - yr = CAMELLIA_RR8(yr); \ + yr = ROR8(yr); \ yr ^= yl; \ } while(0) @@ -393,13 +393,13 @@ static const u32 camellia_sp4404[256] = { t0 &= ll; \ t2 |= rr; \ rl ^= t2; \ - lr ^= CAMELLIA_RL1(t0); \ + lr ^= ROL1(t0); \ t3 = krl; \ t1 = klr; \ t3 &= rl; \ t1 |= lr; \ ll ^= t1; \ - rr ^= CAMELLIA_RL1(t3); \ + rr ^= ROL1(t3); \ } while(0) #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \ @@ -415,12 +415,12 @@ static const u32 camellia_sp4404[256] = { il ^= kl; \ ir ^= il ^ kr; \ yl ^= ir; \ - yr ^= CAMELLIA_RR8(il) ^ ir; \ + yr ^= ROR8(il) ^ ir; \ } while(0) -#define CAMELLIA_SUBKEY_L(INDEX) (subkey[(INDEX)*2]) -#define CAMELLIA_SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1]) +#define SUBKEY_L(INDEX) (subkey[(INDEX)*2]) +#define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1]) static void camellia_setup128(const unsigned char *key, u32 *subkey) { @@ -445,35 +445,35 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) /* kw2 */ subL[1] = krl; subR[1] = krr; /* rotation left shift 15bit */ - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); + ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k3 */ subL[4] = kll; subR[4] = klr; /* k4 */ subL[5] = krl; subR[5] = krr; /* rotation left shift 15+30bit */ - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30); + ROLDQ(kll, klr, krl, krr, w0, w1, 30); /* k7 */ subL[10] = kll; subR[10] = klr; /* k8 */ subL[11] = krl; subR[11] = krr; /* rotation left shift 15+30+15bit */ - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); + ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k10 */ subL[13] = krl; subR[13] = krr; /* rotation left shift 15+30+15+17 bit */ - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17); + ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* kl3 */ subL[16] 
= kll; subR[16] = klr; /* kl4 */ subL[17] = krl; subR[17] = krr; /* rotation left shift 15+30+15+17+17 bit */ - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17); + ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* k13 */ subL[18] = kll; subR[18] = klr; /* k14 */ subL[19] = krl; subR[19] = krr; /* rotation left shift 15+30+15+17+17+17 bit */ - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17); + ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* k17 */ subL[22] = kll; subR[22] = klr; /* k18 */ @@ -503,26 +503,26 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) /* k1, k2 */ subL[2] = kll; subR[2] = klr; subL[3] = krl; subR[3] = krr; - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); + ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k5,k6 */ subL[6] = kll; subR[6] = klr; subL[7] = krl; subR[7] = krr; - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); + ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* kl1, kl2 */ subL[8] = kll; subR[8] = klr; subL[9] = krl; subR[9] = krr; - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); + ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k9 */ subL[12] = kll; subR[12] = klr; - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); + ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k11, k12 */ subL[14] = kll; subR[14] = klr; subL[15] = krl; subR[15] = krr; - CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34); + ROLDQo32(kll, klr, krl, krr, w0, w1, 34); /* k15, k16 */ subL[20] = kll; subR[20] = klr; subL[21] = krl; subR[21] = krr; - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17); + ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* kw3, kw4 */ subL[24] = kll; subR[24] = klr; subL[25] = krl; subR[25] = krr; @@ -536,7 +536,7 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) subL[7] ^= subL[1]; subR[7] ^= subR[1]; subL[1] ^= subR[1] & ~subR[9]; dw = subL[1] & subL[9], - subR[1] ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */ + subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */ /* round 8 */ subL[11] ^= subL[1]; subR[11] ^= subR[1]; /* round 10 */ @@ -545,7 +545,7 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) subL[15] ^= subL[1]; subR[15] ^= subR[1]; subL[1] ^= subR[1] & ~subR[17]; dw = subL[1] & subL[17], - subR[1] ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */ + subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */ /* round 14 */ subL[19] ^= subL[1]; subR[19] ^= subR[1]; /* round 16 */ @@ -565,7 +565,7 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) subL[18] ^= kw4l; subR[18] ^= kw4r; kw4l ^= kw4r & ~subR[16]; dw = kw4l & subL[16], - kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl3) */ + kw4r ^= ROL1(dw); /* modified for FL(kl3) */ /* round 11 */ subL[14] ^= kw4l; subR[14] ^= kw4r; /* round 9 */ @@ -574,7 +574,7 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) subL[10] ^= kw4l; subR[10] ^= kw4r; kw4l ^= kw4r & ~subR[8]; dw = kw4l & subL[8], - kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl1) */ + kw4r ^= ROL1(dw); /* modified for FL(kl1) */ /* round 5 */ subL[6] ^= kw4l; subR[6] ^= kw4r; /* round 3 */ @@ -585,140 +585,104 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) subL[0] ^= kw4l; subR[0] ^= kw4r; /* key XOR is end of F-function */ - CAMELLIA_SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */ - CAMELLIA_SUBKEY_R(0) = subR[0] ^ subR[2]; - CAMELLIA_SUBKEY_L(2) = subL[3]; /* round 1 */ - CAMELLIA_SUBKEY_R(2) = subR[3]; - CAMELLIA_SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */ - CAMELLIA_SUBKEY_R(3) = subR[2] ^ subR[4]; - CAMELLIA_SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */ - 
CAMELLIA_SUBKEY_R(4) = subR[3] ^ subR[5]; - CAMELLIA_SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */ - CAMELLIA_SUBKEY_R(5) = subR[4] ^ subR[6]; - CAMELLIA_SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */ - CAMELLIA_SUBKEY_R(6) = subR[5] ^ subR[7]; + SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */ + SUBKEY_R(0) = subR[0] ^ subR[2]; + SUBKEY_L(2) = subL[3]; /* round 1 */ + SUBKEY_R(2) = subR[3]; + SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */ + SUBKEY_R(3) = subR[2] ^ subR[4]; + SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */ + SUBKEY_R(4) = subR[3] ^ subR[5]; + SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */ + SUBKEY_R(5) = subR[4] ^ subR[6]; + SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */ + SUBKEY_R(6) = subR[5] ^ subR[7]; tl = subL[10] ^ (subR[10] & ~subR[8]); dw = tl & subL[8], /* FL(kl1) */ - tr = subR[10] ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ - CAMELLIA_SUBKEY_R(7) = subR[6] ^ tr; - CAMELLIA_SUBKEY_L(8) = subL[8]; /* FL(kl1) */ - CAMELLIA_SUBKEY_R(8) = subR[8]; - CAMELLIA_SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */ - CAMELLIA_SUBKEY_R(9) = subR[9]; + tr = subR[10] ^ ROL1(dw); + SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ + SUBKEY_R(7) = subR[6] ^ tr; + SUBKEY_L(8) = subL[8]; /* FL(kl1) */ + SUBKEY_R(8) = subR[8]; + SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */ + SUBKEY_R(9) = subR[9]; tl = subL[7] ^ (subR[7] & ~subR[9]); dw = tl & subL[9], /* FLinv(kl2) */ - tr = subR[7] ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ - CAMELLIA_SUBKEY_R(10) = tr ^ subR[11]; - CAMELLIA_SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ - CAMELLIA_SUBKEY_R(11) = subR[10] ^ subR[12]; - CAMELLIA_SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */ - CAMELLIA_SUBKEY_R(12) = subR[11] ^ subR[13]; - CAMELLIA_SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */ - CAMELLIA_SUBKEY_R(13) = subR[12] ^ subR[14]; - CAMELLIA_SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */ - CAMELLIA_SUBKEY_R(14) = subR[13] ^ subR[15]; + tr = subR[7] ^ ROL1(dw); + SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ + SUBKEY_R(10) = tr ^ subR[11]; + SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ + SUBKEY_R(11) = subR[10] ^ subR[12]; + SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */ + SUBKEY_R(12) = subR[11] ^ subR[13]; + SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */ + SUBKEY_R(13) = subR[12] ^ subR[14]; + SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */ + SUBKEY_R(14) = subR[13] ^ subR[15]; tl = subL[18] ^ (subR[18] & ~subR[16]); dw = tl & subL[16], /* FL(kl3) */ - tr = subR[18] ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ - CAMELLIA_SUBKEY_R(15) = subR[14] ^ tr; - CAMELLIA_SUBKEY_L(16) = subL[16]; /* FL(kl3) */ - CAMELLIA_SUBKEY_R(16) = subR[16]; - CAMELLIA_SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */ - CAMELLIA_SUBKEY_R(17) = subR[17]; + tr = subR[18] ^ ROL1(dw); + SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ + SUBKEY_R(15) = subR[14] ^ tr; + SUBKEY_L(16) = subL[16]; /* FL(kl3) */ + SUBKEY_R(16) = subR[16]; + SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */ + SUBKEY_R(17) = subR[17]; tl = subL[15] ^ (subR[15] & ~subR[17]); dw = tl & subL[17], /* FLinv(kl4) */ - tr = subR[15] ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ - CAMELLIA_SUBKEY_R(18) = tr ^ subR[19]; - CAMELLIA_SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ - CAMELLIA_SUBKEY_R(19) = subR[18] ^ subR[20]; - CAMELLIA_SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */ - CAMELLIA_SUBKEY_R(20) = subR[19] ^ subR[21]; - CAMELLIA_SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */ - 
CAMELLIA_SUBKEY_R(21) = subR[20] ^ subR[22]; - CAMELLIA_SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */ - CAMELLIA_SUBKEY_R(22) = subR[21] ^ subR[23]; - CAMELLIA_SUBKEY_L(23) = subL[22]; /* round 18 */ - CAMELLIA_SUBKEY_R(23) = subR[22]; - CAMELLIA_SUBKEY_L(24) = subL[24] ^ subL[23]; /* kw3 */ - CAMELLIA_SUBKEY_R(24) = subR[24] ^ subR[23]; + tr = subR[15] ^ ROL1(dw); + SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ + SUBKEY_R(18) = tr ^ subR[19]; + SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ + SUBKEY_R(19) = subR[18] ^ subR[20]; + SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */ + SUBKEY_R(20) = subR[19] ^ subR[21]; + SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */ + SUBKEY_R(21) = subR[20] ^ subR[22]; + SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */ + SUBKEY_R(22) = subR[21] ^ subR[23]; + SUBKEY_L(23) = subL[22]; /* round 18 */ + SUBKEY_R(23) = subR[22]; + SUBKEY_L(24) = subL[24] ^ subL[23]; /* kw3 */ + SUBKEY_R(24) = subR[24] ^ subR[23]; /* apply the inverse of the last half of P-function */ - dw = CAMELLIA_SUBKEY_L(2) ^ CAMELLIA_SUBKEY_R(2), - dw = CAMELLIA_RL8(dw);/* round 1 */ - CAMELLIA_SUBKEY_R(2) = CAMELLIA_SUBKEY_L(2) ^ dw, - CAMELLIA_SUBKEY_L(2) = dw; - dw = CAMELLIA_SUBKEY_L(3) ^ CAMELLIA_SUBKEY_R(3), - dw = CAMELLIA_RL8(dw);/* round 2 */ - CAMELLIA_SUBKEY_R(3) = CAMELLIA_SUBKEY_L(3) ^ dw, - CAMELLIA_SUBKEY_L(3) = dw; - dw = CAMELLIA_SUBKEY_L(4) ^ CAMELLIA_SUBKEY_R(4), - dw = CAMELLIA_RL8(dw);/* round 3 */ - CAMELLIA_SUBKEY_R(4) = CAMELLIA_SUBKEY_L(4) ^ dw, - CAMELLIA_SUBKEY_L(4) = dw; - dw = CAMELLIA_SUBKEY_L(5) ^ CAMELLIA_SUBKEY_R(5), - dw = CAMELLIA_RL8(dw);/* round 4 */ - CAMELLIA_SUBKEY_R(5) = CAMELLIA_SUBKEY_L(5) ^ dw, - CAMELLIA_SUBKEY_L(5) = dw; - dw = CAMELLIA_SUBKEY_L(6) ^ CAMELLIA_SUBKEY_R(6), - dw = CAMELLIA_RL8(dw);/* round 5 */ - CAMELLIA_SUBKEY_R(6) = CAMELLIA_SUBKEY_L(6) ^ dw, - CAMELLIA_SUBKEY_L(6) = dw; - dw = CAMELLIA_SUBKEY_L(7) ^ CAMELLIA_SUBKEY_R(7), - dw = CAMELLIA_RL8(dw);/* round 6 */ - CAMELLIA_SUBKEY_R(7) = CAMELLIA_SUBKEY_L(7) ^ dw, - CAMELLIA_SUBKEY_L(7) = dw; - dw = CAMELLIA_SUBKEY_L(10) ^ CAMELLIA_SUBKEY_R(10), - dw = CAMELLIA_RL8(dw);/* round 7 */ - CAMELLIA_SUBKEY_R(10) = CAMELLIA_SUBKEY_L(10) ^ dw, - CAMELLIA_SUBKEY_L(10) = dw; - dw = CAMELLIA_SUBKEY_L(11) ^ CAMELLIA_SUBKEY_R(11), - dw = CAMELLIA_RL8(dw);/* round 8 */ - CAMELLIA_SUBKEY_R(11) = CAMELLIA_SUBKEY_L(11) ^ dw, - CAMELLIA_SUBKEY_L(11) = dw; - dw = CAMELLIA_SUBKEY_L(12) ^ CAMELLIA_SUBKEY_R(12), - dw = CAMELLIA_RL8(dw);/* round 9 */ - CAMELLIA_SUBKEY_R(12) = CAMELLIA_SUBKEY_L(12) ^ dw, - CAMELLIA_SUBKEY_L(12) = dw; - dw = CAMELLIA_SUBKEY_L(13) ^ CAMELLIA_SUBKEY_R(13), - dw = CAMELLIA_RL8(dw);/* round 10 */ - CAMELLIA_SUBKEY_R(13) = CAMELLIA_SUBKEY_L(13) ^ dw, - CAMELLIA_SUBKEY_L(13) = dw; - dw = CAMELLIA_SUBKEY_L(14) ^ CAMELLIA_SUBKEY_R(14), - dw = CAMELLIA_RL8(dw);/* round 11 */ - CAMELLIA_SUBKEY_R(14) = CAMELLIA_SUBKEY_L(14) ^ dw, - CAMELLIA_SUBKEY_L(14) = dw; - dw = CAMELLIA_SUBKEY_L(15) ^ CAMELLIA_SUBKEY_R(15), - dw = CAMELLIA_RL8(dw);/* round 12 */ - CAMELLIA_SUBKEY_R(15) = CAMELLIA_SUBKEY_L(15) ^ dw, - CAMELLIA_SUBKEY_L(15) = dw; - dw = CAMELLIA_SUBKEY_L(18) ^ CAMELLIA_SUBKEY_R(18), - dw = CAMELLIA_RL8(dw);/* round 13 */ - CAMELLIA_SUBKEY_R(18) = CAMELLIA_SUBKEY_L(18) ^ dw, - CAMELLIA_SUBKEY_L(18) = dw; - dw = CAMELLIA_SUBKEY_L(19) ^ CAMELLIA_SUBKEY_R(19), - dw = CAMELLIA_RL8(dw);/* round 14 */ - CAMELLIA_SUBKEY_R(19) = CAMELLIA_SUBKEY_L(19) ^ dw, - CAMELLIA_SUBKEY_L(19) = dw; - dw = CAMELLIA_SUBKEY_L(20) ^ CAMELLIA_SUBKEY_R(20), - dw = CAMELLIA_RL8(dw);/* round 15 
*/ - CAMELLIA_SUBKEY_R(20) = CAMELLIA_SUBKEY_L(20) ^ dw, - CAMELLIA_SUBKEY_L(20) = dw; - dw = CAMELLIA_SUBKEY_L(21) ^ CAMELLIA_SUBKEY_R(21), - dw = CAMELLIA_RL8(dw);/* round 16 */ - CAMELLIA_SUBKEY_R(21) = CAMELLIA_SUBKEY_L(21) ^ dw, - CAMELLIA_SUBKEY_L(21) = dw; - dw = CAMELLIA_SUBKEY_L(22) ^ CAMELLIA_SUBKEY_R(22), - dw = CAMELLIA_RL8(dw);/* round 17 */ - CAMELLIA_SUBKEY_R(22) = CAMELLIA_SUBKEY_L(22) ^ dw, - CAMELLIA_SUBKEY_L(22) = dw; - dw = CAMELLIA_SUBKEY_L(23) ^ CAMELLIA_SUBKEY_R(23), - dw = CAMELLIA_RL8(dw);/* round 18 */ - CAMELLIA_SUBKEY_R(23) = CAMELLIA_SUBKEY_L(23) ^ dw, - CAMELLIA_SUBKEY_L(23) = dw; + dw = SUBKEY_L(2) ^ SUBKEY_R(2); dw = ROL8(dw);/* round 1 */ + SUBKEY_R(2) = SUBKEY_L(2) ^ dw; SUBKEY_L(2) = dw; + dw = SUBKEY_L(3) ^ SUBKEY_R(3); dw = ROL8(dw);/* round 2 */ + SUBKEY_R(3) = SUBKEY_L(3) ^ dw; SUBKEY_L(3) = dw; + dw = SUBKEY_L(4) ^ SUBKEY_R(4); dw = ROL8(dw);/* round 3 */ + SUBKEY_R(4) = SUBKEY_L(4) ^ dw; SUBKEY_L(4) = dw; + dw = SUBKEY_L(5) ^ SUBKEY_R(5); dw = ROL8(dw);/* round 4 */ + SUBKEY_R(5) = SUBKEY_L(5) ^ dw; SUBKEY_L(5) = dw; + dw = SUBKEY_L(6) ^ SUBKEY_R(6); dw = ROL8(dw);/* round 5 */ + SUBKEY_R(6) = SUBKEY_L(6) ^ dw; SUBKEY_L(6) = dw; + dw = SUBKEY_L(7) ^ SUBKEY_R(7); dw = ROL8(dw);/* round 6 */ + SUBKEY_R(7) = SUBKEY_L(7) ^ dw; SUBKEY_L(7) = dw; + dw = SUBKEY_L(10) ^ SUBKEY_R(10); dw = ROL8(dw);/* round 7 */ + SUBKEY_R(10) = SUBKEY_L(10) ^ dw; SUBKEY_L(10) = dw; + dw = SUBKEY_L(11) ^ SUBKEY_R(11); dw = ROL8(dw);/* round 8 */ + SUBKEY_R(11) = SUBKEY_L(11) ^ dw; SUBKEY_L(11) = dw; + dw = SUBKEY_L(12) ^ SUBKEY_R(12); dw = ROL8(dw);/* round 9 */ + SUBKEY_R(12) = SUBKEY_L(12) ^ dw; SUBKEY_L(12) = dw; + dw = SUBKEY_L(13) ^ SUBKEY_R(13); dw = ROL8(dw);/* round 10 */ + SUBKEY_R(13) = SUBKEY_L(13) ^ dw; SUBKEY_L(13) = dw; + dw = SUBKEY_L(14) ^ SUBKEY_R(14); dw = ROL8(dw);/* round 11 */ + SUBKEY_R(14) = SUBKEY_L(14) ^ dw; SUBKEY_L(14) = dw; + dw = SUBKEY_L(15) ^ SUBKEY_R(15); dw = ROL8(dw);/* round 12 */ + SUBKEY_R(15) = SUBKEY_L(15) ^ dw; SUBKEY_L(15) = dw; + dw = SUBKEY_L(18) ^ SUBKEY_R(18); dw = ROL8(dw);/* round 13 */ + SUBKEY_R(18) = SUBKEY_L(18) ^ dw; SUBKEY_L(18) = dw; + dw = SUBKEY_L(19) ^ SUBKEY_R(19); dw = ROL8(dw);/* round 14 */ + SUBKEY_R(19) = SUBKEY_L(19) ^ dw; SUBKEY_L(19) = dw; + dw = SUBKEY_L(20) ^ SUBKEY_R(20); dw = ROL8(dw);/* round 15 */ + SUBKEY_R(20) = SUBKEY_L(20) ^ dw; SUBKEY_L(20) = dw; + dw = SUBKEY_L(21) ^ SUBKEY_R(21); dw = ROL8(dw);/* round 16 */ + SUBKEY_R(21) = SUBKEY_L(21) ^ dw; SUBKEY_L(21) = dw; + dw = SUBKEY_L(22) ^ SUBKEY_R(22); dw = ROL8(dw);/* round 17 */ + SUBKEY_R(22) = SUBKEY_L(22) ^ dw; SUBKEY_L(22) = dw; + dw = SUBKEY_L(23) ^ SUBKEY_R(23); dw = ROL8(dw);/* round 18 */ + SUBKEY_R(23) = SUBKEY_L(23) ^ dw; SUBKEY_L(23) = dw; } static void camellia_setup256(const unsigned char *key, u32 *subkey) @@ -734,7 +698,6 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr) * (|| is concatination) */ - kll = GETU32(key ); klr = GETU32(key + 4); krl = GETU32(key + 8); @@ -749,49 +712,49 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) subL[0] = kll; subR[0] = klr; /* kw2 */ subL[1] = krl; subR[1] = krr; - CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 45); + ROLDQo32(kll, klr, krl, krr, w0, w1, 45); /* k9 */ subL[12] = kll; subR[12] = klr; /* k10 */ subL[13] = krl; subR[13] = krr; - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); + ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* kl3 */ subL[16] = kll; subR[16] = klr; /* kl4 */ subL[17] 
= krl; subR[17] = krr; - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17); + ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* k17 */ subL[22] = kll; subR[22] = klr; /* k18 */ subL[23] = krl; subR[23] = krr; - CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34); + ROLDQo32(kll, klr, krl, krr, w0, w1, 34); /* k23 */ subL[30] = kll; subR[30] = klr; /* k24 */ subL[31] = krl; subR[31] = krr; /* generate KR dependent subkeys */ - CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15); + ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15); /* k3 */ subL[4] = krll; subR[4] = krlr; /* k4 */ subL[5] = krrl; subR[5] = krrr; - CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15); + ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15); /* kl1 */ subL[8] = krll; subR[8] = krlr; /* kl2 */ subL[9] = krrl; subR[9] = krrr; - CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30); + ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30); /* k13 */ subL[18] = krll; subR[18] = krlr; /* k14 */ subL[19] = krrl; subR[19] = krrr; - CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34); + ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34); /* k19 */ subL[26] = krll; subR[26] = krlr; /* k20 */ subL[27] = krrl; subR[27] = krrr; - CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34); + ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34); /* generate KA */ kll = subL[0] ^ krll; klr = subR[0] ^ krlr; @@ -826,12 +789,12 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) krll ^= w0; krlr ^= w1; /* generate KA dependent subkeys */ - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15); + ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k5 */ subL[6] = kll; subR[6] = klr; /* k6 */ subL[7] = krl; subR[7] = krr; - CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30); + ROLDQ(kll, klr, krl, krr, w0, w1, 30); /* k11 */ subL[14] = kll; subR[14] = klr; /* k12 */ @@ -842,7 +805,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) /* kl6 */ subL[25] = krr; subR[25] = kll; /* rotation left shift 49 from k11,k12 -> k21,k22 */ - CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 49); + ROLDQo32(kll, klr, krl, krr, w0, w1, 49); /* k21 */ subL[28] = kll; subR[28] = klr; /* k22 */ @@ -853,17 +816,17 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) subL[2] = krll; subR[2] = krlr; /* k2 */ subL[3] = krrl; subR[3] = krrr; - CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30); + ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30); /* k7 */ subL[10] = krll; subR[10] = krlr; /* k8 */ subL[11] = krrl; subR[11] = krrr; - CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30); + ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30); /* k15 */ subL[20] = krll; subR[20] = krlr; /* k16 */ subL[21] = krrl; subR[21] = krrr; - CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 51); + ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 51); /* kw3 */ subL[32] = krll; subR[32] = krlr; /* kw4 */ @@ -878,7 +841,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) subL[7] ^= subL[1]; subR[7] ^= subR[1]; subL[1] ^= subR[1] & ~subR[9]; dw = subL[1] & subL[9], - subR[1] ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */ + subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */ /* round 8 */ subL[11] ^= subL[1]; subR[11] ^= subR[1]; /* round 10 */ @@ -887,7 +850,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) subL[15] ^= subL[1]; subR[15] ^= subR[1]; subL[1] ^= subR[1] & ~subR[17]; dw = subL[1] & subL[17], - subR[1] ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */ + subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */ /* round 14 */ subL[19] ^= subL[1]; subR[19] ^= subR[1]; /* round 
16 */ @@ -896,7 +859,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) subL[23] ^= subL[1]; subR[23] ^= subR[1]; subL[1] ^= subR[1] & ~subR[25]; dw = subL[1] & subL[25], - subR[1] ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl6) */ + subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */ /* round 20 */ subL[27] ^= subL[1]; subR[27] ^= subR[1]; /* round 22 */ @@ -916,7 +879,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) subL[26] ^= kw4l; subR[26] ^= kw4r; kw4l ^= kw4r & ~subR[24]; dw = kw4l & subL[24], - kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl5) */ + kw4r ^= ROL1(dw); /* modified for FL(kl5) */ /* round 17 */ subL[22] ^= kw4l; subR[22] ^= kw4r; /* round 15 */ @@ -925,7 +888,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) subL[18] ^= kw4l; subR[18] ^= kw4r; kw4l ^= kw4r & ~subR[16]; dw = kw4l & subL[16], - kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl3) */ + kw4r ^= ROL1(dw); /* modified for FL(kl3) */ /* round 11 */ subL[14] ^= kw4l; subR[14] ^= kw4r; /* round 9 */ @@ -934,7 +897,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) subL[10] ^= kw4l; subR[10] ^= kw4r; kw4l ^= kw4r & ~subR[8]; dw = kw4l & subL[8], - kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl1) */ + kw4r ^= ROL1(dw); /* modified for FL(kl1) */ /* round 5 */ subL[6] ^= kw4l; subR[6] ^= kw4r; /* round 3 */ @@ -945,188 +908,138 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) subL[0] ^= kw4l; subR[0] ^= kw4r; /* key XOR is end of F-function */ - CAMELLIA_SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */ - CAMELLIA_SUBKEY_R(0) = subR[0] ^ subR[2]; - CAMELLIA_SUBKEY_L(2) = subL[3]; /* round 1 */ - CAMELLIA_SUBKEY_R(2) = subR[3]; - CAMELLIA_SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */ - CAMELLIA_SUBKEY_R(3) = subR[2] ^ subR[4]; - CAMELLIA_SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */ - CAMELLIA_SUBKEY_R(4) = subR[3] ^ subR[5]; - CAMELLIA_SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */ - CAMELLIA_SUBKEY_R(5) = subR[4] ^ subR[6]; - CAMELLIA_SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */ - CAMELLIA_SUBKEY_R(6) = subR[5] ^ subR[7]; + SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */ + SUBKEY_R(0) = subR[0] ^ subR[2]; + SUBKEY_L(2) = subL[3]; /* round 1 */ + SUBKEY_R(2) = subR[3]; + SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */ + SUBKEY_R(3) = subR[2] ^ subR[4]; + SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */ + SUBKEY_R(4) = subR[3] ^ subR[5]; + SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */ + SUBKEY_R(5) = subR[4] ^ subR[6]; + SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */ + SUBKEY_R(6) = subR[5] ^ subR[7]; tl = subL[10] ^ (subR[10] & ~subR[8]); dw = tl & subL[8], /* FL(kl1) */ - tr = subR[10] ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ - CAMELLIA_SUBKEY_R(7) = subR[6] ^ tr; - CAMELLIA_SUBKEY_L(8) = subL[8]; /* FL(kl1) */ - CAMELLIA_SUBKEY_R(8) = subR[8]; - CAMELLIA_SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */ - CAMELLIA_SUBKEY_R(9) = subR[9]; + tr = subR[10] ^ ROL1(dw); + SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ + SUBKEY_R(7) = subR[6] ^ tr; + SUBKEY_L(8) = subL[8]; /* FL(kl1) */ + SUBKEY_R(8) = subR[8]; + SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */ + SUBKEY_R(9) = subR[9]; tl = subL[7] ^ (subR[7] & ~subR[9]); dw = tl & subL[9], /* FLinv(kl2) */ - tr = subR[7] ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ - CAMELLIA_SUBKEY_R(10) = tr ^ subR[11]; - CAMELLIA_SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ - CAMELLIA_SUBKEY_R(11) = subR[10] ^ subR[12]; - 
CAMELLIA_SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */ - CAMELLIA_SUBKEY_R(12) = subR[11] ^ subR[13]; - CAMELLIA_SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */ - CAMELLIA_SUBKEY_R(13) = subR[12] ^ subR[14]; - CAMELLIA_SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */ - CAMELLIA_SUBKEY_R(14) = subR[13] ^ subR[15]; + tr = subR[7] ^ ROL1(dw); + SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ + SUBKEY_R(10) = tr ^ subR[11]; + SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ + SUBKEY_R(11) = subR[10] ^ subR[12]; + SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */ + SUBKEY_R(12) = subR[11] ^ subR[13]; + SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */ + SUBKEY_R(13) = subR[12] ^ subR[14]; + SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */ + SUBKEY_R(14) = subR[13] ^ subR[15]; tl = subL[18] ^ (subR[18] & ~subR[16]); dw = tl & subL[16], /* FL(kl3) */ - tr = subR[18] ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ - CAMELLIA_SUBKEY_R(15) = subR[14] ^ tr; - CAMELLIA_SUBKEY_L(16) = subL[16]; /* FL(kl3) */ - CAMELLIA_SUBKEY_R(16) = subR[16]; - CAMELLIA_SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */ - CAMELLIA_SUBKEY_R(17) = subR[17]; + tr = subR[18] ^ ROL1(dw); + SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ + SUBKEY_R(15) = subR[14] ^ tr; + SUBKEY_L(16) = subL[16]; /* FL(kl3) */ + SUBKEY_R(16) = subR[16]; + SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */ + SUBKEY_R(17) = subR[17]; tl = subL[15] ^ (subR[15] & ~subR[17]); dw = tl & subL[17], /* FLinv(kl4) */ - tr = subR[15] ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ - CAMELLIA_SUBKEY_R(18) = tr ^ subR[19]; - CAMELLIA_SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ - CAMELLIA_SUBKEY_R(19) = subR[18] ^ subR[20]; - CAMELLIA_SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */ - CAMELLIA_SUBKEY_R(20) = subR[19] ^ subR[21]; - CAMELLIA_SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */ - CAMELLIA_SUBKEY_R(21) = subR[20] ^ subR[22]; - CAMELLIA_SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */ - CAMELLIA_SUBKEY_R(22) = subR[21] ^ subR[23]; - tl = subL[26] ^ (subR[26] - & ~subR[24]); + tr = subR[15] ^ ROL1(dw); + SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ + SUBKEY_R(18) = tr ^ subR[19]; + SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ + SUBKEY_R(19) = subR[18] ^ subR[20]; + SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */ + SUBKEY_R(20) = subR[19] ^ subR[21]; + SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */ + SUBKEY_R(21) = subR[20] ^ subR[22]; + SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */ + SUBKEY_R(22) = subR[21] ^ subR[23]; + tl = subL[26] ^ (subR[26] & ~subR[24]); dw = tl & subL[24], /* FL(kl5) */ - tr = subR[26] ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */ - CAMELLIA_SUBKEY_R(23) = subR[22] ^ tr; - CAMELLIA_SUBKEY_L(24) = subL[24]; /* FL(kl5) */ - CAMELLIA_SUBKEY_R(24) = subR[24]; - CAMELLIA_SUBKEY_L(25) = subL[25]; /* FLinv(kl6) */ - CAMELLIA_SUBKEY_R(25) = subR[25]; - tl = subL[23] ^ (subR[23] & - ~subR[25]); + tr = subR[26] ^ ROL1(dw); + SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */ + SUBKEY_R(23) = subR[22] ^ tr; + SUBKEY_L(24) = subL[24]; /* FL(kl5) */ + SUBKEY_R(24) = subR[24]; + SUBKEY_L(25) = subL[25]; /* FLinv(kl6) */ + SUBKEY_R(25) = subR[25]; + tl = subL[23] ^ (subR[23] & ~subR[25]); dw = tl & subL[25], /* FLinv(kl6) */ - tr = subR[23] ^ CAMELLIA_RL1(dw); - CAMELLIA_SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */ - CAMELLIA_SUBKEY_R(26) = tr ^ subR[27]; - CAMELLIA_SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ - CAMELLIA_SUBKEY_R(27) = 
subR[26] ^ subR[28]; - CAMELLIA_SUBKEY_L(28) = subL[27] ^ subL[29]; /* round 21 */ - CAMELLIA_SUBKEY_R(28) = subR[27] ^ subR[29]; - CAMELLIA_SUBKEY_L(29) = subL[28] ^ subL[30]; /* round 22 */ - CAMELLIA_SUBKEY_R(29) = subR[28] ^ subR[30]; - CAMELLIA_SUBKEY_L(30) = subL[29] ^ subL[31]; /* round 23 */ - CAMELLIA_SUBKEY_R(30) = subR[29] ^ subR[31]; - CAMELLIA_SUBKEY_L(31) = subL[30]; /* round 24 */ - CAMELLIA_SUBKEY_R(31) = subR[30]; - CAMELLIA_SUBKEY_L(32) = subL[32] ^ subL[31]; /* kw3 */ - CAMELLIA_SUBKEY_R(32) = subR[32] ^ subR[31]; + tr = subR[23] ^ ROL1(dw); + SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */ + SUBKEY_R(26) = tr ^ subR[27]; + SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ + SUBKEY_R(27) = subR[26] ^ subR[28]; + SUBKEY_L(28) = subL[27] ^ subL[29]; /* round 21 */ + SUBKEY_R(28) = subR[27] ^ subR[29]; + SUBKEY_L(29) = subL[28] ^ subL[30]; /* round 22 */ + SUBKEY_R(29) = subR[28] ^ subR[30]; + SUBKEY_L(30) = subL[29] ^ subL[31]; /* round 23 */ + SUBKEY_R(30) = subR[29] ^ subR[31]; + SUBKEY_L(31) = subL[30]; /* round 24 */ + SUBKEY_R(31) = subR[30]; + SUBKEY_L(32) = subL[32] ^ subL[31]; /* kw3 */ + SUBKEY_R(32) = subR[32] ^ subR[31]; /* apply the inverse of the last half of P-function */ - dw = CAMELLIA_SUBKEY_L(2) ^ CAMELLIA_SUBKEY_R(2), - dw = CAMELLIA_RL8(dw);/* round 1 */ - CAMELLIA_SUBKEY_R(2) = CAMELLIA_SUBKEY_L(2) ^ dw, - CAMELLIA_SUBKEY_L(2) = dw; - dw = CAMELLIA_SUBKEY_L(3) ^ CAMELLIA_SUBKEY_R(3), - dw = CAMELLIA_RL8(dw);/* round 2 */ - CAMELLIA_SUBKEY_R(3) = CAMELLIA_SUBKEY_L(3) ^ dw, - CAMELLIA_SUBKEY_L(3) = dw; - dw = CAMELLIA_SUBKEY_L(4) ^ CAMELLIA_SUBKEY_R(4), - dw = CAMELLIA_RL8(dw);/* round 3 */ - CAMELLIA_SUBKEY_R(4) = CAMELLIA_SUBKEY_L(4) ^ dw, - CAMELLIA_SUBKEY_L(4) = dw; - dw = CAMELLIA_SUBKEY_L(5) ^ CAMELLIA_SUBKEY_R(5), - dw = CAMELLIA_RL8(dw);/* round 4 */ - CAMELLIA_SUBKEY_R(5) = CAMELLIA_SUBKEY_L(5) ^ dw, - CAMELLIA_SUBKEY_L(5) = dw; - dw = CAMELLIA_SUBKEY_L(6) ^ CAMELLIA_SUBKEY_R(6), - dw = CAMELLIA_RL8(dw);/* round 5 */ - CAMELLIA_SUBKEY_R(6) = CAMELLIA_SUBKEY_L(6) ^ dw, - CAMELLIA_SUBKEY_L(6) = dw; - dw = CAMELLIA_SUBKEY_L(7) ^ CAMELLIA_SUBKEY_R(7), - dw = CAMELLIA_RL8(dw);/* round 6 */ - CAMELLIA_SUBKEY_R(7) = CAMELLIA_SUBKEY_L(7) ^ dw, - CAMELLIA_SUBKEY_L(7) = dw; - dw = CAMELLIA_SUBKEY_L(10) ^ CAMELLIA_SUBKEY_R(10), - dw = CAMELLIA_RL8(dw);/* round 7 */ - CAMELLIA_SUBKEY_R(10) = CAMELLIA_SUBKEY_L(10) ^ dw, - CAMELLIA_SUBKEY_L(10) = dw; - dw = CAMELLIA_SUBKEY_L(11) ^ CAMELLIA_SUBKEY_R(11), - dw = CAMELLIA_RL8(dw);/* round 8 */ - CAMELLIA_SUBKEY_R(11) = CAMELLIA_SUBKEY_L(11) ^ dw, - CAMELLIA_SUBKEY_L(11) = dw; - dw = CAMELLIA_SUBKEY_L(12) ^ CAMELLIA_SUBKEY_R(12), - dw = CAMELLIA_RL8(dw);/* round 9 */ - CAMELLIA_SUBKEY_R(12) = CAMELLIA_SUBKEY_L(12) ^ dw, - CAMELLIA_SUBKEY_L(12) = dw; - dw = CAMELLIA_SUBKEY_L(13) ^ CAMELLIA_SUBKEY_R(13), - dw = CAMELLIA_RL8(dw);/* round 10 */ - CAMELLIA_SUBKEY_R(13) = CAMELLIA_SUBKEY_L(13) ^ dw, - CAMELLIA_SUBKEY_L(13) = dw; - dw = CAMELLIA_SUBKEY_L(14) ^ CAMELLIA_SUBKEY_R(14), - dw = CAMELLIA_RL8(dw);/* round 11 */ - CAMELLIA_SUBKEY_R(14) = CAMELLIA_SUBKEY_L(14) ^ dw, - CAMELLIA_SUBKEY_L(14) = dw; - dw = CAMELLIA_SUBKEY_L(15) ^ CAMELLIA_SUBKEY_R(15), - dw = CAMELLIA_RL8(dw);/* round 12 */ - CAMELLIA_SUBKEY_R(15) = CAMELLIA_SUBKEY_L(15) ^ dw, - CAMELLIA_SUBKEY_L(15) = dw; - dw = CAMELLIA_SUBKEY_L(18) ^ CAMELLIA_SUBKEY_R(18), - dw = CAMELLIA_RL8(dw);/* round 13 */ - CAMELLIA_SUBKEY_R(18) = CAMELLIA_SUBKEY_L(18) ^ dw, - CAMELLIA_SUBKEY_L(18) = dw; - dw = CAMELLIA_SUBKEY_L(19) ^ CAMELLIA_SUBKEY_R(19), - dw = 
CAMELLIA_RL8(dw);/* round 14 */ - CAMELLIA_SUBKEY_R(19) = CAMELLIA_SUBKEY_L(19) ^ dw, - CAMELLIA_SUBKEY_L(19) = dw; - dw = CAMELLIA_SUBKEY_L(20) ^ CAMELLIA_SUBKEY_R(20), - dw = CAMELLIA_RL8(dw);/* round 15 */ - CAMELLIA_SUBKEY_R(20) = CAMELLIA_SUBKEY_L(20) ^ dw, - CAMELLIA_SUBKEY_L(20) = dw; - dw = CAMELLIA_SUBKEY_L(21) ^ CAMELLIA_SUBKEY_R(21), - dw = CAMELLIA_RL8(dw);/* round 16 */ - CAMELLIA_SUBKEY_R(21) = CAMELLIA_SUBKEY_L(21) ^ dw, - CAMELLIA_SUBKEY_L(21) = dw; - dw = CAMELLIA_SUBKEY_L(22) ^ CAMELLIA_SUBKEY_R(22), - dw = CAMELLIA_RL8(dw);/* round 17 */ - CAMELLIA_SUBKEY_R(22) = CAMELLIA_SUBKEY_L(22) ^ dw, - CAMELLIA_SUBKEY_L(22) = dw; - dw = CAMELLIA_SUBKEY_L(23) ^ CAMELLIA_SUBKEY_R(23), - dw = CAMELLIA_RL8(dw);/* round 18 */ - CAMELLIA_SUBKEY_R(23) = CAMELLIA_SUBKEY_L(23) ^ dw, - CAMELLIA_SUBKEY_L(23) = dw; - dw = CAMELLIA_SUBKEY_L(26) ^ CAMELLIA_SUBKEY_R(26), - dw = CAMELLIA_RL8(dw);/* round 19 */ - CAMELLIA_SUBKEY_R(26) = CAMELLIA_SUBKEY_L(26) ^ dw, - CAMELLIA_SUBKEY_L(26) = dw; - dw = CAMELLIA_SUBKEY_L(27) ^ CAMELLIA_SUBKEY_R(27), - dw = CAMELLIA_RL8(dw);/* round 20 */ - CAMELLIA_SUBKEY_R(27) = CAMELLIA_SUBKEY_L(27) ^ dw, - CAMELLIA_SUBKEY_L(27) = dw; - dw = CAMELLIA_SUBKEY_L(28) ^ CAMELLIA_SUBKEY_R(28), - dw = CAMELLIA_RL8(dw);/* round 21 */ - CAMELLIA_SUBKEY_R(28) = CAMELLIA_SUBKEY_L(28) ^ dw, - CAMELLIA_SUBKEY_L(28) = dw; - dw = CAMELLIA_SUBKEY_L(29) ^ CAMELLIA_SUBKEY_R(29), - dw = CAMELLIA_RL8(dw);/* round 22 */ - CAMELLIA_SUBKEY_R(29) = CAMELLIA_SUBKEY_L(29) ^ dw, - CAMELLIA_SUBKEY_L(29) = dw; - dw = CAMELLIA_SUBKEY_L(30) ^ CAMELLIA_SUBKEY_R(30), - dw = CAMELLIA_RL8(dw);/* round 23 */ - CAMELLIA_SUBKEY_R(30) = CAMELLIA_SUBKEY_L(30) ^ dw, - CAMELLIA_SUBKEY_L(30) = dw; - dw = CAMELLIA_SUBKEY_L(31) ^ CAMELLIA_SUBKEY_R(31), - dw = CAMELLIA_RL8(dw);/* round 24 */ - CAMELLIA_SUBKEY_R(31) = CAMELLIA_SUBKEY_L(31) ^ dw, - CAMELLIA_SUBKEY_L(31) = dw; + dw = SUBKEY_L(2) ^ SUBKEY_R(2); dw = ROL8(dw);/* round 1 */ + SUBKEY_R(2) = SUBKEY_L(2) ^ dw; SUBKEY_L(2) = dw; + dw = SUBKEY_L(3) ^ SUBKEY_R(3); dw = ROL8(dw);/* round 2 */ + SUBKEY_R(3) = SUBKEY_L(3) ^ dw; SUBKEY_L(3) = dw; + dw = SUBKEY_L(4) ^ SUBKEY_R(4); dw = ROL8(dw);/* round 3 */ + SUBKEY_R(4) = SUBKEY_L(4) ^ dw; SUBKEY_L(4) = dw; + dw = SUBKEY_L(5) ^ SUBKEY_R(5); dw = ROL8(dw);/* round 4 */ + SUBKEY_R(5) = SUBKEY_L(5) ^ dw; SUBKEY_L(5) = dw; + dw = SUBKEY_L(6) ^ SUBKEY_R(6); dw = ROL8(dw);/* round 5 */ + SUBKEY_R(6) = SUBKEY_L(6) ^ dw; SUBKEY_L(6) = dw; + dw = SUBKEY_L(7) ^ SUBKEY_R(7); dw = ROL8(dw);/* round 6 */ + SUBKEY_R(7) = SUBKEY_L(7) ^ dw; SUBKEY_L(7) = dw; + dw = SUBKEY_L(10) ^ SUBKEY_R(10); dw = ROL8(dw);/* round 7 */ + SUBKEY_R(10) = SUBKEY_L(10) ^ dw; SUBKEY_L(10) = dw; + dw = SUBKEY_L(11) ^ SUBKEY_R(11); dw = ROL8(dw);/* round 8 */ + SUBKEY_R(11) = SUBKEY_L(11) ^ dw; SUBKEY_L(11) = dw; + dw = SUBKEY_L(12) ^ SUBKEY_R(12); dw = ROL8(dw);/* round 9 */ + SUBKEY_R(12) = SUBKEY_L(12) ^ dw; SUBKEY_L(12) = dw; + dw = SUBKEY_L(13) ^ SUBKEY_R(13); dw = ROL8(dw);/* round 10 */ + SUBKEY_R(13) = SUBKEY_L(13) ^ dw; SUBKEY_L(13) = dw; + dw = SUBKEY_L(14) ^ SUBKEY_R(14); dw = ROL8(dw);/* round 11 */ + SUBKEY_R(14) = SUBKEY_L(14) ^ dw; SUBKEY_L(14) = dw; + dw = SUBKEY_L(15) ^ SUBKEY_R(15); dw = ROL8(dw);/* round 12 */ + SUBKEY_R(15) = SUBKEY_L(15) ^ dw; SUBKEY_L(15) = dw; + dw = SUBKEY_L(18) ^ SUBKEY_R(18); dw = ROL8(dw);/* round 13 */ + SUBKEY_R(18) = SUBKEY_L(18) ^ dw; SUBKEY_L(18) = dw; + dw = SUBKEY_L(19) ^ SUBKEY_R(19); dw = ROL8(dw);/* round 14 */ + SUBKEY_R(19) = SUBKEY_L(19) ^ dw; SUBKEY_L(19) = dw; + dw = SUBKEY_L(20) ^ 
SUBKEY_R(20); dw = ROL8(dw);/* round 15 */ + SUBKEY_R(20) = SUBKEY_L(20) ^ dw; SUBKEY_L(20) = dw; + dw = SUBKEY_L(21) ^ SUBKEY_R(21); dw = ROL8(dw);/* round 16 */ + SUBKEY_R(21) = SUBKEY_L(21) ^ dw; SUBKEY_L(21) = dw; + dw = SUBKEY_L(22) ^ SUBKEY_R(22); dw = ROL8(dw);/* round 17 */ + SUBKEY_R(22) = SUBKEY_L(22) ^ dw; SUBKEY_L(22) = dw; + dw = SUBKEY_L(23) ^ SUBKEY_R(23); dw = ROL8(dw);/* round 18 */ + SUBKEY_R(23) = SUBKEY_L(23) ^ dw; SUBKEY_L(23) = dw; + dw = SUBKEY_L(26) ^ SUBKEY_R(26); dw = ROL8(dw);/* round 19 */ + SUBKEY_R(26) = SUBKEY_L(26) ^ dw; SUBKEY_L(26) = dw; + dw = SUBKEY_L(27) ^ SUBKEY_R(27); dw = ROL8(dw);/* round 20 */ + SUBKEY_R(27) = SUBKEY_L(27) ^ dw; SUBKEY_L(27) = dw; + dw = SUBKEY_L(28) ^ SUBKEY_R(28); dw = ROL8(dw);/* round 21 */ + SUBKEY_R(28) = SUBKEY_L(28) ^ dw; SUBKEY_L(28) = dw; + dw = SUBKEY_L(29) ^ SUBKEY_R(29); dw = ROL8(dw);/* round 22 */ + SUBKEY_R(29) = SUBKEY_L(29) ^ dw; SUBKEY_L(29) = dw; + dw = SUBKEY_L(30) ^ SUBKEY_R(30); dw = ROL8(dw);/* round 23 */ + SUBKEY_R(30) = SUBKEY_L(30) ^ dw; SUBKEY_L(30) = dw; + dw = SUBKEY_L(31) ^ SUBKEY_R(31); dw = ROL8(dw);/* round 24 */ + SUBKEY_R(31) = SUBKEY_L(31) ^ dw; SUBKEY_L(31) = dw; } static void camellia_setup192(const unsigned char *key, u32 *subkey) @@ -1145,424 +1058,400 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) } -static void camellia_encrypt128(const u32 *subkey, __be32 *io_text) +static void camellia_encrypt128(const u32 *subkey, u32 *io_text) { - u32 il,ir,t0,t1; /* temporary valiables */ + u32 il,ir,t0,t1; /* temporary variables */ u32 io[4]; - io[0] = be32_to_cpu(io_text[0]); - io[1] = be32_to_cpu(io_text[1]); - io[2] = be32_to_cpu(io_text[2]); - io[3] = be32_to_cpu(io_text[3]); - /* pre whitening but absorb kw2 */ - io[0] ^= CAMELLIA_SUBKEY_L(0); - io[1] ^= CAMELLIA_SUBKEY_R(0); + io[0] = io_text[0] ^ SUBKEY_L(0); + io[1] = io_text[1] ^ SUBKEY_R(0); + io[2] = io_text[2]; + io[3] = io_text[3]; /* main iteration */ CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2), + SUBKEY_L(2),SUBKEY_R(2), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3), + SUBKEY_L(3),SUBKEY_R(3), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4), + SUBKEY_L(4),SUBKEY_R(4), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5), + SUBKEY_L(5),SUBKEY_R(5), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6), + SUBKEY_L(6),SUBKEY_R(6), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7), + SUBKEY_L(7),SUBKEY_R(7), io[0],io[1],il,ir,t0,t1); CAMELLIA_FLS(io[0],io[1],io[2],io[3], - CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8), - CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9), + SUBKEY_L(8),SUBKEY_R(8), + SUBKEY_L(9),SUBKEY_R(9), t0,t1,il,ir); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10), + SUBKEY_L(10),SUBKEY_R(10), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11), + SUBKEY_L(11),SUBKEY_R(11), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12), + SUBKEY_L(12),SUBKEY_R(12), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13), + SUBKEY_L(13),SUBKEY_R(13), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14), + 
SUBKEY_L(14),SUBKEY_R(14), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15), + SUBKEY_L(15),SUBKEY_R(15), io[0],io[1],il,ir,t0,t1); CAMELLIA_FLS(io[0],io[1],io[2],io[3], - CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16), - CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17), + SUBKEY_L(16),SUBKEY_R(16), + SUBKEY_L(17),SUBKEY_R(17), t0,t1,il,ir); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18), + SUBKEY_L(18),SUBKEY_R(18), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19), + SUBKEY_L(19),SUBKEY_R(19), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20), + SUBKEY_L(20),SUBKEY_R(20), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21), + SUBKEY_L(21),SUBKEY_R(21), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22), + SUBKEY_L(22),SUBKEY_R(22), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23), + SUBKEY_L(23),SUBKEY_R(23), io[0],io[1],il,ir,t0,t1); /* post whitening but kw4 */ - io[2] ^= CAMELLIA_SUBKEY_L(24); - io[3] ^= CAMELLIA_SUBKEY_R(24); - - io_text[0] = cpu_to_be32(io[2]); - io_text[1] = cpu_to_be32(io[3]); - io_text[2] = cpu_to_be32(io[0]); - io_text[3] = cpu_to_be32(io[1]); + io_text[0] = io[2] ^ SUBKEY_L(24); + io_text[1] = io[3] ^ SUBKEY_R(24); + io_text[2] = io[0]; + io_text[3] = io[1]; } -static void camellia_decrypt128(const u32 *subkey, __be32 *io_text) +static void camellia_decrypt128(const u32 *subkey, u32 *io_text) { - u32 il,ir,t0,t1; /* temporary valiables */ + u32 il,ir,t0,t1; /* temporary variables */ u32 io[4]; - io[0] = be32_to_cpu(io_text[0]); - io[1] = be32_to_cpu(io_text[1]); - io[2] = be32_to_cpu(io_text[2]); - io[3] = be32_to_cpu(io_text[3]); - /* pre whitening but absorb kw2 */ - io[0] ^= CAMELLIA_SUBKEY_L(24); - io[1] ^= CAMELLIA_SUBKEY_R(24); + io[0] = io_text[0] ^ SUBKEY_L(24); + io[1] = io_text[1] ^ SUBKEY_R(24); + io[2] = io_text[2]; + io[3] = io_text[3]; /* main iteration */ CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23), + SUBKEY_L(23),SUBKEY_R(23), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22), + SUBKEY_L(22),SUBKEY_R(22), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21), + SUBKEY_L(21),SUBKEY_R(21), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20), + SUBKEY_L(20),SUBKEY_R(20), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19), + SUBKEY_L(19),SUBKEY_R(19), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18), + SUBKEY_L(18),SUBKEY_R(18), io[0],io[1],il,ir,t0,t1); CAMELLIA_FLS(io[0],io[1],io[2],io[3], - CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17), - CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16), + SUBKEY_L(17),SUBKEY_R(17), + SUBKEY_L(16),SUBKEY_R(16), t0,t1,il,ir); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15), + SUBKEY_L(15),SUBKEY_R(15), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14), + SUBKEY_L(14),SUBKEY_R(14), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13), + SUBKEY_L(13),SUBKEY_R(13), io[2],io[3],il,ir,t0,t1); 
CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12), + SUBKEY_L(12),SUBKEY_R(12), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11), + SUBKEY_L(11),SUBKEY_R(11), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10), + SUBKEY_L(10),SUBKEY_R(10), io[0],io[1],il,ir,t0,t1); CAMELLIA_FLS(io[0],io[1],io[2],io[3], - CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9), - CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8), + SUBKEY_L(9),SUBKEY_R(9), + SUBKEY_L(8),SUBKEY_R(8), t0,t1,il,ir); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7), + SUBKEY_L(7),SUBKEY_R(7), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6), + SUBKEY_L(6),SUBKEY_R(6), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5), + SUBKEY_L(5),SUBKEY_R(5), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4), + SUBKEY_L(4),SUBKEY_R(4), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3), + SUBKEY_L(3),SUBKEY_R(3), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2), + SUBKEY_L(2),SUBKEY_R(2), io[0],io[1],il,ir,t0,t1); /* post whitening but kw4 */ - io[2] ^= CAMELLIA_SUBKEY_L(0); - io[3] ^= CAMELLIA_SUBKEY_R(0); - - io_text[0] = cpu_to_be32(io[2]); - io_text[1] = cpu_to_be32(io[3]); - io_text[2] = cpu_to_be32(io[0]); - io_text[3] = cpu_to_be32(io[1]); + io_text[0] = io[2] ^ SUBKEY_L(0); + io_text[1] = io[3] ^ SUBKEY_R(0); + io_text[2] = io[0]; + io_text[3] = io[1]; } -static void camellia_encrypt256(const u32 *subkey, __be32 *io_text) +static void camellia_encrypt256(const u32 *subkey, u32 *io_text) { - u32 il,ir,t0,t1; /* temporary valiables */ + u32 il,ir,t0,t1; /* temporary variables */ u32 io[4]; - io[0] = be32_to_cpu(io_text[0]); - io[1] = be32_to_cpu(io_text[1]); - io[2] = be32_to_cpu(io_text[2]); - io[3] = be32_to_cpu(io_text[3]); - /* pre whitening but absorb kw2 */ - io[0] ^= CAMELLIA_SUBKEY_L(0); - io[1] ^= CAMELLIA_SUBKEY_R(0); + io[0] = io_text[0] ^ SUBKEY_L(0); + io[1] = io_text[1] ^ SUBKEY_R(0); + io[2] = io_text[2]; + io[3] = io_text[3]; /* main iteration */ CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2), + SUBKEY_L(2),SUBKEY_R(2), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3), + SUBKEY_L(3),SUBKEY_R(3), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4), + SUBKEY_L(4),SUBKEY_R(4), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5), + SUBKEY_L(5),SUBKEY_R(5), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6), + SUBKEY_L(6),SUBKEY_R(6), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7), + SUBKEY_L(7),SUBKEY_R(7), io[0],io[1],il,ir,t0,t1); CAMELLIA_FLS(io[0],io[1],io[2],io[3], - CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8), - CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9), + SUBKEY_L(8),SUBKEY_R(8), + SUBKEY_L(9),SUBKEY_R(9), t0,t1,il,ir); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10), + SUBKEY_L(10),SUBKEY_R(10), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11), + SUBKEY_L(11),SUBKEY_R(11), 
io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12), + SUBKEY_L(12),SUBKEY_R(12), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13), + SUBKEY_L(13),SUBKEY_R(13), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14), + SUBKEY_L(14),SUBKEY_R(14), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15), + SUBKEY_L(15),SUBKEY_R(15), io[0],io[1],il,ir,t0,t1); CAMELLIA_FLS(io[0],io[1],io[2],io[3], - CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16), - CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17), + SUBKEY_L(16),SUBKEY_R(16), + SUBKEY_L(17),SUBKEY_R(17), t0,t1,il,ir); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18), + SUBKEY_L(18),SUBKEY_R(18), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19), + SUBKEY_L(19),SUBKEY_R(19), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20), + SUBKEY_L(20),SUBKEY_R(20), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21), + SUBKEY_L(21),SUBKEY_R(21), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22), + SUBKEY_L(22),SUBKEY_R(22), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23), + SUBKEY_L(23),SUBKEY_R(23), io[0],io[1],il,ir,t0,t1); CAMELLIA_FLS(io[0],io[1],io[2],io[3], - CAMELLIA_SUBKEY_L(24),CAMELLIA_SUBKEY_R(24), - CAMELLIA_SUBKEY_L(25),CAMELLIA_SUBKEY_R(25), + SUBKEY_L(24),SUBKEY_R(24), + SUBKEY_L(25),SUBKEY_R(25), t0,t1,il,ir); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(26),CAMELLIA_SUBKEY_R(26), + SUBKEY_L(26),SUBKEY_R(26), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(27),CAMELLIA_SUBKEY_R(27), + SUBKEY_L(27),SUBKEY_R(27), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(28),CAMELLIA_SUBKEY_R(28), + SUBKEY_L(28),SUBKEY_R(28), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(29),CAMELLIA_SUBKEY_R(29), + SUBKEY_L(29),SUBKEY_R(29), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(30),CAMELLIA_SUBKEY_R(30), + SUBKEY_L(30),SUBKEY_R(30), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(31),CAMELLIA_SUBKEY_R(31), + SUBKEY_L(31),SUBKEY_R(31), io[0],io[1],il,ir,t0,t1); /* post whitening but kw4 */ - io[2] ^= CAMELLIA_SUBKEY_L(32); - io[3] ^= CAMELLIA_SUBKEY_R(32); - - io_text[0] = cpu_to_be32(io[2]); - io_text[1] = cpu_to_be32(io[3]); - io_text[2] = cpu_to_be32(io[0]); - io_text[3] = cpu_to_be32(io[1]); + io_text[0] = io[2] ^ SUBKEY_L(32); + io_text[1] = io[3] ^ SUBKEY_R(32); + io_text[2] = io[0]; + io_text[3] = io[1]; } -static void camellia_decrypt256(const u32 *subkey, __be32 *io_text) +static void camellia_decrypt256(const u32 *subkey, u32 *io_text) { - u32 il,ir,t0,t1; /* temporary valiables */ + u32 il,ir,t0,t1; /* temporary variables */ u32 io[4]; - io[0] = be32_to_cpu(io_text[0]); - io[1] = be32_to_cpu(io_text[1]); - io[2] = be32_to_cpu(io_text[2]); - io[3] = be32_to_cpu(io_text[3]); - /* pre whitening but absorb kw2 */ - io[0] ^= CAMELLIA_SUBKEY_L(32); - io[1] ^= CAMELLIA_SUBKEY_R(32); + io[0] = io_text[0] ^ SUBKEY_L(32); + io[1] = io_text[1] ^ SUBKEY_R(32); + io[2] = io_text[2]; + io[3] = io_text[3]; /* main iteration */ 
CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(31),CAMELLIA_SUBKEY_R(31), + SUBKEY_L(31),SUBKEY_R(31), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(30),CAMELLIA_SUBKEY_R(30), + SUBKEY_L(30),SUBKEY_R(30), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(29),CAMELLIA_SUBKEY_R(29), + SUBKEY_L(29),SUBKEY_R(29), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(28),CAMELLIA_SUBKEY_R(28), + SUBKEY_L(28),SUBKEY_R(28), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(27),CAMELLIA_SUBKEY_R(27), + SUBKEY_L(27),SUBKEY_R(27), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(26),CAMELLIA_SUBKEY_R(26), + SUBKEY_L(26),SUBKEY_R(26), io[0],io[1],il,ir,t0,t1); CAMELLIA_FLS(io[0],io[1],io[2],io[3], - CAMELLIA_SUBKEY_L(25),CAMELLIA_SUBKEY_R(25), - CAMELLIA_SUBKEY_L(24),CAMELLIA_SUBKEY_R(24), + SUBKEY_L(25),SUBKEY_R(25), + SUBKEY_L(24),SUBKEY_R(24), t0,t1,il,ir); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23), + SUBKEY_L(23),SUBKEY_R(23), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22), + SUBKEY_L(22),SUBKEY_R(22), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21), + SUBKEY_L(21),SUBKEY_R(21), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20), + SUBKEY_L(20),SUBKEY_R(20), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19), + SUBKEY_L(19),SUBKEY_R(19), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18), + SUBKEY_L(18),SUBKEY_R(18), io[0],io[1],il,ir,t0,t1); CAMELLIA_FLS(io[0],io[1],io[2],io[3], - CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17), - CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16), + SUBKEY_L(17),SUBKEY_R(17), + SUBKEY_L(16),SUBKEY_R(16), t0,t1,il,ir); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15), + SUBKEY_L(15),SUBKEY_R(15), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14), + SUBKEY_L(14),SUBKEY_R(14), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13), + SUBKEY_L(13),SUBKEY_R(13), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12), + SUBKEY_L(12),SUBKEY_R(12), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11), + SUBKEY_L(11),SUBKEY_R(11), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10), + SUBKEY_L(10),SUBKEY_R(10), io[0],io[1],il,ir,t0,t1); CAMELLIA_FLS(io[0],io[1],io[2],io[3], - CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9), - CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8), + SUBKEY_L(9),SUBKEY_R(9), + SUBKEY_L(8),SUBKEY_R(8), t0,t1,il,ir); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7), + SUBKEY_L(7),SUBKEY_R(7), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6), + SUBKEY_L(6),SUBKEY_R(6), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5), + SUBKEY_L(5),SUBKEY_R(5), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4), + SUBKEY_L(4),SUBKEY_R(4), io[0],io[1],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[0],io[1], - 
CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3), + SUBKEY_L(3),SUBKEY_R(3), io[2],io[3],il,ir,t0,t1); CAMELLIA_ROUNDSM(io[2],io[3], - CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2), + SUBKEY_L(2),SUBKEY_R(2), io[0],io[1],il,ir,t0,t1); /* post whitening but kw4 */ - io[2] ^= CAMELLIA_SUBKEY_L(0); - io[3] ^= CAMELLIA_SUBKEY_R(0); - - io_text[0] = cpu_to_be32(io[2]); - io_text[1] = cpu_to_be32(io[3]); - io_text[2] = cpu_to_be32(io[0]); - io_text[3] = cpu_to_be32(io[1]); + io_text[0] = io[2] ^ SUBKEY_L(0); + io_text[1] = io[3] ^ SUBKEY_R(0); + io_text[2] = io[0]; + io_text[3] = io[1]; } @@ -1607,9 +1496,12 @@ static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) const __be32 *src = (const __be32 *)in; __be32 *dst = (__be32 *)out; - __be32 tmp[4]; + u32 tmp[4]; - memcpy(tmp, src, CAMELLIA_BLOCK_SIZE); + tmp[0] = be32_to_cpu(src[0]); + tmp[1] = be32_to_cpu(src[1]); + tmp[2] = be32_to_cpu(src[2]); + tmp[3] = be32_to_cpu(src[3]); switch (cctx->key_length) { case 16: @@ -1622,7 +1514,10 @@ static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) break; } - memcpy(dst, tmp, CAMELLIA_BLOCK_SIZE); + dst[0] = cpu_to_be32(tmp[0]); + dst[1] = cpu_to_be32(tmp[1]); + dst[2] = cpu_to_be32(tmp[2]); + dst[3] = cpu_to_be32(tmp[3]); } static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) @@ -1631,9 +1526,12 @@ static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) const __be32 *src = (const __be32 *)in; __be32 *dst = (__be32 *)out; - __be32 tmp[4]; + u32 tmp[4]; - memcpy(tmp, src, CAMELLIA_BLOCK_SIZE); + tmp[0] = be32_to_cpu(src[0]); + tmp[1] = be32_to_cpu(src[1]); + tmp[2] = be32_to_cpu(src[2]); + tmp[3] = be32_to_cpu(src[3]); switch (cctx->key_length) { case 16: @@ -1646,7 +1544,10 @@ static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) break; } - memcpy(dst, tmp, CAMELLIA_BLOCK_SIZE); + dst[0] = cpu_to_be32(tmp[0]); + dst[1] = cpu_to_be32(tmp[1]); + dst[2] = cpu_to_be32(tmp[2]); + dst[3] = cpu_to_be32(tmp[3]); } static struct crypto_alg camellia_alg = { -- cgit v1.1 From 1ce73e8d6d95ceb860184c34fa1a91a82e51cbb3 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Tue, 6 Nov 2007 22:13:40 +0800 Subject: [CRYPTO] camellia: Code cleanup Optimize GETU32 to use a 4-byte memcpy (modern gcc converts such a memcpy into a single move instruction on i386). The original GETU32 did four separate byte fetches and shifted/XORed them together.
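As a rough user-space illustration of the difference (this sketch is not part of the patch; it assumes a little-endian host such as i386 and hand-rolls the byte swap that the kernel's be32_to_cpu() performs):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Old style: four separate byte fetches, shifted and XORed. */
static uint32_t getu32_shift(const unsigned char *pt)
{
	return ((uint32_t)pt[0] << 24) ^ ((uint32_t)pt[1] << 16) ^
	       ((uint32_t)pt[2] << 8) ^ (uint32_t)pt[3];
}

/* New style: one 4-byte memcpy, which gcc folds into a single
 * 32-bit move, followed by a swap standing in for be32_to_cpu();
 * the swap as written assumes a little-endian host. */
static uint32_t getu32_memcpy(const unsigned char *pt)
{
	uint32_t v;

	memcpy(&v, pt, 4);
	return (v << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8) | (v >> 24);
}

int main(void)
{
	const unsigned char buf[4] = { 0x01, 0x23, 0x45, 0x67 };

	/* Both variants must agree; prints 0x01234567 twice. */
	printf("shift:  0x%08x\n", (unsigned)getu32_shift(buf));
	printf("memcpy: 0x%08x\n", (unsigned)getu32_memcpy(buf));
	return 0;
}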
Signed-off-by: Denys Vlasenko Acked-by: Noriaki TAKAMIYA Signed-off-by: Herbert Xu --- crypto/camellia.c | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/crypto/camellia.c b/crypto/camellia.c index ac372e4..16529dd 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c @@ -330,10 +330,12 @@ static const u32 camellia_sp4404[256] = { * macros */ -# define GETU32(pt) (((u32)(pt)[0] << 24) \ - ^ ((u32)(pt)[1] << 16) \ - ^ ((u32)(pt)[2] << 8) \ - ^ ((u32)(pt)[3])) +# define GETU32(v, pt) \ + do { \ + /* latest breed of gcc is clever enough to use move */ \ + memcpy(&(v), (pt), 4); \ + (v) = be32_to_cpu(v); \ + } while(0) /* rotation right shift 1byte */ #define ROR8(x) (((x) >> 8) + ((x) << 24)) @@ -433,10 +435,11 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) /** * k == kll || klr || krl || krr (|| is concatination) */ - kll = GETU32(key ); - klr = GETU32(key + 4); - krl = GETU32(key + 8); - krr = GETU32(key + 12); + GETU32(kll, key ); + GETU32(klr, key + 4); + GETU32(krl, key + 8); + GETU32(krr, key + 12); + /** * generate KL dependent subkeys */ @@ -687,8 +690,8 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) static void camellia_setup256(const unsigned char *key, u32 *subkey) { - u32 kll,klr,krl,krr; /* left half of key */ - u32 krll,krlr,krrl,krrr; /* right half of key */ + u32 kll, klr, krl, krr; /* left half of key */ + u32 krll, krlr, krrl, krrr; /* right half of key */ u32 il, ir, t0, t1, w0, w1; /* temporary variables */ u32 kw4l, kw4r, dw, tl, tr; u32 subL[34]; @@ -698,14 +701,14 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr) * (|| is concatination) */ - kll = GETU32(key ); - klr = GETU32(key + 4); - krl = GETU32(key + 8); - krr = GETU32(key + 12); - krll = GETU32(key + 16); - krlr = GETU32(key + 20); - krrl = GETU32(key + 24); - krrr = GETU32(key + 28); + GETU32(kll, key ); + GETU32(klr, key + 4); + GETU32(krl, key + 8); + GETU32(krr, key + 12); + GETU32(krll, key + 16); + GETU32(krlr, key + 20); + GETU32(krrl, key + 24); + GETU32(krrr, key + 28); /* generate KL dependent subkeys */ /* kw1 */ -- cgit v1.1 From d3e7480572bf882dee5baa2891bccbfa3db0b1a1 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Tue, 6 Nov 2007 22:15:19 +0800 Subject: [CRYPTO] camellia: De-unrolling Move the huge unrolled pieces of code (three screenfuls) at the end of the 128- and 256-bit key setup routines into a common camellia_setup_tail() and convert them into a loop there. The loop is still unrolled six times, so the performance hit is very small while the code-size win is big.
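The only subtle part of the rolled-up loop is its stride: each group of six consecutive round subkeys is followed by two FL/FLinv slots that must be left alone, hence the increment by 8. A small stand-alone sketch (plain user-space C, not part of the patch) that mirrors the loop bounds of camellia_setup_tail() below and prints the slots it visits for both key sizes:

#include <stdio.h>

/* Mirror of the loop in camellia_setup_tail(): start at slot 2,
 * handle six slots, then skip the two FL/FLinv slots by advancing
 * by 8; max is 24 for 128-bit keys and 32 for 256-bit keys. */
static void visit(int max)
{
	int i = 2;

	printf("max=%d:", max);
	do {
		for (int j = 0; j < 6; j++)
			printf(" %d", i + j);
		i += 8;
	} while (i < max);
	printf("\n");
}

int main(void)
{
	visit(24);	/* 2..7, 10..15, 18..23 */
	visit(32);	/* 2..7, 10..15, 18..23, 26..31 */
	return 0;
}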
Signed-off-by: Denys Vlasenko Acked-by: Noriaki TAKAMIYA Signed-off-by: Herbert Xu --- crypto/camellia.c | 107 ++++++++++++------------------------------------------ 1 file changed, 23 insertions(+), 84 deletions(-) diff --git a/crypto/camellia.c b/crypto/camellia.c index 16529dd..2e129ab 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c @@ -424,6 +424,27 @@ static const u32 camellia_sp4404[256] = { #define SUBKEY_L(INDEX) (subkey[(INDEX)*2]) #define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1]) +static void camellia_setup_tail(u32 *subkey, int max) +{ + u32 dw; + int i = 2; + do { + dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */ + SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw; + dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */ + SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw; + dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */ + SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw; + dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */ + SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw; + dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */ + SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw; + dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */ + SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw; + i += 8; + } while (i < max); +} + static void camellia_setup128(const unsigned char *key, u32 *subkey) { u32 kll, klr, krl, krr; @@ -650,42 +671,7 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) SUBKEY_R(24) = subR[24] ^ subR[23]; /* apply the inverse of the last half of P-function */ - dw = SUBKEY_L(2) ^ SUBKEY_R(2); dw = ROL8(dw);/* round 1 */ - SUBKEY_R(2) = SUBKEY_L(2) ^ dw; SUBKEY_L(2) = dw; - dw = SUBKEY_L(3) ^ SUBKEY_R(3); dw = ROL8(dw);/* round 2 */ - SUBKEY_R(3) = SUBKEY_L(3) ^ dw; SUBKEY_L(3) = dw; - dw = SUBKEY_L(4) ^ SUBKEY_R(4); dw = ROL8(dw);/* round 3 */ - SUBKEY_R(4) = SUBKEY_L(4) ^ dw; SUBKEY_L(4) = dw; - dw = SUBKEY_L(5) ^ SUBKEY_R(5); dw = ROL8(dw);/* round 4 */ - SUBKEY_R(5) = SUBKEY_L(5) ^ dw; SUBKEY_L(5) = dw; - dw = SUBKEY_L(6) ^ SUBKEY_R(6); dw = ROL8(dw);/* round 5 */ - SUBKEY_R(6) = SUBKEY_L(6) ^ dw; SUBKEY_L(6) = dw; - dw = SUBKEY_L(7) ^ SUBKEY_R(7); dw = ROL8(dw);/* round 6 */ - SUBKEY_R(7) = SUBKEY_L(7) ^ dw; SUBKEY_L(7) = dw; - dw = SUBKEY_L(10) ^ SUBKEY_R(10); dw = ROL8(dw);/* round 7 */ - SUBKEY_R(10) = SUBKEY_L(10) ^ dw; SUBKEY_L(10) = dw; - dw = SUBKEY_L(11) ^ SUBKEY_R(11); dw = ROL8(dw);/* round 8 */ - SUBKEY_R(11) = SUBKEY_L(11) ^ dw; SUBKEY_L(11) = dw; - dw = SUBKEY_L(12) ^ SUBKEY_R(12); dw = ROL8(dw);/* round 9 */ - SUBKEY_R(12) = SUBKEY_L(12) ^ dw; SUBKEY_L(12) = dw; - dw = SUBKEY_L(13) ^ SUBKEY_R(13); dw = ROL8(dw);/* round 10 */ - SUBKEY_R(13) = SUBKEY_L(13) ^ dw; SUBKEY_L(13) = dw; - dw = SUBKEY_L(14) ^ SUBKEY_R(14); dw = ROL8(dw);/* round 11 */ - SUBKEY_R(14) = SUBKEY_L(14) ^ dw; SUBKEY_L(14) = dw; - dw = SUBKEY_L(15) ^ SUBKEY_R(15); dw = ROL8(dw);/* round 12 */ - SUBKEY_R(15) = SUBKEY_L(15) ^ dw; SUBKEY_L(15) = dw; - dw = SUBKEY_L(18) ^ SUBKEY_R(18); dw = ROL8(dw);/* round 13 */ - SUBKEY_R(18) = SUBKEY_L(18) ^ dw; SUBKEY_L(18) = dw; - dw = SUBKEY_L(19) ^ SUBKEY_R(19); dw = ROL8(dw);/* round 14 */ - SUBKEY_R(19) = SUBKEY_L(19) ^ dw; SUBKEY_L(19) = dw; - dw = SUBKEY_L(20) ^ SUBKEY_R(20); dw = ROL8(dw);/* round 15 */ - SUBKEY_R(20) = SUBKEY_L(20) ^ dw; SUBKEY_L(20) = dw; - dw = SUBKEY_L(21) ^ SUBKEY_R(21); dw = ROL8(dw);/* round 16 */ - SUBKEY_R(21) = 
SUBKEY_L(21) ^ dw; SUBKEY_L(21) = dw; - dw = SUBKEY_L(22) ^ SUBKEY_R(22); dw = ROL8(dw);/* round 17 */ - SUBKEY_R(22) = SUBKEY_L(22) ^ dw; SUBKEY_L(22) = dw; - dw = SUBKEY_L(23) ^ SUBKEY_R(23); dw = ROL8(dw);/* round 18 */ - SUBKEY_R(23) = SUBKEY_L(23) ^ dw; SUBKEY_L(23) = dw; + camellia_setup_tail(subkey, 24); } static void camellia_setup256(const unsigned char *key, u32 *subkey) @@ -995,54 +981,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) SUBKEY_R(32) = subR[32] ^ subR[31]; /* apply the inverse of the last half of P-function */ - dw = SUBKEY_L(2) ^ SUBKEY_R(2); dw = ROL8(dw);/* round 1 */ - SUBKEY_R(2) = SUBKEY_L(2) ^ dw; SUBKEY_L(2) = dw; - dw = SUBKEY_L(3) ^ SUBKEY_R(3); dw = ROL8(dw);/* round 2 */ - SUBKEY_R(3) = SUBKEY_L(3) ^ dw; SUBKEY_L(3) = dw; - dw = SUBKEY_L(4) ^ SUBKEY_R(4); dw = ROL8(dw);/* round 3 */ - SUBKEY_R(4) = SUBKEY_L(4) ^ dw; SUBKEY_L(4) = dw; - dw = SUBKEY_L(5) ^ SUBKEY_R(5); dw = ROL8(dw);/* round 4 */ - SUBKEY_R(5) = SUBKEY_L(5) ^ dw; SUBKEY_L(5) = dw; - dw = SUBKEY_L(6) ^ SUBKEY_R(6); dw = ROL8(dw);/* round 5 */ - SUBKEY_R(6) = SUBKEY_L(6) ^ dw; SUBKEY_L(6) = dw; - dw = SUBKEY_L(7) ^ SUBKEY_R(7); dw = ROL8(dw);/* round 6 */ - SUBKEY_R(7) = SUBKEY_L(7) ^ dw; SUBKEY_L(7) = dw; - dw = SUBKEY_L(10) ^ SUBKEY_R(10); dw = ROL8(dw);/* round 7 */ - SUBKEY_R(10) = SUBKEY_L(10) ^ dw; SUBKEY_L(10) = dw; - dw = SUBKEY_L(11) ^ SUBKEY_R(11); dw = ROL8(dw);/* round 8 */ - SUBKEY_R(11) = SUBKEY_L(11) ^ dw; SUBKEY_L(11) = dw; - dw = SUBKEY_L(12) ^ SUBKEY_R(12); dw = ROL8(dw);/* round 9 */ - SUBKEY_R(12) = SUBKEY_L(12) ^ dw; SUBKEY_L(12) = dw; - dw = SUBKEY_L(13) ^ SUBKEY_R(13); dw = ROL8(dw);/* round 10 */ - SUBKEY_R(13) = SUBKEY_L(13) ^ dw; SUBKEY_L(13) = dw; - dw = SUBKEY_L(14) ^ SUBKEY_R(14); dw = ROL8(dw);/* round 11 */ - SUBKEY_R(14) = SUBKEY_L(14) ^ dw; SUBKEY_L(14) = dw; - dw = SUBKEY_L(15) ^ SUBKEY_R(15); dw = ROL8(dw);/* round 12 */ - SUBKEY_R(15) = SUBKEY_L(15) ^ dw; SUBKEY_L(15) = dw; - dw = SUBKEY_L(18) ^ SUBKEY_R(18); dw = ROL8(dw);/* round 13 */ - SUBKEY_R(18) = SUBKEY_L(18) ^ dw; SUBKEY_L(18) = dw; - dw = SUBKEY_L(19) ^ SUBKEY_R(19); dw = ROL8(dw);/* round 14 */ - SUBKEY_R(19) = SUBKEY_L(19) ^ dw; SUBKEY_L(19) = dw; - dw = SUBKEY_L(20) ^ SUBKEY_R(20); dw = ROL8(dw);/* round 15 */ - SUBKEY_R(20) = SUBKEY_L(20) ^ dw; SUBKEY_L(20) = dw; - dw = SUBKEY_L(21) ^ SUBKEY_R(21); dw = ROL8(dw);/* round 16 */ - SUBKEY_R(21) = SUBKEY_L(21) ^ dw; SUBKEY_L(21) = dw; - dw = SUBKEY_L(22) ^ SUBKEY_R(22); dw = ROL8(dw);/* round 17 */ - SUBKEY_R(22) = SUBKEY_L(22) ^ dw; SUBKEY_L(22) = dw; - dw = SUBKEY_L(23) ^ SUBKEY_R(23); dw = ROL8(dw);/* round 18 */ - SUBKEY_R(23) = SUBKEY_L(23) ^ dw; SUBKEY_L(23) = dw; - dw = SUBKEY_L(26) ^ SUBKEY_R(26); dw = ROL8(dw);/* round 19 */ - SUBKEY_R(26) = SUBKEY_L(26) ^ dw; SUBKEY_L(26) = dw; - dw = SUBKEY_L(27) ^ SUBKEY_R(27); dw = ROL8(dw);/* round 20 */ - SUBKEY_R(27) = SUBKEY_L(27) ^ dw; SUBKEY_L(27) = dw; - dw = SUBKEY_L(28) ^ SUBKEY_R(28); dw = ROL8(dw);/* round 21 */ - SUBKEY_R(28) = SUBKEY_L(28) ^ dw; SUBKEY_L(28) = dw; - dw = SUBKEY_L(29) ^ SUBKEY_R(29); dw = ROL8(dw);/* round 22 */ - SUBKEY_R(29) = SUBKEY_L(29) ^ dw; SUBKEY_L(29) = dw; - dw = SUBKEY_L(30) ^ SUBKEY_R(30); dw = ROL8(dw);/* round 23 */ - SUBKEY_R(30) = SUBKEY_L(30) ^ dw; SUBKEY_L(30) = dw; - dw = SUBKEY_L(31) ^ SUBKEY_R(31); dw = ROL8(dw);/* round 24 */ - SUBKEY_R(31) = SUBKEY_L(31) ^ dw; SUBKEY_L(31) = dw; + camellia_setup_tail(subkey, 32); } static void camellia_setup192(const unsigned char *key, u32 *subkey) -- cgit v1.1 From 
41fdab3dd385dde36caae60ed2df82aecb7a32f0 Mon Sep 17 00:00:00 2001 From: Joy Latten Date: Wed, 7 Nov 2007 22:59:47 +0800 Subject: [CRYPTO] ctr: Add countersize This patch adds countersize to CTR mode. The template is now ctr(algo,noncesize,ivsize,countersize). For example, ctr(aes,4,8,4) indicates that the counter block will be composed of a salt/nonce of 4 bytes, an IV of 8 bytes and a counter of 4 bytes. When noncesize + ivsize < blocksize, CTR initializes the last (blocksize - ivsize - noncesize) bytes of the counter block to zero. Otherwise the counter block is composed entirely of the IV (and the nonce, if necessary). If noncesize + ivsize == blocksize, the user is passing in the entire counter block; countersize then indicates how many bytes of it to use as the counter for incrementing. CTR increments the counter portion by 1 and begins encryption with that value. Note that CTR assumes the counter portion of the block that will be incremented is stored in big endian. Signed-off-by: Joy Latten Signed-off-by: Herbert Xu --- crypto/ctr.c | 32 +++++++++++++++++++++----------- crypto/tcrypt.c | 8 ++++---- 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/crypto/ctr.c b/crypto/ctr.c index 810d5ec..b974a9f 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -23,6 +23,7 @@ struct ctr_instance_ctx { struct crypto_spawn alg; unsigned int noncesize; unsigned int ivsize; + unsigned int countersize; }; struct crypto_ctr_ctx { @@ -186,7 +187,6 @@ static int crypto_ctr_crypt(struct blkcipher_desc *desc, unsigned long alignmask = crypto_cipher_alignmask(child); u8 cblk[bsize + alignmask]; u8 *counterblk = (u8 *)ALIGN((unsigned long)cblk, alignmask + 1); - unsigned int countersize; int err; blkcipher_walk_init(&walk, dst, src, nbytes); @@ -198,18 +198,18 @@ static int crypto_ctr_crypt(struct blkcipher_desc *desc, memcpy(counterblk + ictx->noncesize, walk.iv, ictx->ivsize); /* initialize counter portion of counter block */ - countersize = bsize - ictx->noncesize - ictx->ivsize; - ctr_inc_quad(counterblk + (bsize - countersize), countersize); + ctr_inc_quad(counterblk + (bsize - ictx->countersize), + ictx->countersize); while (walk.nbytes) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_ctr_crypt_inplace(&walk, child, counterblk, - countersize); + ictx->countersize); else nbytes = crypto_ctr_crypt_segment(&walk, child, counterblk, - countersize); + ictx->countersize); err = blkcipher_walk_done(desc, &walk, nbytes); } @@ -251,6 +251,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) struct ctr_instance_ctx *ictx; unsigned int noncesize; unsigned int ivsize; + unsigned int countersize; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); @@ -270,9 +271,17 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) if (err) goto out_put_alg; - /* verify size of nonce + iv + counter */ + err = crypto_attr_u32(tb[4], &countersize); + if (err) + goto out_put_alg; + + /* verify size of nonce + iv + counter + * counter must be >= 4 bytes.
+ */ err = -EINVAL; - if ((noncesize + ivsize) >= alg->cra_blocksize) + if (((noncesize + ivsize + countersize) < alg->cra_blocksize) || + ((noncesize + ivsize) > alg->cra_blocksize) || + (countersize > alg->cra_blocksize) || (countersize < 4)) goto out_put_alg; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); @@ -282,20 +291,21 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, - "ctr(%s,%u,%u)", alg->cra_name, noncesize, - ivsize) >= CRYPTO_MAX_ALG_NAME) { + "ctr(%s,%u,%u,%u)", alg->cra_name, noncesize, + ivsize, countersize) >= CRYPTO_MAX_ALG_NAME) { goto err_free_inst; } if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "ctr(%s,%u,%u)", alg->cra_driver_name, noncesize, - ivsize) >= CRYPTO_MAX_ALG_NAME) { + "ctr(%s,%u,%u,%u)", alg->cra_driver_name, noncesize, + ivsize, countersize) >= CRYPTO_MAX_ALG_NAME) { goto err_free_inst; } ictx = crypto_instance_ctx(inst); ictx->noncesize = noncesize; ictx->ivsize = ivsize; + ictx->countersize = countersize; err = crypto_init_spawn(&ictx->alg, alg, inst, CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 640cbca..aa84bc4 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -969,9 +969,9 @@ static void do_test(void) AES_XTS_ENC_TEST_VECTORS); test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template, AES_XTS_DEC_TEST_VECTORS); - test_cipher("ctr(aes,4,8)", ENCRYPT, aes_ctr_enc_tv_template, + test_cipher("ctr(aes,4,8,4)", ENCRYPT, aes_ctr_enc_tv_template, AES_CTR_ENC_TEST_VECTORS); - test_cipher("ctr(aes,4,8)", DECRYPT, aes_ctr_dec_tv_template, + test_cipher("ctr(aes,4,8,4)", DECRYPT, aes_ctr_dec_tv_template, AES_CTR_DEC_TEST_VECTORS); //CAST5 @@ -1160,9 +1160,9 @@ static void do_test(void) AES_XTS_ENC_TEST_VECTORS); test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template, AES_XTS_DEC_TEST_VECTORS); - test_cipher("ctr(aes,4,8)", ENCRYPT, aes_ctr_enc_tv_template, + test_cipher("ctr(aes,4,8,4)", ENCRYPT, aes_ctr_enc_tv_template, AES_CTR_ENC_TEST_VECTORS); - test_cipher("ctr(aes,4,8)", DECRYPT, aes_ctr_dec_tv_template, + test_cipher("ctr(aes,4,8,4)", DECRYPT, aes_ctr_dec_tv_template, AES_CTR_DEC_TEST_VECTORS); break; -- cgit v1.1 From be5fb270125729b7bca7879967f1dfadff0d9841 Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Thu, 8 Nov 2007 20:39:26 +0800 Subject: [CRYPTO] aes-generic: Coding style cleanup Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- crypto/aes_generic.c | 325 ++++++++++++++++++++++++++++----------------------- 1 file changed, 176 insertions(+), 149 deletions(-) diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 6683260..df8df4d 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -63,8 +63,7 @@ /* * #define byte(x, nr) ((unsigned char)((x) >> (nr*8))) */ -static inline u8 -byte(const u32 x, const unsigned n) +static inline u8 byte(const u32 x, const unsigned n) { return x >> (n << 3); } @@ -88,55 +87,25 @@ static u32 it_tab[4][256]; static u32 fl_tab[4][256]; static u32 il_tab[4][256]; -static inline u8 __init -f_mult (u8 a, u8 b) +static inline u8 __init f_mult(u8 a, u8 b) { u8 aa = log_tab[a], cc = aa + log_tab[b]; return pow_tab[cc + (cc < aa ? 1 : 0)]; } -#define ff_mult(a,b) (a && b ? 
f_mult(a, b) : 0) - -#define f_rn(bo, bi, n, k) \ - bo[n] = ft_tab[0][byte(bi[n],0)] ^ \ - ft_tab[1][byte(bi[(n + 1) & 3],1)] ^ \ - ft_tab[2][byte(bi[(n + 2) & 3],2)] ^ \ - ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n) - -#define i_rn(bo, bi, n, k) \ - bo[n] = it_tab[0][byte(bi[n],0)] ^ \ - it_tab[1][byte(bi[(n + 3) & 3],1)] ^ \ - it_tab[2][byte(bi[(n + 2) & 3],2)] ^ \ - it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n) - -#define ls_box(x) \ - ( fl_tab[0][byte(x, 0)] ^ \ - fl_tab[1][byte(x, 1)] ^ \ - fl_tab[2][byte(x, 2)] ^ \ - fl_tab[3][byte(x, 3)] ) - -#define f_rl(bo, bi, n, k) \ - bo[n] = fl_tab[0][byte(bi[n],0)] ^ \ - fl_tab[1][byte(bi[(n + 1) & 3],1)] ^ \ - fl_tab[2][byte(bi[(n + 2) & 3],2)] ^ \ - fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n) - -#define i_rl(bo, bi, n, k) \ - bo[n] = il_tab[0][byte(bi[n],0)] ^ \ - il_tab[1][byte(bi[(n + 3) & 3],1)] ^ \ - il_tab[2][byte(bi[(n + 2) & 3],2)] ^ \ - il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n) - -static void __init -gen_tabs (void) +#define ff_mult(a, b) (a && b ? f_mult(a, b) : 0) + +static void __init gen_tabs(void) { u32 i, t; u8 p, q; - /* log and power tables for GF(2**8) finite field with - 0x011b as modular polynomial - the simplest primitive - root is 0x03, used here to generate the tables */ + /* + * log and power tables for GF(2**8) finite field with + * 0x011b as modular polynomial - the simplest primitive + * root is 0x03, used here to generate the tables + */ for (i = 0, p = 1; i < 256; ++i) { pow_tab[i] = (u8) p; @@ -170,9 +139,9 @@ gen_tabs (void) fl_tab[2][i] = rol32(t, 16); fl_tab[3][i] = rol32(t, 24); - t = ((u32) ff_mult (2, p)) | + t = ((u32) ff_mult(2, p)) | ((u32) p << 8) | - ((u32) p << 16) | ((u32) ff_mult (3, p) << 24); + ((u32) p << 16) | ((u32) ff_mult(3, p) << 24); ft_tab[0][i] = t; ft_tab[1][i] = rol32(t, 8); @@ -187,10 +156,10 @@ gen_tabs (void) il_tab[2][i] = rol32(t, 16); il_tab[3][i] = rol32(t, 24); - t = ((u32) ff_mult (14, p)) | - ((u32) ff_mult (9, p) << 8) | - ((u32) ff_mult (13, p) << 16) | - ((u32) ff_mult (11, p) << 24); + t = ((u32) ff_mult(14, p)) | + ((u32) ff_mult(9, p) << 8) | + ((u32) ff_mult(13, p) << 16) | + ((u32) ff_mult(11, p) << 24); it_tab[0][i] = t; it_tab[1][i] = rol32(t, 8); @@ -199,53 +168,80 @@ gen_tabs (void) } } -#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b) - -#define imix_col(y,x) \ - u = star_x(x); \ - v = star_x(u); \ - w = star_x(v); \ - t = w ^ (x); \ - (y) = u ^ v ^ w; \ - (y) ^= ror32(u ^ t, 8) ^ \ - ror32(v ^ t, 16) ^ \ - ror32(t,24) - /* initialise the key schedule from the user supplied key */ -#define loop4(i) \ -{ t = ror32(t, 8); t = ls_box(t) ^ rco_tab[i]; \ - t ^= E_KEY[4 * i]; E_KEY[4 * i + 4] = t; \ - t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t; \ - t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t; \ - t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t; \ -} - -#define loop6(i) \ -{ t = ror32(t, 8); t = ls_box(t) ^ rco_tab[i]; \ - t ^= E_KEY[6 * i]; E_KEY[6 * i + 6] = t; \ - t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t; \ - t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t; \ - t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t; \ - t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t; \ - t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t; \ -} +#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b) -#define loop8(i) \ -{ t = ror32(t, 8); ; t = ls_box(t) ^ rco_tab[i]; \ - t ^= E_KEY[8 * i]; E_KEY[8 * i + 8] = t; \ - t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t; \ - t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t; \ - t ^= E_KEY[8 * i + 3]; 
E_KEY[8 * i + 11] = t; \ - t = E_KEY[8 * i + 4] ^ ls_box(t); \ - E_KEY[8 * i + 12] = t; \ - t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t; \ - t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t; \ - t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \ -} +#define imix_col(y,x) do { \ + u = star_x(x); \ + v = star_x(u); \ + w = star_x(v); \ + t = w ^ (x); \ + (y) = u ^ v ^ w; \ + (y) ^= ror32(u ^ t, 8) ^ \ + ror32(v ^ t, 16) ^ \ + ror32(t, 24); \ +} while (0) + +#define ls_box(x) \ + fl_tab[0][byte(x, 0)] ^ \ + fl_tab[1][byte(x, 1)] ^ \ + fl_tab[2][byte(x, 2)] ^ \ + fl_tab[3][byte(x, 3)] + +#define loop4(i) do { \ + t = ror32(t, 8); \ + t = ls_box(t) ^ rco_tab[i]; \ + t ^= E_KEY[4 * i]; \ + E_KEY[4 * i + 4] = t; \ + t ^= E_KEY[4 * i + 1]; \ + E_KEY[4 * i + 5] = t; \ + t ^= E_KEY[4 * i + 2]; \ + E_KEY[4 * i + 6] = t; \ + t ^= E_KEY[4 * i + 3]; \ + E_KEY[4 * i + 7] = t; \ +} while (0) + +#define loop6(i) do { \ + t = ror32(t, 8); \ + t = ls_box(t) ^ rco_tab[i]; \ + t ^= E_KEY[6 * i]; \ + E_KEY[6 * i + 6] = t; \ + t ^= E_KEY[6 * i + 1]; \ + E_KEY[6 * i + 7] = t; \ + t ^= E_KEY[6 * i + 2]; \ + E_KEY[6 * i + 8] = t; \ + t ^= E_KEY[6 * i + 3]; \ + E_KEY[6 * i + 9] = t; \ + t ^= E_KEY[6 * i + 4]; \ + E_KEY[6 * i + 10] = t; \ + t ^= E_KEY[6 * i + 5]; \ + E_KEY[6 * i + 11] = t; \ +} while (0) + +#define loop8(i) do { \ + t = ror32(t, 8); \ + t = ls_box(t) ^ rco_tab[i]; \ + t ^= E_KEY[8 * i]; \ + E_KEY[8 * i + 8] = t; \ + t ^= E_KEY[8 * i + 1]; \ + E_KEY[8 * i + 9] = t; \ + t ^= E_KEY[8 * i + 2]; \ + E_KEY[8 * i + 10] = t; \ + t ^= E_KEY[8 * i + 3]; \ + E_KEY[8 * i + 11] = t; \ + t = E_KEY[8 * i + 4] ^ ls_box(t); \ + E_KEY[8 * i + 12] = t; \ + t ^= E_KEY[8 * i + 5]; \ + E_KEY[8 * i + 13] = t; \ + t ^= E_KEY[8 * i + 6]; \ + E_KEY[8 * i + 14] = t; \ + t ^= E_KEY[8 * i + 7]; \ + E_KEY[8 * i + 15] = t; \ +} while (0) static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len) + unsigned int key_len) { struct aes_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; @@ -268,14 +264,14 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, case 16: t = E_KEY[3]; for (i = 0; i < 10; ++i) - loop4 (i); + loop4(i); break; case 24: E_KEY[4] = le32_to_cpu(key[4]); t = E_KEY[5] = le32_to_cpu(key[5]); for (i = 0; i < 8; ++i) - loop6 (i); + loop6(i); break; case 32: @@ -284,7 +280,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, E_KEY[6] = le32_to_cpu(key[6]); t = E_KEY[7] = le32_to_cpu(key[7]); for (i = 0; i < 7; ++i) - loop8 (i); + loop8(i); break; } @@ -294,7 +290,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, D_KEY[3] = E_KEY[3]; for (i = 4; i < key_len + 24; ++i) { - imix_col (D_KEY[i], E_KEY[i]); + imix_col(D_KEY[i], E_KEY[i]); } return 0; @@ -302,18 +298,34 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, /* encrypt a block of text */ -#define f_nround(bo, bi, k) \ - f_rn(bo, bi, 0, k); \ - f_rn(bo, bi, 1, k); \ - f_rn(bo, bi, 2, k); \ - f_rn(bo, bi, 3, k); \ - k += 4 - -#define f_lround(bo, bi, k) \ - f_rl(bo, bi, 0, k); \ - f_rl(bo, bi, 1, k); \ - f_rl(bo, bi, 2, k); \ - f_rl(bo, bi, 3, k) +#define f_rn(bo, bi, n, k) do { \ + bo[n] = ft_tab[0][byte(bi[n], 0)] ^ \ + ft_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \ + ft_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ + ft_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \ +} while (0) + +#define f_nround(bo, bi, k) do {\ + f_rn(bo, bi, 0, k); \ + f_rn(bo, bi, 1, k); \ + f_rn(bo, bi, 2, k); \ + f_rn(bo, bi, 3, k); \ + k += 4; \ +} while (0) + +#define f_rl(bo, bi, n, k) 
do { \ + bo[n] = fl_tab[0][byte(bi[n], 0)] ^ \ + fl_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \ + fl_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ + fl_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \ +} while (0) + +#define f_lround(bo, bi, k) do {\ + f_rl(bo, bi, 0, k); \ + f_rl(bo, bi, 1, k); \ + f_rl(bo, bi, 2, k); \ + f_rl(bo, bi, 3, k); \ +} while (0) static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { @@ -329,25 +341,25 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) b0[3] = le32_to_cpu(src[3]) ^ E_KEY[3]; if (ctx->key_length > 24) { - f_nround (b1, b0, kp); - f_nround (b0, b1, kp); + f_nround(b1, b0, kp); + f_nround(b0, b1, kp); } if (ctx->key_length > 16) { - f_nround (b1, b0, kp); - f_nround (b0, b1, kp); + f_nround(b1, b0, kp); + f_nround(b0, b1, kp); } - f_nround (b1, b0, kp); - f_nround (b0, b1, kp); - f_nround (b1, b0, kp); - f_nround (b0, b1, kp); - f_nround (b1, b0, kp); - f_nround (b0, b1, kp); - f_nround (b1, b0, kp); - f_nround (b0, b1, kp); - f_nround (b1, b0, kp); - f_lround (b0, b1, kp); + f_nround(b1, b0, kp); + f_nround(b0, b1, kp); + f_nround(b1, b0, kp); + f_nround(b0, b1, kp); + f_nround(b1, b0, kp); + f_nround(b0, b1, kp); + f_nround(b1, b0, kp); + f_nround(b0, b1, kp); + f_nround(b1, b0, kp); + f_lround(b0, b1, kp); dst[0] = cpu_to_le32(b0[0]); dst[1] = cpu_to_le32(b0[1]); @@ -357,18 +369,34 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) /* decrypt a block of text */ -#define i_nround(bo, bi, k) \ - i_rn(bo, bi, 0, k); \ - i_rn(bo, bi, 1, k); \ - i_rn(bo, bi, 2, k); \ - i_rn(bo, bi, 3, k); \ - k -= 4 - -#define i_lround(bo, bi, k) \ - i_rl(bo, bi, 0, k); \ - i_rl(bo, bi, 1, k); \ - i_rl(bo, bi, 2, k); \ - i_rl(bo, bi, 3, k) +#define i_rn(bo, bi, n, k) do { \ + bo[n] = it_tab[0][byte(bi[n], 0)] ^ \ + it_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \ + it_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ + it_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \ +} while (0) + +#define i_nround(bo, bi, k) do {\ + i_rn(bo, bi, 0, k); \ + i_rn(bo, bi, 1, k); \ + i_rn(bo, bi, 2, k); \ + i_rn(bo, bi, 3, k); \ + k -= 4; \ +} while (0) + +#define i_rl(bo, bi, n, k) do { \ + bo[n] = il_tab[0][byte(bi[n], 0)] ^ \ + il_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \ + il_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ + il_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \ +} while (0) + +#define i_lround(bo, bi, k) do {\ + i_rl(bo, bi, 0, k); \ + i_rl(bo, bi, 1, k); \ + i_rl(bo, bi, 2, k); \ + i_rl(bo, bi, 3, k); \ +} while (0) static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { @@ -385,25 +413,25 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) b0[3] = le32_to_cpu(src[3]) ^ E_KEY[key_len + 27]; if (key_len > 24) { - i_nround (b1, b0, kp); - i_nround (b0, b1, kp); + i_nround(b1, b0, kp); + i_nround(b0, b1, kp); } if (key_len > 16) { - i_nround (b1, b0, kp); - i_nround (b0, b1, kp); + i_nround(b1, b0, kp); + i_nround(b0, b1, kp); } - i_nround (b1, b0, kp); - i_nround (b0, b1, kp); - i_nround (b1, b0, kp); - i_nround (b0, b1, kp); - i_nround (b1, b0, kp); - i_nround (b0, b1, kp); - i_nround (b1, b0, kp); - i_nround (b0, b1, kp); - i_nround (b1, b0, kp); - i_lround (b0, b1, kp); + i_nround(b1, b0, kp); + i_nround(b0, b1, kp); + i_nround(b1, b0, kp); + i_nround(b0, b1, kp); + i_nround(b1, b0, kp); + i_nround(b0, b1, kp); + i_nround(b1, b0, kp); + i_nround(b0, b1, kp); + i_nround(b1, b0, kp); + i_lround(b0, b1, kp); dst[0] = cpu_to_le32(b0[0]); dst[1] = cpu_to_le32(b0[1]); @@ -411,7 +439,6 @@ static void aes_decrypt(struct 
crypto_tfm *tfm, u8 *out, const u8 *in) dst[3] = cpu_to_le32(b0[3]); } - static struct crypto_alg aes_alg = { .cra_name = "aes", .cra_driver_name = "aes-generic", @@ -426,9 +453,9 @@ static struct crypto_alg aes_alg = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = aes_set_key, - .cia_encrypt = aes_encrypt, - .cia_decrypt = aes_decrypt + .cia_setkey = aes_set_key, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt } } }; -- cgit v1.1 From 96e82e4551d38e0863b366a7b61185bc4a9946cc Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Thu, 8 Nov 2007 21:20:30 +0800 Subject: [CRYPTO] aes-generic: Make key generation exportable This patch exports four tables and the set_key() routine. These resources can be shared by other AES implementations (aes-x86_64, for instance). The decryption key has been turned around (deckey[0] is the first piece of the key instead of deckey[keylen+20]). The encrypt/decrypt functions now look identical (except that they use different tables and keys). Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- crypto/aes_generic.c | 249 +++++++++++++++++++++++++-------------------- include/crypto/aes.h | 16 ++++ 2 files changed, 136 insertions(+), 129 deletions(-) diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index df8df4d..cf30af74 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -47,11 +47,6 @@ * --------------------------------------------------------------------------- */ -/* Some changes from the Gladman version: - s/RIJNDAEL(e_key)/E_KEY/g - s/RIJNDAEL(d_key)/D_KEY/g -*/ - #include #include #include @@ -60,32 +55,26 @@ #include #include -/* - * #define byte(x, nr) ((unsigned char)((x) >> (nr*8))) - */ static inline u8 byte(const u32 x, const unsigned n) { return x >> (n << 3); } -struct aes_ctx { - int key_length; - u32 buf[120]; -}; - -#define E_KEY (&ctx->buf[0]) -#define D_KEY (&ctx->buf[60]) - static u8 pow_tab[256] __initdata; static u8 log_tab[256] __initdata; static u8 sbx_tab[256] __initdata; static u8 isb_tab[256] __initdata; static u32 rco_tab[10]; -static u32 ft_tab[4][256]; -static u32 it_tab[4][256]; -static u32 fl_tab[4][256]; -static u32 il_tab[4][256]; +u32 crypto_ft_tab[4][256]; +u32 crypto_fl_tab[4][256]; +u32 crypto_it_tab[4][256]; +u32 crypto_il_tab[4][256]; + +EXPORT_SYMBOL_GPL(crypto_ft_tab); +EXPORT_SYMBOL_GPL(crypto_fl_tab); +EXPORT_SYMBOL_GPL(crypto_it_tab); +EXPORT_SYMBOL_GPL(crypto_il_tab); static inline u8 __init f_mult(u8 a, u8 b) { @@ -134,37 +123,37 @@ static void __init gen_tabs(void) p = sbx_tab[i]; t = p; - fl_tab[0][i] = t; - fl_tab[1][i] = rol32(t, 8); - fl_tab[2][i] = rol32(t, 16); - fl_tab[3][i] = rol32(t, 24); + crypto_fl_tab[0][i] = t; + crypto_fl_tab[1][i] = rol32(t, 8); + crypto_fl_tab[2][i] = rol32(t, 16); + crypto_fl_tab[3][i] = rol32(t, 24); t = ((u32) ff_mult(2, p)) | ((u32) p << 8) | ((u32) p << 16) | ((u32) ff_mult(3, p) << 24); - ft_tab[0][i] = t; - ft_tab[1][i] = rol32(t, 8); - ft_tab[2][i] = rol32(t, 16); - ft_tab[3][i] = rol32(t, 24); + crypto_ft_tab[0][i] = t; + crypto_ft_tab[1][i] = rol32(t, 8); + crypto_ft_tab[2][i] = rol32(t, 16); + crypto_ft_tab[3][i] = rol32(t, 24); p = isb_tab[i]; t = p; - il_tab[0][i] = t; - il_tab[1][i] = rol32(t, 8); - il_tab[2][i] = rol32(t, 16); - il_tab[3][i] = rol32(t, 24); + crypto_il_tab[0][i] = t; + crypto_il_tab[1][i] = rol32(t, 8); + crypto_il_tab[2][i] = rol32(t, 16); + crypto_il_tab[3][i] = rol32(t, 24); t = ((u32) ff_mult(14, p)) | ((u32) ff_mult(9, p) << 8) | ((u32) 
ff_mult(13, p) << 16) | ((u32) ff_mult(11, p) << 24); - it_tab[0][i] = t; - it_tab[1][i] = rol32(t, 8); - it_tab[2][i] = rol32(t, 16); - it_tab[3][i] = rol32(t, 24); + crypto_it_tab[0][i] = t; + crypto_it_tab[1][i] = rol32(t, 8); + crypto_it_tab[2][i] = rol32(t, 16); + crypto_it_tab[3][i] = rol32(t, 24); } } @@ -184,69 +173,69 @@ static void __init gen_tabs(void) } while (0) #define ls_box(x) \ - fl_tab[0][byte(x, 0)] ^ \ - fl_tab[1][byte(x, 1)] ^ \ - fl_tab[2][byte(x, 2)] ^ \ - fl_tab[3][byte(x, 3)] + crypto_fl_tab[0][byte(x, 0)] ^ \ + crypto_fl_tab[1][byte(x, 1)] ^ \ + crypto_fl_tab[2][byte(x, 2)] ^ \ + crypto_fl_tab[3][byte(x, 3)] #define loop4(i) do { \ t = ror32(t, 8); \ t = ls_box(t) ^ rco_tab[i]; \ - t ^= E_KEY[4 * i]; \ - E_KEY[4 * i + 4] = t; \ - t ^= E_KEY[4 * i + 1]; \ - E_KEY[4 * i + 5] = t; \ - t ^= E_KEY[4 * i + 2]; \ - E_KEY[4 * i + 6] = t; \ - t ^= E_KEY[4 * i + 3]; \ - E_KEY[4 * i + 7] = t; \ + t ^= ctx->key_enc[4 * i]; \ + ctx->key_enc[4 * i + 4] = t; \ + t ^= ctx->key_enc[4 * i + 1]; \ + ctx->key_enc[4 * i + 5] = t; \ + t ^= ctx->key_enc[4 * i + 2]; \ + ctx->key_enc[4 * i + 6] = t; \ + t ^= ctx->key_enc[4 * i + 3]; \ + ctx->key_enc[4 * i + 7] = t; \ } while (0) #define loop6(i) do { \ t = ror32(t, 8); \ t = ls_box(t) ^ rco_tab[i]; \ - t ^= E_KEY[6 * i]; \ - E_KEY[6 * i + 6] = t; \ - t ^= E_KEY[6 * i + 1]; \ - E_KEY[6 * i + 7] = t; \ - t ^= E_KEY[6 * i + 2]; \ - E_KEY[6 * i + 8] = t; \ - t ^= E_KEY[6 * i + 3]; \ - E_KEY[6 * i + 9] = t; \ - t ^= E_KEY[6 * i + 4]; \ - E_KEY[6 * i + 10] = t; \ - t ^= E_KEY[6 * i + 5]; \ - E_KEY[6 * i + 11] = t; \ + t ^= ctx->key_enc[6 * i]; \ + ctx->key_enc[6 * i + 6] = t; \ + t ^= ctx->key_enc[6 * i + 1]; \ + ctx->key_enc[6 * i + 7] = t; \ + t ^= ctx->key_enc[6 * i + 2]; \ + ctx->key_enc[6 * i + 8] = t; \ + t ^= ctx->key_enc[6 * i + 3]; \ + ctx->key_enc[6 * i + 9] = t; \ + t ^= ctx->key_enc[6 * i + 4]; \ + ctx->key_enc[6 * i + 10] = t; \ + t ^= ctx->key_enc[6 * i + 5]; \ + ctx->key_enc[6 * i + 11] = t; \ } while (0) #define loop8(i) do { \ t = ror32(t, 8); \ t = ls_box(t) ^ rco_tab[i]; \ - t ^= E_KEY[8 * i]; \ - E_KEY[8 * i + 8] = t; \ - t ^= E_KEY[8 * i + 1]; \ - E_KEY[8 * i + 9] = t; \ - t ^= E_KEY[8 * i + 2]; \ - E_KEY[8 * i + 10] = t; \ - t ^= E_KEY[8 * i + 3]; \ - E_KEY[8 * i + 11] = t; \ - t = E_KEY[8 * i + 4] ^ ls_box(t); \ - E_KEY[8 * i + 12] = t; \ - t ^= E_KEY[8 * i + 5]; \ - E_KEY[8 * i + 13] = t; \ - t ^= E_KEY[8 * i + 6]; \ - E_KEY[8 * i + 14] = t; \ - t ^= E_KEY[8 * i + 7]; \ - E_KEY[8 * i + 15] = t; \ + t ^= ctx->key_enc[8 * i]; \ + ctx->key_enc[8 * i + 8] = t; \ + t ^= ctx->key_enc[8 * i + 1]; \ + ctx->key_enc[8 * i + 9] = t; \ + t ^= ctx->key_enc[8 * i + 2]; \ + ctx->key_enc[8 * i + 10] = t; \ + t ^= ctx->key_enc[8 * i + 3]; \ + ctx->key_enc[8 * i + 11] = t; \ + t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \ + ctx->key_enc[8 * i + 12] = t; \ + t ^= ctx->key_enc[8 * i + 5]; \ + ctx->key_enc[8 * i + 13] = t; \ + t ^= ctx->key_enc[8 * i + 6]; \ + ctx->key_enc[8 * i + 14] = t; \ + t ^= ctx->key_enc[8 * i + 7]; \ + ctx->key_enc[8 * i + 15] = t; \ } while (0) -static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, +int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { - struct aes_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; u32 *flags = &tfm->crt_flags; - u32 i, t, u, v, w; + u32 i, t, u, v, w, j; if (key_len % 8) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; @@ -255,54 +244,55 @@ static int aes_set_key(struct 
crypto_tfm *tfm, const u8 *in_key, ctx->key_length = key_len; - E_KEY[0] = le32_to_cpu(key[0]); - E_KEY[1] = le32_to_cpu(key[1]); - E_KEY[2] = le32_to_cpu(key[2]); - E_KEY[3] = le32_to_cpu(key[3]); + ctx->key_dec[key_len + 24] = ctx->key_enc[0] = le32_to_cpu(key[0]); + ctx->key_dec[key_len + 25] = ctx->key_enc[1] = le32_to_cpu(key[1]); + ctx->key_dec[key_len + 26] = ctx->key_enc[2] = le32_to_cpu(key[2]); + ctx->key_dec[key_len + 27] = ctx->key_enc[3] = le32_to_cpu(key[3]); switch (key_len) { case 16: - t = E_KEY[3]; + t = ctx->key_enc[3]; for (i = 0; i < 10; ++i) loop4(i); break; case 24: - E_KEY[4] = le32_to_cpu(key[4]); - t = E_KEY[5] = le32_to_cpu(key[5]); + ctx->key_enc[4] = le32_to_cpu(key[4]); + t = ctx->key_enc[5] = le32_to_cpu(key[5]); for (i = 0; i < 8; ++i) loop6(i); break; case 32: - E_KEY[4] = le32_to_cpu(key[4]); - E_KEY[5] = le32_to_cpu(key[5]); - E_KEY[6] = le32_to_cpu(key[6]); - t = E_KEY[7] = le32_to_cpu(key[7]); + ctx->key_enc[4] = le32_to_cpu(key[4]); + ctx->key_enc[5] = le32_to_cpu(key[5]); + ctx->key_enc[6] = le32_to_cpu(key[6]); + t = ctx->key_enc[7] = le32_to_cpu(key[7]); for (i = 0; i < 7; ++i) loop8(i); break; } - D_KEY[0] = E_KEY[0]; - D_KEY[1] = E_KEY[1]; - D_KEY[2] = E_KEY[2]; - D_KEY[3] = E_KEY[3]; + ctx->key_dec[0] = ctx->key_enc[key_len + 24]; + ctx->key_dec[1] = ctx->key_enc[key_len + 25]; + ctx->key_dec[2] = ctx->key_enc[key_len + 26]; + ctx->key_dec[3] = ctx->key_enc[key_len + 27]; for (i = 4; i < key_len + 24; ++i) { - imix_col(D_KEY[i], E_KEY[i]); + j = key_len + 24 - (i & ~3) + (i & 3); + imix_col(ctx->key_dec[j], ctx->key_enc[i]); } - return 0; } +EXPORT_SYMBOL_GPL(crypto_aes_set_key); /* encrypt a block of text */ #define f_rn(bo, bi, n, k) do { \ - bo[n] = ft_tab[0][byte(bi[n], 0)] ^ \ - ft_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \ - ft_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ - ft_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \ + bo[n] = crypto_ft_tab[0][byte(bi[n], 0)] ^ \ + crypto_ft_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \ + crypto_ft_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ + crypto_ft_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \ } while (0) #define f_nround(bo, bi, k) do {\ @@ -314,10 +304,10 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, } while (0) #define f_rl(bo, bi, n, k) do { \ - bo[n] = fl_tab[0][byte(bi[n], 0)] ^ \ - fl_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \ - fl_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ - fl_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \ + bo[n] = crypto_fl_tab[0][byte(bi[n], 0)] ^ \ + crypto_fl_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \ + crypto_fl_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ + crypto_fl_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \ } while (0) #define f_lround(bo, bi, k) do {\ @@ -329,23 +319,24 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - const struct aes_ctx *ctx = crypto_tfm_ctx(tfm); + const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *src = (const __le32 *)in; __le32 *dst = (__le32 *)out; u32 b0[4], b1[4]; - const u32 *kp = E_KEY + 4; + const u32 *kp = ctx->key_enc + 4; + const int key_len = ctx->key_length; - b0[0] = le32_to_cpu(src[0]) ^ E_KEY[0]; - b0[1] = le32_to_cpu(src[1]) ^ E_KEY[1]; - b0[2] = le32_to_cpu(src[2]) ^ E_KEY[2]; - b0[3] = le32_to_cpu(src[3]) ^ E_KEY[3]; + b0[0] = le32_to_cpu(src[0]) ^ ctx->key_enc[0]; + b0[1] = le32_to_cpu(src[1]) ^ ctx->key_enc[1]; + b0[2] = le32_to_cpu(src[2]) ^ ctx->key_enc[2]; + b0[3] = le32_to_cpu(src[3]) ^ ctx->key_enc[3]; - if (ctx->key_length > 24) { 
+ if (key_len > 24) { f_nround(b1, b0, kp); f_nround(b0, b1, kp); } - if (ctx->key_length > 16) { + if (key_len > 16) { f_nround(b1, b0, kp); f_nround(b0, b1, kp); } @@ -370,10 +361,10 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) /* decrypt a block of text */ #define i_rn(bo, bi, n, k) do { \ - bo[n] = it_tab[0][byte(bi[n], 0)] ^ \ - it_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \ - it_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ - it_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \ + bo[n] = crypto_it_tab[0][byte(bi[n], 0)] ^ \ + crypto_it_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \ + crypto_it_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ + crypto_it_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \ } while (0) #define i_nround(bo, bi, k) do {\ @@ -381,14 +372,14 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) i_rn(bo, bi, 1, k); \ i_rn(bo, bi, 2, k); \ i_rn(bo, bi, 3, k); \ - k -= 4; \ + k += 4; \ } while (0) #define i_rl(bo, bi, n, k) do { \ - bo[n] = il_tab[0][byte(bi[n], 0)] ^ \ - il_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \ - il_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ - il_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \ + bo[n] = crypto_il_tab[0][byte(bi[n], 0)] ^ \ + crypto_il_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \ + crypto_il_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ + crypto_il_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \ } while (0) #define i_lround(bo, bi, k) do {\ @@ -400,17 +391,17 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - const struct aes_ctx *ctx = crypto_tfm_ctx(tfm); + const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *src = (const __le32 *)in; __le32 *dst = (__le32 *)out; u32 b0[4], b1[4]; const int key_len = ctx->key_length; - const u32 *kp = D_KEY + key_len + 20; + const u32 *kp = ctx->key_dec + 4; - b0[0] = le32_to_cpu(src[0]) ^ E_KEY[key_len + 24]; - b0[1] = le32_to_cpu(src[1]) ^ E_KEY[key_len + 25]; - b0[2] = le32_to_cpu(src[2]) ^ E_KEY[key_len + 26]; - b0[3] = le32_to_cpu(src[3]) ^ E_KEY[key_len + 27]; + b0[0] = le32_to_cpu(src[0]) ^ ctx->key_dec[0]; + b0[1] = le32_to_cpu(src[1]) ^ ctx->key_dec[1]; + b0[2] = le32_to_cpu(src[2]) ^ ctx->key_dec[2]; + b0[3] = le32_to_cpu(src[3]) ^ ctx->key_dec[3]; if (key_len > 24) { i_nround(b1, b0, kp); @@ -445,7 +436,7 @@ static struct crypto_alg aes_alg = { .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct aes_ctx), + .cra_ctxsize = sizeof(struct crypto_aes_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), @@ -453,7 +444,7 @@ static struct crypto_alg aes_alg = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = aes_set_key, + .cia_setkey = crypto_aes_set_key, .cia_encrypt = aes_encrypt, .cia_decrypt = aes_decrypt } diff --git a/include/crypto/aes.h b/include/crypto/aes.h index 9ff842f..d480b76 100644 --- a/include/crypto/aes.h +++ b/include/crypto/aes.h @@ -5,6 +5,9 @@ #ifndef _CRYPTO_AES_H #define _CRYPTO_AES_H +#include +#include + #define AES_MIN_KEY_SIZE 16 #define AES_MAX_KEY_SIZE 32 #define AES_KEYSIZE_128 16 @@ -12,4 +15,17 @@ #define AES_KEYSIZE_256 32 #define AES_BLOCK_SIZE 16 +struct crypto_aes_ctx { + u32 key_length; + u32 key_enc[60]; + u32 key_dec[60]; +}; + +extern u32 crypto_ft_tab[4][256]; +extern u32 crypto_fl_tab[4][256]; +extern u32 crypto_it_tab[4][256]; +extern u32 crypto_il_tab[4][256]; + +int 
crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len); #endif -- cgit v1.1 From 81190b321548bb0bf2d6e1f172695275b0fd1363 Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Thu, 8 Nov 2007 21:25:04 +0800 Subject: [CRYPTO] aes-x86-64: Remove setkey The setkey() function can be shared with the generic algorithm. Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- arch/x86/crypto/aes-x86_64-asm_64.S | 68 ++++----- arch/x86/crypto/aes_64.c | 282 +----------------------------------- crypto/Kconfig | 1 + 3 files changed, 38 insertions(+), 313 deletions(-) diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S index 26b40de..a120f52 100644 --- a/arch/x86/crypto/aes-x86_64-asm_64.S +++ b/arch/x86/crypto/aes-x86_64-asm_64.S @@ -8,10 +8,10 @@ * including this sentence is retained in full. */ -.extern aes_ft_tab -.extern aes_it_tab -.extern aes_fl_tab -.extern aes_il_tab +.extern crypto_ft_tab +.extern crypto_it_tab +.extern crypto_fl_tab +.extern crypto_il_tab .text @@ -56,13 +56,13 @@ .align 8; \ FUNC: movq r1,r2; \ movq r3,r4; \ - leaq BASE+KEY+52(r8),r9; \ + leaq BASE+KEY+48+4(r8),r9; \ movq r10,r11; \ movl (r7),r5 ## E; \ movl 4(r7),r1 ## E; \ movl 8(r7),r6 ## E; \ movl 12(r7),r7 ## E; \ - movl BASE(r8),r10 ## E; \ + movl BASE+0(r8),r10 ## E; \ xorl -48(r9),r5 ## E; \ xorl -44(r9),r1 ## E; \ xorl -40(r9),r6 ## E; \ @@ -154,37 +154,37 @@ FUNC: movq r1,r2; \ /* void aes_enc_blk(stuct crypto_tfm *tfm, u8 *out, const u8 *in) */ entry(aes_enc_blk,0,enc128,enc192) - encrypt_round(aes_ft_tab,-96) - encrypt_round(aes_ft_tab,-80) -enc192: encrypt_round(aes_ft_tab,-64) - encrypt_round(aes_ft_tab,-48) -enc128: encrypt_round(aes_ft_tab,-32) - encrypt_round(aes_ft_tab,-16) - encrypt_round(aes_ft_tab, 0) - encrypt_round(aes_ft_tab, 16) - encrypt_round(aes_ft_tab, 32) - encrypt_round(aes_ft_tab, 48) - encrypt_round(aes_ft_tab, 64) - encrypt_round(aes_ft_tab, 80) - encrypt_round(aes_ft_tab, 96) - encrypt_final(aes_fl_tab,112) + encrypt_round(crypto_ft_tab,-96) + encrypt_round(crypto_ft_tab,-80) +enc192: encrypt_round(crypto_ft_tab,-64) + encrypt_round(crypto_ft_tab,-48) +enc128: encrypt_round(crypto_ft_tab,-32) + encrypt_round(crypto_ft_tab,-16) + encrypt_round(crypto_ft_tab, 0) + encrypt_round(crypto_ft_tab, 16) + encrypt_round(crypto_ft_tab, 32) + encrypt_round(crypto_ft_tab, 48) + encrypt_round(crypto_ft_tab, 64) + encrypt_round(crypto_ft_tab, 80) + encrypt_round(crypto_ft_tab, 96) + encrypt_final(crypto_fl_tab,112) return /* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */ entry(aes_dec_blk,240,dec128,dec192) - decrypt_round(aes_it_tab,-96) - decrypt_round(aes_it_tab,-80) -dec192: decrypt_round(aes_it_tab,-64) - decrypt_round(aes_it_tab,-48) -dec128: decrypt_round(aes_it_tab,-32) - decrypt_round(aes_it_tab,-16) - decrypt_round(aes_it_tab, 0) - decrypt_round(aes_it_tab, 16) - decrypt_round(aes_it_tab, 32) - decrypt_round(aes_it_tab, 48) - decrypt_round(aes_it_tab, 64) - decrypt_round(aes_it_tab, 80) - decrypt_round(aes_it_tab, 96) - decrypt_final(aes_il_tab,112) + decrypt_round(crypto_it_tab,-96) + decrypt_round(crypto_it_tab,-80) +dec192: decrypt_round(crypto_it_tab,-64) + decrypt_round(crypto_it_tab,-48) +dec128: decrypt_round(crypto_it_tab,-32) + decrypt_round(crypto_it_tab,-16) + decrypt_round(crypto_it_tab, 0) + decrypt_round(crypto_it_tab, 16) + decrypt_round(crypto_it_tab, 32) + decrypt_round(crypto_it_tab, 48) + decrypt_round(crypto_it_tab, 64) + decrypt_round(crypto_it_tab, 80) + 
decrypt_round(crypto_it_tab, 96) + decrypt_final(crypto_il_tab,112) return diff --git a/arch/x86/crypto/aes_64.c b/arch/x86/crypto/aes_64.c index 0b38a4c..d7a41a9 100644 --- a/arch/x86/crypto/aes_64.c +++ b/arch/x86/crypto/aes_64.c @@ -1,284 +1,9 @@ /* - * Cryptographic API. + * Glue Code for AES Cipher Algorithm * - * AES Cipher Algorithm. - * - * Based on Brian Gladman's code. - * - * Linux developers: - * Alexander Kjeldaas - * Herbert Valerio Riedel - * Kyle McMartin - * Adam J. Richter (conversion to 2.5 API). - * Andreas Steinmetz (adapted to x86_64 assembler) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * --------------------------------------------------------------------------- - * Copyright (c) 2002, Dr Brian Gladman , Worcester, UK. - * All rights reserved. - * - * LICENSE TERMS - * - * The free distribution and use of this software in both source and binary - * form is allowed (with or without changes) provided that: - * - * 1. distributions of this source code include the above copyright - * notice, this list of conditions and the following disclaimer; - * - * 2. distributions in binary form include the above copyright - * notice, this list of conditions and the following disclaimer - * in the documentation and/or other associated materials; - * - * 3. the copyright holder's name is not used to endorse products - * built using this software without specific written permission. - * - * ALTERNATIVELY, provided that this notice is retained in full, this product - * may be distributed under the terms of the GNU General Public License (GPL), - * in which case the provisions of the GPL apply INSTEAD OF those given above. - * - * DISCLAIMER - * - * This software is provided 'as is' with no explicit or implied warranties - * in respect of its properties, including, but not limited to, correctness - * and/or fitness for purpose. - * --------------------------------------------------------------------------- */ -/* Some changes from the Gladman version: - s/RIJNDAEL(e_key)/E_KEY/g - s/RIJNDAEL(d_key)/D_KEY/g -*/ - -#include #include -#include -#include -#include -#include -#include -#include - -/* - * #define byte(x, nr) ((unsigned char)((x) >> (nr*8))) - */ -static inline u8 byte(const u32 x, const unsigned n) -{ - return x >> (n << 3); -} - -struct aes_ctx -{ - u32 key_length; - u32 buf[120]; -}; - -#define E_KEY (&ctx->buf[0]) -#define D_KEY (&ctx->buf[60]) - -static u8 pow_tab[256] __initdata; -static u8 log_tab[256] __initdata; -static u8 sbx_tab[256] __initdata; -static u8 isb_tab[256] __initdata; -static u32 rco_tab[10]; -u32 aes_ft_tab[4][256]; -u32 aes_it_tab[4][256]; - -u32 aes_fl_tab[4][256]; -u32 aes_il_tab[4][256]; - -static inline u8 f_mult(u8 a, u8 b) -{ - u8 aa = log_tab[a], cc = aa + log_tab[b]; - - return pow_tab[cc + (cc < aa ? 1 : 0)]; -} - -#define ff_mult(a, b) (a && b ? 
f_mult(a, b) : 0) - -#define ls_box(x) \ - (aes_fl_tab[0][byte(x, 0)] ^ \ - aes_fl_tab[1][byte(x, 1)] ^ \ - aes_fl_tab[2][byte(x, 2)] ^ \ - aes_fl_tab[3][byte(x, 3)]) - -static void __init gen_tabs(void) -{ - u32 i, t; - u8 p, q; - - /* log and power tables for GF(2**8) finite field with - 0x011b as modular polynomial - the simplest primitive - root is 0x03, used here to generate the tables */ - - for (i = 0, p = 1; i < 256; ++i) { - pow_tab[i] = (u8)p; - log_tab[p] = (u8)i; - - p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0); - } - - log_tab[1] = 0; - - for (i = 0, p = 1; i < 10; ++i) { - rco_tab[i] = p; - - p = (p << 1) ^ (p & 0x80 ? 0x01b : 0); - } - - for (i = 0; i < 256; ++i) { - p = (i ? pow_tab[255 - log_tab[i]] : 0); - q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2)); - p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2)); - sbx_tab[i] = p; - isb_tab[p] = (u8)i; - } - - for (i = 0; i < 256; ++i) { - p = sbx_tab[i]; - - t = p; - aes_fl_tab[0][i] = t; - aes_fl_tab[1][i] = rol32(t, 8); - aes_fl_tab[2][i] = rol32(t, 16); - aes_fl_tab[3][i] = rol32(t, 24); - - t = ((u32)ff_mult(2, p)) | - ((u32)p << 8) | - ((u32)p << 16) | ((u32)ff_mult(3, p) << 24); - - aes_ft_tab[0][i] = t; - aes_ft_tab[1][i] = rol32(t, 8); - aes_ft_tab[2][i] = rol32(t, 16); - aes_ft_tab[3][i] = rol32(t, 24); - - p = isb_tab[i]; - - t = p; - aes_il_tab[0][i] = t; - aes_il_tab[1][i] = rol32(t, 8); - aes_il_tab[2][i] = rol32(t, 16); - aes_il_tab[3][i] = rol32(t, 24); - - t = ((u32)ff_mult(14, p)) | - ((u32)ff_mult(9, p) << 8) | - ((u32)ff_mult(13, p) << 16) | - ((u32)ff_mult(11, p) << 24); - - aes_it_tab[0][i] = t; - aes_it_tab[1][i] = rol32(t, 8); - aes_it_tab[2][i] = rol32(t, 16); - aes_it_tab[3][i] = rol32(t, 24); - } -} - -#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b) - -#define imix_col(y, x) \ - u = star_x(x); \ - v = star_x(u); \ - w = star_x(v); \ - t = w ^ (x); \ - (y) = u ^ v ^ w; \ - (y) ^= ror32(u ^ t, 8) ^ \ - ror32(v ^ t, 16) ^ \ - ror32(t, 24) - -/* initialise the key schedule from the user supplied key */ - -#define loop4(i) \ -{ \ - t = ror32(t, 8); t = ls_box(t) ^ rco_tab[i]; \ - t ^= E_KEY[4 * i]; E_KEY[4 * i + 4] = t; \ - t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t; \ - t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t; \ - t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t; \ -} - -#define loop6(i) \ -{ \ - t = ror32(t, 8); t = ls_box(t) ^ rco_tab[i]; \ - t ^= E_KEY[6 * i]; E_KEY[6 * i + 6] = t; \ - t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t; \ - t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t; \ - t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t; \ - t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t; \ - t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t; \ -} - -#define loop8(i) \ -{ \ - t = ror32(t, 8); ; t = ls_box(t) ^ rco_tab[i]; \ - t ^= E_KEY[8 * i]; E_KEY[8 * i + 8] = t; \ - t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t; \ - t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t; \ - t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t; \ - t = E_KEY[8 * i + 4] ^ ls_box(t); \ - E_KEY[8 * i + 12] = t; \ - t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t; \ - t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t; \ - t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \ -} - -static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len) -{ - struct aes_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *key = (const __le32 *)in_key; - u32 *flags = &tfm->crt_flags; - u32 i, j, t, u, v, w; - - if (key_len % 8) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; - } - - ctx->key_length = key_len; - - D_KEY[key_len + 24] = 
E_KEY[0] = le32_to_cpu(key[0]); - D_KEY[key_len + 25] = E_KEY[1] = le32_to_cpu(key[1]); - D_KEY[key_len + 26] = E_KEY[2] = le32_to_cpu(key[2]); - D_KEY[key_len + 27] = E_KEY[3] = le32_to_cpu(key[3]); - - switch (key_len) { - case 16: - t = E_KEY[3]; - for (i = 0; i < 10; ++i) - loop4(i); - break; - - case 24: - E_KEY[4] = le32_to_cpu(key[4]); - t = E_KEY[5] = le32_to_cpu(key[5]); - for (i = 0; i < 8; ++i) - loop6 (i); - break; - - case 32: - E_KEY[4] = le32_to_cpu(key[4]); - E_KEY[5] = le32_to_cpu(key[5]); - E_KEY[6] = le32_to_cpu(key[6]); - t = E_KEY[7] = le32_to_cpu(key[7]); - for (i = 0; i < 7; ++i) - loop8(i); - break; - } - - D_KEY[0] = E_KEY[key_len + 24]; - D_KEY[1] = E_KEY[key_len + 25]; - D_KEY[2] = E_KEY[key_len + 26]; - D_KEY[3] = E_KEY[key_len + 27]; - - for (i = 4; i < key_len + 24; ++i) { - j = key_len + 24 - (i & ~3) + (i & 3); - imix_col(D_KEY[j], E_KEY[i]); - } - - return 0; -} asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in); asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in); @@ -299,14 +24,14 @@ static struct crypto_alg aes_alg = { .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct aes_ctx), + .cra_ctxsize = sizeof(struct crypto_aes_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = aes_set_key, + .cia_setkey = crypto_aes_set_key, .cia_encrypt = aes_encrypt, .cia_decrypt = aes_decrypt } @@ -315,7 +40,6 @@ static struct crypto_alg aes_alg = { static int __init aes_init(void) { - gen_tabs(); return crypto_register_alg(&aes_alg); } diff --git a/crypto/Kconfig b/crypto/Kconfig index 1f32071..3f0bc0e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -350,6 +350,7 @@ config CRYPTO_AES_X86_64 tristate "AES cipher algorithms (x86_64)" depends on (X86 || UML_X86) && 64BIT select CRYPTO_ALGAPI + select CRYPTO_AES help AES cipher algorithms (FIPS-197). AES uses the Rijndael algorithm. -- cgit v1.1 From b345cee90a3ffec5eca6d6c1c59bd0d1feb453d4 Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Thu, 8 Nov 2007 21:27:05 +0800 Subject: [CRYPTO] ctr: Remove default M NO other block mode is M by default. Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 3f0bc0e..d9666e3 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -199,7 +199,6 @@ config CRYPTO_CTR tristate "CTR support" select CRYPTO_BLKCIPHER select CRYPTO_MANAGER - default m help CTR: Counter mode This block cipher algorithm is required for IPSec. -- cgit v1.1 From 5157dea8139cf0edc4834d528531e642c0d27e37 Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Sat, 10 Nov 2007 19:07:16 +0800 Subject: [CRYPTO] aes-i586: Remove setkey The setkey() function can be shared with the generic algorithm. 
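For illustration only, not part of this patch: a minimal sketch of what such a glue file reduces to once crypto_aes_set_key() and struct crypto_aes_ctx are shared. The thin C wrappers around the asmlinkage entry points are kept because the crypto core invokes cia_encrypt/cia_decrypt with the normal calling convention:

#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/module.h>

asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	aes_enc_blk(tfm, dst, src);	/* assembler fast path */
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	aes_dec_blk(tfm, dst, src);
}

static struct crypto_alg aes_alg = {
	.cra_name	= "aes",
	.cra_flags	= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct crypto_aes_ctx),	/* shared layout */
	.cra_module	= THIS_MODULE,
	.cra_list	= LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u		= {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey	 = crypto_aes_set_key,	/* generic key schedule */
			.cia_encrypt	 = aes_encrypt,
			.cia_decrypt	 = aes_decrypt
		}
	}
};

static int __init aes_init(void)
{
	return crypto_register_alg(&aes_alg);
}
module_init(aes_init);

The only per-architecture pieces left are the assembler routines themselves.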
Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- arch/x86/crypto/aes-i586-asm_32.S | 89 ++++---- arch/x86/crypto/aes_32.c | 461 +------------------------------------- crypto/Kconfig | 1 + 3 files changed, 47 insertions(+), 504 deletions(-) diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S index f942f0c..1093bed 100644 --- a/arch/x86/crypto/aes-i586-asm_32.S +++ b/arch/x86/crypto/aes-i586-asm_32.S @@ -46,9 +46,9 @@ #define in_blk 16 /* offsets in crypto_tfm structure */ -#define ekey (crypto_tfm_ctx_offset + 0) -#define nrnd (crypto_tfm_ctx_offset + 256) -#define dkey (crypto_tfm_ctx_offset + 260) +#define klen (crypto_tfm_ctx_offset + 0) +#define ekey (crypto_tfm_ctx_offset + 4) +#define dkey (crypto_tfm_ctx_offset + 244) // register mapping for encrypt and decrypt subroutines @@ -221,8 +221,8 @@ .global aes_enc_blk -.extern ft_tab -.extern fl_tab +.extern crypto_ft_tab +.extern crypto_fl_tab .align 4 @@ -236,7 +236,7 @@ aes_enc_blk: 1: push %ebx mov in_blk+4(%esp),%r2 push %esi - mov nrnd(%ebp),%r3 // number of rounds + mov klen(%ebp),%r3 // key size push %edi #if ekey != 0 lea ekey(%ebp),%ebp // key pointer @@ -255,26 +255,26 @@ aes_enc_blk: sub $8,%esp // space for register saves on stack add $16,%ebp // increment to next round key - cmp $12,%r3 + cmp $24,%r3 jb 4f // 10 rounds for 128-bit key lea 32(%ebp),%ebp je 3f // 12 rounds for 192-bit key lea 32(%ebp),%ebp -2: fwd_rnd1( -64(%ebp) ,ft_tab) // 14 rounds for 256-bit key - fwd_rnd2( -48(%ebp) ,ft_tab) -3: fwd_rnd1( -32(%ebp) ,ft_tab) // 12 rounds for 192-bit key - fwd_rnd2( -16(%ebp) ,ft_tab) -4: fwd_rnd1( (%ebp) ,ft_tab) // 10 rounds for 128-bit key - fwd_rnd2( +16(%ebp) ,ft_tab) - fwd_rnd1( +32(%ebp) ,ft_tab) - fwd_rnd2( +48(%ebp) ,ft_tab) - fwd_rnd1( +64(%ebp) ,ft_tab) - fwd_rnd2( +80(%ebp) ,ft_tab) - fwd_rnd1( +96(%ebp) ,ft_tab) - fwd_rnd2(+112(%ebp) ,ft_tab) - fwd_rnd1(+128(%ebp) ,ft_tab) - fwd_rnd2(+144(%ebp) ,fl_tab) // last round uses a different table +2: fwd_rnd1( -64(%ebp), crypto_ft_tab) // 14 rounds for 256-bit key + fwd_rnd2( -48(%ebp), crypto_ft_tab) +3: fwd_rnd1( -32(%ebp), crypto_ft_tab) // 12 rounds for 192-bit key + fwd_rnd2( -16(%ebp), crypto_ft_tab) +4: fwd_rnd1( (%ebp), crypto_ft_tab) // 10 rounds for 128-bit key + fwd_rnd2( +16(%ebp), crypto_ft_tab) + fwd_rnd1( +32(%ebp), crypto_ft_tab) + fwd_rnd2( +48(%ebp), crypto_ft_tab) + fwd_rnd1( +64(%ebp), crypto_ft_tab) + fwd_rnd2( +80(%ebp), crypto_ft_tab) + fwd_rnd1( +96(%ebp), crypto_ft_tab) + fwd_rnd2(+112(%ebp), crypto_ft_tab) + fwd_rnd1(+128(%ebp), crypto_ft_tab) + fwd_rnd2(+144(%ebp), crypto_fl_tab) // last round uses a different table // move final values to the output array. 
CAUTION: the // order of these assigns rely on the register mappings @@ -297,8 +297,8 @@ aes_enc_blk: .global aes_dec_blk -.extern it_tab -.extern il_tab +.extern crypto_it_tab +.extern crypto_il_tab .align 4 @@ -312,14 +312,11 @@ aes_dec_blk: 1: push %ebx mov in_blk+4(%esp),%r2 push %esi - mov nrnd(%ebp),%r3 // number of rounds + mov klen(%ebp),%r3 // key size push %edi #if dkey != 0 lea dkey(%ebp),%ebp // key pointer #endif - mov %r3,%r0 - shl $4,%r0 - add %r0,%ebp // input four columns and xor in first round key @@ -333,27 +330,27 @@ aes_dec_blk: xor 12(%ebp),%r5 sub $8,%esp // space for register saves on stack - sub $16,%ebp // increment to next round key - cmp $12,%r3 + add $16,%ebp // increment to next round key + cmp $24,%r3 jb 4f // 10 rounds for 128-bit key - lea -32(%ebp),%ebp + lea 32(%ebp),%ebp je 3f // 12 rounds for 192-bit key - lea -32(%ebp),%ebp - -2: inv_rnd1( +64(%ebp), it_tab) // 14 rounds for 256-bit key - inv_rnd2( +48(%ebp), it_tab) -3: inv_rnd1( +32(%ebp), it_tab) // 12 rounds for 192-bit key - inv_rnd2( +16(%ebp), it_tab) -4: inv_rnd1( (%ebp), it_tab) // 10 rounds for 128-bit key - inv_rnd2( -16(%ebp), it_tab) - inv_rnd1( -32(%ebp), it_tab) - inv_rnd2( -48(%ebp), it_tab) - inv_rnd1( -64(%ebp), it_tab) - inv_rnd2( -80(%ebp), it_tab) - inv_rnd1( -96(%ebp), it_tab) - inv_rnd2(-112(%ebp), it_tab) - inv_rnd1(-128(%ebp), it_tab) - inv_rnd2(-144(%ebp), il_tab) // last round uses a different table + lea 32(%ebp),%ebp + +2: inv_rnd1( -64(%ebp), crypto_it_tab) // 14 rounds for 256-bit key + inv_rnd2( -48(%ebp), crypto_it_tab) +3: inv_rnd1( -32(%ebp), crypto_it_tab) // 12 rounds for 192-bit key + inv_rnd2( -16(%ebp), crypto_it_tab) +4: inv_rnd1( (%ebp), crypto_it_tab) // 10 rounds for 128-bit key + inv_rnd2( +16(%ebp), crypto_it_tab) + inv_rnd1( +32(%ebp), crypto_it_tab) + inv_rnd2( +48(%ebp), crypto_it_tab) + inv_rnd1( +64(%ebp), crypto_it_tab) + inv_rnd2( +80(%ebp), crypto_it_tab) + inv_rnd1( +96(%ebp), crypto_it_tab) + inv_rnd2(+112(%ebp), crypto_it_tab) + inv_rnd1(+128(%ebp), crypto_it_tab) + inv_rnd2(+144(%ebp), crypto_il_tab) // last round uses a different table // move final values to the output array. CAUTION: the // order of these assigns rely on the register mappings diff --git a/arch/x86/crypto/aes_32.c b/arch/x86/crypto/aes_32.c index 9b0ab50..8556d95 100644 --- a/arch/x86/crypto/aes_32.c +++ b/arch/x86/crypto/aes_32.c @@ -1,468 +1,14 @@ -/* - * +/* * Glue Code for optimized 586 assembler version of AES - * - * Copyright (c) 2002, Dr Brian Gladman <>, Worcester, UK. - * All rights reserved. - * - * LICENSE TERMS - * - * The free distribution and use of this software in both source and binary - * form is allowed (with or without changes) provided that: - * - * 1. distributions of this source code include the above copyright - * notice, this list of conditions and the following disclaimer; - * - * 2. distributions in binary form include the above copyright - * notice, this list of conditions and the following disclaimer - * in the documentation and/or other associated materials; - * - * 3. the copyright holder's name is not used to endorse products - * built using this software without specific written permission. - * - * ALTERNATIVELY, provided that this notice is retained in full, this product - * may be distributed under the terms of the GNU General Public License (GPL), - * in which case the provisions of the GPL apply INSTEAD OF those given above. 
- * - * DISCLAIMER - * - * This software is provided 'as is' with no explicit or implied warranties - * in respect of its properties, including, but not limited to, correctness - * and/or fitness for purpose. - * - * Copyright (c) 2003, Adam J. Richter (conversion to - * 2.5 API). - * Copyright (c) 2003, 2004 Fruhwirth Clemens - * Copyright (c) 2004 Red Hat, Inc., James Morris - * */ -#include #include -#include #include -#include -#include #include -#include asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); -#define AES_KS_LENGTH 4 * AES_BLOCK_SIZE -#define RC_LENGTH 29 - -struct aes_ctx { - u32 ekey[AES_KS_LENGTH]; - u32 rounds; - u32 dkey[AES_KS_LENGTH]; -}; - -#define WPOLY 0x011b -#define bytes2word(b0, b1, b2, b3) \ - (((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0)) - -/* define the finite field multiplies required for Rijndael */ -#define f2(x) ((x) ? pow[log[x] + 0x19] : 0) -#define f3(x) ((x) ? pow[log[x] + 0x01] : 0) -#define f9(x) ((x) ? pow[log[x] + 0xc7] : 0) -#define fb(x) ((x) ? pow[log[x] + 0x68] : 0) -#define fd(x) ((x) ? pow[log[x] + 0xee] : 0) -#define fe(x) ((x) ? pow[log[x] + 0xdf] : 0) -#define fi(x) ((x) ? pow[255 - log[x]]: 0) - -static inline u32 upr(u32 x, int n) -{ - return (x << 8 * n) | (x >> (32 - 8 * n)); -} - -static inline u8 bval(u32 x, int n) -{ - return x >> 8 * n; -} - -/* The forward and inverse affine transformations used in the S-box */ -#define fwd_affine(x) \ - (w = (u32)x, w ^= (w<<1)^(w<<2)^(w<<3)^(w<<4), 0x63^(u8)(w^(w>>8))) - -#define inv_affine(x) \ - (w = (u32)x, w = (w<<1)^(w<<3)^(w<<6), 0x05^(u8)(w^(w>>8))) - -static u32 rcon_tab[RC_LENGTH]; - -u32 ft_tab[4][256]; -u32 fl_tab[4][256]; -static u32 im_tab[4][256]; -u32 il_tab[4][256]; -u32 it_tab[4][256]; - -static void gen_tabs(void) -{ - u32 i, w; - u8 pow[512], log[256]; - - /* - * log and power tables for GF(2^8) finite field with - * WPOLY as modular polynomial - the simplest primitive - * root is 0x03, used here to generate the tables. - */ - i = 0; w = 1; - - do { - pow[i] = (u8)w; - pow[i + 255] = (u8)w; - log[w] = (u8)i++; - w ^= (w << 1) ^ (w & 0x80 ? 
WPOLY : 0); - } while (w != 1); - - for(i = 0, w = 1; i < RC_LENGTH; ++i) { - rcon_tab[i] = bytes2word(w, 0, 0, 0); - w = f2(w); - } - - for(i = 0; i < 256; ++i) { - u8 b; - - b = fwd_affine(fi((u8)i)); - w = bytes2word(f2(b), b, b, f3(b)); - - /* tables for a normal encryption round */ - ft_tab[0][i] = w; - ft_tab[1][i] = upr(w, 1); - ft_tab[2][i] = upr(w, 2); - ft_tab[3][i] = upr(w, 3); - w = bytes2word(b, 0, 0, 0); - - /* - * tables for last encryption round - * (may also be used in the key schedule) - */ - fl_tab[0][i] = w; - fl_tab[1][i] = upr(w, 1); - fl_tab[2][i] = upr(w, 2); - fl_tab[3][i] = upr(w, 3); - - b = fi(inv_affine((u8)i)); - w = bytes2word(fe(b), f9(b), fd(b), fb(b)); - - /* tables for the inverse mix column operation */ - im_tab[0][b] = w; - im_tab[1][b] = upr(w, 1); - im_tab[2][b] = upr(w, 2); - im_tab[3][b] = upr(w, 3); - - /* tables for a normal decryption round */ - it_tab[0][i] = w; - it_tab[1][i] = upr(w,1); - it_tab[2][i] = upr(w,2); - it_tab[3][i] = upr(w,3); - - w = bytes2word(b, 0, 0, 0); - - /* tables for last decryption round */ - il_tab[0][i] = w; - il_tab[1][i] = upr(w,1); - il_tab[2][i] = upr(w,2); - il_tab[3][i] = upr(w,3); - } -} - -#define four_tables(x,tab,vf,rf,c) \ -( tab[0][bval(vf(x,0,c),rf(0,c))] ^ \ - tab[1][bval(vf(x,1,c),rf(1,c))] ^ \ - tab[2][bval(vf(x,2,c),rf(2,c))] ^ \ - tab[3][bval(vf(x,3,c),rf(3,c))] \ -) - -#define vf1(x,r,c) (x) -#define rf1(r,c) (r) -#define rf2(r,c) ((r-c)&3) - -#define inv_mcol(x) four_tables(x,im_tab,vf1,rf1,0) -#define ls_box(x,c) four_tables(x,fl_tab,vf1,rf2,c) - -#define ff(x) inv_mcol(x) - -#define ke4(k,i) \ -{ \ - k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i]; \ - k[4*(i)+5] = ss[1] ^= ss[0]; \ - k[4*(i)+6] = ss[2] ^= ss[1]; \ - k[4*(i)+7] = ss[3] ^= ss[2]; \ -} - -#define kel4(k,i) \ -{ \ - k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i]; \ - k[4*(i)+5] = ss[1] ^= ss[0]; \ - k[4*(i)+6] = ss[2] ^= ss[1]; k[4*(i)+7] = ss[3] ^= ss[2]; \ -} - -#define ke6(k,i) \ -{ \ - k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \ - k[6*(i)+ 7] = ss[1] ^= ss[0]; \ - k[6*(i)+ 8] = ss[2] ^= ss[1]; \ - k[6*(i)+ 9] = ss[3] ^= ss[2]; \ - k[6*(i)+10] = ss[4] ^= ss[3]; \ - k[6*(i)+11] = ss[5] ^= ss[4]; \ -} - -#define kel6(k,i) \ -{ \ - k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \ - k[6*(i)+ 7] = ss[1] ^= ss[0]; \ - k[6*(i)+ 8] = ss[2] ^= ss[1]; \ - k[6*(i)+ 9] = ss[3] ^= ss[2]; \ -} - -#define ke8(k,i) \ -{ \ - k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \ - k[8*(i)+ 9] = ss[1] ^= ss[0]; \ - k[8*(i)+10] = ss[2] ^= ss[1]; \ - k[8*(i)+11] = ss[3] ^= ss[2]; \ - k[8*(i)+12] = ss[4] ^= ls_box(ss[3],0); \ - k[8*(i)+13] = ss[5] ^= ss[4]; \ - k[8*(i)+14] = ss[6] ^= ss[5]; \ - k[8*(i)+15] = ss[7] ^= ss[6]; \ -} - -#define kel8(k,i) \ -{ \ - k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \ - k[8*(i)+ 9] = ss[1] ^= ss[0]; \ - k[8*(i)+10] = ss[2] ^= ss[1]; \ - k[8*(i)+11] = ss[3] ^= ss[2]; \ -} - -#define kdf4(k,i) \ -{ \ - ss[0] = ss[0] ^ ss[2] ^ ss[1] ^ ss[3]; \ - ss[1] = ss[1] ^ ss[3]; \ - ss[2] = ss[2] ^ ss[3]; \ - ss[3] = ss[3]; \ - ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \ - ss[i % 4] ^= ss[4]; \ - ss[4] ^= k[4*(i)]; \ - k[4*(i)+4] = ff(ss[4]); \ - ss[4] ^= k[4*(i)+1]; \ - k[4*(i)+5] = ff(ss[4]); \ - ss[4] ^= k[4*(i)+2]; \ - k[4*(i)+6] = ff(ss[4]); \ - ss[4] ^= k[4*(i)+3]; \ - k[4*(i)+7] = ff(ss[4]); \ -} - -#define kd4(k,i) \ -{ \ - ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \ - ss[i % 4] ^= ss[4]; \ - ss[4] = ff(ss[4]); \ - k[4*(i)+4] = ss[4] ^= k[4*(i)]; \ - k[4*(i)+5] = ss[4] ^= 
k[4*(i)+1]; \ - k[4*(i)+6] = ss[4] ^= k[4*(i)+2]; \ - k[4*(i)+7] = ss[4] ^= k[4*(i)+3]; \ -} - -#define kdl4(k,i) \ -{ \ - ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \ - ss[i % 4] ^= ss[4]; \ - k[4*(i)+4] = (ss[0] ^= ss[1]) ^ ss[2] ^ ss[3]; \ - k[4*(i)+5] = ss[1] ^ ss[3]; \ - k[4*(i)+6] = ss[0]; \ - k[4*(i)+7] = ss[1]; \ -} - -#define kdf6(k,i) \ -{ \ - ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \ - k[6*(i)+ 6] = ff(ss[0]); \ - ss[1] ^= ss[0]; \ - k[6*(i)+ 7] = ff(ss[1]); \ - ss[2] ^= ss[1]; \ - k[6*(i)+ 8] = ff(ss[2]); \ - ss[3] ^= ss[2]; \ - k[6*(i)+ 9] = ff(ss[3]); \ - ss[4] ^= ss[3]; \ - k[6*(i)+10] = ff(ss[4]); \ - ss[5] ^= ss[4]; \ - k[6*(i)+11] = ff(ss[5]); \ -} - -#define kd6(k,i) \ -{ \ - ss[6] = ls_box(ss[5],3) ^ rcon_tab[i]; \ - ss[0] ^= ss[6]; ss[6] = ff(ss[6]); \ - k[6*(i)+ 6] = ss[6] ^= k[6*(i)]; \ - ss[1] ^= ss[0]; \ - k[6*(i)+ 7] = ss[6] ^= k[6*(i)+ 1]; \ - ss[2] ^= ss[1]; \ - k[6*(i)+ 8] = ss[6] ^= k[6*(i)+ 2]; \ - ss[3] ^= ss[2]; \ - k[6*(i)+ 9] = ss[6] ^= k[6*(i)+ 3]; \ - ss[4] ^= ss[3]; \ - k[6*(i)+10] = ss[6] ^= k[6*(i)+ 4]; \ - ss[5] ^= ss[4]; \ - k[6*(i)+11] = ss[6] ^= k[6*(i)+ 5]; \ -} - -#define kdl6(k,i) \ -{ \ - ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \ - k[6*(i)+ 6] = ss[0]; \ - ss[1] ^= ss[0]; \ - k[6*(i)+ 7] = ss[1]; \ - ss[2] ^= ss[1]; \ - k[6*(i)+ 8] = ss[2]; \ - ss[3] ^= ss[2]; \ - k[6*(i)+ 9] = ss[3]; \ -} - -#define kdf8(k,i) \ -{ \ - ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \ - k[8*(i)+ 8] = ff(ss[0]); \ - ss[1] ^= ss[0]; \ - k[8*(i)+ 9] = ff(ss[1]); \ - ss[2] ^= ss[1]; \ - k[8*(i)+10] = ff(ss[2]); \ - ss[3] ^= ss[2]; \ - k[8*(i)+11] = ff(ss[3]); \ - ss[4] ^= ls_box(ss[3],0); \ - k[8*(i)+12] = ff(ss[4]); \ - ss[5] ^= ss[4]; \ - k[8*(i)+13] = ff(ss[5]); \ - ss[6] ^= ss[5]; \ - k[8*(i)+14] = ff(ss[6]); \ - ss[7] ^= ss[6]; \ - k[8*(i)+15] = ff(ss[7]); \ -} - -#define kd8(k,i) \ -{ \ - u32 __g = ls_box(ss[7],3) ^ rcon_tab[i]; \ - ss[0] ^= __g; \ - __g = ff(__g); \ - k[8*(i)+ 8] = __g ^= k[8*(i)]; \ - ss[1] ^= ss[0]; \ - k[8*(i)+ 9] = __g ^= k[8*(i)+ 1]; \ - ss[2] ^= ss[1]; \ - k[8*(i)+10] = __g ^= k[8*(i)+ 2]; \ - ss[3] ^= ss[2]; \ - k[8*(i)+11] = __g ^= k[8*(i)+ 3]; \ - __g = ls_box(ss[3],0); \ - ss[4] ^= __g; \ - __g = ff(__g); \ - k[8*(i)+12] = __g ^= k[8*(i)+ 4]; \ - ss[5] ^= ss[4]; \ - k[8*(i)+13] = __g ^= k[8*(i)+ 5]; \ - ss[6] ^= ss[5]; \ - k[8*(i)+14] = __g ^= k[8*(i)+ 6]; \ - ss[7] ^= ss[6]; \ - k[8*(i)+15] = __g ^= k[8*(i)+ 7]; \ -} - -#define kdl8(k,i) \ -{ \ - ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \ - k[8*(i)+ 8] = ss[0]; \ - ss[1] ^= ss[0]; \ - k[8*(i)+ 9] = ss[1]; \ - ss[2] ^= ss[1]; \ - k[8*(i)+10] = ss[2]; \ - ss[3] ^= ss[2]; \ - k[8*(i)+11] = ss[3]; \ -} - -static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len) -{ - int i; - u32 ss[8]; - struct aes_ctx *ctx = crypto_tfm_ctx(tfm); - const __le32 *key = (const __le32 *)in_key; - u32 *flags = &tfm->crt_flags; - - /* encryption schedule */ - - ctx->ekey[0] = ss[0] = le32_to_cpu(key[0]); - ctx->ekey[1] = ss[1] = le32_to_cpu(key[1]); - ctx->ekey[2] = ss[2] = le32_to_cpu(key[2]); - ctx->ekey[3] = ss[3] = le32_to_cpu(key[3]); - - switch(key_len) { - case 16: - for (i = 0; i < 9; i++) - ke4(ctx->ekey, i); - kel4(ctx->ekey, 9); - ctx->rounds = 10; - break; - - case 24: - ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]); - ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]); - for (i = 0; i < 7; i++) - ke6(ctx->ekey, i); - kel6(ctx->ekey, 7); - ctx->rounds = 12; - break; - - case 32: - ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]); - ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]); - 
ctx->ekey[6] = ss[6] = le32_to_cpu(key[6]); - ctx->ekey[7] = ss[7] = le32_to_cpu(key[7]); - for (i = 0; i < 6; i++) - ke8(ctx->ekey, i); - kel8(ctx->ekey, 6); - ctx->rounds = 14; - break; - - default: - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; - } - - /* decryption schedule */ - - ctx->dkey[0] = ss[0] = le32_to_cpu(key[0]); - ctx->dkey[1] = ss[1] = le32_to_cpu(key[1]); - ctx->dkey[2] = ss[2] = le32_to_cpu(key[2]); - ctx->dkey[3] = ss[3] = le32_to_cpu(key[3]); - - switch (key_len) { - case 16: - kdf4(ctx->dkey, 0); - for (i = 1; i < 9; i++) - kd4(ctx->dkey, i); - kdl4(ctx->dkey, 9); - break; - - case 24: - ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4])); - ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5])); - kdf6(ctx->dkey, 0); - for (i = 1; i < 7; i++) - kd6(ctx->dkey, i); - kdl6(ctx->dkey, 7); - break; - - case 32: - ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4])); - ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5])); - ctx->dkey[6] = ff(ss[6] = le32_to_cpu(key[6])); - ctx->dkey[7] = ff(ss[7] = le32_to_cpu(key[7])); - kdf8(ctx->dkey, 0); - for (i = 1; i < 6; i++) - kd8(ctx->dkey, i); - kdl8(ctx->dkey, 6); - break; - } - return 0; -} - static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { aes_enc_blk(tfm, dst, src); @@ -479,14 +25,14 @@ static struct crypto_alg aes_alg = { .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct aes_ctx), + .cra_ctxsize = sizeof(struct crypto_aes_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = aes_set_key, + .cia_setkey = crypto_aes_set_key, .cia_encrypt = aes_encrypt, .cia_decrypt = aes_decrypt } @@ -495,7 +41,6 @@ static struct crypto_alg aes_alg = { static int __init aes_init(void) { - gen_tabs(); return crypto_register_alg(&aes_alg); } diff --git a/crypto/Kconfig b/crypto/Kconfig index d9666e3..cf115b1 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -328,6 +328,7 @@ config CRYPTO_AES_586 tristate "AES cipher algorithms (i586)" depends on (X86 || UML_X86) && !64BIT select CRYPTO_ALGAPI + select CRYPTO_AES help AES cipher algorithms (FIPS-197). AES uses the Rijndael algorithm. -- cgit v1.1 From cd7c3bfe54270f41ac52be6b725a7194d99175b4 Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Sat, 10 Nov 2007 19:29:33 +0800 Subject: [CRYPTO] geode: Add fallback for unsupported modes The Geode AES crypto engine supports only 128-bit keys. This patch adds a fallback for the other key sizes, which are required by the AES standard. 
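In outline, the fallback works like this condensed sketch (geode_setkey_sketch() is illustrative only; field names follow the driver below, and the error-flag propagation on setkey failure is left out for brevity):

static int geode_setkey_sketch(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		/* the engine handles this size itself */
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not a valid AES key size at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * 192/256-bit keys are delegated to a software cipher that was
	 * allocated with CRYPTO_ALG_NEED_FALLBACK, which keeps the
	 * driver from ever selecting itself as its own fallback.
	 */
	return crypto_cipher_setkey(op->fallback.cip, key, len);
}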
Signed-off-by: Sebastian Siewior Acked-by: Jordan Crouse Signed-off-by: Herbert Xu --- drivers/crypto/geode-aes.c | 243 +++++++++++++++++++++++++++++++++++++-------- drivers/crypto/geode-aes.h | 6 ++ 2 files changed, 206 insertions(+), 43 deletions(-) diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 181d42c..0ca92d4 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -113,18 +113,103 @@ geode_aes_crypt(struct geode_aes_op *op) /* CRYPTO-API Functions */ -static int -geode_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int len) +static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, + unsigned int len) { struct geode_aes_op *op = crypto_tfm_ctx(tfm); + unsigned int ret; + + op->keylen = len; + + if (len == AES_KEYSIZE_128) { + memcpy(op->key, key, len); + return 0; + } - if (len != AES_KEY_LENGTH) { + if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) { + /* not supported at all */ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } - memcpy(op->key, key, len); - return 0; + /* + * The requested key size is not supported by HW, do a fallback + */ + op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; + op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); + + ret = crypto_cipher_setkey(op->fallback.cip, key, len); + if (ret) { + tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; + tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK); + } + return ret; +} + +static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key, + unsigned int len) +{ + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + unsigned int ret; + + op->keylen = len; + + if (len == AES_KEYSIZE_128) { + memcpy(op->key, key, len); + return 0; + } + + if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) { + /* not supported at all */ + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + /* + * The requested key size is not supported by HW, do a fallback + */ + op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; + op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); + + ret = crypto_blkcipher_setkey(op->fallback.blk, key, len); + if (ret) { + tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; + tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK); + } + return ret; +} + +static int fallback_blk_dec(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + unsigned int ret; + struct crypto_blkcipher *tfm; + struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); + + tfm = desc->tfm; + desc->tfm = op->fallback.blk; + + ret = crypto_blkcipher_decrypt(desc, dst, src, nbytes); + + desc->tfm = tfm; + return ret; +} +static int fallback_blk_enc(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + unsigned int ret; + struct crypto_blkcipher *tfm; + struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); + + tfm = desc->tfm; + desc->tfm = op->fallback.blk; + + ret = crypto_blkcipher_encrypt(desc, dst, src, nbytes); + + desc->tfm = tfm; + return ret; } static void @@ -132,8 +217,10 @@ geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct geode_aes_op *op = crypto_tfm_ctx(tfm); - if ((out == NULL) || (in == NULL)) + if (unlikely(op->keylen != AES_KEYSIZE_128)) { + crypto_cipher_encrypt_one(op->fallback.cip, out, in); return; + } op->src = (void *) in; op->dst = (void *) out; @@ -151,8 +238,10 @@ geode_decrypt(struct crypto_tfm 
*tfm, u8 *out, const u8 *in) { struct geode_aes_op *op = crypto_tfm_ctx(tfm); - if ((out == NULL) || (in == NULL)) + if (unlikely(op->keylen != AES_KEYSIZE_128)) { + crypto_cipher_decrypt_one(op->fallback.cip, out, in); return; + } op->src = (void *) in; op->dst = (void *) out; @@ -164,24 +253,50 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) geode_aes_crypt(op); } +static int fallback_init_cip(struct crypto_tfm *tfm) +{ + const char *name = tfm->__crt_alg->cra_name; + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + + op->fallback.cip = crypto_alloc_cipher(name, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(op->fallback.cip)) { + printk(KERN_ERR "Error allocating fallback algo %s\n", name); + return PTR_ERR(op->fallback.blk); + } + + return 0; +} + +static void fallback_exit_cip(struct crypto_tfm *tfm) +{ + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + + crypto_free_cipher(op->fallback.cip); + op->fallback.cip = NULL; +} static struct crypto_alg geode_alg = { - .cra_name = "aes", - .cra_driver_name = "geode-aes-128", - .cra_priority = 300, - .cra_alignmask = 15, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_name = "aes", + .cra_driver_name = "geode-aes", + .cra_priority = 300, + .cra_alignmask = 15, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER | + CRYPTO_ALG_NEED_FALLBACK, + .cra_init = fallback_init_cip, + .cra_exit = fallback_exit_cip, .cra_blocksize = AES_MIN_BLOCK_SIZE, .cra_ctxsize = sizeof(struct geode_aes_op), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(geode_alg.cra_list), - .cra_u = { - .cipher = { - .cia_min_keysize = AES_KEY_LENGTH, - .cia_max_keysize = AES_KEY_LENGTH, - .cia_setkey = geode_setkey, - .cia_encrypt = geode_encrypt, - .cia_decrypt = geode_decrypt + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(geode_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = geode_setkey_cip, + .cia_encrypt = geode_encrypt, + .cia_decrypt = geode_decrypt } } }; @@ -195,6 +310,9 @@ geode_cbc_decrypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; int err, ret; + if (unlikely(op->keylen != AES_KEYSIZE_128)) + return fallback_blk_dec(desc, dst, src, nbytes); + blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); memcpy(op->iv, walk.iv, AES_IV_LENGTH); @@ -225,6 +343,9 @@ geode_cbc_encrypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; int err, ret; + if (unlikely(op->keylen != AES_KEYSIZE_128)) + return fallback_blk_enc(desc, dst, src, nbytes); + blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); memcpy(op->iv, walk.iv, AES_IV_LENGTH); @@ -245,22 +366,49 @@ geode_cbc_encrypt(struct blkcipher_desc *desc, return err; } +static int fallback_init_blk(struct crypto_tfm *tfm) +{ + const char *name = tfm->__crt_alg->cra_name; + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + + op->fallback.blk = crypto_alloc_blkcipher(name, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(op->fallback.blk)) { + printk(KERN_ERR "Error allocating fallback algo %s\n", name); + return PTR_ERR(op->fallback.blk); + } + + return 0; +} + +static void fallback_exit_blk(struct crypto_tfm *tfm) +{ + struct geode_aes_op *op = crypto_tfm_ctx(tfm); + + crypto_free_blkcipher(op->fallback.blk); + op->fallback.blk = NULL; +} + static struct crypto_alg geode_cbc_alg = { .cra_name = "cbc(aes)", - .cra_driver_name = "cbc-aes-geode-128", + .cra_driver_name = "cbc-aes-geode", .cra_priority = 
400, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_NEED_FALLBACK, + .cra_init = fallback_init_blk, + .cra_exit = fallback_exit_blk, .cra_blocksize = AES_MIN_BLOCK_SIZE, .cra_ctxsize = sizeof(struct geode_aes_op), .cra_alignmask = 15, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(geode_cbc_alg.cra_list), - .cra_u = { - .blkcipher = { - .min_keysize = AES_KEY_LENGTH, - .max_keysize = AES_KEY_LENGTH, - .setkey = geode_setkey, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(geode_cbc_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = geode_setkey_blk, .encrypt = geode_cbc_encrypt, .decrypt = geode_cbc_decrypt, .ivsize = AES_IV_LENGTH, @@ -277,6 +425,9 @@ geode_ecb_decrypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; int err, ret; + if (unlikely(op->keylen != AES_KEYSIZE_128)) + return fallback_blk_dec(desc, dst, src, nbytes); + blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); @@ -304,6 +455,9 @@ geode_ecb_encrypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; int err, ret; + if (unlikely(op->keylen != AES_KEYSIZE_128)) + return fallback_blk_enc(desc, dst, src, nbytes); + blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); @@ -323,21 +477,24 @@ geode_ecb_encrypt(struct blkcipher_desc *desc, } static struct crypto_alg geode_ecb_alg = { - .cra_name = "ecb(aes)", - .cra_driver_name = "ecb-aes-geode-128", + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-geode", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_NEED_FALLBACK, + .cra_init = fallback_init_blk, + .cra_exit = fallback_exit_blk, .cra_blocksize = AES_MIN_BLOCK_SIZE, .cra_ctxsize = sizeof(struct geode_aes_op), .cra_alignmask = 15, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(geode_ecb_alg.cra_list), - .cra_u = { - .blkcipher = { - .min_keysize = AES_KEY_LENGTH, - .max_keysize = AES_KEY_LENGTH, - .setkey = geode_setkey, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(geode_ecb_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = geode_setkey_blk, .encrypt = geode_ecb_encrypt, .decrypt = geode_ecb_decrypt, } @@ -367,7 +524,7 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) if ((ret = pci_enable_device(dev))) return ret; - if ((ret = pci_request_regions(dev, "geode-aes-128"))) + if ((ret = pci_request_regions(dev, "geode-aes"))) goto eenable; _iobase = pci_iomap(dev, 0, 0); diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h index 2f1d559..14cc763 100644 --- a/drivers/crypto/geode-aes.h +++ b/drivers/crypto/geode-aes.h @@ -66,6 +66,12 @@ struct geode_aes_op { u8 key[AES_KEY_LENGTH]; u8 iv[AES_IV_LENGTH]; + + union { + struct crypto_blkcipher *blk; + struct crypto_cipher *cip; + } fallback; + u32 keylen; }; #endif -- cgit v1.1 From cd12fb906d2591e80da9edcbd4794b9b916d7489 Mon Sep 17 00:00:00 2001 From: Jonathan Lynch Date: Sat, 10 Nov 2007 20:08:25 +0800 Subject: [CRYPTO] sha256-generic: Extend sha256_generic.c to support SHA-224 Resubmitting this patch which extends sha256_generic.c to support SHA-224 as described in FIPS 180-2 and RFC 3874. 
HMAC-SHA-224 as described in RFC4231 is then supported through the hmac interface. Patch includes test vectors for SHA-224 and HMAC-SHA-224. SHA-224 should be chosen as a hash algorithm when 112 bits of security strength are required. Patch generated against the 2.6.24-rc1 kernel and tested against 2.6.24-rc1-git14 which includes the fix for the scatter-gather implementation for HMAC. Signed-off-by: Jonathan Lynch Signed-off-by: Herbert Xu --- crypto/Kconfig | 5 +- crypto/sha256_generic.c | 72 +++++++++++++++++++++--- crypto/tcrypt.c | 22 +++++++- crypto/tcrypt.h | 142 ++++++++++++++++++++++++++++++++++++++++++++++++ include/crypto/sha.h | 12 ++++ 5 files changed, 241 insertions(+), 12 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index cf115b1..7758454b 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -91,7 +91,7 @@ config CRYPTO_SHA1 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). config CRYPTO_SHA256 - tristate "SHA256 digest algorithm" + tristate "SHA224 and SHA256 digest algorithm" select CRYPTO_ALGAPI help SHA256 secure hash standard (DFIPS 180-2). @@ -99,6 +99,9 @@ config CRYPTO_SHA256 This version of SHA implements a 256 bit hash with 128 bits of security against collision attacks. + This code also includes SHA-224, a 224 bit hash with 112 bits + of security against collision attacks. + config CRYPTO_SHA512 tristate "SHA384 and SHA512 digest algorithms" select CRYPTO_ALGAPI diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index fd3918b..3cc93fd 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -9,6 +9,7 @@ * Copyright (c) Jean-Luc Cooke * Copyright (c) Andrew McDonald * Copyright (c) 2002 James Morris + * SHA224 Support Copyright 2007 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -218,6 +219,22 @@ static void sha256_transform(u32 *state, const u8 *input) memset(W, 0, 64 * sizeof(u32)); } + +static void sha224_init(struct crypto_tfm *tfm) +{ + struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); + sctx->state[0] = SHA224_H0; + sctx->state[1] = SHA224_H1; + sctx->state[2] = SHA224_H2; + sctx->state[3] = SHA224_H3; + sctx->state[4] = SHA224_H4; + sctx->state[5] = SHA224_H5; + sctx->state[6] = SHA224_H6; + sctx->state[7] = SHA224_H7; + sctx->count[0] = 0; + sctx->count[1] = 0; +} + static void sha256_init(struct crypto_tfm *tfm) { struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); @@ -294,8 +311,17 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out) memset(sctx, 0, sizeof(*sctx)); } +static void sha224_final(struct crypto_tfm *tfm, u8 *hash) +{ + u8 D[SHA256_DIGEST_SIZE]; + + sha256_final(tfm, D); + + memcpy(hash, D, SHA224_DIGEST_SIZE); + memset(D, 0, SHA256_DIGEST_SIZE); +} -static struct crypto_alg alg = { +static struct crypto_alg sha256 = { .cra_name = "sha256", .cra_driver_name= "sha256-generic", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, @@ -303,28 +329,58 @@ static struct crypto_alg alg = { .cra_ctxsize = sizeof(struct sha256_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 3, - .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_list = LIST_HEAD_INIT(sha256.cra_list), .cra_u = { .digest = { .dia_digestsize = SHA256_DIGEST_SIZE, - .dia_init = sha256_init, - .dia_update = sha256_update, - .dia_final = sha256_final } } + .dia_init = sha256_init, + .dia_update = sha256_update, + .dia_final = sha256_final } } +}; + +static struct crypto_alg sha224 = { + .cra_name = "sha224", + .cra_driver_name = "sha224-generic", +
.cra_flags = CRYPTO_ALG_TYPE_DIGEST, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sha256_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_list = LIST_HEAD_INIT(sha224.cra_list), + .cra_u = { .digest = { + .dia_digestsize = SHA224_DIGEST_SIZE, + .dia_init = sha224_init, + .dia_update = sha256_update, + .dia_final = sha224_final } } }; static int __init init(void) { - return crypto_register_alg(&alg); + int ret = 0; + + ret = crypto_register_alg(&sha224); + + if (ret < 0) + return ret; + + ret = crypto_register_alg(&sha256); + + if (ret < 0) + crypto_unregister_alg(&sha224); + + return ret; } static void __exit fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_alg(&sha224); + crypto_unregister_alg(&sha256); } module_init(init); module_exit(fini); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm"); +MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm"); +MODULE_ALIAS("sha224"); MODULE_ALIAS("sha256"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index aa84bc4..4d364cc 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -12,6 +12,7 @@ * Software Foundation; either version 2 of the License, or (at your option) * any later version. * + * 2007-11-06 Added SHA-224 and SHA-224-HMAC tests * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests * 2004-08-09 Added cipher speed tests (Reyk Floeter ) * 2003-09-14 Rewritten by Kartikey Mahendra Bhatt @@ -74,8 +75,9 @@ static char *xbuf; static char *tvmem; static char *check[] = { - "des", "md5", "des3_ede", "rot13", "sha1", "sha256", "blowfish", - "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", + "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", + "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", + "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", "camellia", "seed", NULL @@ -918,6 +920,8 @@ static void do_test(void) test_hash("md4", md4_tv_template, MD4_TEST_VECTORS); + test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS); + test_hash("sha256", sha256_tv_template, SHA256_TEST_VECTORS); //BLOWFISH @@ -1067,6 +1071,8 @@ static void do_test(void) HMAC_MD5_TEST_VECTORS); test_hash("hmac(sha1)", hmac_sha1_tv_template, HMAC_SHA1_TEST_VECTORS); + test_hash("hmac(sha224)", hmac_sha224_tv_template, + HMAC_SHA224_TEST_VECTORS); test_hash("hmac(sha256)", hmac_sha256_tv_template, HMAC_SHA256_TEST_VECTORS); test_hash("hmac(sha384)", hmac_sha384_tv_template, @@ -1299,6 +1305,9 @@ static void do_test(void) camellia_cbc_dec_tv_template, CAMELLIA_CBC_DEC_TEST_VECTORS); break; + case 33: + test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS); + break; case 100: test_hash("hmac(md5)", hmac_md5_tv_template, @@ -1324,7 +1333,10 @@ static void do_test(void) test_hash("hmac(sha512)", hmac_sha512_tv_template, HMAC_SHA512_TEST_VECTORS); break; - + case 105: + test_hash("hmac(sha224)", hmac_sha224_tv_template, + HMAC_SHA224_TEST_VECTORS); + break; case 200: test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, @@ -1459,6 +1471,10 @@ static void do_test(void) test_hash_speed("tgr192", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; + case 313: + test_hash_speed("sha224", sec, generic_hash_speed_template); + if (mode > 300 && mode < 400) break; + case 399: break; diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index f7f9b23..b91585e 100644 --- a/crypto/tcrypt.h +++ 
b/crypto/tcrypt.h @@ -173,6 +173,33 @@ static struct hash_testvec sha1_tv_template[] = { } }; + +/* + * SHA224 test vectors from from FIPS PUB 180-2 + */ +#define SHA224_TEST_VECTORS 2 + +static struct hash_testvec sha224_tv_template[] = { + { + .plaintext = "abc", + .psize = 3, + .digest = { 0x23, 0x09, 0x7D, 0x22, 0x34, 0x05, 0xD8, 0x22, + 0x86, 0x42, 0xA4, 0x77, 0xBD, 0xA2, 0x55, 0xB3, + 0x2A, 0xAD, 0xBC, 0xE4, 0xBD, 0xA0, 0xB3, 0xF7, + 0xE3, 0x6C, 0x9D, 0xA7}, + }, { + .plaintext = + "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", + .psize = 56, + .digest = { 0x75, 0x38, 0x8B, 0x16, 0x51, 0x27, 0x76, 0xCC, + 0x5D, 0xBA, 0x5D, 0xA1, 0xFD, 0x89, 0x01, 0x50, + 0xB0, 0xC6, 0x45, 0x5C, 0xB4, 0xF5, 0x8B, 0x19, + 0x52, 0x52, 0x25, 0x25 }, + .np = 2, + .tap = { 28, 28 } + } +}; + /* * SHA256 test vectors from from NIST */ @@ -817,6 +844,121 @@ static struct hash_testvec hmac_sha1_tv_template[] = { }, }; + +/* + * SHA224 HMAC test vectors from RFC4231 + */ +#define HMAC_SHA224_TEST_VECTORS 4 + +static struct hash_testvec hmac_sha224_tv_template[] = { + { + .key = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b }, + .ksize = 20, + /* ("Hi There") */ + .plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65 }, + .psize = 8, + .digest = { 0x89, 0x6f, 0xb1, 0x12, 0x8a, 0xbb, 0xdf, 0x19, + 0x68, 0x32, 0x10, 0x7c, 0xd4, 0x9d, 0xf3, 0x3f, + 0x47, 0xb4, 0xb1, 0x16, 0x99, 0x12, 0xba, 0x4f, + 0x53, 0x68, 0x4b, 0x22}, + }, { + .key = { 0x4a, 0x65, 0x66, 0x65 }, /* ("Jefe") */ + .ksize = 4, + /* ("what do ya want for nothing?") */ + .plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20, + 0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20, + 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68, + 0x69, 0x6e, 0x67, 0x3f }, + .psize = 28, + .digest = { 0xa3, 0x0e, 0x01, 0x09, 0x8b, 0xc6, 0xdb, 0xbf, + 0x45, 0x69, 0x0f, 0x3a, 0x7e, 0x9e, 0x6d, 0x0f, + 0x8b, 0xbe, 0xa2, 0xa3, 0x9e, 0x61, 0x48, 0x00, + 0x8f, 0xd0, 0x5e, 0x44 }, + .np = 4, + .tap = { 7, 7, 7, 7 } + }, { + .key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa }, + .ksize = 131, + /* ("Test Using Larger Than Block-Size Key - Hash Key First") */ + .plaintext = { 0x54, 0x65, 0x73, 0x74, 0x20, 0x55, 0x73, 0x69, + 0x6e, 0x67, 0x20, 0x4c, 0x61, 0x72, 0x67, 0x65, + 0x72, 0x20, 0x54, 0x68, 0x61, 0x6e, 0x20, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x2d, 0x53, 0x69, 0x7a, + 0x65, 0x20, 0x4b, 0x65, 0x79, 0x20, 0x2d, 0x20, + 0x48, 0x61, 0x73, 0x68, 0x20, 0x4b, 0x65, 0x79, + 0x20, 0x46, 0x69, 0x72, 0x73, 0x74 }, + .psize = 54, + .digest = { 0x95, 0xe9, 0xa0, 0xdb, 0x96, 0x20, 0x95, 0xad, + 0xae, 0xbe, 0x9b, 0x2d, 0x6f, 0x0d, 0xbc, 0xe2, + 0xd4, 0x99, 0xf1, 0x12, 0xf2, 0xd2, 0xb7, 0x27, + 0x3f, 0xa6, 0x87, 0x0e }, + }, 
{ + .key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaa }, + .ksize = 131, + /* ("This is a test using a larger than block-size key and a") + (" larger than block-size data. The key needs to be") + (" hashed before being used by the HMAC algorithm.") */ + .plaintext = { 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, + 0x61, 0x20, 0x74, 0x65, 0x73, 0x74, 0x20, 0x75, + 0x73, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x6c, + 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, 0x68, + 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x6b, 0x65, + 0x79, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x20, + 0x6c, 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, + 0x68, 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x20, 0x54, 0x68, 0x65, + 0x20, 0x6b, 0x65, 0x79, 0x20, 0x6e, 0x65, 0x65, + 0x64, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65, + 0x20, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x20, + 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x20, 0x62, + 0x65, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x65, + 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x48, 0x4d, 0x41, 0x43, 0x20, 0x61, 0x6c, + 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x2e }, + .psize = 152, + .digest = { 0x3a, 0x85, 0x41, 0x66, 0xac, 0x5d, 0x9f, 0x02, + 0x3f, 0x54, 0xd5, 0x17, 0xd0, 0xb3, 0x9d, 0xbd, + 0x94, 0x67, 0x70, 0xdb, 0x9c, 0x2b, 0x95, 0xc9, + 0xf6, 0xf5, 0x65, 0xd1 }, + }, +}; + /* * HMAC-SHA256 test vectors from * draft-ietf-ipsec-ciph-sha-256-01.txt diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 0686e1f..c0ccc2b 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -8,6 +8,9 @@ #define SHA1_DIGEST_SIZE 20 #define SHA1_BLOCK_SIZE 64 +#define SHA224_DIGEST_SIZE 28 +#define SHA224_BLOCK_SIZE 64 + #define SHA256_DIGEST_SIZE 32 #define SHA256_BLOCK_SIZE 64 @@ -23,6 +26,15 @@ #define SHA1_H3 0x10325476UL #define SHA1_H4 0xc3d2e1f0UL +#define SHA224_H0 0xc1059ed8UL +#define SHA224_H1 0x367cd507UL +#define SHA224_H2 0x3070dd17UL +#define SHA224_H3 0xf70e5939UL +#define SHA224_H4 0xffc00b31UL +#define SHA224_H5 0x68581511UL +#define SHA224_H6 0x64f98fa7UL +#define SHA224_H7 0xbefa4fa4UL + #define SHA256_H0 0x6a09e667UL #define SHA256_H1 0xbb67ae85UL #define SHA256_H2 0x3c6ef372UL -- cgit v1.1 From a1e6ef2f1e01f2aa9ed930e1089fc85dc745bf7a Mon Sep 17 00:00:00 2001 From: Evgeniy Polyakov Date: Sat, 10 Nov 2007 20:24:18 +0800 Subject: [CRYPTO] hifn: Schedule callback invocation to tasklet. This patch forces HIFN driver to invoke crypto request callbacks from tasklet (softirq context) instead of hardirq context, since network stack expects it to be called from bottom halves. It is done by simply scheduling callback invocation via dedicated tasklet. 
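[As a minimal sketch of the pattern this patch adopts (not the hifn code itself; the device structure and helper names below are hypothetical), the hardirq handler only acknowledges the hardware and schedules a tasklet, and the crypto completion callbacks then run in softirq context:

    #include <linux/interrupt.h>

    struct my_dev {
            struct tasklet_struct tasklet;
    };

    static void my_complete_requests(struct my_dev *dev);   /* hypothetical */

    static void my_tasklet_fn(unsigned long data)
    {
            struct my_dev *dev = (struct my_dev *)data;

            /* softirq context: safe place to invoke completion callbacks */
            my_complete_requests(dev);
    }

    static irqreturn_t my_irq(int irq, void *data)
    {
            struct my_dev *dev = data;

            /* ack the hardware, then defer the heavy work */
            tasklet_schedule(&dev->tasklet);
            return IRQ_HANDLED;
    }

The tasklet is set up once at probe time with tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev) and torn down with tasklet_kill() on removal, exactly as the diff below does for the hifn device.]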
Workqueue solution was dropped because of too slow rescheduling performance (7 times slower than tasklet; for more details one can check this link: http://tservice.net.ru/~s0mbre/blog/devel/other/2007_11_09.html). Driver passed all AES and DES tests in the tcrypt.c module. Signed-off-by: Evgeniy Polyakov Signed-off-by: Herbert Xu --- drivers/crypto/hifn_795x.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 391c20a..7b7c854 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -426,6 +427,8 @@ struct hifn_device u8 snum; + struct tasklet_struct tasklet; + struct crypto_queue queue; struct list_head alg_list; }; @@ -1879,7 +1882,7 @@ static irqreturn_t hifn_interrupt(int irq, void *data) hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); } - hifn_check_for_completion(dev, 0); + tasklet_schedule(&dev->tasklet); hifn_clear_rings(dev); return IRQ_HANDLED; @@ -2408,6 +2411,19 @@ err_out_exit: return err; } +static void hifn_tasklet_callback(unsigned long data) +{ + struct hifn_device *dev = (struct hifn_device *)data; + + /* + * It is ok to call this without the lock being held, + * although it modifies some parameters used in parallel + * (like dev->success), but they are used in process + * context or the update is atomic (like setting dev->sa[i] to NULL). + */ + hifn_check_for_completion(dev, 0); +} + static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err, i; @@ -2489,6 +2505,8 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, dev); + tasklet_init(&dev->tasklet, hifn_tasklet_callback, (unsigned long)dev); + crypto_init_queue(&dev->queue, 1); err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev); @@ -2524,6 +2542,7 @@ err_out_stop_device: hifn_stop_device(dev); err_out_free_irq: free_irq(dev->irq, dev->name); + tasklet_kill(&dev->tasklet); err_out_free_desc: pci_free_consistent(pdev, sizeof(struct hifn_dma), dev->desc_virt, dev->desc_dma); @@ -2563,6 +2582,7 @@ static void hifn_remove(struct pci_dev *pdev) hifn_stop_device(dev); free_irq(dev->irq, dev->name); + tasklet_kill(&dev->tasklet); hifn_flush(dev); -- cgit v1.1 From 2707b937f322ba6b437f3dd1de05b9bb9756d803 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Mon, 12 Nov 2007 21:56:38 +0800 Subject: [CRYPTO] hifn: Make Kconfig option depend on PCI The HIFN driver is currently selectable on s390 but won't compile. Since it looks like HIFN needs PCI, make the Kconfig option dependent on PCI, which is not available on s390. Signed-off-by: Jan Glauber Acked-by: Evgeniy Polyakov Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index c2de135..d848e1b 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -88,6 +88,7 @@ config CRYPTO_DEV_HIFN_795X select CRYPTO_DES select CRYPTO_ALGAPI select CRYPTO_ABLKCIPHER + depends on PCI help This option allows you to have support for HIFN 795x crypto adapters.
-- cgit v1.1 From 102d49d3d0f0f471b338b6805001fc3ca7bf663b Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 13 Nov 2007 21:55:28 +0800 Subject: [CRYPTO] hifn: Add missing includes alpha: drivers/crypto/hifn_795x.c: In function 'ablkcipher_walk_init': drivers/crypto/hifn_795x.c:1231: error: implicit declaration of function 'sg_init_table' drivers/crypto/hifn_795x.c:1243: error: implicit declaration of function 'sg_set_page' drivers/crypto/hifn_795x.c: In function 'ablkcipher_walk_exit': drivers/crypto/hifn_795x.c:1257: error: implicit declaration of function 'sg_page' drivers/crypto/hifn_795x.c:1257: warning: passing argument 1 of '__free_pages' makes pointer from integer without a cast drivers/crypto/hifn_795x.c: In function 'ablkcipher_add': drivers/crypto/hifn_795x.c:1278: warning: passing argument 1 of 'kmap_atomic' makes pointer from integer without a cast drivers/crypto/hifn_795x.c: In function 'ablkcipher_walk': drivers/crypto/hifn_795x.c:1336: warning: passing argument 1 of 'kmap_atomic' makes pointer from integer without a cast drivers/crypto/hifn_795x.c: In function 'hifn_setup_session': drivers/crypto/hifn_795x.c:1465: warning: assignment makes pointer from integer without a cast drivers/crypto/hifn_795x.c:1469: warning: assignment makes pointer from integer without a cast drivers/crypto/hifn_795x.c:1472: warning: assignment makes pointer from integer without a cast drivers/crypto/hifn_795x.c: In function 'ablkcipher_get': drivers/crypto/hifn_795x.c:1593: warning: passing argument 1 of 'kmap_atomic' makes pointer from integer without a cast {standard input}: Assembler messages: {standard input}:7: Warning: setting incorrect section attributes for .got drivers/crypto/hifn_795x.c: In function 'hifn_process_ready': drivers/crypto/hifn_795x.c:1653: warning: passing argument 1 of 'kmap_atomic' makes pointer from integer without a cast drivers/crypto/hifn_795x.c: In function 'hifn_probe': drivers/crypto/hifn_795x.c:2438: error: 'DMA_32BIT_MASK' undeclared (first use in this function) drivers/crypto/hifn_795x.c:2438: error: (Each undeclared identifier is reported only once drivers/crypto/hifn_795x.c:2438: error: for each function it appears in.) drivers/crypto/hifn_795x.c:2443: warning: format '%d' expects type 'int', but argument 4 has type 'long int' drivers/crypto/hifn_795x.c:2443: warning: format '%d' expects type 'int', but argument 4 has type 'long int' Signed-off-by: Andrew Morton Signed-off-by: Herbert Xu --- drivers/crypto/hifn_795x.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 7b7c854..fec32aa 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include #include #include -- cgit v1.1 From 468577abe37ff7b453a9ac613e0ea155349203ae Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 Nov 2007 12:08:45 +0800 Subject: [CRYPTO] scatterwalk: Use generic scatterlist chaining This patch converts the crypto scatterwalk code to use the generic scatterlist chaining rather than the version specific to crypto.
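[For reference, a minimal sketch of the generic chaining API the crypto code is being converted to (buffer names are made up for illustration, and more_sg is assumed to be a properly terminated table): sg_chain() turns the last slot of one table into a link to another table, and sg_next() follows that link transparently.

    #include <linux/kernel.h>
    #include <linux/scatterlist.h>

    static void chain_example(void *buf0, void *buf1, unsigned int len,
                              struct scatterlist *more_sg)
    {
            struct scatterlist head[3];
            struct scatterlist *sg;

            sg_init_table(head, 3);
            sg_set_buf(&head[0], buf0, len);
            sg_set_buf(&head[1], buf1, len);
            /* slot 2 becomes a chain entry pointing at more_sg */
            sg_chain(head, 3, more_sg);

            /* the walk crosses the chain boundary without special casing */
            for (sg = head; sg; sg = sg_next(sg))
                    pr_info("segment of %u bytes\n", sg->length);
    }

This is the same idiom hmac.c uses below to splice the caller's scatterlist in after the ipad buffer.]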
Signed-off-by: Herbert Xu --- crypto/digest.c | 3 +-- crypto/hmac.c | 2 +- crypto/scatterwalk.c | 2 +- crypto/scatterwalk.h | 5 ----- 4 files changed, 3 insertions(+), 9 deletions(-) diff --git a/crypto/digest.c b/crypto/digest.c index 8871dec..d3e827a 100644 --- a/crypto/digest.c +++ b/crypto/digest.c @@ -21,7 +21,6 @@ #include #include "internal.h" -#include "scatterwalk.h" static int init(struct hash_desc *desc) { @@ -77,7 +76,7 @@ static int update2(struct hash_desc *desc, if (!nbytes) break; - sg = scatterwalk_sg_next(sg); + sg = sg_next(sg); } return 0; diff --git a/crypto/hmac.c b/crypto/hmac.c index 0f05be7..34c3706 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -160,7 +160,7 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg, sg_init_table(sg1, 2); sg_set_buf(sg1, ipad, bs); - sg_set_page(&sg1[1], (void *) sg, 0, 0); + sg_chain(sg1, 2, sg); sg_init_table(sg2, 1); sg_set_buf(sg2, opad, bs + ds); diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index b9bbda0..206c39a 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -62,7 +62,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, walk->offset += PAGE_SIZE - 1; walk->offset &= PAGE_MASK; if (walk->offset >= walk->sg->offset + walk->sg->length) - scatterwalk_start(walk, scatterwalk_sg_next(walk->sg)); + scatterwalk_start(walk, sg_next(walk->sg)); } } diff --git a/crypto/scatterwalk.h b/crypto/scatterwalk.h index 87ed681..fd5517d 100644 --- a/crypto/scatterwalk.h +++ b/crypto/scatterwalk.h @@ -20,11 +20,6 @@ #include "internal.h" -static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) -{ - return (++sg)->length ? sg : (void *) sg_page(sg); -} - static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in, struct scatter_walk *walk_out) { -- cgit v1.1 From 86f578de5ba6ea11ead9284d9f036fee01ba5893 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 Nov 2007 19:00:06 +0800 Subject: [CRYPTO] doc: Update api-intro.txt This patch updates the list of transforms we support and clarifies that the Block Ciphers interface in fact supports all ciphers including stream ciphers. It also removes the obsolete Configuration Notes section and adds the linux-crypto mailing list as the primary bug reporting address. Finally it documents the fact that setkey should only be called from user context. Signed-off-by: Herbert Xu --- Documentation/crypto/api-intro.txt | 41 +++++++++++++++----------------------- 1 file changed, 16 insertions(+), 25 deletions(-) diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt index a2ac6d2..8b49302 100644 --- a/Documentation/crypto/api-intro.txt +++ b/Documentation/crypto/api-intro.txt @@ -33,9 +33,16 @@ The idea is to make the user interface and algorithm registration API very simple, while hiding the core logic from both. Many good ideas from existing APIs such as Cryptoapi and Nettle have been adapted for this. -The API currently supports three types of transforms: Ciphers, Digests and -Compressors. The compression algorithms especially seem to be performing -very well so far. +The API currently supports five main types of transforms: AEAD (Authenticated +Encryption with Associated Data), Block Ciphers, Ciphers, Compressors and +Hashes. + +Please note that Block Ciphers is somewhat of a misnomer. It is in fact +meant to support all ciphers including stream ciphers. 
The difference +between Block Ciphers and Ciphers is that the latter operates on exactly +one block while the former can operate on an arbitrary amount of data, +subject to block size requirements (i.e., non-stream ciphers can only +process multiples of blocks). Support for hardware crypto devices via an asynchronous interface is under development. @@ -69,29 +76,12 @@ Here's an example of how to use the API: Many real examples are available in the regression test module (tcrypt.c). -CONFIGURATION NOTES - -As Triple DES is part of the DES module, for those using modular builds, -add the following line to /etc/modprobe.conf: - - alias des3_ede des - -The Null algorithms reside in the crypto_null module, so these lines -should also be added: - - alias cipher_null crypto_null - alias digest_null crypto_null - alias compress_null crypto_null - -The SHA384 algorithm shares code within the SHA512 module, so you'll -also need: - alias sha384 sha512 - - DEVELOPER NOTES Transforms may only be allocated in user context, and cryptographic -methods may only be called from softirq and user contexts. +methods may only be called from softirq and user contexts. For +transforms with a setkey method it too should only be called from +user context. When using the API for ciphers, performance will be optimal if each scatterlist contains data which is a multiple of the cipher's block @@ -130,8 +120,9 @@ might already be working on. BUGS Send bug reports to: -Herbert Xu -Cc: David S. Miller +linux-crypto@vger.kernel.org +Cc: Herbert Xu , + David S. Miller FURTHER INFORMATION -- cgit v1.1 From 332f8840f7095d294f9bb066b175a100bcde214c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 15 Nov 2007 22:36:07 +0800 Subject: [CRYPTO] ablkcipher: Add distinct ABLKCIPHER type Up until now we have ablkcipher algorithms have been identified as type BLKCIPHER with the ASYNC bit set. This is suboptimal because ablkcipher refers to two things. On the one hand it refers to the top-level ablkcipher interface with requests. On the other hand it refers to and algorithm type underneath. As it is you cannot request a synchronous block cipher algorithm with the ablkcipher interface on top. This is a problem because we want to be able to eventually phase out the blkcipher top-level interface. This patch fixes this by making ABLKCIPHER its own type, just as we have distinct types for HASH and DIGEST. The type it associated with the algorithm implementation only. Which top-level interface is used for synchronous block ciphers is then determined by the mask that's used. If it's a specific mask then the old blkcipher interface is given, otherwise we go with the new ablkcipher interface. 
Signed-off-by: Herbert Xu --- crypto/authenc.c | 2 +- crypto/blkcipher.c | 9 +++------ crypto/cryptd.c | 4 ++-- drivers/crypto/hifn_795x.c | 2 +- include/crypto/algapi.h | 4 ++-- include/linux/crypto.h | 18 ++++++++++-------- 6 files changed, 19 insertions(+), 20 deletions(-) diff --git a/crypto/authenc.c b/crypto/authenc.c index 126a529..bc4e608 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -292,7 +292,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) goto out_put_auth; enc = crypto_attr_alg(tb[3], CRYPTO_ALG_TYPE_BLKCIPHER, - CRYPTO_ALG_TYPE_MASK); + CRYPTO_ALG_TYPE_BLKCIPHER_MASK); inst = ERR_PTR(PTR_ERR(enc)); if (IS_ERR(enc)) goto out_put_auth; diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index f6c67f9..180d914 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -433,9 +433,8 @@ static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type, struct blkcipher_alg *cipher = &alg->cra_blkcipher; unsigned int len = alg->cra_ctxsize; - type ^= CRYPTO_ALG_ASYNC; - mask &= CRYPTO_ALG_ASYNC; - if ((type & mask) && cipher->ivsize) { + if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK && + cipher->ivsize) { len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1); len += cipher->ivsize; } @@ -482,9 +481,7 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) if (alg->ivsize > PAGE_SIZE / 8) return -EINVAL; - type ^= CRYPTO_ALG_ASYNC; - mask &= CRYPTO_ALG_ASYNC; - if (type & mask) + if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK) return crypto_init_blkcipher_ops_sync(tfm); else return crypto_init_blkcipher_ops_async(tfm); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 8bf2da8..1a5c45b 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -228,7 +228,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher( struct crypto_alg *alg; alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); @@ -236,7 +236,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher( if (IS_ERR(inst)) goto out_put_alg; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC; + inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; inst->alg.cra_type = &crypto_ablkcipher_type; inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize; diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index fec32aa..bf817d4 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -2361,7 +2361,7 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t) snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", t->drv_name); alg->alg.cra_priority = 300; - alg->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC; + alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; alg->alg.cra_blocksize = t->bsize; alg->alg.cra_ctxsize = sizeof(struct hifn_context); alg->alg.cra_alignmask = 15; diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index b9b05d3..88619f9 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -191,7 +191,7 @@ static inline struct crypto_ablkcipher *crypto_spawn_ablkcipher( struct crypto_spawn *spawn) { u32 type = CRYPTO_ALG_TYPE_BLKCIPHER; - u32 mask = CRYPTO_ALG_TYPE_MASK; + u32 mask = CRYPTO_ALG_TYPE_BLKCIPHER_MASK; return __crypto_ablkcipher_cast(crypto_spawn_tfm(spawn, type, mask)); } @@ -200,7 +200,7 @@ static inline struct crypto_blkcipher 
*crypto_spawn_blkcipher( struct crypto_spawn *spawn) { u32 type = CRYPTO_ALG_TYPE_BLKCIPHER; - u32 mask = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC; + u32 mask = CRYPTO_ALG_TYPE_MASK; return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask)); } diff --git a/include/linux/crypto.h b/include/linux/crypto.h index f3110eb..f56ae87 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -33,10 +33,12 @@ #define CRYPTO_ALG_TYPE_DIGEST 0x00000002 #define CRYPTO_ALG_TYPE_HASH 0x00000003 #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 -#define CRYPTO_ALG_TYPE_COMPRESS 0x00000005 -#define CRYPTO_ALG_TYPE_AEAD 0x00000006 +#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 +#define CRYPTO_ALG_TYPE_COMPRESS 0x00000008 +#define CRYPTO_ALG_TYPE_AEAD 0x00000009 #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e +#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c #define CRYPTO_ALG_LARVAL 0x00000010 #define CRYPTO_ALG_DEAD 0x00000020 @@ -530,7 +532,7 @@ static inline struct crypto_ablkcipher *crypto_alloc_ablkcipher( { type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; + mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; return __crypto_ablkcipher_cast( crypto_alloc_base(alg_name, type, mask)); @@ -552,7 +554,7 @@ static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, { type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= CRYPTO_ALG_TYPE_MASK; + mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; return crypto_has_alg(alg_name, type, mask); } @@ -841,9 +843,9 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast( static inline struct crypto_blkcipher *crypto_alloc_blkcipher( const char *alg_name, u32 type, u32 mask) { - type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC; + mask |= CRYPTO_ALG_TYPE_MASK; return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); } @@ -861,9 +863,9 @@ static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) { - type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC; + mask |= CRYPTO_ALG_TYPE_MASK; return crypto_has_alg(alg_name, type, mask); } -- cgit v1.1 From 2407d60872dd2a95404c6048f775f3b64d438f4b Mon Sep 17 00:00:00 2001 From: Tan Swee Heng Date: Fri, 23 Nov 2007 19:45:00 +0800 Subject: [CRYPTO] salsa20: Salsa20 stream cipher This patch implements the Salsa20 stream cipher using the blkcipher interface. The core cipher code comes from Daniel Bernstein's submission to eSTREAM: http://www.ecrypt.eu.org/stream/svn/viewcvs.cgi/ecrypt/trunk/submissions/salsa20/full/ref/ The test vectors come from: http://www.ecrypt.eu.org/stream/svn/viewcvs.cgi/ecrypt/trunk/submissions/salsa20/full/ It has been tested successfully with "modprobe tcrypt mode=34" on a UML instance.
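[For orientation, a hedged sketch of driving this cipher through the blkcipher interface of that era (placeholder key size and buffers, error handling trimmed):

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static int salsa20_example(u8 *buf, unsigned int len,
                               const u8 *key, u8 *iv /* 8 bytes */)
    {
            struct crypto_blkcipher *tfm;
            struct blkcipher_desc desc;
            struct scatterlist sg;
            int err;

            tfm = crypto_alloc_blkcipher("salsa20", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_blkcipher_setkey(tfm, key, 16);    /* or 32 */
            if (err)
                    goto out;

            sg_init_one(&sg, buf, len);
            desc.tfm = tfm;
            desc.flags = 0;
            desc.info = iv;         /* becomes walk.iv in the driver */

            /* in place; decryption is the same keystream XOR */
            err = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
    out:
            crypto_free_blkcipher(tfm);
            return err;
    }]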
Signed-off-by: Tan Swee Heng Signed-off-by: Herbert Xu --- crypto/Kconfig | 12 +++ crypto/Makefile | 1 + crypto/salsa20_generic.c | 243 +++++++++++++++++++++++++++++++++++++++++++++++ crypto/tcrypt.c | 8 +- crypto/tcrypt.h | 161 +++++++++++++++++++++++++++++++ 5 files changed, 424 insertions(+), 1 deletion(-) create mode 100644 crypto/salsa20_generic.c diff --git a/crypto/Kconfig b/crypto/Kconfig index 7758454b..8d6cac9 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -454,6 +454,18 @@ config CRYPTO_SEED See also: +config CRYPTO_SALSA20 + tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)" + depends on EXPERIMENTAL + select CRYPTO_BLKCIPHER + help + Salsa20 stream cipher algorithm. + + Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT + Stream Cipher Project. See + + The Salsa20 stream cipher algorithm is designed by Daniel J. + Bernstein . See config CRYPTO_DEFLATE tristate "Deflate compression algorithm" diff --git a/crypto/Makefile b/crypto/Makefile index 1f87db2..9b1476e 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_SEED) += seed.o +obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c new file mode 100644 index 0000000..b49328a --- /dev/null +++ b/crypto/salsa20_generic.c @@ -0,0 +1,243 @@ +/* + * Salsa20: Salsa20 stream cipher algorithm + * + * Copyright (c) 2007 Tan Swee Heng + * + * Derived from: + * - salsa20.c: Public domain C code by Daniel J. Bernstein + * + * Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream + * Cipher Project. It is designed by Daniel J. Bernstein . + * More information about eSTREAM and Salsa20 can be found here: + * http://www.ecrypt.eu.org/stream/ + * http://cr.yp.to/snuffle.html + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#define SALSA20_IV_SIZE 8U +#define SALSA20_MIN_KEY_SIZE 16U +#define SALSA20_MAX_KEY_SIZE 32U + +/* + * Start of code taken from D. J. Bernstein's reference implementation. + * With some modifications and optimizations made to suit our needs. + */ + +/* +salsa20-ref.c version 20051118 +D. J. Bernstein +Public domain. 
+*/ + +#define ROTATE(v,n) (((v) << (n)) | ((v) >> (32 - (n)))) +#define XOR(v,w) ((v) ^ (w)) +#define PLUS(v,w) (((v) + (w))) +#define PLUSONE(v) (PLUS((v),1)) +#define U32TO8_LITTLE(p, v) \ + { (p)[0] = (v >> 0) & 0xff; (p)[1] = (v >> 8) & 0xff; \ + (p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; } +#define U8TO32_LITTLE(p) \ + (((u32)((p)[0]) ) | ((u32)((p)[1]) << 8) | \ + ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24) ) + +struct salsa20_ctx +{ + u32 input[16]; +}; + +static void salsa20_wordtobyte(u8 output[64], const u32 input[16]) +{ + u32 x[16]; + int i; + + memcpy(x, input, sizeof(x)); + for (i = 20; i > 0; i -= 2) { + x[ 4] = XOR(x[ 4],ROTATE(PLUS(x[ 0],x[12]), 7)); + x[ 8] = XOR(x[ 8],ROTATE(PLUS(x[ 4],x[ 0]), 9)); + x[12] = XOR(x[12],ROTATE(PLUS(x[ 8],x[ 4]),13)); + x[ 0] = XOR(x[ 0],ROTATE(PLUS(x[12],x[ 8]),18)); + x[ 9] = XOR(x[ 9],ROTATE(PLUS(x[ 5],x[ 1]), 7)); + x[13] = XOR(x[13],ROTATE(PLUS(x[ 9],x[ 5]), 9)); + x[ 1] = XOR(x[ 1],ROTATE(PLUS(x[13],x[ 9]),13)); + x[ 5] = XOR(x[ 5],ROTATE(PLUS(x[ 1],x[13]),18)); + x[14] = XOR(x[14],ROTATE(PLUS(x[10],x[ 6]), 7)); + x[ 2] = XOR(x[ 2],ROTATE(PLUS(x[14],x[10]), 9)); + x[ 6] = XOR(x[ 6],ROTATE(PLUS(x[ 2],x[14]),13)); + x[10] = XOR(x[10],ROTATE(PLUS(x[ 6],x[ 2]),18)); + x[ 3] = XOR(x[ 3],ROTATE(PLUS(x[15],x[11]), 7)); + x[ 7] = XOR(x[ 7],ROTATE(PLUS(x[ 3],x[15]), 9)); + x[11] = XOR(x[11],ROTATE(PLUS(x[ 7],x[ 3]),13)); + x[15] = XOR(x[15],ROTATE(PLUS(x[11],x[ 7]),18)); + x[ 1] = XOR(x[ 1],ROTATE(PLUS(x[ 0],x[ 3]), 7)); + x[ 2] = XOR(x[ 2],ROTATE(PLUS(x[ 1],x[ 0]), 9)); + x[ 3] = XOR(x[ 3],ROTATE(PLUS(x[ 2],x[ 1]),13)); + x[ 0] = XOR(x[ 0],ROTATE(PLUS(x[ 3],x[ 2]),18)); + x[ 6] = XOR(x[ 6],ROTATE(PLUS(x[ 5],x[ 4]), 7)); + x[ 7] = XOR(x[ 7],ROTATE(PLUS(x[ 6],x[ 5]), 9)); + x[ 4] = XOR(x[ 4],ROTATE(PLUS(x[ 7],x[ 6]),13)); + x[ 5] = XOR(x[ 5],ROTATE(PLUS(x[ 4],x[ 7]),18)); + x[11] = XOR(x[11],ROTATE(PLUS(x[10],x[ 9]), 7)); + x[ 8] = XOR(x[ 8],ROTATE(PLUS(x[11],x[10]), 9)); + x[ 9] = XOR(x[ 9],ROTATE(PLUS(x[ 8],x[11]),13)); + x[10] = XOR(x[10],ROTATE(PLUS(x[ 9],x[ 8]),18)); + x[12] = XOR(x[12],ROTATE(PLUS(x[15],x[14]), 7)); + x[13] = XOR(x[13],ROTATE(PLUS(x[12],x[15]), 9)); + x[14] = XOR(x[14],ROTATE(PLUS(x[13],x[12]),13)); + x[15] = XOR(x[15],ROTATE(PLUS(x[14],x[13]),18)); + } + for (i = 0; i < 16; ++i) + x[i] = PLUS(x[i],input[i]); + for (i = 0; i < 16; ++i) + U32TO8_LITTLE(output + 4 * i,x[i]); +} + +static const char sigma[16] = "expand 32-byte k"; +static const char tau[16] = "expand 16-byte k"; + +static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes) +{ + const char *constants; + + ctx->input[1] = U8TO32_LITTLE(k + 0); + ctx->input[2] = U8TO32_LITTLE(k + 4); + ctx->input[3] = U8TO32_LITTLE(k + 8); + ctx->input[4] = U8TO32_LITTLE(k + 12); + if (kbytes == 32) { /* recommended */ + k += 16; + constants = sigma; + } else { /* kbytes == 16 */ + constants = tau; + } + ctx->input[11] = U8TO32_LITTLE(k + 0); + ctx->input[12] = U8TO32_LITTLE(k + 4); + ctx->input[13] = U8TO32_LITTLE(k + 8); + ctx->input[14] = U8TO32_LITTLE(k + 12); + ctx->input[0] = U8TO32_LITTLE(constants + 0); + ctx->input[5] = U8TO32_LITTLE(constants + 4); + ctx->input[10] = U8TO32_LITTLE(constants + 8); + ctx->input[15] = U8TO32_LITTLE(constants + 12); +} + +static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv) +{ + ctx->input[6] = U8TO32_LITTLE(iv + 0); + ctx->input[7] = U8TO32_LITTLE(iv + 4); + ctx->input[8] = 0; + ctx->input[9] = 0; +} + +static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst, + const u8 *src, 
unsigned int bytes) +{ + u8 buf[64]; + int i; + + if (dst != src) + memcpy(dst, src, bytes); + + while (bytes) { + salsa20_wordtobyte(buf, ctx->input); + + ctx->input[8] = PLUSONE(ctx->input[8]); + if (!ctx->input[8]) + ctx->input[9] = PLUSONE(ctx->input[9]); + + if (bytes <= 64) { + for (i = 0; i < bytes/4; ++i) + ((u32*)dst)[i] ^= ((u32*)buf)[i]; + for (i = bytes - bytes % 4; i < bytes; ++i) + dst[i] ^= buf[i]; + return; + } + + for (i = 0; i < 64/4; ++i) + ((u32*)dst)[i] ^= ((u32*)buf)[i]; + bytes -= 64; + dst += 64; + } +} + +/* + * End of code taken from D. J. Bernstein's reference implementation. + */ + +static int setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keysize) +{ + struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); + salsa20_keysetup(ctx, key, keysize); + return 0; +} + +static int encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + struct crypto_blkcipher *tfm = desc->tfm; + struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm); + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + salsa20_ivsetup(ctx, walk.iv); + salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, + walk.src.virt.addr, nbytes); + + err = blkcipher_walk_done(desc, &walk, 0); + return err; +} + +static struct crypto_alg alg = { + .cra_name = "salsa20", + .cra_driver_name = "salsa20-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_type = &crypto_blkcipher_type, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct salsa20_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_u = { + .blkcipher = { + .setkey = setkey, + .encrypt = encrypt, + .decrypt = encrypt, + .min_keysize = SALSA20_MIN_KEY_SIZE, + .max_keysize = SALSA20_MAX_KEY_SIZE, + .ivsize = SALSA20_IV_SIZE, + } + } +}; + +static int __init init(void) +{ + return crypto_register_alg(&alg); +} + +static void __exit fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(init); +module_exit(fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm"); +MODULE_ALIAS("salsa20"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 4d364cc..b8cb1d1 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -80,7 +80,7 @@ static char *check[] = { "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", - "camellia", "seed", NULL + "camellia", "seed", "salsa20", NULL }; static void hexdump(unsigned char *buf, unsigned int len) @@ -1309,6 +1309,12 @@ static void do_test(void) test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS); break; + case 34: + test_cipher("salsa20", ENCRYPT, + salsa20_stream_enc_tv_template, + SALSA20_STREAM_ENC_TEST_VECTORS); + break; + case 100: test_hash("hmac(md5)", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index b91585e..6ffc411 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -4644,6 +4644,167 @@ static struct cipher_testvec seed_dec_tv_template[] = { } }; +#define SALSA20_STREAM_ENC_TEST_VECTORS 4 +static struct cipher_testvec salsa20_stream_enc_tv_template[] = { + /* + * Testvectors from verified.test-vectors submitted to ECRYPT. + * They are truncated to size 39, 64, 111, 129 to test a variety + * of input length. 
+ */ + { /* Set 3, vector 0 */ + .key = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F + }, + .klen = 16, + .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + .ilen = 39, + .result = { + 0x2D, 0xD5, 0xC3, 0xF7, 0xBA, 0x2B, 0x20, 0xF7, + 0x68, 0x02, 0x41, 0x0C, 0x68, 0x86, 0x88, 0x89, + 0x5A, 0xD8, 0xC1, 0xBD, 0x4E, 0xA6, 0xC9, 0xB1, + 0x40, 0xFB, 0x9B, 0x90, 0xE2, 0x10, 0x49, 0xBF, + 0x58, 0x3F, 0x52, 0x79, 0x70, 0xEB, 0xC1, + }, + .rlen = 39, + }, { /* Set 5, vector 0 */ + .key = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + }, + .klen = 16, + .iv = { 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + .ilen = 64, + .result = { + 0xB6, 0x6C, 0x1E, 0x44, 0x46, 0xDD, 0x95, 0x57, + 0xE5, 0x78, 0xE2, 0x23, 0xB0, 0xB7, 0x68, 0x01, + 0x7B, 0x23, 0xB2, 0x67, 0xBB, 0x02, 0x34, 0xAE, + 0x46, 0x26, 0xBF, 0x44, 0x3F, 0x21, 0x97, 0x76, + 0x43, 0x6F, 0xB1, 0x9F, 0xD0, 0xE8, 0x86, 0x6F, + 0xCD, 0x0D, 0xE9, 0xA9, 0x53, 0x8F, 0x4A, 0x09, + 0xCA, 0x9A, 0xC0, 0x73, 0x2E, 0x30, 0xBC, 0xF9, + 0x8E, 0x4F, 0x13, 0xE4, 0xB9, 0xE2, 0x01, 0xD9, + }, + .rlen = 64, + }, { /* Set 3, vector 27 */ + .key = { + 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, + 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, + 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, + 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A + }, + .klen = 32, + .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + .ilen = 111, + .result = { + 0xAE, 0x39, 0x50, 0x8E, 0xAC, 0x9A, 0xEC, 0xE7, + 0xBF, 0x97, 0xBB, 0x20, 0xB9, 0xDE, 0xE4, 0x1F, + 0x87, 0xD9, 0x47, 0xF8, 0x28, 0x91, 0x35, 0x98, + 0xDB, 0x72, 0xCC, 0x23, 0x29, 0x48, 0x56, 0x5E, + 0x83, 0x7E, 0x0B, 0xF3, 0x7D, 0x5D, 0x38, 0x7B, + 0x2D, 0x71, 0x02, 0xB4, 0x3B, 0xB5, 0xD8, 0x23, + 0xB0, 0x4A, 0xDF, 0x3C, 0xEC, 0xB6, 0xD9, 0x3B, + 0x9B, 0xA7, 0x52, 0xBE, 0xC5, 0xD4, 0x50, 0x59, + + 0x15, 0x14, 0xB4, 0x0E, 0x40, 0xE6, 0x53, 0xD1, + 0x83, 0x9C, 0x5B, 0xA0, 0x92, 0x29, 0x6B, 0x5E, + 0x96, 0x5B, 0x1E, 0x2F, 0xD3, 0xAC, 0xC1, 0x92, + 0xB1, 0x41, 0x3F, 0x19, 0x2F, 0xC4, 0x3B, 0xC6, + 0x95, 0x46, 0x45, 0x54, 0xE9, 
0x75, 0x03, 0x08, + 0x44, 0xAF, 0xE5, 0x8A, 0x81, 0x12, 0x09, + }, + .rlen = 111, + + }, { /* Set 5, vector 27 */ + .key = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + }, + .klen = 32, + .iv = { 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00 }, + .input = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + 0x00, + }, + .ilen = 129, + .result = { + 0xD2, 0xDB, 0x1A, 0x5C, 0xF1, 0xC1, 0xAC, 0xDB, + 0xE8, 0x1A, 0x7A, 0x43, 0x40, 0xEF, 0x53, 0x43, + 0x5E, 0x7F, 0x4B, 0x1A, 0x50, 0x52, 0x3F, 0x8D, + 0x28, 0x3D, 0xCF, 0x85, 0x1D, 0x69, 0x6E, 0x60, + 0xF2, 0xDE, 0x74, 0x56, 0x18, 0x1B, 0x84, 0x10, + 0xD4, 0x62, 0xBA, 0x60, 0x50, 0xF0, 0x61, 0xF2, + 0x1C, 0x78, 0x7F, 0xC1, 0x24, 0x34, 0xAF, 0x58, + 0xBF, 0x2C, 0x59, 0xCA, 0x90, 0x77, 0xF3, 0xB0, + + 0x5B, 0x4A, 0xDF, 0x89, 0xCE, 0x2C, 0x2F, 0xFC, + 0x67, 0xF0, 0xE3, 0x45, 0xE8, 0xB3, 0xB3, 0x75, + 0xA0, 0x95, 0x71, 0xA1, 0x29, 0x39, 0x94, 0xCA, + 0x45, 0x2F, 0xBD, 0xCB, 0x10, 0xB6, 0xBE, 0x9F, + 0x8E, 0xF9, 0xB2, 0x01, 0x0A, 0x5A, 0x0A, 0xB7, + 0x6B, 0x9D, 0x70, 0x8E, 0x4B, 0xD6, 0x2F, 0xCD, + 0x2E, 0x40, 0x48, 0x75, 0xE9, 0xE2, 0x21, 0x45, + 0x0B, 0xC9, 0xB6, 0xB5, 0x66, 0xBC, 0x9A, 0x59, + + 0x5A, + }, + .rlen = 129, + } +}; + /* * Compression stuff. */ -- cgit v1.1 From 984e976f5382ff09351ddd3b023937611396d739 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 21 Nov 2007 12:24:45 +0800 Subject: [HWRNG]: move status polling loop to data_present callbacks Handle waiting for new random within the drivers themselves, this allows to use better suited timeouts for the individual rngs. 
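[The per-driver shape of the change is roughly the following sketch (the status-read helper is hypothetical); each driver can now pick a retry count and delay suited to its hardware:

    #include <linux/delay.h>
    #include <linux/hw_random.h>

    static int my_read_status(struct hwrng *rng);   /* hypothetical */

    static int my_rng_data_present(struct hwrng *rng, int wait)
    {
            int data, i;

            for (i = 0; i < 20; i++) {
                    data = my_read_status(rng);     /* 1 when a word is ready */
                    if (data || !wait)
                            break;
                    udelay(10);                     /* tunable per device */
            }
            return data;
    }

In the non-blocking case (wait == 0) the loop still exits after the first probe, preserving the old O_NONBLOCK behaviour.]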
Signed-off-by: Patrick McHardy Acked-by: Michael Buesch Signed-off-by: Herbert Xu --- drivers/char/hw_random/amd-rng.c | 12 ++++++++++-- drivers/char/hw_random/core.c | 24 ++++++------------------ drivers/char/hw_random/geode-rng.c | 12 ++++++++++-- drivers/char/hw_random/intel-rng.c | 15 ++++++++++++--- drivers/char/hw_random/omap-rng.c | 13 +++++++++++-- drivers/char/hw_random/pasemi-rng.c | 14 +++++++++++--- drivers/char/hw_random/via-rng.c | 19 ++++++++++++------- include/linux/hw_random.h | 2 +- 8 files changed, 73 insertions(+), 38 deletions(-) diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 556fd81..c422e87 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -52,11 +53,18 @@ MODULE_DEVICE_TABLE(pci, pci_tbl); static struct pci_dev *amd_pdev; -static int amd_rng_data_present(struct hwrng *rng) +static int amd_rng_data_present(struct hwrng *rng, int wait) { u32 pmbase = (u32)rng->priv; + int data, i; - return !!(inl(pmbase + 0xF4) & 1); + for (i = 0; i < 20; i++) { + data = !!(inl(pmbase + 0xF4) & 1); + if (data || !wait) + break; + udelay(10); + } + return data; } static int amd_rng_data_read(struct hwrng *rng, u32 *data) diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 26a860a..0118b98 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -66,11 +66,11 @@ static inline void hwrng_cleanup(struct hwrng *rng) rng->cleanup(rng); } -static inline int hwrng_data_present(struct hwrng *rng) +static inline int hwrng_data_present(struct hwrng *rng, int wait) { if (!rng->data_present) return 1; - return rng->data_present(rng); + return rng->data_present(rng, wait); } static inline int hwrng_data_read(struct hwrng *rng, u32 *data) @@ -94,8 +94,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, { u32 data; ssize_t ret = 0; - int i, err = 0; - int data_present; + int err = 0; int bytes_read; while (size) { @@ -107,21 +106,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, err = -ENODEV; goto out; } - if (filp->f_flags & O_NONBLOCK) { - data_present = hwrng_data_present(current_rng); - } else { - /* Some RNG require some time between data_reads to gather - * new entropy. Poll it. 
- */ - for (i = 0; i < 20; i++) { - data_present = hwrng_data_present(current_rng); - if (data_present) - break; - udelay(10); - } - } + bytes_read = 0; - if (data_present) + if (hwrng_data_present(current_rng, + !(filp->f_flags & O_NONBLOCK))) bytes_read = hwrng_data_read(current_rng, &data); mutex_unlock(&rng_mutex); diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 8e8658d..fed4ef5 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -61,11 +62,18 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data) return 4; } -static int geode_rng_data_present(struct hwrng *rng) +static int geode_rng_data_present(struct hwrng *rng, int wait) { void __iomem *mem = (void __iomem *)rng->priv; + int data, i; - return !!(readl(mem + GEODE_RNG_STATUS_REG)); + for (i = 0; i < 20; i++) { + data = !!(readl(mem + GEODE_RNG_STATUS_REG)); + if (data || !wait) + break; + udelay(10); + } + return data; } diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c index 753f460..5cc651e 100644 --- a/drivers/char/hw_random/intel-rng.c +++ b/drivers/char/hw_random/intel-rng.c @@ -29,6 +29,7 @@ #include #include #include +#include #include @@ -162,11 +163,19 @@ static inline u8 hwstatus_set(void __iomem *mem, return hwstatus_get(mem); } -static int intel_rng_data_present(struct hwrng *rng) +static int intel_rng_data_present(struct hwrng *rng, int wait) { void __iomem *mem = (void __iomem *)rng->priv; - - return !!(readb(mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT); + int data, i; + + for (i = 0; i < 20; i++) { + data = !!(readb(mem + INTEL_RNG_STATUS) & + INTEL_RNG_DATA_PRESENT); + if (data || !wait) + break; + udelay(10); + } + return data; } static int intel_rng_data_read(struct hwrng *rng, u32 *data) diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 3f35a1c..7e31995 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -29,6 +29,7 @@ #include #include #include +#include #include @@ -65,9 +66,17 @@ static void omap_rng_write_reg(int reg, u32 val) } /* REVISIT: Does the status bit really work on 16xx? */ -static int omap_rng_data_present(struct hwrng *rng) +static int omap_rng_data_present(struct hwrng *rng, int wait) { - return omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1; + int data, i; + + for (i = 0; i < 20; i++) { + data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1; + if (data || !wait) + break; + udelay(10); + } + return data; } static int omap_rng_data_read(struct hwrng *rng, u32 *data) diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c index fa6040b..621adf2 100644 --- a/drivers/char/hw_random/pasemi-rng.c +++ b/drivers/char/hw_random/pasemi-rng.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -44,9 +45,16 @@ static int pasemi_rng_data_present(struct hwrng *rng) { void __iomem *rng_regs = (void __iomem *)rng->priv; - - return (in_le32(rng_regs + SDCRNG_CTL_REG) - & SDCRNG_CTL_FVLD_M) ? 1 : 0; + int data, i; + + for (i = 0; i < 20; i++) { + data = (in_le32(rng_regs + SDCRNG_CTL_REG) + & SDCRNG_CTL_FVLD_M) ? 
1 : 0; + int data, i; + + for (i = 0; i < 20; i++) { + data = (in_le32(rng_regs + SDCRNG_CTL_REG) + & SDCRNG_CTL_FVLD_M) ? 1 : 0; + if (data || !wait) + break; + udelay(10); + } + return data; } static int pasemi_rng_data_read(struct hwrng *rng, u32 *data) diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index ec435cb..868e39f 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -77,10 +78,11 @@ static inline u32 xstore(u32 *addr, u32 edx_in) return eax_out; } -static int via_rng_data_present(struct hwrng *rng) +static int via_rng_data_present(struct hwrng *rng, int wait) { u32 bytes_out; u32 *via_rng_datum = (u32 *)(&rng->priv); + int i; /* We choose the recommended 1-byte-per-instruction RNG rate, * for greater randomness at the expense of speed. Larger @@ -95,12 +97,15 @@ static int via_rng_data_present(struct hwrng *rng) * completes. */ - *via_rng_datum = 0; /* paranoia, not really necessary */ - bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1); - bytes_out &= VIA_XSTORE_CNT_MASK; - if (bytes_out == 0) - return 0; - return 1; + for (i = 0; i < 20; i++) { + *via_rng_datum = 0; /* paranoia, not really necessary */ + bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1); + bytes_out &= VIA_XSTORE_CNT_MASK; + if (bytes_out || !wait) + break; + udelay(10); + } + return bytes_out ? 1 : 0; } static int via_rng_data_read(struct hwrng *rng, u32 *data) diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 21ea761..85d1191 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -33,7 +33,7 @@ struct hwrng { const char *name; int (*init)(struct hwrng *rng); void (*cleanup)(struct hwrng *rng); - int (*data_present)(struct hwrng *rng); + int (*data_present)(struct hwrng *rng, int wait); int (*data_read)(struct hwrng *rng, u32 *data); unsigned long priv; -- cgit v1.1 From 37a8023ce59bfc1fa24067fd94aee7b286f4c01b Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 21 Nov 2007 12:47:13 +0800 Subject: [HIFN]: Improve PLL initialization The current PLL initialization has a number of deficiencies: - uses a fixed multiplier of 8, which overclocks the chip when using a reference clock that operates at frequencies above 33MHz. According to a comment in the BSD source, this is true for the external clock on almost every board. - writes to a reserved bit - doesn't follow the initialization procedure specified in chapter 6.11.1 of the HIFN hardware users guide - doesn't allow using the PCI clock This patch adds a module parameter to specify the reference clock (pci or external) and its frequency and uses that to calculate the optimum multiplier to reach the maximal speed. By default it uses the external clock and assumes a speed of 66MHz, which effectively halves the frequency currently used. Signed-off-by: Patrick McHardy Acked-by: Evgeniy Polyakov Signed-off-by: Herbert Xu --- drivers/crypto/hifn_795x.c | 110 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 108 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index bf817d4..de594bc 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -47,6 +48,11 @@ #define dprintk(f, a...) 
do {} while (0) #endif +static char hifn_pll_ref[sizeof("extNNN")] = "ext"; +module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444); +MODULE_PARM_DESC(hifn_pll_ref, + "PLL reference clock (pci[freq] or ext[freq], default ext)"); + static atomic_t hifn_dev_number; #define ACRYPTO_OP_DECRYPT 0 @@ -286,7 +292,26 @@ static atomic_t hifn_dev_number; #define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */ #define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */ -#define HIFN_PLL_7956 0x00001d18 /* 7956 PLL config value */ +/* PLL configuration register */ +#define HIFN_PLL_REF_CLK_HBI 0x00000000 /* HBI reference clock */ +#define HIFN_PLL_REF_CLK_PLL 0x00000001 /* PLL reference clock */ +#define HIFN_PLL_BP 0x00000002 /* Reference clock bypass */ +#define HIFN_PLL_PK_CLK_HBI 0x00000000 /* PK engine HBI clock */ +#define HIFN_PLL_PK_CLK_PLL 0x00000008 /* PK engine PLL clock */ +#define HIFN_PLL_PE_CLK_HBI 0x00000000 /* PE engine HBI clock */ +#define HIFN_PLL_PE_CLK_PLL 0x00000010 /* PE engine PLL clock */ +#define HIFN_PLL_RESERVED_1 0x00000400 /* Reserved bit, must be 1 */ +#define HIFN_PLL_ND_SHIFT 11 /* Clock multiplier shift */ +#define HIFN_PLL_ND_MULT_2 0x00000000 /* PLL clock multiplier 2 */ +#define HIFN_PLL_ND_MULT_4 0x00000800 /* PLL clock multiplier 4 */ +#define HIFN_PLL_ND_MULT_6 0x00001000 /* PLL clock multiplier 6 */ +#define HIFN_PLL_ND_MULT_8 0x00001800 /* PLL clock multiplier 8 */ +#define HIFN_PLL_ND_MULT_10 0x00002000 /* PLL clock multiplier 10 */ +#define HIFN_PLL_ND_MULT_12 0x00002800 /* PLL clock multiplier 12 */ +#define HIFN_PLL_IS_1_8 0x00000000 /* charge pump (mult. 1-8) */ +#define HIFN_PLL_IS_9_12 0x00010000 /* charge pump (mult. 9-12) */ + +#define HIFN_PLL_FCK_MAX 266 /* Maximum PLL frequency */ /* Public key reset register (HIFN_1_PUB_RESET) */ #define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */ @@ -871,6 +896,64 @@ static void hifn_init_dma(struct hifn_device *dev) dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; } +/* + * Initialize the PLL. We need to know the frequency of the reference clock + * to calculate the optimal multiplier. For PCI we assume 66MHz, since that + * allows us to operate without the risk of overclocking the chip. If it + * actually uses 33MHz, the chip will operate at half the speed; this can be + * overridden by specifying the frequency as a module parameter (pci33). + * + * Unfortunately the PCI clock is not very suitable since the HIFN needs a + * stable clock and the PCI clock frequency may vary, so the default is the + * external clock. There is no way to find out its frequency, so we default to + * 66MHz since according to Mike Ham of HiFn, almost every board in existence + * has an external crystal populated at 66MHz. 
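+ *
+ * For example, with the default ext66 reference the code below computes
+ * m = HIFN_PLL_FCK_MAX / freq = 266 / 66 = 4, programs the ND field as
+ * (m / 2 - 1) << HIFN_PLL_ND_SHIFT (i.e. HIFN_PLL_ND_MULT_4) and selects
+ * the 1-8 charge pump setting, clocking the engines at 4 * 66 = 264MHz.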
+ */ +static void hifn_init_pll(struct hifn_device *dev) +{ + unsigned int freq, m; + u32 pllcfg; + + pllcfg = HIFN_1_PLL | HIFN_PLL_RESERVED_1; + + if (strncmp(hifn_pll_ref, "ext", 3) == 0) + pllcfg |= HIFN_PLL_REF_CLK_PLL; + else + pllcfg |= HIFN_PLL_REF_CLK_HBI; + + if (hifn_pll_ref[3] != '\0') + freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); + else { + freq = 66; + printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, " + "override with hifn_pll_ref=%.3s\n", + freq, hifn_pll_ref); + } + + m = HIFN_PLL_FCK_MAX / freq; + + pllcfg |= (m / 2 - 1) << HIFN_PLL_ND_SHIFT; + if (m <= 8) + pllcfg |= HIFN_PLL_IS_1_8; + else + pllcfg |= HIFN_PLL_IS_9_12; + + /* Select clock source and enable clock bypass */ + hifn_write_1(dev, HIFN_1_PLL, pllcfg | + HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI | HIFN_PLL_BP); + + /* Let the chip lock to the input clock */ + mdelay(10); + + /* Disable clock bypass */ + hifn_write_1(dev, HIFN_1_PLL, pllcfg | + HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI); + + /* Switch the engines to the PLL */ + hifn_write_1(dev, HIFN_1_PLL, pllcfg | + HIFN_PLL_PK_CLK_PLL | HIFN_PLL_PE_CLK_PLL); +} + static void hifn_init_registers(struct hifn_device *dev) { u32 dptr = dev->desc_dma; @@ -938,7 +1021,7 @@ static void hifn_init_registers(struct hifn_device *dev) #else hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342); #endif - hifn_write_1(dev, HIFN_1_PLL, HIFN_PLL_7956); + hifn_init_pll(dev); hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | @@ -2621,8 +2704,31 @@ static struct pci_driver hifn_pci_driver = { static int __devinit hifn_init(void) { + unsigned int freq; int err; + if (strncmp(hifn_pll_ref, "ext", 3) && + strncmp(hifn_pll_ref, "pci", 3)) { + printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, " + "must be pci or ext"); + return -EINVAL; + } + + /* + * For the 7955/7956 the reference clock frequency must be in the + * range of 20MHz-100MHz. For the 7954 the upper bound is 66.67MHz, + * but this chip is currently not supported. 
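+ * The frequency is given in MHz directly after the clock name
+ * (e.g. hifn_pll_ref=ext33 or hifn_pll_ref=pci66); when no digits are
+ * given, hifn_init_pll() falls back to 66MHz.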
+ */ + if (hifn_pll_ref[3] != '\0') { + freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); + if (freq < 20 || freq > 100) { + printk(KERN_ERR "hifn795x: invalid hifn_pll_ref " + "frequency, must be in the range " + "of 20-100"); + return -EINVAL; + } + } + err = pci_register_driver(&hifn_pci_driver); if (err < 0) { dprintk("Failed to register PCI driver for %s device.\n", -- cgit v1.1 From fcd06755936d2209b69650d2a7cc99cbcd3ccc67 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 21 Nov 2007 12:51:52 +0800 Subject: [HIFN]: Add support for using the random number generator Signed-off-by: Patrick McHardy Acked-by: Evgeniy Polyakov Signed-off-by: Herbert Xu --- drivers/crypto/hifn_795x.c | 82 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 81 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index de594bc..1a19700 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -31,6 +31,8 @@ #include #include #include +#include +#include #include #include @@ -458,6 +460,14 @@ struct hifn_device struct crypto_queue queue; struct list_head alg_list; + + unsigned int pk_clk_freq; + +#if defined(CONFIG_HW_RANDOM) || defined(CONFIG_HW_RANDOM_MODULE) + unsigned int rng_wait_time; + ktime_t rngtime; + struct hwrng rng; +#endif }; #define HIFN_D_LENGTH 0x0000ffff @@ -785,6 +795,56 @@ static struct pci2id { } }; +#if defined(CONFIG_HW_RANDOM) || defined(CONFIG_HW_RANDOM_MODULE) +static int hifn_rng_data_present(struct hwrng *rng, int wait) +{ + struct hifn_device *dev = (struct hifn_device *)rng->priv; + s64 nsec; + + nsec = ktime_to_ns(ktime_sub(ktime_get(), dev->rngtime)); + nsec -= dev->rng_wait_time; + if (nsec <= 0) + return 1; + if (!wait) + return 0; + ndelay(nsec); + return 1; +} + +static int hifn_rng_data_read(struct hwrng *rng, u32 *data) +{ + struct hifn_device *dev = (struct hifn_device *)rng->priv; + + *data = hifn_read_1(dev, HIFN_1_RNG_DATA); + dev->rngtime = ktime_get(); + return 4; +} + +static int hifn_register_rng(struct hifn_device *dev) +{ + /* + * We must wait at least 256 Pk_clk cycles between two reads of the rng. + */ + dev->rng_wait_time = DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) * + 256; + + dev->rng.name = dev->name; + dev->rng.data_present = hifn_rng_data_present, + dev->rng.data_read = hifn_rng_data_read, + dev->rng.priv = (unsigned long)dev; + + return hwrng_register(&dev->rng); +} + +static void hifn_unregister_rng(struct hifn_device *dev) +{ + hwrng_unregister(&dev->rng); +} +#else +#define hifn_register_rng(dev) 0 +#define hifn_unregister_rng(dev) +#endif + static int hifn_init_pubrng(struct hifn_device *dev) { int i; @@ -820,6 +880,11 @@ static int hifn_init_pubrng(struct hifn_device *dev) dprintk("Chip %s: RNG engine has been successfully initialised.\n", dev->name); +#if defined(CONFIG_HW_RANDOM) || defined(CONFIG_HW_RANDOM_MODULE) + /* First value must be discarded */ + hifn_read_1(dev, HIFN_1_RNG_DATA); + dev->rngtime = ktime_get(); +#endif return 0; } @@ -952,6 +1017,14 @@ static void hifn_init_pll(struct hifn_device *dev) /* Switch the engines to the PLL */ hifn_write_1(dev, HIFN_1_PLL, pllcfg | HIFN_PLL_PK_CLK_PLL | HIFN_PLL_PE_CLK_PLL); + + /* + * The Fpk_clk runs at half the total speed. Its frequency is needed to + * calculate the minimum time between two reads of the rng. Since 33MHz + * is actually 33.333... we overestimate the frequency here, resulting + * in slightly larger intervals. 
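+ * E.g. the default ext66 clock with multiplier m = 4 yields
+ * pk_clk_freq = 1000000 * (66 + 1) * 4 / 2 = 134000000, making the
+ * 256-cycle spacing between rng reads roughly 2us.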
+ */ + dev->pk_clk_freq = 1000000 * (freq + 1) * m / 2; } static void hifn_init_registers(struct hifn_device *dev) @@ -2609,10 +2682,14 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_out_stop_device; - err = hifn_register_alg(dev); + err = hifn_register_rng(dev); if (err) goto err_out_stop_device; + err = hifn_register_alg(dev); + if (err) + goto err_out_unregister_rng; + INIT_DELAYED_WORK(&dev->work, hifn_work); schedule_delayed_work(&dev->work, HZ); @@ -2622,6 +2699,8 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; +err_out_unregister_rng: + hifn_unregister_rng(dev); err_out_stop_device: hifn_reset_dma(dev, 1); hifn_stop_device(dev); @@ -2662,6 +2741,7 @@ static void hifn_remove(struct pci_dev *pdev) cancel_delayed_work(&dev->work); flush_scheduled_work(); + hifn_unregister_rng(dev); hifn_unregister_alg(dev); hifn_reset_dma(dev, 1); hifn_stop_device(dev); -- cgit v1.1 From 7613636def82092a5c7b6322078a2af832410417 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 20 Nov 2007 17:26:06 +0800 Subject: [CRYPTO] api: Add crypto_inc and crypto_xor With the addition of more stream ciphers we need to curb the proliferation of ad-hoc xor functions. This patch creates a generic pair of functions, crypto_inc and crypto_xor, which do big-endian increment and exclusive or, respectively. For optimum performance, they both use u32 operations, so the alignment must be that of u32 even though the arguments are of type u8 *. Signed-off-by: Herbert Xu --- crypto/algapi.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ include/crypto/algapi.h | 4 ++++ 2 files changed, 51 insertions(+) diff --git a/crypto/algapi.c b/crypto/algapi.c index 8383282..08eca6d 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -605,6 +605,53 @@ int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm) } EXPORT_SYMBOL_GPL(crypto_tfm_in_queue); +static inline void crypto_inc_byte(u8 *a, unsigned int size) +{ + u8 *b = (a + size); + u8 c; + + for (; size; size--) { + c = *--b + 1; + *b = c; + if (c) + break; + } +} + +void crypto_inc(u8 *a, unsigned int size) +{ + __be32 *b = (__be32 *)(a + size); + u32 c; + + for (; size >= 4; size -= 4) { + c = be32_to_cpu(*--b) + 1; + *b = cpu_to_be32(c); + if (c) + return; + } + + crypto_inc_byte(a, size); +} +EXPORT_SYMBOL_GPL(crypto_inc); + +static inline void crypto_xor_byte(u8 *a, const u8 *b, unsigned int size) +{ + for (; size; size--) + *a++ ^= *b++; +} + +void crypto_xor(u8 *dst, const u8 *src, unsigned int size) +{ + u32 *a = (u32 *)dst; + u32 *b = (u32 *)src; + + for (; size >= 4; size -= 4) + *a++ ^= *b++; + + crypto_xor_byte((u8 *)a, (u8 *)b, size); +} +EXPORT_SYMBOL_GPL(crypto_xor); + static int __init crypto_algapi_init(void) { crypto_init_proc(); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 88619f9..2cdb227 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -124,6 +124,10 @@ int crypto_enqueue_request(struct crypto_queue *queue, struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); +/* These functions require the input/output to be aligned as u32. 
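+ * For example, crypto_inc treats its buffer as a single big-endian counter:
+ * incrementing the 4-byte value 00 00 01 ff yields 00 00 02 00, and
+ * crypto_xor computes dst ^= src over size bytes.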
*/ +void crypto_inc(u8 *a, unsigned int size); +void crypto_xor(u8 *dst, const u8 *src, unsigned int size); + int blkcipher_walk_done(struct blkcipher_desc *desc, struct blkcipher_walk *walk, int err); int blkcipher_walk_virt(struct blkcipher_desc *desc, -- cgit v1.1 From 3c7f076da557eadb37240d70b0399ff9763fa2ae Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 20 Nov 2007 17:33:39 +0800 Subject: [CRYPTO] cbc: Use crypto_xor This patch replaces the custom xor in CBC with the generic crypto_xor. Signed-off-by: Herbert Xu --- crypto/cbc.c | 93 +++++++++++------------------------------------------------- 1 file changed, 16 insertions(+), 77 deletions(-) diff --git a/crypto/cbc.c b/crypto/cbc.c index 1f2649e..b013d6f 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -20,7 +20,6 @@ struct crypto_cbc_ctx { struct crypto_cipher *child; - void (*xor)(u8 *dst, const u8 *src, unsigned int bs); }; static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key, @@ -41,9 +40,7 @@ static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key, static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc, struct blkcipher_walk *walk, - struct crypto_cipher *tfm, - void (*xor)(u8 *, const u8 *, - unsigned int)) + struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; @@ -54,7 +51,7 @@ static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc, u8 *iv = walk->iv; do { - xor(iv, src, bsize); + crypto_xor(iv, src, bsize); fn(crypto_cipher_tfm(tfm), dst, iv); memcpy(iv, dst, bsize); @@ -67,9 +64,7 @@ static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc, static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc, struct blkcipher_walk *walk, - struct crypto_cipher *tfm, - void (*xor)(u8 *, const u8 *, - unsigned int)) + struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; @@ -79,7 +74,7 @@ static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc, u8 *iv = walk->iv; do { - xor(src, iv, bsize); + crypto_xor(src, iv, bsize); fn(crypto_cipher_tfm(tfm), src, src); iv = src; @@ -99,7 +94,6 @@ static int crypto_cbc_encrypt(struct blkcipher_desc *desc, struct crypto_blkcipher *tfm = desc->tfm; struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); struct crypto_cipher *child = ctx->child; - void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor; int err; blkcipher_walk_init(&walk, dst, src, nbytes); @@ -107,11 +101,9 @@ static int crypto_cbc_encrypt(struct blkcipher_desc *desc, while ((nbytes = walk.nbytes)) { if (walk.src.virt.addr == walk.dst.virt.addr) - nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child, - xor); + nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child); else - nbytes = crypto_cbc_encrypt_segment(desc, &walk, child, - xor); + nbytes = crypto_cbc_encrypt_segment(desc, &walk, child); err = blkcipher_walk_done(desc, &walk, nbytes); } @@ -120,9 +112,7 @@ static int crypto_cbc_encrypt(struct blkcipher_desc *desc, static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc, struct blkcipher_walk *walk, - struct crypto_cipher *tfm, - void (*xor)(u8 *, const u8 *, - unsigned int)) + struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_decrypt; @@ -134,7 +124,7 @@ static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc, do { fn(crypto_cipher_tfm(tfm), dst, src); - xor(dst, iv, bsize); + crypto_xor(dst, iv, bsize); iv = src; src += bsize; @@ -148,9 
+138,7 @@ static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc, static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc, struct blkcipher_walk *walk, - struct crypto_cipher *tfm, - void (*xor)(u8 *, const u8 *, - unsigned int)) + struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_decrypt; @@ -171,11 +159,11 @@ static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc, fn(crypto_cipher_tfm(tfm), src, src); if ((nbytes -= bsize) < bsize) break; - xor(src, src - bsize, bsize); + crypto_xor(src, src - bsize, bsize); src -= bsize; } - xor(src, first_iv, bsize); + crypto_xor(src, first_iv, bsize); return nbytes; } @@ -188,7 +176,6 @@ static int crypto_cbc_decrypt(struct blkcipher_desc *desc, struct crypto_blkcipher *tfm = desc->tfm; struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); struct crypto_cipher *child = ctx->child; - void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor; int err; blkcipher_walk_init(&walk, dst, src, nbytes); @@ -196,48 +183,15 @@ static int crypto_cbc_decrypt(struct blkcipher_desc *desc, while ((nbytes = walk.nbytes)) { if (walk.src.virt.addr == walk.dst.virt.addr) - nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child, - xor); + nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child); else - nbytes = crypto_cbc_decrypt_segment(desc, &walk, child, - xor); + nbytes = crypto_cbc_decrypt_segment(desc, &walk, child); err = blkcipher_walk_done(desc, &walk, nbytes); } return err; } -static void xor_byte(u8 *a, const u8 *b, unsigned int bs) -{ - do { - *a++ ^= *b++; - } while (--bs); -} - -static void xor_quad(u8 *dst, const u8 *src, unsigned int bs) -{ - u32 *a = (u32 *)dst; - u32 *b = (u32 *)src; - - do { - *a++ ^= *b++; - } while ((bs -= 4)); -} - -static void xor_64(u8 *a, const u8 *b, unsigned int bs) -{ - ((u32 *)a)[0] ^= ((u32 *)b)[0]; - ((u32 *)a)[1] ^= ((u32 *)b)[1]; -} - -static void xor_128(u8 *a, const u8 *b, unsigned int bs) -{ - ((u32 *)a)[0] ^= ((u32 *)b)[0]; - ((u32 *)a)[1] ^= ((u32 *)b)[1]; - ((u32 *)a)[2] ^= ((u32 *)b)[2]; - ((u32 *)a)[3] ^= ((u32 *)b)[3]; -} - static int crypto_cbc_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = (void *)tfm->__crt_alg; @@ -245,22 +199,6 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm) struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_cipher *cipher; - switch (crypto_tfm_alg_blocksize(tfm)) { - case 8: - ctx->xor = xor_64; - break; - - case 16: - ctx->xor = xor_128; - break; - - default: - if (crypto_tfm_alg_blocksize(tfm) % 4) - ctx->xor = xor_byte; - else - ctx->xor = xor_quad; - } - cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); @@ -300,8 +238,9 @@ static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb) inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_type = &crypto_blkcipher_type; - if (!(alg->cra_blocksize % 4)) - inst->alg.cra_alignmask |= 3; + /* We access the data as u32s when xoring. */ + inst->alg.cra_alignmask |= __alignof__(u32) - 1; + inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; -- cgit v1.1 From 50b6544e1371bfe884f787107a8de0c2f8546e8f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 20 Nov 2007 17:36:00 +0800 Subject: [CRYPTO] cbc: Require block size to be a power of 2 All common block ciphers have a block size that's a power of 2. 
In fact, all of our block ciphers obey this rule. If we require this, then CBC can be optimised to avoid an expensive divide on in-place decryption. I've also changed the saving of the first IV in the in-place decryption case to the last IV because that lets us use walk->iv (which is already aligned) for the xor operation where alignment is required. Signed-off-by: Herbert Xu --- crypto/cbc.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/crypto/cbc.c b/crypto/cbc.c index b013d6f..6affff8 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -143,17 +144,13 @@ static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc, void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_decrypt; int bsize = crypto_cipher_blocksize(tfm); - unsigned long alignmask = crypto_cipher_alignmask(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; - u8 stack[bsize + alignmask]; - u8 *first_iv = (u8 *)ALIGN((unsigned long)stack, alignmask + 1); - - memcpy(first_iv, walk->iv, bsize); + u8 last_iv[bsize]; /* Start of the last block. */ - src += nbytes - nbytes % bsize - bsize; - memcpy(walk->iv, src, bsize); + src += nbytes - (nbytes & (bsize - 1)) - bsize; + memcpy(last_iv, src, bsize); for (;;) { fn(crypto_cipher_tfm(tfm), src, src); @@ -163,7 +160,8 @@ static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc, src -= bsize; } - crypto_xor(src, first_iv, bsize); + crypto_xor(src, walk->iv, bsize); + memcpy(walk->iv, last_iv, bsize); return nbytes; } @@ -228,6 +226,10 @@ static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb) if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); + inst = ERR_PTR(-EINVAL); + if (!is_power_of_2(alg->cra_blocksize)) + goto out_put_alg; + inst = crypto_alloc_instance("cbc", alg); if (IS_ERR(inst)) goto out_put_alg; -- cgit v1.1 From d0b9007a27206fe944d9db72e13dab157b8e118c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 20 Nov 2007 17:49:49 +0800 Subject: [CRYPTO] pcbc: Use crypto_xor This patch replaces the custom xor in PCBC with the generic crypto_xor. It changes the operations for in-place encryption slightly to avoid calling crypto_xor with tmpbuf since it is not necessarily aligned. 
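To make the chaining concrete, this is the per-block encrypt step as the segment path below performs it (PCBC chains with IV[i+1] = P[i] ^ C[i], so the plaintext is xored into the IV both before and after the block cipher call):

	crypto_xor(iv, src, bsize);		/* iv = IV[i] ^ P[i] */
	fn(crypto_cipher_tfm(tfm), dst, iv);	/* C[i] = E(IV[i] ^ P[i]) */
	memcpy(iv, dst, bsize);
	crypto_xor(iv, src, bsize);		/* iv = C[i] ^ P[i] = IV[i+1] */

In the in-place path the encryption overwrites src, so the plaintext is first saved with memcpy into tmpbuf; crypto_xor then only ever sees the aligned iv and src pointers, never tmpbuf.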
Signed-off-by: Herbert Xu --- crypto/pcbc.c | 105 ++++++++++++++-------------------------------------------- 1 file changed, 24 insertions(+), 81 deletions(-) diff --git a/crypto/pcbc.c b/crypto/pcbc.c index c3ed8a1..fe70477 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -24,7 +24,6 @@ struct crypto_pcbc_ctx { struct crypto_cipher *child; - void (*xor)(u8 *dst, const u8 *src, unsigned int bs); }; static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key, @@ -45,9 +44,7 @@ static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key, static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, struct blkcipher_walk *walk, - struct crypto_cipher *tfm, - void (*xor)(u8 *, const u8 *, - unsigned int)) + struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; @@ -58,10 +55,10 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, u8 *iv = walk->iv; do { - xor(iv, src, bsize); + crypto_xor(iv, src, bsize); fn(crypto_cipher_tfm(tfm), dst, iv); memcpy(iv, dst, bsize); - xor(iv, src, bsize); + crypto_xor(iv, src, bsize); src += bsize; dst += bsize; @@ -72,9 +69,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, struct blkcipher_walk *walk, - struct crypto_cipher *tfm, - void (*xor)(u8 *, const u8 *, - unsigned int)) + struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; @@ -86,10 +81,10 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, do { memcpy(tmpbuf, src, bsize); - xor(iv, tmpbuf, bsize); + crypto_xor(iv, src, bsize); fn(crypto_cipher_tfm(tfm), src, iv); - memcpy(iv, src, bsize); - xor(iv, tmpbuf, bsize); + memcpy(iv, tmpbuf, bsize); + crypto_xor(iv, src, bsize); src += bsize; } while ((nbytes -= bsize) >= bsize); @@ -107,7 +102,6 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc, struct crypto_blkcipher *tfm = desc->tfm; struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); struct crypto_cipher *child = ctx->child; - void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor; int err; blkcipher_walk_init(&walk, dst, src, nbytes); @@ -115,11 +109,11 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc, while ((nbytes = walk.nbytes)) { if (walk.src.virt.addr == walk.dst.virt.addr) - nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child, - xor); + nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, + child); else - nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child, - xor); + nbytes = crypto_pcbc_encrypt_segment(desc, &walk, + child); err = blkcipher_walk_done(desc, &walk, nbytes); } @@ -128,9 +122,7 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc, static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, struct blkcipher_walk *walk, - struct crypto_cipher *tfm, - void (*xor)(u8 *, const u8 *, - unsigned int)) + struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_decrypt; @@ -142,9 +134,9 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, do { fn(crypto_cipher_tfm(tfm), dst, src); - xor(dst, iv, bsize); + crypto_xor(dst, iv, bsize); memcpy(iv, src, bsize); - xor(iv, dst, bsize); + crypto_xor(iv, dst, bsize); src += bsize; dst += bsize; @@ -157,9 +149,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc 
*desc, struct blkcipher_walk *walk, - struct crypto_cipher *tfm, - void (*xor)(u8 *, const u8 *, - unsigned int)) + struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_decrypt; @@ -172,9 +162,9 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc, do { memcpy(tmpbuf, src, bsize); fn(crypto_cipher_tfm(tfm), src, src); - xor(src, iv, bsize); + crypto_xor(src, iv, bsize); memcpy(iv, tmpbuf, bsize); - xor(iv, src, bsize); + crypto_xor(iv, src, bsize); src += bsize; } while ((nbytes -= bsize) >= bsize); @@ -192,7 +182,6 @@ static int crypto_pcbc_decrypt(struct blkcipher_desc *desc, struct crypto_blkcipher *tfm = desc->tfm; struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm); struct crypto_cipher *child = ctx->child; - void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor; int err; blkcipher_walk_init(&walk, dst, src, nbytes); @@ -200,48 +189,17 @@ static int crypto_pcbc_decrypt(struct blkcipher_desc *desc, while ((nbytes = walk.nbytes)) { if (walk.src.virt.addr == walk.dst.virt.addr) - nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child, - xor); + nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, + child); else - nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child, - xor); + nbytes = crypto_pcbc_decrypt_segment(desc, &walk, + child); err = blkcipher_walk_done(desc, &walk, nbytes); } return err; } -static void xor_byte(u8 *a, const u8 *b, unsigned int bs) -{ - do { - *a++ ^= *b++; - } while (--bs); -} - -static void xor_quad(u8 *dst, const u8 *src, unsigned int bs) -{ - u32 *a = (u32 *)dst; - u32 *b = (u32 *)src; - - do { - *a++ ^= *b++; - } while ((bs -= 4)); -} - -static void xor_64(u8 *a, const u8 *b, unsigned int bs) -{ - ((u32 *)a)[0] ^= ((u32 *)b)[0]; - ((u32 *)a)[1] ^= ((u32 *)b)[1]; -} - -static void xor_128(u8 *a, const u8 *b, unsigned int bs) -{ - ((u32 *)a)[0] ^= ((u32 *)b)[0]; - ((u32 *)a)[1] ^= ((u32 *)b)[1]; - ((u32 *)a)[2] ^= ((u32 *)b)[2]; - ((u32 *)a)[3] ^= ((u32 *)b)[3]; -} - static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = (void *)tfm->__crt_alg; @@ -249,22 +207,6 @@ static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm) struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_cipher *cipher; - switch (crypto_tfm_alg_blocksize(tfm)) { - case 8: - ctx->xor = xor_64; - break; - - case 16: - ctx->xor = xor_128; - break; - - default: - if (crypto_tfm_alg_blocksize(tfm) % 4) - ctx->xor = xor_byte; - else - ctx->xor = xor_quad; - } - cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); @@ -304,8 +246,9 @@ static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb) inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_type = &crypto_blkcipher_type; - if (!(alg->cra_blocksize % 4)) - inst->alg.cra_alignmask |= 3; + /* We access the data as u32s when xoring. */ + inst->alg.cra_alignmask |= __alignof__(u32) - 1; + inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; -- cgit v1.1 From 3f8214ea335e422702340d7e835921e78367f99d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 20 Nov 2007 20:32:56 +0800 Subject: [CRYPTO] ctr: Use crypto_inc and crypto_xor This patch replaces the custom inc/xor in CTR with the generic functions. 
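Because crypto_inc and crypto_xor access the data as u32s, the template now folds the u32 alignment into the cipher's alignmask wherever the keystream and counter block are set up, along the lines of

	alignmask = crypto_cipher_alignmask(tfm) | (__alignof__(u32) - 1);

and crypto_ctr_alloc rejects geometries where the counter would not start on a u32 boundary, i.e. where (blocksize - countersize) % 4 is non-zero.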
Signed-off-by: Herbert Xu --- crypto/ctr.c | 71 ++++++++++++++---------------------------------------------- 1 file changed, 16 insertions(+), 55 deletions(-) diff --git a/crypto/ctr.c b/crypto/ctr.c index b974a9f..b816e95 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -31,51 +31,6 @@ struct crypto_ctr_ctx { u8 *nonce; }; -static inline void __ctr_inc_byte(u8 *a, unsigned int size) -{ - u8 *b = (a + size); - u8 c; - - for (; size; size--) { - c = *--b + 1; - *b = c; - if (c) - break; - } -} - -static void ctr_inc_quad(u8 *a, unsigned int size) -{ - __be32 *b = (__be32 *)(a + size); - u32 c; - - for (; size >= 4; size -=4) { - c = be32_to_cpu(*--b) + 1; - *b = cpu_to_be32(c); - if (c) - return; - } - - __ctr_inc_byte(a, size); -} - -static void xor_byte(u8 *a, const u8 *b, unsigned int bs) -{ - for (; bs; bs--) - *a++ ^= *b++; -} - -static void xor_quad(u8 *dst, const u8 *src, unsigned int bs) -{ - u32 *a = (u32 *)dst; - u32 *b = (u32 *)src; - - for (; bs >= 4; bs -= 4) - *a++ ^= *b++; - - xor_byte((u8 *)a, (u8 *)b, bs); -} - static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, unsigned int keylen) { @@ -111,7 +66,8 @@ static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; unsigned int bsize = crypto_cipher_blocksize(tfm); - unsigned long alignmask = crypto_cipher_alignmask(tfm); + unsigned long alignmask = crypto_cipher_alignmask(tfm) | + (__alignof__(u32) - 1); u8 ks[bsize + alignmask]; u8 *keystream = (u8 *)ALIGN((unsigned long)ks, alignmask + 1); u8 *src = walk->src.virt.addr; @@ -121,13 +77,13 @@ static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, do { /* create keystream */ fn(crypto_cipher_tfm(tfm), keystream, ctrblk); - xor_quad(keystream, src, min(nbytes, bsize)); + crypto_xor(keystream, src, min(nbytes, bsize)); /* copy result into dst */ memcpy(dst, keystream, min(nbytes, bsize)); /* increment counter in counterblock */ - ctr_inc_quad(ctrblk + (bsize - countersize), countersize); + crypto_inc(ctrblk + bsize - countersize, countersize); if (nbytes < bsize) break; @@ -148,7 +104,8 @@ static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk, void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; unsigned int bsize = crypto_cipher_blocksize(tfm); - unsigned long alignmask = crypto_cipher_alignmask(tfm); + unsigned long alignmask = crypto_cipher_alignmask(tfm) | + (__alignof__(u32) - 1); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; u8 ks[bsize + alignmask]; @@ -157,10 +114,10 @@ static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk, do { /* create keystream */ fn(crypto_cipher_tfm(tfm), keystream, ctrblk); - xor_quad(src, keystream, min(nbytes, bsize)); + crypto_xor(src, keystream, min(nbytes, bsize)); /* increment counter in counterblock */ - ctr_inc_quad(ctrblk + (bsize - countersize), countersize); + crypto_inc(ctrblk + bsize - countersize, countersize); if (nbytes < bsize) break; @@ -184,7 +141,8 @@ static int crypto_ctr_crypt(struct blkcipher_desc *desc, unsigned int bsize = crypto_cipher_blocksize(child); struct ctr_instance_ctx *ictx = crypto_instance_ctx(crypto_tfm_alg_instance(&tfm->base)); - unsigned long alignmask = crypto_cipher_alignmask(child); + unsigned long alignmask = crypto_cipher_alignmask(child) | + (__alignof__(u32) - 1); u8 cblk[bsize + alignmask]; u8 *counterblk = (u8 *)ALIGN((unsigned long)cblk, alignmask + 1); int err; @@ -198,8 +156,7 @@ static int 
crypto_ctr_crypt(struct blkcipher_desc *desc, memcpy(counterblk + ictx->noncesize, walk.iv, ictx->ivsize); /* initialize counter portion of counter block */ - ctr_inc_quad(counterblk + (bsize - ictx->countersize), - ictx->countersize); + crypto_inc(counterblk + bsize - ictx->countersize, ictx->countersize); while (walk.nbytes) { if (walk.src.virt.addr == walk.dst.virt.addr) @@ -284,6 +241,10 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) (countersize > alg->cra_blocksize) || (countersize < 4)) goto out_put_alg; + /* If this is false we'd fail the alignment of crypto_inc. */ + if ((alg->cra_blocksize - countersize) % 4) + goto out_put_alg; + inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); err = -ENOMEM; if (!inst) @@ -316,7 +277,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = 1; - inst->alg.cra_alignmask = 3; + inst->alg.cra_alignmask = __alignof__(u32) - 1; inst->alg.cra_type = &crypto_blkcipher_type; inst->alg.cra_blkcipher.ivsize = ivsize; -- cgit v1.1 From 2ddae4a64491f790799e2adbfaec72a23dc2e7ef Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Fri, 23 Nov 2007 21:05:55 +0800 Subject: [CRYPTO] camellia: Code shrink Remove unused macro params. Use (u8)(expr) instead of (expr) & 0xff, which helps gcc see that it can use simpler instructions. Move CAMELLIA_FLS macro closer to encrypt/decrypt routines. Signed-off-by: Denys Vlasenko Signed-off-by: Herbert Xu --- crypto/camellia.c | 269 +++++++++++++++++++++++++++--------------------------- 1 file changed, 135 insertions(+), 134 deletions(-) diff --git a/crypto/camellia.c b/crypto/camellia.c index 2e129ab..9b1f068 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c @@ -310,6 +310,12 @@ static const u32 camellia_sp4404[256] = { #define CAMELLIA_BLOCK_SIZE 16 #define CAMELLIA_TABLE_BYTE_LEN 272 +/* + * NB: L and R below stand for 'left' and 'right' as in written numbers. + * That is, in (xxxL,xxxR) pair xxxL holds most significant digits, + * _not_ least significant ones! 
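+ * E.g. the 64-bit quantity 0x0123456789abcdef is kept in an (xl,xr)
+ * pair as xl = 0x01234567 and xr = 0x89abcdef.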
+ */ + /* key constants */ @@ -329,8 +335,7 @@ static const u32 camellia_sp4404[256] = { /* * macros */ - -# define GETU32(v, pt) \ +#define GETU32(v, pt) \ do { \ /* latest breed of gcc is clever enough to use move */ \ memcpy(&(v), (pt), 4); \ @@ -363,64 +368,25 @@ static const u32 camellia_sp4404[256] = { rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \ } while(0) - #define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \ do { \ il = xl ^ kl; \ ir = xr ^ kr; \ t0 = il >> 16; \ t1 = ir >> 16; \ - yl = camellia_sp1110[ir & 0xff] \ - ^ camellia_sp0222[(t1 >> 8) & 0xff] \ - ^ camellia_sp3033[t1 & 0xff] \ - ^ camellia_sp4404[(ir >> 8) & 0xff]; \ - yr = camellia_sp1110[(t0 >> 8) & 0xff] \ - ^ camellia_sp0222[t0 & 0xff] \ - ^ camellia_sp3033[(il >> 8) & 0xff] \ - ^ camellia_sp4404[il & 0xff]; \ + yl = camellia_sp1110[(u8)(ir )] \ + ^ camellia_sp0222[ (t1 >> 8)] \ + ^ camellia_sp3033[(u8)(t1 )] \ + ^ camellia_sp4404[(u8)(ir >> 8)]; \ + yr = camellia_sp1110[ (t0 >> 8)] \ + ^ camellia_sp0222[(u8)(t0 )] \ + ^ camellia_sp3033[(u8)(il >> 8)] \ + ^ camellia_sp4404[(u8)(il )]; \ yl ^= yr; \ yr = ROR8(yr); \ yr ^= yl; \ } while(0) - -/* - * for speed up - * - */ -#define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) \ - do { \ - t0 = kll; \ - t2 = krr; \ - t0 &= ll; \ - t2 |= rr; \ - rl ^= t2; \ - lr ^= ROL1(t0); \ - t3 = krl; \ - t1 = klr; \ - t3 &= rl; \ - t1 |= lr; \ - ll ^= t1; \ - rr ^= ROL1(t3); \ - } while(0) - -#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \ - do { \ - ir = camellia_sp1110[xr & 0xff]; \ - il = camellia_sp1110[(xl>>24) & 0xff]; \ - ir ^= camellia_sp0222[(xr>>24) & 0xff]; \ - il ^= camellia_sp0222[(xl>>16) & 0xff]; \ - ir ^= camellia_sp3033[(xr>>16) & 0xff]; \ - il ^= camellia_sp3033[(xl>>8) & 0xff]; \ - ir ^= camellia_sp4404[(xr>>8) & 0xff]; \ - il ^= camellia_sp4404[xl & 0xff]; \ - il ^= kl; \ - ir ^= il ^ kr; \ - yl ^= ir; \ - yr ^= ROR8(il) ^ ir; \ - } while(0) - - #define SUBKEY_L(INDEX) (subkey[(INDEX)*2]) #define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1]) @@ -1000,6 +966,41 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) } +/* + * Encrypt/decrypt + */ +#define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) \ + do { \ + t0 = kll; \ + t2 = krr; \ + t0 &= ll; \ + t2 |= rr; \ + rl ^= t2; \ + lr ^= ROL1(t0); \ + t3 = krl; \ + t1 = klr; \ + t3 &= rl; \ + t1 |= lr; \ + ll ^= t1; \ + rr ^= ROL1(t3); \ + } while(0) + +#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \ + do { \ + ir = camellia_sp1110[(u8)xr]; \ + il = camellia_sp1110[ (xl >> 24)]; \ + ir ^= camellia_sp0222[ (xr >> 24)]; \ + il ^= camellia_sp0222[(u8)(xl >> 16)]; \ + ir ^= camellia_sp3033[(u8)(xr >> 16)]; \ + il ^= camellia_sp3033[(u8)(xl >> 8)]; \ + ir ^= camellia_sp4404[(u8)(xr >> 8)]; \ + il ^= camellia_sp4404[(u8)xl]; \ + il ^= kl; \ + ir ^= il ^ kr; \ + yl ^= ir; \ + yr ^= ROR8(il) ^ ir; \ + } while(0) + static void camellia_encrypt128(const u32 *subkey, u32 *io_text) { u32 il,ir,t0,t1; /* temporary variables */ @@ -1015,22 +1016,22 @@ static void camellia_encrypt128(const u32 *subkey, u32 *io_text) /* main iteration */ CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(2),SUBKEY_R(2), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(3),SUBKEY_R(3), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(4),SUBKEY_R(4), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(5),SUBKEY_R(5), - io[0],io[1],il,ir,t0,t1); + 
io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(6),SUBKEY_R(6), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(7),SUBKEY_R(7), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBKEY_L(8),SUBKEY_R(8), @@ -1039,22 +1040,22 @@ static void camellia_encrypt128(const u32 *subkey, u32 *io_text) CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(10),SUBKEY_R(10), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(11),SUBKEY_R(11), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(12),SUBKEY_R(12), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(13),SUBKEY_R(13), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(14),SUBKEY_R(14), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(15),SUBKEY_R(15), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBKEY_L(16),SUBKEY_R(16), @@ -1063,22 +1064,22 @@ static void camellia_encrypt128(const u32 *subkey, u32 *io_text) CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(18),SUBKEY_R(18), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(19),SUBKEY_R(19), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(20),SUBKEY_R(20), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(21),SUBKEY_R(21), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(22),SUBKEY_R(22), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(23),SUBKEY_R(23), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); /* post whitening but kw4 */ io_text[0] = io[2] ^ SUBKEY_L(24); @@ -1102,22 +1103,22 @@ static void camellia_decrypt128(const u32 *subkey, u32 *io_text) /* main iteration */ CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(23),SUBKEY_R(23), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(22),SUBKEY_R(22), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(21),SUBKEY_R(21), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(20),SUBKEY_R(20), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(19),SUBKEY_R(19), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(18),SUBKEY_R(18), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBKEY_L(17),SUBKEY_R(17), @@ -1126,22 +1127,22 @@ static void camellia_decrypt128(const u32 *subkey, u32 *io_text) CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(15),SUBKEY_R(15), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(14),SUBKEY_R(14), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(13),SUBKEY_R(13), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(12),SUBKEY_R(12), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(11),SUBKEY_R(11), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(10),SUBKEY_R(10), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBKEY_L(9),SUBKEY_R(9), @@ -1150,22 +1151,22 @@ static void 
camellia_decrypt128(const u32 *subkey, u32 *io_text) CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(7),SUBKEY_R(7), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(6),SUBKEY_R(6), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(5),SUBKEY_R(5), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(4),SUBKEY_R(4), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(3),SUBKEY_R(3), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(2),SUBKEY_R(2), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); /* post whitening but kw4 */ io_text[0] = io[2] ^ SUBKEY_L(0); @@ -1189,22 +1190,22 @@ static void camellia_encrypt256(const u32 *subkey, u32 *io_text) /* main iteration */ CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(2),SUBKEY_R(2), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(3),SUBKEY_R(3), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(4),SUBKEY_R(4), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(5),SUBKEY_R(5), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(6),SUBKEY_R(6), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(7),SUBKEY_R(7), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBKEY_L(8),SUBKEY_R(8), @@ -1213,22 +1214,22 @@ static void camellia_encrypt256(const u32 *subkey, u32 *io_text) CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(10),SUBKEY_R(10), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(11),SUBKEY_R(11), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(12),SUBKEY_R(12), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(13),SUBKEY_R(13), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(14),SUBKEY_R(14), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(15),SUBKEY_R(15), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBKEY_L(16),SUBKEY_R(16), @@ -1237,22 +1238,22 @@ static void camellia_encrypt256(const u32 *subkey, u32 *io_text) CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(18),SUBKEY_R(18), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(19),SUBKEY_R(19), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(20),SUBKEY_R(20), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(21),SUBKEY_R(21), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(22),SUBKEY_R(22), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(23),SUBKEY_R(23), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBKEY_L(24),SUBKEY_R(24), @@ -1261,22 +1262,22 @@ static void camellia_encrypt256(const u32 *subkey, u32 *io_text) CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(26),SUBKEY_R(26), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(27),SUBKEY_R(27), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(28),SUBKEY_R(28), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); 
CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(29),SUBKEY_R(29), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(30),SUBKEY_R(30), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(31),SUBKEY_R(31), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); /* post whitening but kw4 */ io_text[0] = io[2] ^ SUBKEY_L(32); @@ -1300,22 +1301,22 @@ static void camellia_decrypt256(const u32 *subkey, u32 *io_text) /* main iteration */ CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(31),SUBKEY_R(31), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(30),SUBKEY_R(30), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(29),SUBKEY_R(29), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(28),SUBKEY_R(28), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(27),SUBKEY_R(27), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(26),SUBKEY_R(26), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBKEY_L(25),SUBKEY_R(25), @@ -1324,22 +1325,22 @@ static void camellia_decrypt256(const u32 *subkey, u32 *io_text) CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(23),SUBKEY_R(23), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(22),SUBKEY_R(22), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(21),SUBKEY_R(21), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(20),SUBKEY_R(20), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(19),SUBKEY_R(19), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(18),SUBKEY_R(18), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBKEY_L(17),SUBKEY_R(17), @@ -1348,22 +1349,22 @@ static void camellia_decrypt256(const u32 *subkey, u32 *io_text) CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(15),SUBKEY_R(15), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(14),SUBKEY_R(14), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(13),SUBKEY_R(13), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(12),SUBKEY_R(12), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(11),SUBKEY_R(11), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(10),SUBKEY_R(10), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBKEY_L(9),SUBKEY_R(9), @@ -1372,22 +1373,22 @@ static void camellia_decrypt256(const u32 *subkey, u32 *io_text) CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(7),SUBKEY_R(7), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(6),SUBKEY_R(6), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(5),SUBKEY_R(5), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(4),SUBKEY_R(4), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); CAMELLIA_ROUNDSM(io[0],io[1], SUBKEY_L(3),SUBKEY_R(3), - io[2],io[3],il,ir,t0,t1); + io[2],io[3],il,ir); CAMELLIA_ROUNDSM(io[2],io[3], SUBKEY_L(2),SUBKEY_R(2), - io[0],io[1],il,ir,t0,t1); + io[0],io[1],il,ir); /* post whitening but kw4 */ io_text[0] = 
io[2] ^ SUBKEY_L(0); @@ -1399,7 +1400,7 @@ static void camellia_decrypt256(const u32 *subkey, u32 *io_text) struct camellia_ctx { int key_length; - u32 key_table[CAMELLIA_TABLE_BYTE_LEN / 4]; + u32 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u32)]; }; static int -- cgit v1.1 From acca79a664859e3ddaea87af86d4ccfb2e07cd65 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Fri, 23 Nov 2007 21:10:03 +0800 Subject: [CRYPTO] camellia: Merge encrypt/decrypt routines for all key lengths unifies encrypt/decrypt routines for different key lengths. This reduces module size by ~25%, with tiny (less than 1%) speed impact. Also collapses encrypt/decrypt into more readable (visually shorter) form using macros. Signed-off-by: Denys Vlasenko Signed-off-by: Herbert Xu --- crypto/camellia.c | 509 ++++++++++++------------------------------------------ 1 file changed, 106 insertions(+), 403 deletions(-) diff --git a/crypto/camellia.c b/crypto/camellia.c index 9b1f068..0534e6b 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c @@ -1001,400 +1001,115 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) yr ^= ROR8(il) ^ ir; \ } while(0) -static void camellia_encrypt128(const u32 *subkey, u32 *io_text) +/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */ +static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max) { u32 il,ir,t0,t1; /* temporary variables */ - u32 io[4]; - /* pre whitening but absorb kw2 */ - io[0] = io_text[0] ^ SUBKEY_L(0); - io[1] = io_text[1] ^ SUBKEY_R(0); - io[2] = io_text[2]; - io[3] = io_text[3]; + io[0] ^= SUBKEY_L(0); + io[1] ^= SUBKEY_R(0); /* main iteration */ - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(2),SUBKEY_R(2), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(3),SUBKEY_R(3), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(4),SUBKEY_R(4), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(5),SUBKEY_R(5), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(6),SUBKEY_R(6), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(7),SUBKEY_R(7), - io[0],io[1],il,ir); - - CAMELLIA_FLS(io[0],io[1],io[2],io[3], - SUBKEY_L(8),SUBKEY_R(8), - SUBKEY_L(9),SUBKEY_R(9), - t0,t1,il,ir); - - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(10),SUBKEY_R(10), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(11),SUBKEY_R(11), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(12),SUBKEY_R(12), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(13),SUBKEY_R(13), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(14),SUBKEY_R(14), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(15),SUBKEY_R(15), - io[0],io[1],il,ir); - - CAMELLIA_FLS(io[0],io[1],io[2],io[3], - SUBKEY_L(16),SUBKEY_R(16), - SUBKEY_L(17),SUBKEY_R(17), - t0,t1,il,ir); - - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(18),SUBKEY_R(18), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(19),SUBKEY_R(19), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(20),SUBKEY_R(20), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(21),SUBKEY_R(21), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(22),SUBKEY_R(22), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(23),SUBKEY_R(23), - io[0],io[1],il,ir); - - /* post whitening but kw4 */ - io_text[0] = io[2] ^ SUBKEY_L(24); - io_text[1] = io[3] ^ SUBKEY_R(24); - io_text[2] = io[0]; - io_text[3] = io[1]; -} - -static void camellia_decrypt128(const u32 *subkey, 
u32 *io_text) -{ - u32 il,ir,t0,t1; /* temporary variables */ - - u32 io[4]; - - /* pre whitening but absorb kw2 */ - io[0] = io_text[0] ^ SUBKEY_L(24); - io[1] = io_text[1] ^ SUBKEY_R(24); - io[2] = io_text[2]; - io[3] = io_text[3]; +#define ROUNDS(i) do { \ + CAMELLIA_ROUNDSM(io[0],io[1], \ + SUBKEY_L(i + 2),SUBKEY_R(i + 2), \ + io[2],io[3],il,ir); \ + CAMELLIA_ROUNDSM(io[2],io[3], \ + SUBKEY_L(i + 3),SUBKEY_R(i + 3), \ + io[0],io[1],il,ir); \ + CAMELLIA_ROUNDSM(io[0],io[1], \ + SUBKEY_L(i + 4),SUBKEY_R(i + 4), \ + io[2],io[3],il,ir); \ + CAMELLIA_ROUNDSM(io[2],io[3], \ + SUBKEY_L(i + 5),SUBKEY_R(i + 5), \ + io[0],io[1],il,ir); \ + CAMELLIA_ROUNDSM(io[0],io[1], \ + SUBKEY_L(i + 6),SUBKEY_R(i + 6), \ + io[2],io[3],il,ir); \ + CAMELLIA_ROUNDSM(io[2],io[3], \ + SUBKEY_L(i + 7),SUBKEY_R(i + 7), \ + io[0],io[1],il,ir); \ +} while (0) +#define FLS(i) do { \ + CAMELLIA_FLS(io[0],io[1],io[2],io[3], \ + SUBKEY_L(i + 0),SUBKEY_R(i + 0), \ + SUBKEY_L(i + 1),SUBKEY_R(i + 1), \ + t0,t1,il,ir); \ +} while (0) + + ROUNDS(0); + FLS(8); + ROUNDS(8); + FLS(16); + ROUNDS(16); + if (max == 32) { + FLS(24); + ROUNDS(24); + } - /* main iteration */ - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(23),SUBKEY_R(23), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(22),SUBKEY_R(22), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(21),SUBKEY_R(21), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(20),SUBKEY_R(20), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(19),SUBKEY_R(19), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(18),SUBKEY_R(18), - io[0],io[1],il,ir); - - CAMELLIA_FLS(io[0],io[1],io[2],io[3], - SUBKEY_L(17),SUBKEY_R(17), - SUBKEY_L(16),SUBKEY_R(16), - t0,t1,il,ir); - - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(15),SUBKEY_R(15), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(14),SUBKEY_R(14), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(13),SUBKEY_R(13), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(12),SUBKEY_R(12), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(11),SUBKEY_R(11), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(10),SUBKEY_R(10), - io[0],io[1],il,ir); - - CAMELLIA_FLS(io[0],io[1],io[2],io[3], - SUBKEY_L(9),SUBKEY_R(9), - SUBKEY_L(8),SUBKEY_R(8), - t0,t1,il,ir); - - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(7),SUBKEY_R(7), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(6),SUBKEY_R(6), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(5),SUBKEY_R(5), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(4),SUBKEY_R(4), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(3),SUBKEY_R(3), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(2),SUBKEY_R(2), - io[0],io[1],il,ir); +#undef ROUNDS +#undef FLS /* post whitening but kw4 */ - io_text[0] = io[2] ^ SUBKEY_L(0); - io_text[1] = io[3] ^ SUBKEY_R(0); - io_text[2] = io[0]; - io_text[3] = io[1]; + io[2] ^= SUBKEY_L(max); + io[3] ^= SUBKEY_R(max); + /* NB: io[0],[1] should be swapped with [2],[3] by caller! 
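+ * (i.e. the caller stores out[0] = io[2], out[1] = io[3], out[2] = io[0],
+ * out[3] = io[1], exactly as the old camellia_encrypt128 tail did)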
*/ } -static void camellia_encrypt256(const u32 *subkey, u32 *io_text) +static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i) { - u32 il,ir,t0,t1; /* temporary variables */ - - u32 io[4]; + u32 il,ir,t0,t1; /* temporary variables */ /* pre whitening but absorb kw2 */ - io[0] = io_text[0] ^ SUBKEY_L(0); - io[1] = io_text[1] ^ SUBKEY_R(0); - io[2] = io_text[2]; - io[3] = io_text[3]; + io[0] ^= SUBKEY_L(i); + io[1] ^= SUBKEY_R(i); /* main iteration */ - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(2),SUBKEY_R(2), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(3),SUBKEY_R(3), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(4),SUBKEY_R(4), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(5),SUBKEY_R(5), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(6),SUBKEY_R(6), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(7),SUBKEY_R(7), - io[0],io[1],il,ir); - - CAMELLIA_FLS(io[0],io[1],io[2],io[3], - SUBKEY_L(8),SUBKEY_R(8), - SUBKEY_L(9),SUBKEY_R(9), - t0,t1,il,ir); - - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(10),SUBKEY_R(10), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(11),SUBKEY_R(11), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(12),SUBKEY_R(12), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(13),SUBKEY_R(13), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(14),SUBKEY_R(14), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(15),SUBKEY_R(15), - io[0],io[1],il,ir); - - CAMELLIA_FLS(io[0],io[1],io[2],io[3], - SUBKEY_L(16),SUBKEY_R(16), - SUBKEY_L(17),SUBKEY_R(17), - t0,t1,il,ir); - - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(18),SUBKEY_R(18), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(19),SUBKEY_R(19), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(20),SUBKEY_R(20), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(21),SUBKEY_R(21), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(22),SUBKEY_R(22), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(23),SUBKEY_R(23), - io[0],io[1],il,ir); - - CAMELLIA_FLS(io[0],io[1],io[2],io[3], - SUBKEY_L(24),SUBKEY_R(24), - SUBKEY_L(25),SUBKEY_R(25), - t0,t1,il,ir); - - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(26),SUBKEY_R(26), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(27),SUBKEY_R(27), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(28),SUBKEY_R(28), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(29),SUBKEY_R(29), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(30),SUBKEY_R(30), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(31),SUBKEY_R(31), - io[0],io[1],il,ir); - - /* post whitening but kw4 */ - io_text[0] = io[2] ^ SUBKEY_L(32); - io_text[1] = io[3] ^ SUBKEY_R(32); - io_text[2] = io[0]; - io_text[3] = io[1]; -} - -static void camellia_decrypt256(const u32 *subkey, u32 *io_text) -{ - u32 il,ir,t0,t1; /* temporary variables */ +#define ROUNDS(i) do { \ + CAMELLIA_ROUNDSM(io[0],io[1], \ + SUBKEY_L(i + 7),SUBKEY_R(i + 7), \ + io[2],io[3],il,ir); \ + CAMELLIA_ROUNDSM(io[2],io[3], \ + SUBKEY_L(i + 6),SUBKEY_R(i + 6), \ + io[0],io[1],il,ir); \ + CAMELLIA_ROUNDSM(io[0],io[1], \ + SUBKEY_L(i + 5),SUBKEY_R(i + 5), \ + io[2],io[3],il,ir); \ + CAMELLIA_ROUNDSM(io[2],io[3], \ + SUBKEY_L(i + 4),SUBKEY_R(i + 4), \ + io[0],io[1],il,ir); \ + CAMELLIA_ROUNDSM(io[0],io[1], \ + SUBKEY_L(i + 3),SUBKEY_R(i + 3), \ + 
io[2],io[3],il,ir); \ + CAMELLIA_ROUNDSM(io[2],io[3], \ + SUBKEY_L(i + 2),SUBKEY_R(i + 2), \ + io[0],io[1],il,ir); \ +} while (0) +#define FLS(i) do { \ + CAMELLIA_FLS(io[0],io[1],io[2],io[3], \ + SUBKEY_L(i + 1),SUBKEY_R(i + 1), \ + SUBKEY_L(i + 0),SUBKEY_R(i + 0), \ + t0,t1,il,ir); \ +} while (0) + + if (i == 32) { + ROUNDS(24); + FLS(24); + } + ROUNDS(16); + FLS(16); + ROUNDS(8); + FLS(8); + ROUNDS(0); - u32 io[4]; - - /* pre whitening but absorb kw2 */ - io[0] = io_text[0] ^ SUBKEY_L(32); - io[1] = io_text[1] ^ SUBKEY_R(32); - io[2] = io_text[2]; - io[3] = io_text[3]; - - /* main iteration */ - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(31),SUBKEY_R(31), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(30),SUBKEY_R(30), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(29),SUBKEY_R(29), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(28),SUBKEY_R(28), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(27),SUBKEY_R(27), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(26),SUBKEY_R(26), - io[0],io[1],il,ir); - - CAMELLIA_FLS(io[0],io[1],io[2],io[3], - SUBKEY_L(25),SUBKEY_R(25), - SUBKEY_L(24),SUBKEY_R(24), - t0,t1,il,ir); - - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(23),SUBKEY_R(23), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(22),SUBKEY_R(22), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(21),SUBKEY_R(21), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(20),SUBKEY_R(20), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(19),SUBKEY_R(19), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(18),SUBKEY_R(18), - io[0],io[1],il,ir); - - CAMELLIA_FLS(io[0],io[1],io[2],io[3], - SUBKEY_L(17),SUBKEY_R(17), - SUBKEY_L(16),SUBKEY_R(16), - t0,t1,il,ir); - - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(15),SUBKEY_R(15), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(14),SUBKEY_R(14), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(13),SUBKEY_R(13), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(12),SUBKEY_R(12), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(11),SUBKEY_R(11), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(10),SUBKEY_R(10), - io[0],io[1],il,ir); - - CAMELLIA_FLS(io[0],io[1],io[2],io[3], - SUBKEY_L(9),SUBKEY_R(9), - SUBKEY_L(8),SUBKEY_R(8), - t0,t1,il,ir); - - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(7),SUBKEY_R(7), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(6),SUBKEY_R(6), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(5),SUBKEY_R(5), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(4),SUBKEY_R(4), - io[0],io[1],il,ir); - CAMELLIA_ROUNDSM(io[0],io[1], - SUBKEY_L(3),SUBKEY_R(3), - io[2],io[3],il,ir); - CAMELLIA_ROUNDSM(io[2],io[3], - SUBKEY_L(2),SUBKEY_R(2), - io[0],io[1],il,ir); +#undef ROUNDS +#undef FLS /* post whitening but kw4 */ - io_text[0] = io[2] ^ SUBKEY_L(0); - io_text[1] = io[3] ^ SUBKEY_R(0); - io_text[2] = io[0]; - io_text[3] = io[1]; + io[2] ^= SUBKEY_L(0); + io[3] ^= SUBKEY_R(0); + /* NB: 0,1 should be swapped with 2,3 by caller! 
*/ } @@ -1446,21 +1161,15 @@ static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) tmp[2] = be32_to_cpu(src[2]); tmp[3] = be32_to_cpu(src[3]); - switch (cctx->key_length) { - case 16: - camellia_encrypt128(cctx->key_table, tmp); - break; - case 24: - /* fall through */ - case 32: - camellia_encrypt256(cctx->key_table, tmp); - break; - } + camellia_do_encrypt(cctx->key_table, tmp, + cctx->key_length == 16 ? 24 : 32 /* for key lengths of 24 and 32 */ + ); - dst[0] = cpu_to_be32(tmp[0]); - dst[1] = cpu_to_be32(tmp[1]); - dst[2] = cpu_to_be32(tmp[2]); - dst[3] = cpu_to_be32(tmp[3]); + /* do_encrypt returns 0,1 swapped with 2,3 */ + dst[0] = cpu_to_be32(tmp[2]); + dst[1] = cpu_to_be32(tmp[3]); + dst[2] = cpu_to_be32(tmp[0]); + dst[3] = cpu_to_be32(tmp[1]); } static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) @@ -1476,21 +1185,15 @@ static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) tmp[2] = be32_to_cpu(src[2]); tmp[3] = be32_to_cpu(src[3]); - switch (cctx->key_length) { - case 16: - camellia_decrypt128(cctx->key_table, tmp); - break; - case 24: - /* fall through */ - case 32: - camellia_decrypt256(cctx->key_table, tmp); - break; - } + camellia_do_decrypt(cctx->key_table, tmp, + cctx->key_length == 16 ? 24 : 32 /* for key lengths of 24 and 32 */ + ); - dst[0] = cpu_to_be32(tmp[0]); - dst[1] = cpu_to_be32(tmp[1]); - dst[2] = cpu_to_be32(tmp[2]); - dst[3] = cpu_to_be32(tmp[3]); + /* do_decrypt returns 0,1 swapped with 2,3 */ + dst[0] = cpu_to_be32(tmp[2]); + dst[1] = cpu_to_be32(tmp[3]); + dst[2] = cpu_to_be32(tmp[0]); + dst[3] = cpu_to_be32(tmp[1]); } static struct crypto_alg camellia_alg = { -- cgit v1.1 From dedcf8b0647572ca00547efef58dfab6b8dddf83 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Fri, 23 Nov 2007 21:14:24 +0800 Subject: [CRYPTO] camellia: Move common code into camellia_setup_tail Move "key XOR is end of F-function" code part into camellia_setup_tail(), it is sufficiently similar between camellia_setup128 and camellia_setup256. 
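For reference, the resulting call shape is sketched below (a sketch only: the elided key-schedule derivation is unchanged, and the signatures, array sizes and the 24/32 arguments are taken from the diff that follows):

static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max);

static void camellia_setup128(const unsigned char *key, u32 *subkey)
{
	u32 subL[26], subR[26];
	/* ... derive subL[]/subR[] from the 128-bit key as before ... */
	camellia_setup_tail(subkey, subL, subR, 24);	/* 18-round subkey layout */
}

static void camellia_setup256(const unsigned char *key, u32 *subkey)
{
	u32 subL[34], subR[34];
	/* ... derive subL[]/subR[] from the 256-bit key as before ... */
	camellia_setup_tail(subkey, subL, subR, 32);	/* 24-round subkey layout */
}
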
Signed-off-by: Denys Vlasenko Signed-off-by: Herbert Xu --- crypto/camellia.c | 264 +++++++++++++++++++++--------------------------------- 1 file changed, 104 insertions(+), 160 deletions(-) diff --git a/crypto/camellia.c b/crypto/camellia.c index 0534e6b..86af42e 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c @@ -390,10 +390,104 @@ static const u32 camellia_sp4404[256] = { #define SUBKEY_L(INDEX) (subkey[(INDEX)*2]) #define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1]) -static void camellia_setup_tail(u32 *subkey, int max) +static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) { - u32 dw; - int i = 2; + u32 dw, tl, tr; + int i; + + /* key XOR is end of F-function */ + SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */ + SUBKEY_R(0) = subR[0] ^ subR[2]; + SUBKEY_L(2) = subL[3]; /* round 1 */ + SUBKEY_R(2) = subR[3]; + SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */ + SUBKEY_R(3) = subR[2] ^ subR[4]; + SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */ + SUBKEY_R(4) = subR[3] ^ subR[5]; + SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */ + SUBKEY_R(5) = subR[4] ^ subR[6]; + SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */ + SUBKEY_R(6) = subR[5] ^ subR[7]; + tl = subL[10] ^ (subR[10] & ~subR[8]); + dw = tl & subL[8], /* FL(kl1) */ + tr = subR[10] ^ ROL1(dw); + SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ + SUBKEY_R(7) = subR[6] ^ tr; + SUBKEY_L(8) = subL[8]; /* FL(kl1) */ + SUBKEY_R(8) = subR[8]; + SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */ + SUBKEY_R(9) = subR[9]; + tl = subL[7] ^ (subR[7] & ~subR[9]); + dw = tl & subL[9], /* FLinv(kl2) */ + tr = subR[7] ^ ROL1(dw); + SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ + SUBKEY_R(10) = tr ^ subR[11]; + SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ + SUBKEY_R(11) = subR[10] ^ subR[12]; + SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */ + SUBKEY_R(12) = subR[11] ^ subR[13]; + SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */ + SUBKEY_R(13) = subR[12] ^ subR[14]; + SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */ + SUBKEY_R(14) = subR[13] ^ subR[15]; + tl = subL[18] ^ (subR[18] & ~subR[16]); + dw = tl & subL[16], /* FL(kl3) */ + tr = subR[18] ^ ROL1(dw); + SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ + SUBKEY_R(15) = subR[14] ^ tr; + SUBKEY_L(16) = subL[16]; /* FL(kl3) */ + SUBKEY_R(16) = subR[16]; + SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */ + SUBKEY_R(17) = subR[17]; + tl = subL[15] ^ (subR[15] & ~subR[17]); + dw = tl & subL[17], /* FLinv(kl4) */ + tr = subR[15] ^ ROL1(dw); + SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ + SUBKEY_R(18) = tr ^ subR[19]; + SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ + SUBKEY_R(19) = subR[18] ^ subR[20]; + SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */ + SUBKEY_R(20) = subR[19] ^ subR[21]; + SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */ + SUBKEY_R(21) = subR[20] ^ subR[22]; + SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */ + SUBKEY_R(22) = subR[21] ^ subR[23]; + if (max == 24) { + SUBKEY_L(23) = subL[22]; /* round 18 */ + SUBKEY_R(23) = subR[22]; + SUBKEY_L(24) = subL[24] ^ subL[23]; /* kw3 */ + SUBKEY_R(24) = subR[24] ^ subR[23]; + } else { + tl = subL[26] ^ (subR[26] & ~subR[24]); + dw = tl & subL[24], /* FL(kl5) */ + tr = subR[26] ^ ROL1(dw); + SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */ + SUBKEY_R(23) = subR[22] ^ tr; + SUBKEY_L(24) = subL[24]; /* FL(kl5) */ + SUBKEY_R(24) = subR[24]; + SUBKEY_L(25) = subL[25]; /* FLinv(kl6) */ + SUBKEY_R(25) = subR[25]; + tl = subL[23] ^ (subR[23] & ~subR[25]); + dw = tl & subL[25], /* FLinv(kl6) */ + tr = subR[23] ^ ROL1(dw); + SUBKEY_L(26) = tl ^ 
subL[27]; /* round 19 */ + SUBKEY_R(26) = tr ^ subR[27]; + SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ + SUBKEY_R(27) = subR[26] ^ subR[28]; + SUBKEY_L(28) = subL[27] ^ subL[29]; /* round 21 */ + SUBKEY_R(28) = subR[27] ^ subR[29]; + SUBKEY_L(29) = subL[28] ^ subL[30]; /* round 22 */ + SUBKEY_R(29) = subR[28] ^ subR[30]; + SUBKEY_L(30) = subL[29] ^ subL[31]; /* round 23 */ + SUBKEY_R(30) = subR[29] ^ subR[31]; + SUBKEY_L(31) = subL[30]; /* round 24 */ + SUBKEY_R(31) = subR[30]; + SUBKEY_L(32) = subL[32] ^ subL[31]; /* kw3 */ + SUBKEY_R(32) = subR[32] ^ subR[31]; + } + + /* apply the inverse of the last half of P-function */ + i = 2; do { dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */ SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw; @@ -415,21 +509,19 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) { u32 kll, klr, krl, krr; u32 il, ir, t0, t1, w0, w1; - u32 kw4l, kw4r, dw, tl, tr; + u32 kw4l, kw4r, dw; u32 subL[26]; u32 subR[26]; /** - * k == kll || klr || krl || krr (|| is concatination) + * k == kll || klr || krl || krr (|| is concatenation) */ GETU32(kll, key ); GETU32(klr, key + 4); GETU32(krl, key + 8); GETU32(krr, key + 12); - /** - * generate KL dependent subkeys - */ + /* generate KL dependent subkeys */ /* kw1 */ subL[0] = kll; subR[0] = klr; /* kw2 */ @@ -574,70 +666,7 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) /* kw1 */ subL[0] ^= kw4l; subR[0] ^= kw4r; - /* key XOR is end of F-function */ - SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */ - SUBKEY_R(0) = subR[0] ^ subR[2]; - SUBKEY_L(2) = subL[3]; /* round 1 */ - SUBKEY_R(2) = subR[3]; - SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */ - SUBKEY_R(3) = subR[2] ^ subR[4]; - SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */ - SUBKEY_R(4) = subR[3] ^ subR[5]; - SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */ - SUBKEY_R(5) = subR[4] ^ subR[6]; - SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */ - SUBKEY_R(6) = subR[5] ^ subR[7]; - tl = subL[10] ^ (subR[10] & ~subR[8]); - dw = tl & subL[8], /* FL(kl1) */ - tr = subR[10] ^ ROL1(dw); - SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ - SUBKEY_R(7) = subR[6] ^ tr; - SUBKEY_L(8) = subL[8]; /* FL(kl1) */ - SUBKEY_R(8) = subR[8]; - SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */ - SUBKEY_R(9) = subR[9]; - tl = subL[7] ^ (subR[7] & ~subR[9]); - dw = tl & subL[9], /* FLinv(kl2) */ - tr = subR[7] ^ ROL1(dw); - SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ - SUBKEY_R(10) = tr ^ subR[11]; - SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ - SUBKEY_R(11) = subR[10] ^ subR[12]; - SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */ - SUBKEY_R(12) = subR[11] ^ subR[13]; - SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */ - SUBKEY_R(13) = subR[12] ^ subR[14]; - SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */ - SUBKEY_R(14) = subR[13] ^ subR[15]; - tl = subL[18] ^ (subR[18] & ~subR[16]); - dw = tl & subL[16], /* FL(kl3) */ - tr = subR[18] ^ ROL1(dw); - SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ - SUBKEY_R(15) = subR[14] ^ tr; - SUBKEY_L(16) = subL[16]; /* FL(kl3) */ - SUBKEY_R(16) = subR[16]; - SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */ - SUBKEY_R(17) = subR[17]; - tl = subL[15] ^ (subR[15] & ~subR[17]); - dw = tl & subL[17], /* FLinv(kl4) */ - tr = subR[15] ^ ROL1(dw); - SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ - SUBKEY_R(18) = tr ^ subR[19]; - SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ - SUBKEY_R(19) = subR[18] ^ subR[20]; - SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */ - SUBKEY_R(20) = subR[19] ^ subR[21]; - 
SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */ - SUBKEY_R(21) = subR[20] ^ subR[22]; - SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */ - SUBKEY_R(22) = subR[21] ^ subR[23]; - SUBKEY_L(23) = subL[22]; /* round 18 */ - SUBKEY_R(23) = subR[22]; - SUBKEY_L(24) = subL[24] ^ subL[23]; /* kw3 */ - SUBKEY_R(24) = subR[24] ^ subR[23]; - - /* apply the inverse of the last half of P-function */ - camellia_setup_tail(subkey, 24); + camellia_setup_tail(subkey, subL, subR, 24); } static void camellia_setup256(const unsigned char *key, u32 *subkey) @@ -645,13 +674,13 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) u32 kll, klr, krl, krr; /* left half of key */ u32 krll, krlr, krrl, krrr; /* right half of key */ u32 il, ir, t0, t1, w0, w1; /* temporary variables */ - u32 kw4l, kw4r, dw, tl, tr; + u32 kw4l, kw4r, dw; u32 subL[34]; u32 subR[34]; /** * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr) - * (|| is concatination) + * (|| is concatenation) */ GETU32(kll, key ); GETU32(klr, key + 4); @@ -862,92 +891,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) /* kw1 */ subL[0] ^= kw4l; subR[0] ^= kw4r; - /* key XOR is end of F-function */ - SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */ - SUBKEY_R(0) = subR[0] ^ subR[2]; - SUBKEY_L(2) = subL[3]; /* round 1 */ - SUBKEY_R(2) = subR[3]; - SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */ - SUBKEY_R(3) = subR[2] ^ subR[4]; - SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */ - SUBKEY_R(4) = subR[3] ^ subR[5]; - SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */ - SUBKEY_R(5) = subR[4] ^ subR[6]; - SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */ - SUBKEY_R(6) = subR[5] ^ subR[7]; - tl = subL[10] ^ (subR[10] & ~subR[8]); - dw = tl & subL[8], /* FL(kl1) */ - tr = subR[10] ^ ROL1(dw); - SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ - SUBKEY_R(7) = subR[6] ^ tr; - SUBKEY_L(8) = subL[8]; /* FL(kl1) */ - SUBKEY_R(8) = subR[8]; - SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */ - SUBKEY_R(9) = subR[9]; - tl = subL[7] ^ (subR[7] & ~subR[9]); - dw = tl & subL[9], /* FLinv(kl2) */ - tr = subR[7] ^ ROL1(dw); - SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ - SUBKEY_R(10) = tr ^ subR[11]; - SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ - SUBKEY_R(11) = subR[10] ^ subR[12]; - SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */ - SUBKEY_R(12) = subR[11] ^ subR[13]; - SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */ - SUBKEY_R(13) = subR[12] ^ subR[14]; - SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */ - SUBKEY_R(14) = subR[13] ^ subR[15]; - tl = subL[18] ^ (subR[18] & ~subR[16]); - dw = tl & subL[16], /* FL(kl3) */ - tr = subR[18] ^ ROL1(dw); - SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ - SUBKEY_R(15) = subR[14] ^ tr; - SUBKEY_L(16) = subL[16]; /* FL(kl3) */ - SUBKEY_R(16) = subR[16]; - SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */ - SUBKEY_R(17) = subR[17]; - tl = subL[15] ^ (subR[15] & ~subR[17]); - dw = tl & subL[17], /* FLinv(kl4) */ - tr = subR[15] ^ ROL1(dw); - SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ - SUBKEY_R(18) = tr ^ subR[19]; - SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ - SUBKEY_R(19) = subR[18] ^ subR[20]; - SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */ - SUBKEY_R(20) = subR[19] ^ subR[21]; - SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */ - SUBKEY_R(21) = subR[20] ^ subR[22]; - SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */ - SUBKEY_R(22) = subR[21] ^ subR[23]; - tl = subL[26] ^ (subR[26] & ~subR[24]); - dw = tl & subL[24], /* FL(kl5) */ - tr = subR[26] ^ ROL1(dw); - SUBKEY_L(23) = subL[22] ^ tl; 
/* round 18 */ - SUBKEY_R(23) = subR[22] ^ tr; - SUBKEY_L(24) = subL[24]; /* FL(kl5) */ - SUBKEY_R(24) = subR[24]; - SUBKEY_L(25) = subL[25]; /* FLinv(kl6) */ - SUBKEY_R(25) = subR[25]; - tl = subL[23] ^ (subR[23] & ~subR[25]); - dw = tl & subL[25], /* FLinv(kl6) */ - tr = subR[23] ^ ROL1(dw); - SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */ - SUBKEY_R(26) = tr ^ subR[27]; - SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ - SUBKEY_R(27) = subR[26] ^ subR[28]; - SUBKEY_L(28) = subL[27] ^ subL[29]; /* round 21 */ - SUBKEY_R(28) = subR[27] ^ subR[29]; - SUBKEY_L(29) = subL[28] ^ subL[30]; /* round 22 */ - SUBKEY_R(29) = subR[28] ^ subR[30]; - SUBKEY_L(30) = subL[29] ^ subL[31]; /* round 23 */ - SUBKEY_R(30) = subR[29] ^ subR[31]; - SUBKEY_L(31) = subL[30]; /* round 24 */ - SUBKEY_R(31) = subR[30]; - SUBKEY_L(32) = subL[32] ^ subL[31]; /* kw3 */ - SUBKEY_R(32) = subR[32] ^ subR[31]; - - /* apply the inverse of the last half of P-function */ - camellia_setup_tail(subkey, 32); + camellia_setup_tail(subkey, subL, subR, 32); } static void camellia_setup192(const unsigned char *key, u32 *subkey) -- cgit v1.1 From ff85a8082f0665fe6f79d50eb79bdccb98cabfa2 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Fri, 23 Nov 2007 21:21:03 +0800 Subject: [CRYPTO] camellia: Move more common code into camellia_setup_tail Analogously to the camellia7 patch, move the "absorb kw2 to other subkeys" and "absorb kw4 to other subkeys" code parts into camellia_setup_tail(). This further reduces source and object code size at the cost of two branches in key setup code. Signed-off-by: Denys Vlasenko Signed-off-by: Herbert Xu --- crypto/camellia.c | 218 +++++++++++++++++++++--------------------------------- 1 file changed, 84 insertions(+), 134 deletions(-) diff --git a/crypto/camellia.c b/crypto/camellia.c index 86af42e..493fee7 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c @@ -393,8 +393,92 @@ static const u32 camellia_sp4404[256] = { static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) { u32 dw, tl, tr; + u32 kw4l, kw4r; int i; + /* absorb kw2 to other subkeys */ + /* round 2 */ + subL[3] ^= subL[1]; subR[3] ^= subR[1]; + /* round 4 */ + subL[5] ^= subL[1]; subR[5] ^= subR[1]; + /* round 6 */ + subL[7] ^= subL[1]; subR[7] ^= subR[1]; + subL[1] ^= subR[1] & ~subR[9]; + dw = subL[1] & subL[9], + subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */ + /* round 8 */ + subL[11] ^= subL[1]; subR[11] ^= subR[1]; + /* round 10 */ + subL[13] ^= subL[1]; subR[13] ^= subR[1]; + /* round 12 */ + subL[15] ^= subL[1]; subR[15] ^= subR[1]; + subL[1] ^= subR[1] & ~subR[17]; + dw = subL[1] & subL[17], + subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */ + /* round 14 */ + subL[19] ^= subL[1]; subR[19] ^= subR[1]; + /* round 16 */ + subL[21] ^= subL[1]; subR[21] ^= subR[1]; + /* round 18 */ + subL[23] ^= subL[1]; subR[23] ^= subR[1]; + if (max == 24) { + /* kw3 */ + subL[24] ^= subL[1]; subR[24] ^= subR[1]; + + /* absorb kw4 to other subkeys */ + kw4l = subL[25]; kw4r = subR[25]; + } else { + subL[1] ^= subR[1] & ~subR[25]; + dw = subL[1] & subL[25], + subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */ + /* round 20 */ + subL[27] ^= subL[1]; subR[27] ^= subR[1]; + /* round 22 */ + subL[29] ^= subL[1]; subR[29] ^= subR[1]; + /* round 24 */ + subL[31] ^= subL[1]; subR[31] ^= subR[1]; + /* kw3 */ + subL[32] ^= subL[1]; subR[32] ^= subR[1]; + + /* absorb kw4 to other subkeys */ + kw4l = subL[33]; kw4r = subR[33]; + /* round 23 */ + subL[30] ^= kw4l; subR[30] ^= kw4r; + /* round 21 */ + subL[28] ^= kw4l; subR[28] ^=
kw4r; + /* round 19 */ + subL[26] ^= kw4l; subR[26] ^= kw4r; + kw4l ^= kw4r & ~subR[24]; + dw = kw4l & subL[24], + kw4r ^= ROL1(dw); /* modified for FL(kl5) */ + } + /* round 17 */ + subL[22] ^= kw4l; subR[22] ^= kw4r; + /* round 15 */ + subL[20] ^= kw4l; subR[20] ^= kw4r; + /* round 13 */ + subL[18] ^= kw4l; subR[18] ^= kw4r; + kw4l ^= kw4r & ~subR[16]; + dw = kw4l & subL[16], + kw4r ^= ROL1(dw); /* modified for FL(kl3) */ + /* round 11 */ + subL[14] ^= kw4l; subR[14] ^= kw4r; + /* round 9 */ + subL[12] ^= kw4l; subR[12] ^= kw4r; + /* round 7 */ + subL[10] ^= kw4l; subR[10] ^= kw4r; + kw4l ^= kw4r & ~subR[8]; + dw = kw4l & subL[8], + kw4r ^= ROL1(dw); /* modified for FL(kl1) */ + /* round 5 */ + subL[6] ^= kw4l; subR[6] ^= kw4r; + /* round 3 */ + subL[4] ^= kw4l; subR[4] ^= kw4r; + /* round 1 */ + subL[2] ^= kw4l; subR[2] ^= kw4r; + /* kw1 */ + subL[0] ^= kw4l; subR[0] ^= kw4r; + /* key XOR is end of F-function */ SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */ SUBKEY_R(0) = subR[0] ^ subR[2]; @@ -509,7 +593,6 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) { u32 kll, klr, krl, krr; u32 il, ir, t0, t1, w0, w1; - u32 kw4l, kw4r, dw; u32 subL[26]; u32 subR[26]; @@ -609,63 +692,6 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) subL[24] = kll; subR[24] = klr; subL[25] = krl; subR[25] = krr; - /* absorb kw2 to other subkeys */ - /* round 2 */ - subL[3] ^= subL[1]; subR[3] ^= subR[1]; - /* round 4 */ - subL[5] ^= subL[1]; subR[5] ^= subR[1]; - /* round 6 */ - subL[7] ^= subL[1]; subR[7] ^= subR[1]; - subL[1] ^= subR[1] & ~subR[9]; - dw = subL[1] & subL[9], - subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */ - /* round 8 */ - subL[11] ^= subL[1]; subR[11] ^= subR[1]; - /* round 10 */ - subL[13] ^= subL[1]; subR[13] ^= subR[1]; - /* round 12 */ - subL[15] ^= subL[1]; subR[15] ^= subR[1]; - subL[1] ^= subR[1] & ~subR[17]; - dw = subL[1] & subL[17], - subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */ - /* round 14 */ - subL[19] ^= subL[1]; subR[19] ^= subR[1]; - /* round 16 */ - subL[21] ^= subL[1]; subR[21] ^= subR[1]; - /* round 18 */ - subL[23] ^= subL[1]; subR[23] ^= subR[1]; - /* kw3 */ - subL[24] ^= subL[1]; subR[24] ^= subR[1]; - - /* absorb kw4 to other subkeys */ - kw4l = subL[25]; kw4r = subR[25]; - /* round 17 */ - subL[22] ^= kw4l; subR[22] ^= kw4r; - /* round 15 */ - subL[20] ^= kw4l; subR[20] ^= kw4r; - /* round 13 */ - subL[18] ^= kw4l; subR[18] ^= kw4r; - kw4l ^= kw4r & ~subR[16]; - dw = kw4l & subL[16], - kw4r ^= ROL1(dw); /* modified for FL(kl3) */ - /* round 11 */ - subL[14] ^= kw4l; subR[14] ^= kw4r; - /* round 9 */ - subL[12] ^= kw4l; subR[12] ^= kw4r; - /* round 7 */ - subL[10] ^= kw4l; subR[10] ^= kw4r; - kw4l ^= kw4r & ~subR[8]; - dw = kw4l & subL[8], - kw4r ^= ROL1(dw); /* modified for FL(kl1) */ - /* round 5 */ - subL[6] ^= kw4l; subR[6] ^= kw4r; - /* round 3 */ - subL[4] ^= kw4l; subR[4] ^= kw4r; - /* round 1 */ - subL[2] ^= kw4l; subR[2] ^= kw4r; - /* kw1 */ - subL[0] ^= kw4l; subR[0] ^= kw4r; - camellia_setup_tail(subkey, subL, subR, 24); } @@ -674,7 +700,6 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) u32 kll, klr, krl, krr; /* left half of key */ u32 krll, krlr, krrl, krrr; /* right half of key */ u32 il, ir, t0, t1, w0, w1; /* temporary variables */ - u32 kw4l, kw4r, dw; u32 subL[34]; u32 subR[34]; @@ -816,81 +841,6 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) /* kw4 */ subL[33] = krrl; subR[33] = krrr; - /* absorb kw2 to other subkeys */ - /* round 2 */ - subL[3] ^= 
subL[1]; subR[3] ^= subR[1]; - /* round 4 */ - subL[5] ^= subL[1]; subR[5] ^= subR[1]; - /* round 6 */ - subL[7] ^= subL[1]; subR[7] ^= subR[1]; - subL[1] ^= subR[1] & ~subR[9]; - dw = subL[1] & subL[9], - subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */ - /* round 8 */ - subL[11] ^= subL[1]; subR[11] ^= subR[1]; - /* round 10 */ - subL[13] ^= subL[1]; subR[13] ^= subR[1]; - /* round 12 */ - subL[15] ^= subL[1]; subR[15] ^= subR[1]; - subL[1] ^= subR[1] & ~subR[17]; - dw = subL[1] & subL[17], - subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */ - /* round 14 */ - subL[19] ^= subL[1]; subR[19] ^= subR[1]; - /* round 16 */ - subL[21] ^= subL[1]; subR[21] ^= subR[1]; - /* round 18 */ - subL[23] ^= subL[1]; subR[23] ^= subR[1]; - subL[1] ^= subR[1] & ~subR[25]; - dw = subL[1] & subL[25], - subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */ - /* round 20 */ - subL[27] ^= subL[1]; subR[27] ^= subR[1]; - /* round 22 */ - subL[29] ^= subL[1]; subR[29] ^= subR[1]; - /* round 24 */ - subL[31] ^= subL[1]; subR[31] ^= subR[1]; - /* kw3 */ - subL[32] ^= subL[1]; subR[32] ^= subR[1]; - - /* absorb kw4 to other subkeys */ - kw4l = subL[33]; kw4r = subR[33]; - /* round 23 */ - subL[30] ^= kw4l; subR[30] ^= kw4r; - /* round 21 */ - subL[28] ^= kw4l; subR[28] ^= kw4r; - /* round 19 */ - subL[26] ^= kw4l; subR[26] ^= kw4r; - kw4l ^= kw4r & ~subR[24]; - dw = kw4l & subL[24], - kw4r ^= ROL1(dw); /* modified for FL(kl5) */ - /* round 17 */ - subL[22] ^= kw4l; subR[22] ^= kw4r; - /* round 15 */ - subL[20] ^= kw4l; subR[20] ^= kw4r; - /* round 13 */ - subL[18] ^= kw4l; subR[18] ^= kw4r; - kw4l ^= kw4r & ~subR[16]; - dw = kw4l & subL[16], - kw4r ^= ROL1(dw); /* modified for FL(kl3) */ - /* round 11 */ - subL[14] ^= kw4l; subR[14] ^= kw4r; - /* round 9 */ - subL[12] ^= kw4l; subR[12] ^= kw4r; - /* round 7 */ - subL[10] ^= kw4l; subR[10] ^= kw4r; - kw4l ^= kw4r & ~subR[8]; - dw = kw4l & subL[8], - kw4r ^= ROL1(dw); /* modified for FL(kl1) */ - /* round 5 */ - subL[6] ^= kw4l; subR[6] ^= kw4r; - /* round 3 */ - subL[4] ^= kw4l; subR[4] ^= kw4r; - /* round 1 */ - subL[2] ^= kw4l; subR[2] ^= kw4r; - /* kw1 */ - subL[0] ^= kw4l; subR[0] ^= kw4r; - camellia_setup_tail(subkey, subL, subR, 32); } -- cgit v1.1 From e3a4ea4fd2e5f154ae9233f1ce30e7564e5cbcfc Mon Sep 17 00:00:00 2001 From: Mikko Herranen Date: Mon, 26 Nov 2007 22:12:07 +0800 Subject: [CRYPTO] tcrypt: Add aead support Add AEAD support to tcrypt, needed by GCM. Signed-off-by: Mikko Herranen Reviewed-by: Mika Kukkonen Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 258 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-- crypto/tcrypt.h | 22 +++++ 2 files changed, 271 insertions(+), 9 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index b8cb1d1..b343d81 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -6,12 +6,14 @@ * * Copyright (c) 2002 James Morris * Copyright (c) 2002 Jean-Francois Dive + * Copyright (c) 2007 Nokia Siemens Networks * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. 
* + * 2007-11-13 Added AEAD support * 2007-11-06 Added SHA-224 and SHA-224-HMAC tests * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests * 2004-08-09 Added cipher speed tests (Reyk Floeter ) @@ -72,6 +74,7 @@ static unsigned int sec; static int mode; static char *xbuf; +static char *axbuf; static char *tvmem; static char *check[] = { @@ -169,6 +172,7 @@ static void test_hash(char *algo, struct hash_testvec *template, /* setup the dummy buffer first */ memset(xbuf, 0, XBUFSIZE); + memset(axbuf, 0, XBUFSIZE); j = 0; for (i = 0; i < tcount; i++) { @@ -217,6 +221,233 @@ out: crypto_free_hash(tfm); } +static void test_aead(char *algo, int enc, struct aead_testvec *template, + unsigned int tcount) +{ + unsigned int ret, i, j, k, temp; + unsigned int tsize; + char *q; + struct crypto_aead *tfm; + char *key; + struct aead_testvec *aead_tv; + struct aead_request *req; + struct scatterlist sg[8]; + struct scatterlist asg[8]; + const char *e; + struct tcrypt_result result; + + if (enc == ENCRYPT) + e = "encryption"; + else + e = "decryption"; + + printk(KERN_INFO "\ntesting %s %s\n", algo, e); + + tsize = sizeof(struct aead_testvec); + tsize *= tcount; + + if (tsize > TVMEMSIZE) { + printk(KERN_INFO "template (%u) too big for tvmem (%u)\n", + tsize, TVMEMSIZE); + return; + } + + memcpy(tvmem, template, tsize); + aead_tv = (void *)tvmem; + + init_completion(&result.completion); + + tfm = crypto_alloc_aead(algo, 0, 0); + + if (IS_ERR(tfm)) { + printk(KERN_INFO "failed to load transform for %s: %ld\n", + algo, PTR_ERR(tfm)); + return; + } + + req = aead_request_alloc(tfm, GFP_KERNEL); + if (!req) { + printk(KERN_INFO "failed to allocate request for %s\n", algo); + goto out; + } + + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + tcrypt_complete, &result); + + for (i = 0, j = 0; i < tcount; i++) { + if (!aead_tv[i].np) { + printk(KERN_INFO "test %u (%d bit key):\n", + ++j, aead_tv[i].klen * 8); + + crypto_aead_clear_flags(tfm, ~0); + if (aead_tv[i].wk) + crypto_aead_set_flags( + tfm, CRYPTO_TFM_REQ_WEAK_KEY); + key = aead_tv[i].key; + + ret = crypto_aead_setkey(tfm, key, + aead_tv[i].klen); + if (ret) { + printk(KERN_INFO "setkey() failed flags=%x\n", + crypto_aead_get_flags(tfm)); + + if (!aead_tv[i].fail) + goto out; + } + + sg_init_one(&sg[0], aead_tv[i].input, + aead_tv[i].ilen); + + sg_init_one(&asg[0], aead_tv[i].assoc, + aead_tv[i].alen); + + aead_request_set_crypt(req, sg, sg, + aead_tv[i].ilen, + aead_tv[i].iv); + + aead_request_set_assoc(req, asg, aead_tv[i].alen); + + if (enc) { + ret = crypto_aead_encrypt(req); + } else { + memcpy(req->__ctx, aead_tv[i].tag, + aead_tv[i].tlen); + ret = crypto_aead_decrypt(req); + } + + switch (ret) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + ret = wait_for_completion_interruptible( + &result.completion); + if (!ret && !(ret = result.err)) { + INIT_COMPLETION(result.completion); + break; + } + /* fall through */ + default: + printk(KERN_INFO "%s () failed err=%d\n", + e, -ret); + goto out; + } + + q = kmap(sg_page(&sg[0])) + sg[0].offset; + hexdump(q, aead_tv[i].rlen); + printk(KERN_INFO "auth tag: "); + hexdump((unsigned char *)req->__ctx, aead_tv[i].tlen); + + printk(KERN_INFO "enc/dec: %s\n", + memcmp(q, aead_tv[i].result, + aead_tv[i].rlen) ? "fail" : "pass"); + + printk(KERN_INFO "auth tag: %s\n", + memcmp(req->__ctx, aead_tv[i].tag, + aead_tv[i].tlen) ? 
"fail" : "pass"); + } + } + + printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e); + memset(xbuf, 0, XBUFSIZE); + + for (i = 0, j = 0; i < tcount; i++) { + if (aead_tv[i].np) { + printk(KERN_INFO "test %u (%d bit key):\n", + ++j, aead_tv[i].klen * 8); + + crypto_aead_clear_flags(tfm, ~0); + if (aead_tv[i].wk) + crypto_aead_set_flags( + tfm, CRYPTO_TFM_REQ_WEAK_KEY); + key = aead_tv[i].key; + + ret = crypto_aead_setkey(tfm, key, aead_tv[i].klen); + if (ret) { + printk(KERN_INFO "setkey() failed flags=%x\n", + crypto_aead_get_flags(tfm)); + + if (!aead_tv[i].fail) + goto out; + } + + sg_init_table(sg, aead_tv[i].np); + for (k = 0, temp = 0; k < aead_tv[i].np; k++) { + memcpy(&xbuf[IDX[k]], + aead_tv[i].input + temp, + aead_tv[i].tap[k]); + temp += aead_tv[i].tap[k]; + sg_set_buf(&sg[k], &xbuf[IDX[k]], + aead_tv[i].tap[k]); + } + + sg_init_table(asg, aead_tv[i].anp); + for (k = 0, temp = 0; k < aead_tv[i].anp; k++) { + memcpy(&axbuf[IDX[k]], + aead_tv[i].assoc + temp, + aead_tv[i].atap[k]); + temp += aead_tv[i].atap[k]; + sg_set_buf(&asg[k], &axbuf[IDX[k]], + aead_tv[i].atap[k]); + } + + aead_request_set_crypt(req, sg, sg, + aead_tv[i].ilen, + aead_tv[i].iv); + + aead_request_set_assoc(req, asg, aead_tv[i].alen); + + if (enc) { + ret = crypto_aead_encrypt(req); + } else { + memcpy(req->__ctx, aead_tv[i].tag, + aead_tv[i].tlen); + ret = crypto_aead_decrypt(req); + } + + switch (ret) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + ret = wait_for_completion_interruptible( + &result.completion); + if (!ret && !(ret = result.err)) { + INIT_COMPLETION(result.completion); + break; + } + /* fall through */ + default: + printk(KERN_INFO "%s () failed err=%d\n", + e, -ret); + goto out; + } + + for (k = 0, temp = 0; k < aead_tv[i].np; k++) { + printk(KERN_INFO "page %u\n", k); + q = kmap(sg_page(&sg[k])) + sg[k].offset; + hexdump(q, aead_tv[i].tap[k]); + printk(KERN_INFO "%s\n", + memcmp(q, aead_tv[i].result + temp, + aead_tv[i].tap[k]) ? + "fail" : "pass"); + + temp += aead_tv[i].tap[k]; + } + printk(KERN_INFO "auth tag: "); + hexdump((unsigned char *)req->__ctx, aead_tv[i].tlen); + + printk(KERN_INFO "auth tag: %s\n", + memcmp(req->__ctx, aead_tv[i].tag, + aead_tv[i].tlen) ? "fail" : "pass"); + } + } + +out: + crypto_free_aead(tfm); + aead_request_free(req); +} + static void test_cipher(char *algo, int enc, struct cipher_testvec *template, unsigned int tcount) { @@ -1497,20 +1728,21 @@ static void do_test(void) static int __init init(void) { + int err = -ENOMEM; + tvmem = kmalloc(TVMEMSIZE, GFP_KERNEL); if (tvmem == NULL) - return -ENOMEM; + return err; xbuf = kmalloc(XBUFSIZE, GFP_KERNEL); - if (xbuf == NULL) { - kfree(tvmem); - return -ENOMEM; - } + if (xbuf == NULL) + goto err_free_tv; - do_test(); + axbuf = kmalloc(XBUFSIZE, GFP_KERNEL); + if (axbuf == NULL) + goto err_free_xbuf; - kfree(xbuf); - kfree(tvmem); + do_test(); /* We intentionaly return -EAGAIN to prevent keeping * the module. It does all its work from init() @@ -1518,7 +1750,15 @@ static int __init init(void) * => we don't need it in the memory, do we? 
* -- mludvig */ - return -EAGAIN; + err = -EAGAIN; + + kfree(axbuf); + err_free_xbuf: + kfree(xbuf); + err_free_tv: + kfree(tvmem); + + return err; } /* diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 6ffc411..865196a 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -6,12 +6,14 @@ * * Copyright (c) 2002 James Morris * Copyright (c) 2002 Jean-Francois Dive + * Copyright (c) 2007 Nokia Siemens Networks * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * + * 2007-11-13 Added AEAD support * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests * 2004-08-09 Cipher speed tests by Reyk Floeter * 2003-09-14 Changes by Kartikey Mahendra Bhatt @@ -51,6 +53,26 @@ struct cipher_testvec { unsigned short rlen; }; +struct aead_testvec { + char key[MAX_KEYLEN] __attribute__ ((__aligned__(4))); + char iv[MAX_IVLEN]; + char input[512]; + char assoc[512]; + char result[512]; + char tag[128]; + unsigned char tap[MAX_TAP]; + unsigned char atap[MAX_TAP]; + int np; + int anp; + unsigned char fail; + unsigned char wk; /* weak key flag */ + unsigned char klen; + unsigned short ilen; + unsigned short alen; + unsigned short rlen; + unsigned short tlen; +}; + struct cipher_speed { unsigned char klen; unsigned int blen; -- cgit v1.1 From 28db8e3e38e593d22e2c69942bb1ca7be2a35f05 Mon Sep 17 00:00:00 2001 From: Mikko Herranen Date: Mon, 26 Nov 2007 22:24:11 +0800 Subject: [CRYPTO] gcm: New algorithm Add GCM/GMAC support to cryptoapi. GCM (Galois/Counter Mode) is an AEAD mode of operations for any block cipher with a block size of 16. The typical example is AES-GCM. Signed-off-by: Mikko Herranen Reviewed-by: Mika Kukkonen Signed-off-by: Herbert Xu --- crypto/Kconfig | 9 ++ crypto/Makefile | 1 + crypto/gcm.c | 465 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/tcrypt.c | 5 + crypto/tcrypt.h | 368 ++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 848 insertions(+) create mode 100644 crypto/gcm.c diff --git a/crypto/Kconfig b/crypto/Kconfig index 8d6cac9..40ae92c 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -206,6 +206,15 @@ config CRYPTO_CTR CTR: Counter mode This block cipher algorithm is required for IPSec. +config CRYPTO_GCM + tristate "GCM/GMAC support" + select CRYPTO_CTR + select CRYPTO_AEAD + select CRYPTO_GF128MUL + help + Support for Galois/Counter Mode (GCM) and Galois Message + Authentication Code (GMAC). Required for IPSec. + config CRYPTO_CRYPTD tristate "Software async crypto daemon" select CRYPTO_ABLKCIPHER diff --git a/crypto/Makefile b/crypto/Makefile index 9b1476e..957343c 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -33,6 +33,7 @@ obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o obj-$(CONFIG_CRYPTO_LRW) += lrw.o obj-$(CONFIG_CRYPTO_XTS) += xts.o obj-$(CONFIG_CRYPTO_CTR) += ctr.o +obj-$(CONFIG_CRYPTO_GCM) += gcm.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o diff --git a/crypto/gcm.c b/crypto/gcm.c new file mode 100644 index 0000000..ad8b8b9 --- /dev/null +++ b/crypto/gcm.c @@ -0,0 +1,465 @@ +/* + * GCM: Galois/Counter Mode. + * + * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
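+ * + * GCM is built from a CTR-mode keystream plus a GHASH (multiplication in GF(2^128)) authenticator, which is why the Kconfig entry above selects CRYPTO_CTR and CRYPTO_GF128MUL.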
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "scatterwalk.h" + +struct gcm_instance_ctx { + struct crypto_spawn ctr; +}; + +struct crypto_gcm_ctx { + struct crypto_ablkcipher *ctr; + struct gf128mul_4k *gf128; +}; + +struct crypto_gcm_ghash_ctx { + u32 bytes; + u32 flags; + struct gf128mul_4k *gf128; + u8 buffer[16]; +}; + +struct crypto_gcm_req_priv_ctx { + u8 auth_tag[16]; + u8 counter[16]; + struct crypto_gcm_ghash_ctx ghash; +}; + +static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags, + struct gf128mul_4k *gf128) +{ + ctx->bytes = 0; + ctx->flags = flags; + ctx->gf128 = gf128; + memset(ctx->buffer, 0, 16); +} + +static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx, + const u8 *src, unsigned int srclen) +{ + u8 *dst = ctx->buffer; + + if (ctx->bytes) { + int n = min(srclen, ctx->bytes); + u8 *pos = dst + (16 - ctx->bytes); + + ctx->bytes -= n; + srclen -= n; + + while (n--) + *pos++ ^= *src++; + + if (!ctx->bytes) + gf128mul_4k_lle((be128 *)dst, ctx->gf128); + } + + while (srclen >= 16) { + crypto_xor(dst, src, 16); + gf128mul_4k_lle((be128 *)dst, ctx->gf128); + src += 16; + srclen -= 16; + } + + if (srclen) { + ctx->bytes = 16 - srclen; + while (srclen--) + *dst++ ^= *src++; + } +} + +static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx, + struct scatterlist *sg, int len) +{ + struct scatter_walk walk; + u8 *src; + int n; + + scatterwalk_start(&walk, sg); + + while (len) { + n = scatterwalk_clamp(&walk, len); + + if (!n) { + scatterwalk_start(&walk, sg_next(walk.sg)); + n = scatterwalk_clamp(&walk, len); + } + + src = scatterwalk_map(&walk, 0); + + crypto_gcm_ghash_update(ctx, src, n); + len -= n; + + scatterwalk_unmap(src, 0); + scatterwalk_advance(&walk, n); + scatterwalk_done(&walk, 0, len); + if (len) + crypto_yield(ctx->flags); + } +} + +static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx) +{ + u8 *dst = ctx->buffer; + + if (ctx->bytes) { + u8 *tmp = dst + (16 - ctx->bytes); + + while (ctx->bytes--) + *tmp++ ^= 0; + + gf128mul_4k_lle((be128 *)dst, ctx->gf128); + } + + ctx->bytes = 0; +} + +static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx, + unsigned int authlen, + unsigned int cryptlen, u8 *dst) +{ + u8 *buf = ctx->buffer; + u128 lengths; + + lengths.a = cpu_to_be64(authlen * 8); + lengths.b = cpu_to_be64(cryptlen * 8); + + crypto_gcm_ghash_flush(ctx); + crypto_xor(buf, (u8 *)&lengths, 16); + gf128mul_4k_lle((be128 *)buf, ctx->gf128); + crypto_xor(dst, buf, 16); +} + +static inline void crypto_gcm_set_counter(u8 *counterblock, u32 value) +{ + *((u32 *)&counterblock[12]) = cpu_to_be32(value); +} + +static int crypto_gcm_encrypt_counter(struct crypto_aead *aead, u8 *block, + u32 value, const u8 *iv) +{ + struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_ablkcipher *ctr = ctx->ctr; + struct ablkcipher_request req; + struct scatterlist sg; + u8 counterblock[16]; + + if (iv == NULL) + memset(counterblock, 0, 12); + else + memcpy(counterblock, iv, 12); + + crypto_gcm_set_counter(counterblock, value); + + sg_init_one(&sg, block, 16); + ablkcipher_request_set_tfm(&req, ctr); + ablkcipher_request_set_crypt(&req, &sg, &sg, 16, counterblock); + ablkcipher_request_set_callback(&req, 0, NULL, NULL); + memset(block, 0, 16); + return crypto_ablkcipher_encrypt(&req); +} + +static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); + struct 
crypto_ablkcipher *ctr = ctx->ctr; + int alignmask = crypto_ablkcipher_alignmask(ctr); + u8 alignbuf[16+alignmask]; + u8 *hash = (u8 *)ALIGN((unsigned long)alignbuf, alignmask+1); + int err = 0; + + crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); + crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) & + CRYPTO_TFM_REQ_MASK); + + err = crypto_ablkcipher_setkey(ctr, key, keylen); + if (err) + goto out; + + crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) & + CRYPTO_TFM_RES_MASK); + + err = crypto_gcm_encrypt_counter(aead, hash, -1, NULL); + if (err) + goto out; + + if (ctx->gf128 != NULL) + gf128mul_free_4k(ctx->gf128); + + ctx->gf128 = gf128mul_init_4k_lle((be128 *)hash); + + if (ctx->gf128 == NULL) + err = -ENOMEM; + + out: + return err; +} + +static int crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, + struct aead_request *req, + void (*done)(struct crypto_async_request *, + int)) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); + u32 flags = req->base.tfm->crt_flags; + u8 *auth_tag = pctx->auth_tag; + u8 *counter = pctx->counter; + struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; + int err = 0; + + ablkcipher_request_set_tfm(ablk_req, ctx->ctr); + ablkcipher_request_set_callback(ablk_req, aead_request_flags(req), + done, req); + ablkcipher_request_set_crypt(ablk_req, req->src, req->dst, + req->cryptlen, counter); + + err = crypto_gcm_encrypt_counter(aead, auth_tag, 0, req->iv); + if (err) + goto out; + + memcpy(counter, req->iv, 12); + crypto_gcm_set_counter(counter, 1); + + crypto_gcm_ghash_init(ghash, flags, ctx->gf128); + + if (req->assoclen) { + crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen); + crypto_gcm_ghash_flush(ghash); + } + + out: + return err; +} + +static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) +{ + struct aead_request *req = areq->data; + struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); + u8 *auth_tag = pctx->auth_tag; + struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; + + crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen); + crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen, + auth_tag); + + aead_request_complete(req, err); +} + +static int crypto_gcm_encrypt(struct aead_request *req) +{ + struct ablkcipher_request abreq; + struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); + u8 *auth_tag = pctx->auth_tag; + struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; + int err = 0; + + err = crypto_gcm_init_crypt(&abreq, req, crypto_gcm_encrypt_done); + if (err) + return err; + + if (req->cryptlen) { + err = crypto_ablkcipher_encrypt(&abreq); + if (err) + return err; + + crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen); + } + + crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen, + auth_tag); + + return err; +} + +static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err) +{ + aead_request_complete(areq->data, err); +} + +static int crypto_gcm_decrypt(struct aead_request *req) +{ + struct ablkcipher_request abreq; + struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); + u8 *auth_tag = pctx->auth_tag; + struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; + u8 tag[16]; + int err; + + if (!req->cryptlen) + return -EINVAL; + + memcpy(tag, auth_tag, 16); + err = crypto_gcm_init_crypt(&abreq, req, crypto_gcm_decrypt_done); + if (err) + return err; + + 
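/* recompute the tag over the ciphertext and compare it with the received tag saved in tag[] above, before any plaintext is produced */ +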
crypto_gcm_ghash_update_sg(ghash, req->src, req->cryptlen); + crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen, + auth_tag); + + if (memcmp(tag, auth_tag, 16)) + return -EINVAL; + + return crypto_ablkcipher_decrypt(&abreq); +} + +static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst); + struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ablkcipher *ctr; + unsigned long align; + int err; + + ctr = crypto_spawn_ablkcipher(&ictx->ctr); + err = PTR_ERR(ctr); + if (IS_ERR(ctr)) + return err; + + ctx->ctr = ctr; + ctx->gf128 = NULL; + + align = max_t(unsigned long, crypto_ablkcipher_alignmask(ctr), + __alignof__(u32) - 1); + align &= ~(crypto_tfm_ctx_alignment() - 1); + tfm->crt_aead.reqsize = align + sizeof(struct crypto_gcm_req_priv_ctx); + + return 0; +} + +static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->gf128 != NULL) + gf128mul_free_4k(ctx->gf128); + + crypto_free_ablkcipher(ctx->ctr); +} + +static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) +{ + struct crypto_instance *inst; + struct crypto_alg *ctr; + struct crypto_alg *cipher; + struct gcm_instance_ctx *ctx; + int err; + char ctr_name[CRYPTO_MAX_ALG_NAME]; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD); + if (err) + return ERR_PTR(err); + + cipher = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); + + inst = ERR_PTR(PTR_ERR(cipher)); + if (IS_ERR(cipher)) + return inst; + + inst = ERR_PTR(-ENAMETOOLONG); + if (snprintf( + ctr_name, CRYPTO_MAX_ALG_NAME, + "ctr(%s,0,16,4)", cipher->cra_name) >= CRYPTO_MAX_ALG_NAME) + return inst; + + ctr = crypto_alg_mod_lookup(ctr_name, CRYPTO_ALG_TYPE_BLKCIPHER, + CRYPTO_ALG_TYPE_MASK); + + if (IS_ERR(ctr)) + return ERR_PTR(PTR_ERR(ctr)); + + if (cipher->cra_blocksize != 16) + goto out_put_ctr; + + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + err = -ENOMEM; + if (!inst) + goto out_put_ctr; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, + "gcm(%s)", cipher->cra_name) >= CRYPTO_MAX_ALG_NAME || + snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "gcm(%s)", cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + + ctx = crypto_instance_ctx(inst); + err = crypto_init_spawn(&ctx->ctr, ctr, inst, CRYPTO_ALG_TYPE_MASK); + if (err) + goto err_free_inst; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; + inst->alg.cra_priority = ctr->cra_priority; + inst->alg.cra_blocksize = 16; + inst->alg.cra_alignmask = __alignof__(u32) - 1; + inst->alg.cra_type = &crypto_aead_type; + inst->alg.cra_aead.ivsize = 12; + inst->alg.cra_aead.authsize = 16; + inst->alg.cra_ctxsize = sizeof(struct crypto_gcm_ctx); + inst->alg.cra_init = crypto_gcm_init_tfm; + inst->alg.cra_exit = crypto_gcm_exit_tfm; + inst->alg.cra_aead.setkey = crypto_gcm_setkey; + inst->alg.cra_aead.encrypt = crypto_gcm_encrypt; + inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; + +out: + crypto_mod_put(ctr); + return inst; +err_free_inst: + kfree(inst); +out_put_ctr: + inst = ERR_PTR(err); + goto out; +} + +static void crypto_gcm_free(struct crypto_instance *inst) +{ + struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); + + crypto_drop_spawn(&ctx->ctr); + kfree(inst); +} + +static struct crypto_template crypto_gcm_tmpl = { + .name = "gcm", + .alloc = crypto_gcm_alloc, + .free = crypto_gcm_free,
+ .module = THIS_MODULE, +}; + +static int __init crypto_gcm_module_init(void) +{ + return crypto_register_template(&crypto_gcm_tmpl); +} + +static void __exit crypto_gcm_module_exit(void) +{ + crypto_unregister_template(&crypto_gcm_tmpl); +} + +module_init(crypto_gcm_module_init); +module_exit(crypto_gcm_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Galois/Counter Mode"); +MODULE_AUTHOR("Mikko Herranen "); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index b343d81..1e12b86 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -13,6 +13,7 @@ * Software Foundation; either version 2 of the License, or (at your option) * any later version. * + * 2007-11-13 Added GCM tests * 2007-11-13 Added AEAD support * 2007-11-06 Added SHA-224 and SHA-224-HMAC tests * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests @@ -1208,6 +1209,10 @@ static void do_test(void) AES_CTR_ENC_TEST_VECTORS); test_cipher("ctr(aes,4,8,4)", DECRYPT, aes_ctr_dec_tv_template, AES_CTR_DEC_TEST_VECTORS); + test_aead("gcm(aes)", ENCRYPT, aes_gcm_enc_tv_template, + AES_GCM_ENC_TEST_VECTORS); + test_aead("gcm(aes)", DECRYPT, aes_gcm_dec_tv_template, + AES_GCM_DEC_TEST_VECTORS); //CAST5 test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template, diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 865196a..2384c41 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -13,6 +13,7 @@ * Software Foundation; either version 2 of the License, or (at your option) * any later version. * + * 2007-11-13 Added GCM tests * 2007-11-13 Added AEAD support * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests * 2004-08-09 Cipher speed tests by Reyk Floeter @@ -2312,6 +2313,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = { #define AES_XTS_DEC_TEST_VECTORS 4 #define AES_CTR_ENC_TEST_VECTORS 6 #define AES_CTR_DEC_TEST_VECTORS 6 +#define AES_GCM_ENC_TEST_VECTORS 9 +#define AES_GCM_DEC_TEST_VECTORS 8 static struct cipher_testvec aes_enc_tv_template[] = { { /* From FIPS-197 */ @@ -3529,6 +3532,371 @@ static struct cipher_testvec aes_ctr_dec_tv_template[] = { }, }; +static struct aead_testvec aes_gcm_enc_tv_template[] = { + { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ + .klen = 16, + .tag = { 0x58, 0xe2, 0xfc, 0xce, 0xfa, 0x7e, 0x30, 0x61, + 0x36, 0x7f, 0x1d, 0x57, 0xa4, 0xe7, 0x45, 0x5a }, + .tlen = 16 + }, { + .klen = 16, + .ilen = 16, + .result = { 0x03, 0x88, 0xda, 0xce, 0x60, 0xb6, 0xa3, 0x92, + 0xf3, 0x28, 0xc2, 0xb9, 0x71, 0xb2, 0xfe, 0x78 }, + .rlen = 16, + .tag = { 0xab, 0x6e, 0x47, 0xd4, 0x2c, 0xec, 0x13, 0xbd, + 0xf5, 0x3a, 0x67, 0xb2, 0x12, 0x57, 0xbd, 0xdf }, + .tlen = 16 + }, { + .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, + 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 }, + .klen = 16, + .iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad, + 0xde, 0xca, 0xf8, 0x88 }, + .input = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, + 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, + 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, + 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72, + 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53, + 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25, + 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, + 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 }, + .ilen = 64, + .result = { 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24, + 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c, + 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0, + 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e, + 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c, + 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 
0x84, 0xaa, 0x05, + 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97, + 0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85 }, + .rlen = 64, + .tag = { 0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6, + 0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 }, + .tlen = 16 + }, { + .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, + 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 }, + .klen = 16, + .iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad, + 0xde, 0xca, 0xf8, 0x88 }, + .input = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, + 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, + 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, + 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72, + 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53, + 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25, + 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, + 0xba, 0x63, 0x7b, 0x39 }, + .ilen = 60, + .assoc = { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, + 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, + 0xab, 0xad, 0xda, 0xd2 }, + .alen = 20, + .result = { 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24, + 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c, + 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0, + 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e, + 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c, + 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05, + 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97, + 0x3d, 0x58, 0xe0, 0x91 }, + .rlen = 60, + .tag = { 0x5b, 0xc9, 0x4f, 0xbc, 0x32, 0x21, 0xa5, 0xdb, + 0x94, 0xfa, 0xe9, 0x5a, 0xe7, 0x12, 0x1a, 0x47 }, + .tlen = 16 + }, { + .klen = 24, + .tag = { 0xcd, 0x33, 0xb2, 0x8a, 0xc7, 0x73, 0xf7, 0x4b, + 0xa0, 0x0e, 0xd1, 0xf3, 0x12, 0x57, 0x24, 0x35 }, + .tlen = 16 + }, { + .klen = 24, + .ilen = 16, + .result = { 0x98, 0xe7, 0x24, 0x7c, 0x07, 0xf0, 0xfe, 0x41, + 0x1c, 0x26, 0x7e, 0x43, 0x84, 0xb0, 0xf6, 0x00 }, + .rlen = 16, + .tag = { 0x2f, 0xf5, 0x8d, 0x80, 0x03, 0x39, 0x27, 0xab, + 0x8e, 0xf4, 0xd4, 0x58, 0x75, 0x14, 0xf0, 0xfb }, + .tlen = 16 + }, { + .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, + 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, + 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c }, + .klen = 24, + .iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad, + 0xde, 0xca, 0xf8, 0x88 }, + .input = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, + 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, + 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, + 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72, + 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53, + 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25, + 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, + 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 }, + .ilen = 64, + .result = { 0x39, 0x80, 0xca, 0x0b, 0x3c, 0x00, 0xe8, 0x41, + 0xeb, 0x06, 0xfa, 0xc4, 0x87, 0x2a, 0x27, 0x57, + 0x85, 0x9e, 0x1c, 0xea, 0xa6, 0xef, 0xd9, 0x84, + 0x62, 0x85, 0x93, 0xb4, 0x0c, 0xa1, 0xe1, 0x9c, + 0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25, + 0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47, + 0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9, + 0xcc, 0xda, 0x27, 0x10, 0xac, 0xad, 0xe2, 0x56 }, + .rlen = 64, + .tag = { 0x99, 0x24, 0xa7, 0xc8, 0x58, 0x73, 0x36, 0xbf, + 0xb1, 0x18, 0x02, 0x4d, 0xb8, 0x67, 0x4a, 0x14 }, + .tlen = 16 + }, { + .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, + 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, + 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c }, + .klen = 24, + .iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad, + 0xde, 0xca, 0xf8, 0x88 }, + .input = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 
0x84, 0x06, 0xe5, + 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, + 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, + 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72, + 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53, + 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25, + 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, + 0xba, 0x63, 0x7b, 0x39 }, + .ilen = 60, + .assoc = { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, + 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, + 0xab, 0xad, 0xda, 0xd2 }, + .alen = 20, + .result = { 0x39, 0x80, 0xca, 0x0b, 0x3c, 0x00, 0xe8, 0x41, + 0xeb, 0x06, 0xfa, 0xc4, 0x87, 0x2a, 0x27, 0x57, + 0x85, 0x9e, 0x1c, 0xea, 0xa6, 0xef, 0xd9, 0x84, + 0x62, 0x85, 0x93, 0xb4, 0x0c, 0xa1, 0xe1, 0x9c, + 0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25, + 0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47, + 0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9, + 0xcc, 0xda, 0x27, 0x10 }, + .rlen = 60, + .tag = { 0x25, 0x19, 0x49, 0x8e, 0x80, 0xf1, 0x47, 0x8f, + 0x37, 0xba, 0x55, 0xbd, 0x6d, 0x27, 0x61, 0x8c }, + .tlen = 16, + .np = 2, + .tap = { 32, 28 }, + .anp = 2, + .atap = { 8, 12 } + }, { + .klen = 32, + .tag = { 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, + 0xa9, 0x63, 0xb4, 0xf1, 0xc4, 0xcb, 0x73, 0x8b }, + .tlen = 16 + } +}; + +static struct aead_testvec aes_gcm_dec_tv_template[] = { + { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ + .klen = 32, + .input = { 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e, + 0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18 }, + .ilen = 16, + .rlen = 16, + .tag = { 0xd0, 0xd1, 0xc8, 0xa7, 0x99, 0x99, 0x6b, 0xf0, + 0x26, 0x5b, 0x98, 0xb5, 0xd4, 0x8a, 0xb9, 0x19 }, + .tlen = 16 + }, { + .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, + 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, + 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, + 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 }, + .klen = 32, + .iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad, + 0xde, 0xca, 0xf8, 0x88 }, + .input = { 0x52, 0x2d, 0xc1, 0xf0, 0x99, 0x56, 0x7d, 0x07, + 0xf4, 0x7f, 0x37, 0xa3, 0x2a, 0x84, 0x42, 0x7d, + 0x64, 0x3a, 0x8c, 0xdc, 0xbf, 0xe5, 0xc0, 0xc9, + 0x75, 0x98, 0xa2, 0xbd, 0x25, 0x55, 0xd1, 0xaa, + 0x8c, 0xb0, 0x8e, 0x48, 0x59, 0x0d, 0xbb, 0x3d, + 0xa7, 0xb0, 0x8b, 0x10, 0x56, 0x82, 0x88, 0x38, + 0xc5, 0xf6, 0x1e, 0x63, 0x93, 0xba, 0x7a, 0x0a, + 0xbc, 0xc9, 0xf6, 0x62, 0x89, 0x80, 0x15, 0xad }, + .ilen = 64, + .result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, + 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, + 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, + 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72, + 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53, + 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25, + 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, + 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 }, + .rlen = 64, + .tag = { 0xb0, 0x94, 0xda, 0xc5, 0xd9, 0x34, 0x71, 0xbd, + 0xec, 0x1a, 0x50, 0x22, 0x70, 0xe3, 0xcc, 0x6c }, + .tlen = 16 + }, { + .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, + 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, + 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, + 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 }, + .klen = 32, + .iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad, + 0xde, 0xca, 0xf8, 0x88 }, + .input = { 0x52, 0x2d, 0xc1, 0xf0, 0x99, 0x56, 0x7d, 0x07, + 0xf4, 0x7f, 0x37, 0xa3, 0x2a, 0x84, 0x42, 0x7d, + 0x64, 0x3a, 0x8c, 0xdc, 0xbf, 0xe5, 0xc0, 0xc9, + 0x75, 0x98, 0xa2, 0xbd, 0x25, 0x55, 0xd1, 0xaa, + 0x8c, 0xb0, 0x8e, 0x48, 0x59, 0x0d, 0xbb, 0x3d, + 0xa7, 
0xb0, 0x8b, 0x10, 0x56, 0x82, 0x88, 0x38, + 0xc5, 0xf6, 0x1e, 0x63, 0x93, 0xba, 0x7a, 0x0a, + 0xbc, 0xc9, 0xf6, 0x62 }, + .ilen = 60, + .assoc = { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, + 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, + 0xab, 0xad, 0xda, 0xd2 }, + .alen = 20, + .result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, + 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, + 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, + 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72, + 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53, + 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25, + 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, + 0xba, 0x63, 0x7b, 0x39 }, + .rlen = 60, + .tag = { 0x76, 0xfc, 0x6e, 0xce, 0x0f, 0x4e, 0x17, 0x68, + 0xcd, 0xdf, 0x88, 0x53, 0xbb, 0x2d, 0x55, 0x1b }, + .tlen = 16, + .np = 2, + .tap = { 48, 12 }, + .anp = 3, + .atap = { 8, 8, 4 } + }, { + .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, + 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 }, + .klen = 16, + .iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad, + 0xde, 0xca, 0xf8, 0x88 }, + .input = { 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24, + 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c, + 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0, + 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e, + 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c, + 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05, + 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97, + 0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85 }, + .ilen = 64, + .result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, + 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, + 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, + 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72, + 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53, + 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25, + 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, + 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 }, + .rlen = 64, + .tag = { 0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6, + 0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 }, + .tlen = 16 + }, { + .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, + 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 }, + .klen = 16, + .iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad, + 0xde, 0xca, 0xf8, 0x88 }, + .input = { 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24, + 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c, + 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0, + 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e, + 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c, + 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05, + 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97, + 0x3d, 0x58, 0xe0, 0x91 }, + .ilen = 60, + .assoc = { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, + 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, + 0xab, 0xad, 0xda, 0xd2 }, + .alen = 20, + .result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, + 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, + 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, + 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72, + 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53, + 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25, + 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, + 0xba, 0x63, 0x7b, 0x39 }, + .rlen = 60, + .tag = { 0x5b, 0xc9, 0x4f, 0xbc, 0x32, 0x21, 0xa5, 0xdb, + 0x94, 0xfa, 0xe9, 0x5a, 0xe7, 0x12, 0x1a, 0x47 }, + .tlen = 16 + }, { + .klen = 24, + .input = { 0x98, 0xe7, 0x24, 0x7c, 0x07, 0xf0, 0xfe, 0x41, + 0x1c, 0x26, 0x7e, 0x43, 0x84, 0xb0, 0xf6, 0x00 }, + .ilen = 16, + .rlen = 16, + .tag = { 
0x2f, 0xf5, 0x8d, 0x80, 0x03, 0x39, 0x27, 0xab, + 0x8e, 0xf4, 0xd4, 0x58, 0x75, 0x14, 0xf0, 0xfb }, + .tlen = 16 + }, { + .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, + 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, + 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c }, + .klen = 24, + .iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad, + 0xde, 0xca, 0xf8, 0x88 }, + .input = { 0x39, 0x80, 0xca, 0x0b, 0x3c, 0x00, 0xe8, 0x41, + 0xeb, 0x06, 0xfa, 0xc4, 0x87, 0x2a, 0x27, 0x57, + 0x85, 0x9e, 0x1c, 0xea, 0xa6, 0xef, 0xd9, 0x84, + 0x62, 0x85, 0x93, 0xb4, 0x0c, 0xa1, 0xe1, 0x9c, + 0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25, + 0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47, + 0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9, + 0xcc, 0xda, 0x27, 0x10, 0xac, 0xad, 0xe2, 0x56 }, + .ilen = 64, + .result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, + 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, + 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, + 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72, + 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53, + 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25, + 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, + 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 }, + .rlen = 64, + .tag = { 0x99, 0x24, 0xa7, 0xc8, 0x58, 0x73, 0x36, 0xbf, + 0xb1, 0x18, 0x02, 0x4d, 0xb8, 0x67, 0x4a, 0x14 }, + .tlen = 16 + }, { + .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, + 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, + 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c }, + .klen = 24, + .iv = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad, + 0xde, 0xca, 0xf8, 0x88 }, + .input = { 0x39, 0x80, 0xca, 0x0b, 0x3c, 0x00, 0xe8, 0x41, + 0xeb, 0x06, 0xfa, 0xc4, 0x87, 0x2a, 0x27, 0x57, + 0x85, 0x9e, 0x1c, 0xea, 0xa6, 0xef, 0xd9, 0x84, + 0x62, 0x85, 0x93, 0xb4, 0x0c, 0xa1, 0xe1, 0x9c, + 0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25, + 0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47, + 0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9, + 0xcc, 0xda, 0x27, 0x10 }, + .ilen = 60, + .assoc = { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, + 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, + 0xab, 0xad, 0xda, 0xd2 }, + .alen = 20, + .result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, + 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, + 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, + 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72, + 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53, + 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25, + 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, + 0xba, 0x63, 0x7b, 0x39 }, + .rlen = 60, + .tag = { 0x25, 0x19, 0x49, 0x8e, 0x80, 0xf1, 0x47, 0x8f, + 0x37, 0xba, 0x55, 0xbd, 0x6d, 0x27, 0x61, 0x8c }, + .tlen = 16 + } +}; + /* Cast5 test vectors from RFC 2144 */ #define CAST5_ENC_TEST_VECTORS 3 #define CAST5_DEC_TEST_VECTORS 3 -- cgit v1.1 From 06e1a8f0505426a97292174a959560fd86ea0a3d Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Fri, 30 Nov 2007 00:15:11 +1100 Subject: [CRYPTO] aes-asm: Merge common glue code 32 bit and 64 bit glue code is using (now) the same piece code. This patch unifies them. 
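
The unification is possible because the glue layer depends on nothing
architecture-specific: both the i586 and the x86_64 assembler cores export
the same two entry points, so one C file can register the cipher for either
build (sketch of the shared contract only; the prototypes match those in the
new aes_glue.c below):

	asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
	asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);

Each Makefile target then simply links the common aes_glue.o against its own
assembler object.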
Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- arch/x86/crypto/Makefile | 4 ++-- arch/x86/crypto/aes_32.c | 58 ---------------------------------------------- arch/x86/crypto/aes_64.c | 56 -------------------------------------------- arch/x86/crypto/aes_glue.c | 57 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 59 insertions(+), 116 deletions(-) delete mode 100644 arch/x86/crypto/aes_32.c delete mode 100644 arch/x86/crypto/aes_64.c create mode 100644 arch/x86/crypto/aes_glue.c diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 46bb609..b8fbb43 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -8,8 +8,8 @@ obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o -aes-i586-y := aes-i586-asm_32.o aes_32.o +aes-i586-y := aes-i586-asm_32.o aes_glue.o twofish-i586-y := twofish-i586-asm_32.o twofish_32.o -aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o +aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o diff --git a/arch/x86/crypto/aes_32.c b/arch/x86/crypto/aes_32.c deleted file mode 100644 index 8556d95..0000000 --- a/arch/x86/crypto/aes_32.c +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Glue Code for optimized 586 assembler version of AES - */ - -#include -#include -#include - -asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); -asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); - -static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - aes_enc_blk(tfm, dst, src); -} - -static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - aes_dec_blk(tfm, dst, src); -} - -static struct crypto_alg aes_alg = { - .cra_name = "aes", - .cra_driver_name = "aes-i586", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypto_aes_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), - .cra_u = { - .cipher = { - .cia_min_keysize = AES_MIN_KEY_SIZE, - .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = crypto_aes_set_key, - .cia_encrypt = aes_encrypt, - .cia_decrypt = aes_decrypt - } - } -}; - -static int __init aes_init(void) -{ - return crypto_register_alg(&aes_alg); -} - -static void __exit aes_fini(void) -{ - crypto_unregister_alg(&aes_alg); -} - -module_init(aes_init); -module_exit(aes_fini); - -MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, i586 asm optimized"); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Fruhwirth Clemens, James Morris, Brian Gladman, Adam Richter"); -MODULE_ALIAS("aes"); diff --git a/arch/x86/crypto/aes_64.c b/arch/x86/crypto/aes_64.c deleted file mode 100644 index d7a41a9..0000000 --- a/arch/x86/crypto/aes_64.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Glue Code for AES Cipher Algorithm - * - */ - -#include - -asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in); -asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in); - -static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - aes_enc_blk(tfm, dst, src); -} - -static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - aes_dec_blk(tfm, dst, src); -} - -static struct crypto_alg aes_alg = { - .cra_name = "aes", - .cra_driver_name = "aes-x86_64", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = AES_BLOCK_SIZE, - 
.cra_ctxsize = sizeof(struct crypto_aes_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), - .cra_u = { - .cipher = { - .cia_min_keysize = AES_MIN_KEY_SIZE, - .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = crypto_aes_set_key, - .cia_encrypt = aes_encrypt, - .cia_decrypt = aes_decrypt - } - } -}; - -static int __init aes_init(void) -{ - return crypto_register_alg(&aes_alg); -} - -static void __exit aes_fini(void) -{ - crypto_unregister_alg(&aes_alg); -} - -module_init(aes_init); -module_exit(aes_fini); - -MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("aes"); diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c new file mode 100644 index 0000000..71f4578 --- /dev/null +++ b/arch/x86/crypto/aes_glue.c @@ -0,0 +1,57 @@ +/* + * Glue Code for the asm optimized version of the AES Cipher Algorithm + * + */ + +#include + +asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in); +asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in); + +static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + aes_enc_blk(tfm, dst, src); +} + +static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + aes_dec_blk(tfm, dst, src); +} + +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-asm", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_aes_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = crypto_aes_set_key, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt + } + } +}; + +static int __init aes_init(void) +{ + return crypto_register_alg(&aes_alg); +} + +static void __exit aes_fini(void) +{ + crypto_unregister_alg(&aes_alg); +} + +module_init(aes_init); +module_exit(aes_fini); + +MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("aes"); +MODULE_ALIAS("aes-asm"); -- cgit v1.1 From 0971eb0de9446b66bd45696338f54948314db379 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 30 Nov 2007 00:23:53 +1100 Subject: [CRYPTO] ctr: Fix multi-page processing When the data spans across a page boundary, CTR may incorrectly process a partial block in the middle because the blkcipher walking code may supply partial blocks in the middle as long as the total length of the supplied data is more than a block. CTR is supposed to return any unused partial block in that case to the walker. This patch fixes this by doing exactly that, returning partial blocks to the walker unless we received less than a block-worth of data to start with. This also allows us to optimise the bulk of the processing since we no longer have to worry about partial blocks until the very end. 
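
To make the intended control flow concrete, here is a minimal sketch of the
corrected top-level loop (names follow the patch below; the in-place branch
and error handling are trimmed). The per-segment helpers now consume full
blocks only and return the leftover byte count, which blkcipher_walk_done()
hands back to the walker; a dedicated final step XORs the trailing partial
block, if any, with one last keystream block:

	while (walk.nbytes >= bsize) {
		/* helper processes full blocks only, returns nbytes % bsize */
		nbytes = crypto_ctr_crypt_segment(&walk, child, counterblk,
						  ictx->countersize);
		/* return the unused partial block to the walker */
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	if (walk.nbytes) {
		/* less than one block left in total */
		crypto_ctr_crypt_final(&walk, child, counterblk,
				       ictx->countersize);
		err = blkcipher_walk_done(desc, &walk, 0);
	}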
Thanks to Tan Swee Heng for fixes and actually testing this :) Signed-off-by: Herbert Xu --- crypto/ctr.c | 64 +++++++++++++++++++++++++++++++----------------------------- 1 file changed, 33 insertions(+), 31 deletions(-) diff --git a/crypto/ctr.c b/crypto/ctr.c index b816e95..57da7d0 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -59,6 +59,21 @@ static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, return err; } +static void crypto_ctr_crypt_final(struct blkcipher_walk *walk, + struct crypto_cipher *tfm, u8 *ctrblk, + unsigned int countersize) +{ + unsigned int bsize = crypto_cipher_blocksize(tfm); + u8 *keystream = ctrblk + bsize; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + unsigned int nbytes = walk->nbytes; + + crypto_cipher_encrypt_one(tfm, keystream, ctrblk); + crypto_xor(keystream, src, nbytes); + memcpy(dst, keystream, nbytes); +} + static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, struct crypto_cipher *tfm, u8 *ctrblk, unsigned int countersize) @@ -66,35 +81,23 @@ static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; unsigned int bsize = crypto_cipher_blocksize(tfm); - unsigned long alignmask = crypto_cipher_alignmask(tfm) | - (__alignof__(u32) - 1); - u8 ks[bsize + alignmask]; - u8 *keystream = (u8 *)ALIGN((unsigned long)ks, alignmask + 1); u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; do { /* create keystream */ - fn(crypto_cipher_tfm(tfm), keystream, ctrblk); - crypto_xor(keystream, src, min(nbytes, bsize)); - - /* copy result into dst */ - memcpy(dst, keystream, min(nbytes, bsize)); + fn(crypto_cipher_tfm(tfm), dst, ctrblk); + crypto_xor(dst, src, bsize); /* increment counter in counterblock */ crypto_inc(ctrblk + bsize - countersize, countersize); - if (nbytes < bsize) - break; - src += bsize; dst += bsize; - nbytes -= bsize; - - } while (nbytes); + } while ((nbytes -= bsize) >= bsize); - return 0; + return nbytes; } static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk, @@ -104,30 +107,22 @@ static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk, void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; unsigned int bsize = crypto_cipher_blocksize(tfm); - unsigned long alignmask = crypto_cipher_alignmask(tfm) | - (__alignof__(u32) - 1); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; - u8 ks[bsize + alignmask]; - u8 *keystream = (u8 *)ALIGN((unsigned long)ks, alignmask + 1); + u8 *keystream = ctrblk + bsize; do { /* create keystream */ fn(crypto_cipher_tfm(tfm), keystream, ctrblk); - crypto_xor(src, keystream, min(nbytes, bsize)); + crypto_xor(src, keystream, bsize); /* increment counter in counterblock */ crypto_inc(ctrblk + bsize - countersize, countersize); - if (nbytes < bsize) - break; - src += bsize; - nbytes -= bsize; + } while ((nbytes -= bsize) >= bsize); - } while (nbytes); - - return 0; + return nbytes; } static int crypto_ctr_crypt(struct blkcipher_desc *desc, @@ -143,7 +138,7 @@ static int crypto_ctr_crypt(struct blkcipher_desc *desc, crypto_instance_ctx(crypto_tfm_alg_instance(&tfm->base)); unsigned long alignmask = crypto_cipher_alignmask(child) | (__alignof__(u32) - 1); - u8 cblk[bsize + alignmask]; + u8 cblk[bsize * 2 + alignmask]; u8 *counterblk = (u8 *)ALIGN((unsigned long)cblk, alignmask + 1); int err; @@ -158,7 +153,7 @@ static int crypto_ctr_crypt(struct blkcipher_desc *desc, /* 
initialize counter portion of counter block */ crypto_inc(counterblk + bsize - ictx->countersize, ictx->countersize); - while (walk.nbytes) { + while (walk.nbytes >= bsize) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_ctr_crypt_inplace(&walk, child, counterblk, @@ -170,6 +165,13 @@ static int crypto_ctr_crypt(struct blkcipher_desc *desc, err = blkcipher_walk_done(desc, &walk, nbytes); } + + if (walk.nbytes) { + crypto_ctr_crypt_final(&walk, child, counterblk, + ictx->countersize); + err = blkcipher_walk_done(desc, &walk, 0); + } + return err; } @@ -277,7 +279,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = 1; - inst->alg.cra_alignmask = __alignof__(u32) - 1; + inst->alg.cra_alignmask = alg->cra_alignmask | (__alignof__(u32) - 1); inst->alg.cra_type = &crypto_blkcipher_type; inst->alg.cra_blkcipher.ivsize = ivsize; -- cgit v1.1 From 6d1a69d53a34e6d906551d92e7639b739332b177 Mon Sep 17 00:00:00 2001 From: Tan Swee Heng Date: Fri, 30 Nov 2007 00:30:11 +1100 Subject: [CRYPTO] tcrypt: Support for large test vectors Currently the number of entries in a cipher test vector template is limited by TVMEMSIZE/sizeof(struct cipher_testvec). This patch circumvents the problem by pointing cipher_tv to each entry in the template, rather than the template itself. Signed-off-by: Tan Swee Heng Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 70 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 1e12b86..71dc02a 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -471,15 +471,11 @@ static void test_cipher(char *algo, int enc, printk("\ntesting %s %s\n", algo, e); tsize = sizeof (struct cipher_testvec); - tsize *= tcount; - if (tsize > TVMEMSIZE) { printk("template (%u) too big for tvmem (%u)\n", tsize, TVMEMSIZE); return; } - - memcpy(tvmem, template, tsize); cipher_tv = (void *)tvmem; init_completion(&result.completion); @@ -503,33 +499,34 @@ static void test_cipher(char *algo, int enc, j = 0; for (i = 0; i < tcount; i++) { - if (!(cipher_tv[i].np)) { + memcpy(cipher_tv, &template[i], tsize); + if (!(cipher_tv->np)) { j++; printk("test %u (%d bit key):\n", - j, cipher_tv[i].klen * 8); + j, cipher_tv->klen * 8); crypto_ablkcipher_clear_flags(tfm, ~0); - if (cipher_tv[i].wk) + if (cipher_tv->wk) crypto_ablkcipher_set_flags( tfm, CRYPTO_TFM_REQ_WEAK_KEY); - key = cipher_tv[i].key; + key = cipher_tv->key; ret = crypto_ablkcipher_setkey(tfm, key, - cipher_tv[i].klen); + cipher_tv->klen); if (ret) { printk("setkey() failed flags=%x\n", crypto_ablkcipher_get_flags(tfm)); - if (!cipher_tv[i].fail) + if (!cipher_tv->fail) goto out; } - sg_init_one(&sg[0], cipher_tv[i].input, - cipher_tv[i].ilen); + sg_init_one(&sg[0], cipher_tv->input, + cipher_tv->ilen); ablkcipher_request_set_crypt(req, sg, sg, - cipher_tv[i].ilen, - cipher_tv[i].iv); + cipher_tv->ilen, + cipher_tv->iv); ret = enc ? crypto_ablkcipher_encrypt(req) : @@ -553,11 +550,11 @@ static void test_cipher(char *algo, int enc, } q = kmap(sg_page(&sg[0])) + sg[0].offset; - hexdump(q, cipher_tv[i].rlen); + hexdump(q, cipher_tv->rlen); printk("%s\n", - memcmp(q, cipher_tv[i].result, - cipher_tv[i].rlen) ? "fail" : "pass"); + memcmp(q, cipher_tv->result, + cipher_tv->rlen) ? 
"fail" : "pass"); } } @@ -566,41 +563,42 @@ static void test_cipher(char *algo, int enc, j = 0; for (i = 0; i < tcount; i++) { - if (cipher_tv[i].np) { + memcpy(cipher_tv, &template[i], tsize); + if (cipher_tv->np) { j++; printk("test %u (%d bit key):\n", - j, cipher_tv[i].klen * 8); + j, cipher_tv->klen * 8); crypto_ablkcipher_clear_flags(tfm, ~0); - if (cipher_tv[i].wk) + if (cipher_tv->wk) crypto_ablkcipher_set_flags( tfm, CRYPTO_TFM_REQ_WEAK_KEY); - key = cipher_tv[i].key; + key = cipher_tv->key; ret = crypto_ablkcipher_setkey(tfm, key, - cipher_tv[i].klen); + cipher_tv->klen); if (ret) { printk("setkey() failed flags=%x\n", crypto_ablkcipher_get_flags(tfm)); - if (!cipher_tv[i].fail) + if (!cipher_tv->fail) goto out; } temp = 0; - sg_init_table(sg, cipher_tv[i].np); - for (k = 0; k < cipher_tv[i].np; k++) { + sg_init_table(sg, cipher_tv->np); + for (k = 0; k < cipher_tv->np; k++) { memcpy(&xbuf[IDX[k]], - cipher_tv[i].input + temp, - cipher_tv[i].tap[k]); - temp += cipher_tv[i].tap[k]; + cipher_tv->input + temp, + cipher_tv->tap[k]); + temp += cipher_tv->tap[k]; sg_set_buf(&sg[k], &xbuf[IDX[k]], - cipher_tv[i].tap[k]); + cipher_tv->tap[k]); } ablkcipher_request_set_crypt(req, sg, sg, - cipher_tv[i].ilen, - cipher_tv[i].iv); + cipher_tv->ilen, + cipher_tv->iv); ret = enc ? crypto_ablkcipher_encrypt(req) : @@ -624,15 +622,15 @@ static void test_cipher(char *algo, int enc, } temp = 0; - for (k = 0; k < cipher_tv[i].np; k++) { + for (k = 0; k < cipher_tv->np; k++) { printk("page %u\n", k); q = kmap(sg_page(&sg[k])) + sg[k].offset; - hexdump(q, cipher_tv[i].tap[k]); + hexdump(q, cipher_tv->tap[k]); printk("%s\n", - memcmp(q, cipher_tv[i].result + temp, - cipher_tv[i].tap[k]) ? "fail" : + memcmp(q, cipher_tv->result + temp, + cipher_tv->tap[k]) ? "fail" : "pass"); - temp += cipher_tv[i].tap[k]; + temp += cipher_tv->tap[k]; } } } -- cgit v1.1 From a773edb3ed0c3288f5ae76adc7d48c934ccfcf8c Mon Sep 17 00:00:00 2001 From: Tan Swee Heng Date: Fri, 30 Nov 2007 00:36:07 +1100 Subject: [CRYPTO] tcrypt: AES CTR large test vector This patch adds a large AES CTR mode test vector. The test vector is 4100 bytes in size. It was generated using a C++ program that called Crypto++. Note that this patch increases considerably the size of "struct cipher_testvec" and hence the size of tcrypt.ko. 
Signed-off-by: Tan Swee Heng Signed-off-by: Herbert Xu --- crypto/tcrypt.h | 1051 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 1048 insertions(+), 3 deletions(-) diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 2384c41..d9d97a6 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -43,8 +43,8 @@ struct hash_testvec { struct cipher_testvec { char key[MAX_KEYLEN] __attribute__ ((__aligned__(4))); char iv[MAX_IVLEN]; - char input[512]; - char result[512]; + char input[4100]; + char result[4100]; unsigned char tap[MAX_TAP]; int np; unsigned char fail; @@ -2311,7 +2311,7 @@ static struct cipher_testvec cast6_dec_tv_template[] = { #define AES_LRW_DEC_TEST_VECTORS 8 #define AES_XTS_ENC_TEST_VECTORS 4 #define AES_XTS_DEC_TEST_VECTORS 4 -#define AES_CTR_ENC_TEST_VECTORS 6 +#define AES_CTR_ENC_TEST_VECTORS 7 #define AES_CTR_DEC_TEST_VECTORS 6 #define AES_GCM_ENC_TEST_VECTORS 9 #define AES_GCM_DEC_TEST_VECTORS 8 @@ -3438,6 +3438,1051 @@ static struct cipher_testvec aes_ctr_enc_tv_template[] = { 0xb8, 0x30, 0x6b, 0x50, 0x8f, 0x83, 0x9d, 0x6a, 0x55, 0x30, 0x83, 0x1d, 0x93, 0x44, 0xaf, 0x1c }, .rlen = 32, + }, { + // generated using Crypto++ + .key = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x00, 0x00, 0x00, 0x00, + }, + .klen = 32 + 4, + .iv = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + .input = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, + 0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x15, + 0x18, 0x1b, 0x1e, 0x21, 0x24, 0x27, 0x2a, 0x2d, + 0x30, 0x33, 0x36, 0x39, 0x3c, 0x3f, 0x42, 0x45, + 0x48, 0x4b, 0x4e, 0x51, 0x54, 0x57, 0x5a, 0x5d, + 0x60, 0x63, 0x66, 0x69, 0x6c, 0x6f, 0x72, 0x75, + 0x78, 0x7b, 0x7e, 0x81, 0x84, 0x87, 0x8a, 0x8d, + 0x90, 0x93, 0x96, 0x99, 0x9c, 0x9f, 0xa2, 0xa5, + 0xa8, 0xab, 0xae, 0xb1, 0xb4, 0xb7, 0xba, 0xbd, + 0xc0, 0xc3, 0xc6, 0xc9, 0xcc, 0xcf, 
0xd2, 0xd5, + 0xd8, 0xdb, 0xde, 0xe1, 0xe4, 0xe7, 0xea, 0xed, + 0xf0, 0xf3, 0xf6, 0xf9, 0xfc, 0xff, 0x02, 0x05, + 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17, 0x1a, 0x1d, + 0x20, 0x23, 0x26, 0x29, 0x2c, 0x2f, 0x32, 0x35, + 0x38, 0x3b, 0x3e, 0x41, 0x44, 0x47, 0x4a, 0x4d, + 0x50, 0x53, 0x56, 0x59, 0x5c, 0x5f, 0x62, 0x65, + 0x68, 0x6b, 0x6e, 0x71, 0x74, 0x77, 0x7a, 0x7d, + 0x80, 0x83, 0x86, 0x89, 0x8c, 0x8f, 0x92, 0x95, + 0x98, 0x9b, 0x9e, 0xa1, 0xa4, 0xa7, 0xaa, 0xad, + 0xb0, 0xb3, 0xb6, 0xb9, 0xbc, 0xbf, 0xc2, 0xc5, + 0xc8, 0xcb, 0xce, 0xd1, 0xd4, 0xd7, 0xda, 0xdd, + 0xe0, 0xe3, 0xe6, 0xe9, 0xec, 0xef, 0xf2, 0xf5, + 0xf8, 0xfb, 0xfe, 0x01, 0x04, 0x07, 0x0a, 0x0d, + 0x10, 0x13, 0x16, 0x19, 0x1c, 0x1f, 0x22, 0x25, + 0x28, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3a, 0x3d, + 0x40, 0x43, 0x46, 0x49, 0x4c, 0x4f, 0x52, 0x55, + 0x58, 0x5b, 0x5e, 0x61, 0x64, 0x67, 0x6a, 0x6d, + 0x70, 0x73, 0x76, 0x79, 0x7c, 0x7f, 0x82, 0x85, + 0x88, 0x8b, 0x8e, 0x91, 0x94, 0x97, 0x9a, 0x9d, + 0xa0, 0xa3, 0xa6, 0xa9, 0xac, 0xaf, 0xb2, 0xb5, + 0xb8, 0xbb, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd, + 0xd0, 0xd3, 0xd6, 0xd9, 0xdc, 0xdf, 0xe2, 0xe5, + 0xe8, 0xeb, 0xee, 0xf1, 0xf4, 0xf7, 0xfa, 0xfd, + 0x00, 0x05, 0x0a, 0x0f, 0x14, 0x19, 0x1e, 0x23, + 0x28, 0x2d, 0x32, 0x37, 0x3c, 0x41, 0x46, 0x4b, + 0x50, 0x55, 0x5a, 0x5f, 0x64, 0x69, 0x6e, 0x73, + 0x78, 0x7d, 0x82, 0x87, 0x8c, 0x91, 0x96, 0x9b, + 0xa0, 0xa5, 0xaa, 0xaf, 0xb4, 0xb9, 0xbe, 0xc3, + 0xc8, 0xcd, 0xd2, 0xd7, 0xdc, 0xe1, 0xe6, 0xeb, + 0xf0, 0xf5, 0xfa, 0xff, 0x04, 0x09, 0x0e, 0x13, + 0x18, 0x1d, 0x22, 0x27, 0x2c, 0x31, 0x36, 0x3b, + 0x40, 0x45, 0x4a, 0x4f, 0x54, 0x59, 0x5e, 0x63, + 0x68, 0x6d, 0x72, 0x77, 0x7c, 0x81, 0x86, 0x8b, + 0x90, 0x95, 0x9a, 0x9f, 0xa4, 0xa9, 0xae, 0xb3, + 0xb8, 0xbd, 0xc2, 0xc7, 0xcc, 0xd1, 0xd6, 0xdb, + 0xe0, 0xe5, 0xea, 0xef, 0xf4, 0xf9, 0xfe, 0x03, + 0x08, 0x0d, 0x12, 0x17, 0x1c, 0x21, 0x26, 0x2b, + 0x30, 0x35, 0x3a, 0x3f, 0x44, 0x49, 0x4e, 0x53, + 0x58, 0x5d, 0x62, 0x67, 0x6c, 0x71, 0x76, 0x7b, + 0x80, 0x85, 0x8a, 0x8f, 0x94, 0x99, 0x9e, 0xa3, + 0xa8, 0xad, 0xb2, 0xb7, 0xbc, 0xc1, 0xc6, 0xcb, + 0xd0, 0xd5, 0xda, 0xdf, 0xe4, 0xe9, 0xee, 0xf3, + 0xf8, 0xfd, 0x02, 0x07, 0x0c, 0x11, 0x16, 0x1b, + 0x20, 0x25, 0x2a, 0x2f, 0x34, 0x39, 0x3e, 0x43, + 0x48, 0x4d, 0x52, 0x57, 0x5c, 0x61, 0x66, 0x6b, + 0x70, 0x75, 0x7a, 0x7f, 0x84, 0x89, 0x8e, 0x93, + 0x98, 0x9d, 0xa2, 0xa7, 0xac, 0xb1, 0xb6, 0xbb, + 0xc0, 0xc5, 0xca, 0xcf, 0xd4, 0xd9, 0xde, 0xe3, + 0xe8, 0xed, 0xf2, 0xf7, 0xfc, 0x01, 0x06, 0x0b, + 0x10, 0x15, 0x1a, 0x1f, 0x24, 0x29, 0x2e, 0x33, + 0x38, 0x3d, 0x42, 0x47, 0x4c, 0x51, 0x56, 0x5b, + 0x60, 0x65, 0x6a, 0x6f, 0x74, 0x79, 0x7e, 0x83, + 0x88, 0x8d, 0x92, 0x97, 0x9c, 0xa1, 0xa6, 0xab, + 0xb0, 0xb5, 0xba, 0xbf, 0xc4, 0xc9, 0xce, 0xd3, + 0xd8, 0xdd, 0xe2, 0xe7, 0xec, 0xf1, 0xf6, 0xfb, + 0x00, 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31, + 0x38, 0x3f, 0x46, 0x4d, 0x54, 0x5b, 0x62, 0x69, + 0x70, 0x77, 0x7e, 0x85, 0x8c, 0x93, 0x9a, 0xa1, + 0xa8, 0xaf, 0xb6, 0xbd, 0xc4, 0xcb, 0xd2, 0xd9, + 0xe0, 0xe7, 0xee, 0xf5, 0xfc, 0x03, 0x0a, 0x11, + 0x18, 0x1f, 0x26, 0x2d, 0x34, 0x3b, 0x42, 0x49, + 0x50, 0x57, 0x5e, 0x65, 0x6c, 0x73, 0x7a, 0x81, + 0x88, 0x8f, 0x96, 0x9d, 0xa4, 0xab, 0xb2, 0xb9, + 0xc0, 0xc7, 0xce, 0xd5, 0xdc, 0xe3, 0xea, 0xf1, + 0xf8, 0xff, 0x06, 0x0d, 0x14, 0x1b, 0x22, 0x29, + 0x30, 0x37, 0x3e, 0x45, 0x4c, 0x53, 0x5a, 0x61, + 0x68, 0x6f, 0x76, 0x7d, 0x84, 0x8b, 0x92, 0x99, + 0xa0, 0xa7, 0xae, 0xb5, 0xbc, 0xc3, 0xca, 0xd1, + 0xd8, 0xdf, 0xe6, 0xed, 0xf4, 0xfb, 0x02, 0x09, + 0x10, 0x17, 0x1e, 0x25, 0x2c, 0x33, 0x3a, 0x41, + 0x48, 0x4f, 0x56, 0x5d, 0x64, 0x6b, 
0x72, 0x79, + 0x80, 0x87, 0x8e, 0x95, 0x9c, 0xa3, 0xaa, 0xb1, + 0xb8, 0xbf, 0xc6, 0xcd, 0xd4, 0xdb, 0xe2, 0xe9, + 0xf0, 0xf7, 0xfe, 0x05, 0x0c, 0x13, 0x1a, 0x21, + 0x28, 0x2f, 0x36, 0x3d, 0x44, 0x4b, 0x52, 0x59, + 0x60, 0x67, 0x6e, 0x75, 0x7c, 0x83, 0x8a, 0x91, + 0x98, 0x9f, 0xa6, 0xad, 0xb4, 0xbb, 0xc2, 0xc9, + 0xd0, 0xd7, 0xde, 0xe5, 0xec, 0xf3, 0xfa, 0x01, + 0x08, 0x0f, 0x16, 0x1d, 0x24, 0x2b, 0x32, 0x39, + 0x40, 0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71, + 0x78, 0x7f, 0x86, 0x8d, 0x94, 0x9b, 0xa2, 0xa9, + 0xb0, 0xb7, 0xbe, 0xc5, 0xcc, 0xd3, 0xda, 0xe1, + 0xe8, 0xef, 0xf6, 0xfd, 0x04, 0x0b, 0x12, 0x19, + 0x20, 0x27, 0x2e, 0x35, 0x3c, 0x43, 0x4a, 0x51, + 0x58, 0x5f, 0x66, 0x6d, 0x74, 0x7b, 0x82, 0x89, + 0x90, 0x97, 0x9e, 0xa5, 0xac, 0xb3, 0xba, 0xc1, + 0xc8, 0xcf, 0xd6, 0xdd, 0xe4, 0xeb, 0xf2, 0xf9, + 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, + 0x48, 0x51, 0x5a, 0x63, 0x6c, 0x75, 0x7e, 0x87, + 0x90, 0x99, 0xa2, 0xab, 0xb4, 0xbd, 0xc6, 0xcf, + 0xd8, 0xe1, 0xea, 0xf3, 0xfc, 0x05, 0x0e, 0x17, + 0x20, 0x29, 0x32, 0x3b, 0x44, 0x4d, 0x56, 0x5f, + 0x68, 0x71, 0x7a, 0x83, 0x8c, 0x95, 0x9e, 0xa7, + 0xb0, 0xb9, 0xc2, 0xcb, 0xd4, 0xdd, 0xe6, 0xef, + 0xf8, 0x01, 0x0a, 0x13, 0x1c, 0x25, 0x2e, 0x37, + 0x40, 0x49, 0x52, 0x5b, 0x64, 0x6d, 0x76, 0x7f, + 0x88, 0x91, 0x9a, 0xa3, 0xac, 0xb5, 0xbe, 0xc7, + 0xd0, 0xd9, 0xe2, 0xeb, 0xf4, 0xfd, 0x06, 0x0f, + 0x18, 0x21, 0x2a, 0x33, 0x3c, 0x45, 0x4e, 0x57, + 0x60, 0x69, 0x72, 0x7b, 0x84, 0x8d, 0x96, 0x9f, + 0xa8, 0xb1, 0xba, 0xc3, 0xcc, 0xd5, 0xde, 0xe7, + 0xf0, 0xf9, 0x02, 0x0b, 0x14, 0x1d, 0x26, 0x2f, + 0x38, 0x41, 0x4a, 0x53, 0x5c, 0x65, 0x6e, 0x77, + 0x80, 0x89, 0x92, 0x9b, 0xa4, 0xad, 0xb6, 0xbf, + 0xc8, 0xd1, 0xda, 0xe3, 0xec, 0xf5, 0xfe, 0x07, + 0x10, 0x19, 0x22, 0x2b, 0x34, 0x3d, 0x46, 0x4f, + 0x58, 0x61, 0x6a, 0x73, 0x7c, 0x85, 0x8e, 0x97, + 0xa0, 0xa9, 0xb2, 0xbb, 0xc4, 0xcd, 0xd6, 0xdf, + 0xe8, 0xf1, 0xfa, 0x03, 0x0c, 0x15, 0x1e, 0x27, + 0x30, 0x39, 0x42, 0x4b, 0x54, 0x5d, 0x66, 0x6f, + 0x78, 0x81, 0x8a, 0x93, 0x9c, 0xa5, 0xae, 0xb7, + 0xc0, 0xc9, 0xd2, 0xdb, 0xe4, 0xed, 0xf6, 0xff, + 0x08, 0x11, 0x1a, 0x23, 0x2c, 0x35, 0x3e, 0x47, + 0x50, 0x59, 0x62, 0x6b, 0x74, 0x7d, 0x86, 0x8f, + 0x98, 0xa1, 0xaa, 0xb3, 0xbc, 0xc5, 0xce, 0xd7, + 0xe0, 0xe9, 0xf2, 0xfb, 0x04, 0x0d, 0x16, 0x1f, + 0x28, 0x31, 0x3a, 0x43, 0x4c, 0x55, 0x5e, 0x67, + 0x70, 0x79, 0x82, 0x8b, 0x94, 0x9d, 0xa6, 0xaf, + 0xb8, 0xc1, 0xca, 0xd3, 0xdc, 0xe5, 0xee, 0xf7, + 0x00, 0x0b, 0x16, 0x21, 0x2c, 0x37, 0x42, 0x4d, + 0x58, 0x63, 0x6e, 0x79, 0x84, 0x8f, 0x9a, 0xa5, + 0xb0, 0xbb, 0xc6, 0xd1, 0xdc, 0xe7, 0xf2, 0xfd, + 0x08, 0x13, 0x1e, 0x29, 0x34, 0x3f, 0x4a, 0x55, + 0x60, 0x6b, 0x76, 0x81, 0x8c, 0x97, 0xa2, 0xad, + 0xb8, 0xc3, 0xce, 0xd9, 0xe4, 0xef, 0xfa, 0x05, + 0x10, 0x1b, 0x26, 0x31, 0x3c, 0x47, 0x52, 0x5d, + 0x68, 0x73, 0x7e, 0x89, 0x94, 0x9f, 0xaa, 0xb5, + 0xc0, 0xcb, 0xd6, 0xe1, 0xec, 0xf7, 0x02, 0x0d, + 0x18, 0x23, 0x2e, 0x39, 0x44, 0x4f, 0x5a, 0x65, + 0x70, 0x7b, 0x86, 0x91, 0x9c, 0xa7, 0xb2, 0xbd, + 0xc8, 0xd3, 0xde, 0xe9, 0xf4, 0xff, 0x0a, 0x15, + 0x20, 0x2b, 0x36, 0x41, 0x4c, 0x57, 0x62, 0x6d, + 0x78, 0x83, 0x8e, 0x99, 0xa4, 0xaf, 0xba, 0xc5, + 0xd0, 0xdb, 0xe6, 0xf1, 0xfc, 0x07, 0x12, 0x1d, + 0x28, 0x33, 0x3e, 0x49, 0x54, 0x5f, 0x6a, 0x75, + 0x80, 0x8b, 0x96, 0xa1, 0xac, 0xb7, 0xc2, 0xcd, + 0xd8, 0xe3, 0xee, 0xf9, 0x04, 0x0f, 0x1a, 0x25, + 0x30, 0x3b, 0x46, 0x51, 0x5c, 0x67, 0x72, 0x7d, + 0x88, 0x93, 0x9e, 0xa9, 0xb4, 0xbf, 0xca, 0xd5, + 0xe0, 0xeb, 0xf6, 0x01, 0x0c, 0x17, 0x22, 0x2d, + 0x38, 0x43, 0x4e, 0x59, 0x64, 0x6f, 0x7a, 0x85, + 0x90, 0x9b, 0xa6, 0xb1, 0xbc, 0xc7, 
0xd2, 0xdd, + 0xe8, 0xf3, 0xfe, 0x09, 0x14, 0x1f, 0x2a, 0x35, + 0x40, 0x4b, 0x56, 0x61, 0x6c, 0x77, 0x82, 0x8d, + 0x98, 0xa3, 0xae, 0xb9, 0xc4, 0xcf, 0xda, 0xe5, + 0xf0, 0xfb, 0x06, 0x11, 0x1c, 0x27, 0x32, 0x3d, + 0x48, 0x53, 0x5e, 0x69, 0x74, 0x7f, 0x8a, 0x95, + 0xa0, 0xab, 0xb6, 0xc1, 0xcc, 0xd7, 0xe2, 0xed, + 0xf8, 0x03, 0x0e, 0x19, 0x24, 0x2f, 0x3a, 0x45, + 0x50, 0x5b, 0x66, 0x71, 0x7c, 0x87, 0x92, 0x9d, + 0xa8, 0xb3, 0xbe, 0xc9, 0xd4, 0xdf, 0xea, 0xf5, + 0x00, 0x0d, 0x1a, 0x27, 0x34, 0x41, 0x4e, 0x5b, + 0x68, 0x75, 0x82, 0x8f, 0x9c, 0xa9, 0xb6, 0xc3, + 0xd0, 0xdd, 0xea, 0xf7, 0x04, 0x11, 0x1e, 0x2b, + 0x38, 0x45, 0x52, 0x5f, 0x6c, 0x79, 0x86, 0x93, + 0xa0, 0xad, 0xba, 0xc7, 0xd4, 0xe1, 0xee, 0xfb, + 0x08, 0x15, 0x22, 0x2f, 0x3c, 0x49, 0x56, 0x63, + 0x70, 0x7d, 0x8a, 0x97, 0xa4, 0xb1, 0xbe, 0xcb, + 0xd8, 0xe5, 0xf2, 0xff, 0x0c, 0x19, 0x26, 0x33, + 0x40, 0x4d, 0x5a, 0x67, 0x74, 0x81, 0x8e, 0x9b, + 0xa8, 0xb5, 0xc2, 0xcf, 0xdc, 0xe9, 0xf6, 0x03, + 0x10, 0x1d, 0x2a, 0x37, 0x44, 0x51, 0x5e, 0x6b, + 0x78, 0x85, 0x92, 0x9f, 0xac, 0xb9, 0xc6, 0xd3, + 0xe0, 0xed, 0xfa, 0x07, 0x14, 0x21, 0x2e, 0x3b, + 0x48, 0x55, 0x62, 0x6f, 0x7c, 0x89, 0x96, 0xa3, + 0xb0, 0xbd, 0xca, 0xd7, 0xe4, 0xf1, 0xfe, 0x0b, + 0x18, 0x25, 0x32, 0x3f, 0x4c, 0x59, 0x66, 0x73, + 0x80, 0x8d, 0x9a, 0xa7, 0xb4, 0xc1, 0xce, 0xdb, + 0xe8, 0xf5, 0x02, 0x0f, 0x1c, 0x29, 0x36, 0x43, + 0x50, 0x5d, 0x6a, 0x77, 0x84, 0x91, 0x9e, 0xab, + 0xb8, 0xc5, 0xd2, 0xdf, 0xec, 0xf9, 0x06, 0x13, + 0x20, 0x2d, 0x3a, 0x47, 0x54, 0x61, 0x6e, 0x7b, + 0x88, 0x95, 0xa2, 0xaf, 0xbc, 0xc9, 0xd6, 0xe3, + 0xf0, 0xfd, 0x0a, 0x17, 0x24, 0x31, 0x3e, 0x4b, + 0x58, 0x65, 0x72, 0x7f, 0x8c, 0x99, 0xa6, 0xb3, + 0xc0, 0xcd, 0xda, 0xe7, 0xf4, 0x01, 0x0e, 0x1b, + 0x28, 0x35, 0x42, 0x4f, 0x5c, 0x69, 0x76, 0x83, + 0x90, 0x9d, 0xaa, 0xb7, 0xc4, 0xd1, 0xde, 0xeb, + 0xf8, 0x05, 0x12, 0x1f, 0x2c, 0x39, 0x46, 0x53, + 0x60, 0x6d, 0x7a, 0x87, 0x94, 0xa1, 0xae, 0xbb, + 0xc8, 0xd5, 0xe2, 0xef, 0xfc, 0x09, 0x16, 0x23, + 0x30, 0x3d, 0x4a, 0x57, 0x64, 0x71, 0x7e, 0x8b, + 0x98, 0xa5, 0xb2, 0xbf, 0xcc, 0xd9, 0xe6, 0xf3, + 0x00, 0x0f, 0x1e, 0x2d, 0x3c, 0x4b, 0x5a, 0x69, + 0x78, 0x87, 0x96, 0xa5, 0xb4, 0xc3, 0xd2, 0xe1, + 0xf0, 0xff, 0x0e, 0x1d, 0x2c, 0x3b, 0x4a, 0x59, + 0x68, 0x77, 0x86, 0x95, 0xa4, 0xb3, 0xc2, 0xd1, + 0xe0, 0xef, 0xfe, 0x0d, 0x1c, 0x2b, 0x3a, 0x49, + 0x58, 0x67, 0x76, 0x85, 0x94, 0xa3, 0xb2, 0xc1, + 0xd0, 0xdf, 0xee, 0xfd, 0x0c, 0x1b, 0x2a, 0x39, + 0x48, 0x57, 0x66, 0x75, 0x84, 0x93, 0xa2, 0xb1, + 0xc0, 0xcf, 0xde, 0xed, 0xfc, 0x0b, 0x1a, 0x29, + 0x38, 0x47, 0x56, 0x65, 0x74, 0x83, 0x92, 0xa1, + 0xb0, 0xbf, 0xce, 0xdd, 0xec, 0xfb, 0x0a, 0x19, + 0x28, 0x37, 0x46, 0x55, 0x64, 0x73, 0x82, 0x91, + 0xa0, 0xaf, 0xbe, 0xcd, 0xdc, 0xeb, 0xfa, 0x09, + 0x18, 0x27, 0x36, 0x45, 0x54, 0x63, 0x72, 0x81, + 0x90, 0x9f, 0xae, 0xbd, 0xcc, 0xdb, 0xea, 0xf9, + 0x08, 0x17, 0x26, 0x35, 0x44, 0x53, 0x62, 0x71, + 0x80, 0x8f, 0x9e, 0xad, 0xbc, 0xcb, 0xda, 0xe9, + 0xf8, 0x07, 0x16, 0x25, 0x34, 0x43, 0x52, 0x61, + 0x70, 0x7f, 0x8e, 0x9d, 0xac, 0xbb, 0xca, 0xd9, + 0xe8, 0xf7, 0x06, 0x15, 0x24, 0x33, 0x42, 0x51, + 0x60, 0x6f, 0x7e, 0x8d, 0x9c, 0xab, 0xba, 0xc9, + 0xd8, 0xe7, 0xf6, 0x05, 0x14, 0x23, 0x32, 0x41, + 0x50, 0x5f, 0x6e, 0x7d, 0x8c, 0x9b, 0xaa, 0xb9, + 0xc8, 0xd7, 0xe6, 0xf5, 0x04, 0x13, 0x22, 0x31, + 0x40, 0x4f, 0x5e, 0x6d, 0x7c, 0x8b, 0x9a, 0xa9, + 0xb8, 0xc7, 0xd6, 0xe5, 0xf4, 0x03, 0x12, 0x21, + 0x30, 0x3f, 0x4e, 0x5d, 0x6c, 0x7b, 0x8a, 0x99, + 0xa8, 0xb7, 0xc6, 0xd5, 0xe4, 0xf3, 0x02, 0x11, + 0x20, 0x2f, 0x3e, 0x4d, 0x5c, 0x6b, 0x7a, 0x89, + 0x98, 0xa7, 0xb6, 0xc5, 0xd4, 0xe3, 
0xf2, 0x01, + 0x10, 0x1f, 0x2e, 0x3d, 0x4c, 0x5b, 0x6a, 0x79, + 0x88, 0x97, 0xa6, 0xb5, 0xc4, 0xd3, 0xe2, 0xf1, + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, + 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, + 0x10, 0x21, 0x32, 0x43, 0x54, 0x65, 0x76, 0x87, + 0x98, 0xa9, 0xba, 0xcb, 0xdc, 0xed, 0xfe, 0x0f, + 0x20, 0x31, 0x42, 0x53, 0x64, 0x75, 0x86, 0x97, + 0xa8, 0xb9, 0xca, 0xdb, 0xec, 0xfd, 0x0e, 0x1f, + 0x30, 0x41, 0x52, 0x63, 0x74, 0x85, 0x96, 0xa7, + 0xb8, 0xc9, 0xda, 0xeb, 0xfc, 0x0d, 0x1e, 0x2f, + 0x40, 0x51, 0x62, 0x73, 0x84, 0x95, 0xa6, 0xb7, + 0xc8, 0xd9, 0xea, 0xfb, 0x0c, 0x1d, 0x2e, 0x3f, + 0x50, 0x61, 0x72, 0x83, 0x94, 0xa5, 0xb6, 0xc7, + 0xd8, 0xe9, 0xfa, 0x0b, 0x1c, 0x2d, 0x3e, 0x4f, + 0x60, 0x71, 0x82, 0x93, 0xa4, 0xb5, 0xc6, 0xd7, + 0xe8, 0xf9, 0x0a, 0x1b, 0x2c, 0x3d, 0x4e, 0x5f, + 0x70, 0x81, 0x92, 0xa3, 0xb4, 0xc5, 0xd6, 0xe7, + 0xf8, 0x09, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e, 0x6f, + 0x80, 0x91, 0xa2, 0xb3, 0xc4, 0xd5, 0xe6, 0xf7, + 0x08, 0x19, 0x2a, 0x3b, 0x4c, 0x5d, 0x6e, 0x7f, + 0x90, 0xa1, 0xb2, 0xc3, 0xd4, 0xe5, 0xf6, 0x07, + 0x18, 0x29, 0x3a, 0x4b, 0x5c, 0x6d, 0x7e, 0x8f, + 0xa0, 0xb1, 0xc2, 0xd3, 0xe4, 0xf5, 0x06, 0x17, + 0x28, 0x39, 0x4a, 0x5b, 0x6c, 0x7d, 0x8e, 0x9f, + 0xb0, 0xc1, 0xd2, 0xe3, 0xf4, 0x05, 0x16, 0x27, + 0x38, 0x49, 0x5a, 0x6b, 0x7c, 0x8d, 0x9e, 0xaf, + 0xc0, 0xd1, 0xe2, 0xf3, 0x04, 0x15, 0x26, 0x37, + 0x48, 0x59, 0x6a, 0x7b, 0x8c, 0x9d, 0xae, 0xbf, + 0xd0, 0xe1, 0xf2, 0x03, 0x14, 0x25, 0x36, 0x47, + 0x58, 0x69, 0x7a, 0x8b, 0x9c, 0xad, 0xbe, 0xcf, + 0xe0, 0xf1, 0x02, 0x13, 0x24, 0x35, 0x46, 0x57, + 0x68, 0x79, 0x8a, 0x9b, 0xac, 0xbd, 0xce, 0xdf, + 0xf0, 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67, + 0x78, 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef, + 0x00, 0x13, 0x26, 0x39, 0x4c, 0x5f, 0x72, 0x85, + 0x98, 0xab, 0xbe, 0xd1, 0xe4, 0xf7, 0x0a, 0x1d, + 0x30, 0x43, 0x56, 0x69, 0x7c, 0x8f, 0xa2, 0xb5, + 0xc8, 0xdb, 0xee, 0x01, 0x14, 0x27, 0x3a, 0x4d, + 0x60, 0x73, 0x86, 0x99, 0xac, 0xbf, 0xd2, 0xe5, + 0xf8, 0x0b, 0x1e, 0x31, 0x44, 0x57, 0x6a, 0x7d, + 0x90, 0xa3, 0xb6, 0xc9, 0xdc, 0xef, 0x02, 0x15, + 0x28, 0x3b, 0x4e, 0x61, 0x74, 0x87, 0x9a, 0xad, + 0xc0, 0xd3, 0xe6, 0xf9, 0x0c, 0x1f, 0x32, 0x45, + 0x58, 0x6b, 0x7e, 0x91, 0xa4, 0xb7, 0xca, 0xdd, + 0xf0, 0x03, 0x16, 0x29, 0x3c, 0x4f, 0x62, 0x75, + 0x88, 0x9b, 0xae, 0xc1, 0xd4, 0xe7, 0xfa, 0x0d, + 0x20, 0x33, 0x46, 0x59, 0x6c, 0x7f, 0x92, 0xa5, + 0xb8, 0xcb, 0xde, 0xf1, 0x04, 0x17, 0x2a, 0x3d, + 0x50, 0x63, 0x76, 0x89, 0x9c, 0xaf, 0xc2, 0xd5, + 0xe8, 0xfb, 0x0e, 0x21, 0x34, 0x47, 0x5a, 0x6d, + 0x80, 0x93, 0xa6, 0xb9, 0xcc, 0xdf, 0xf2, 0x05, + 0x18, 0x2b, 0x3e, 0x51, 0x64, 0x77, 0x8a, 0x9d, + 0xb0, 0xc3, 0xd6, 0xe9, 0xfc, 0x0f, 0x22, 0x35, + 0x48, 0x5b, 0x6e, 0x81, 0x94, 0xa7, 0xba, 0xcd, + 0xe0, 0xf3, 0x06, 0x19, 0x2c, 0x3f, 0x52, 0x65, + 0x78, 0x8b, 0x9e, 0xb1, 0xc4, 0xd7, 0xea, 0xfd, + 0x10, 0x23, 0x36, 0x49, 0x5c, 0x6f, 0x82, 0x95, + 0xa8, 0xbb, 0xce, 0xe1, 0xf4, 0x07, 0x1a, 0x2d, + 0x40, 0x53, 0x66, 0x79, 0x8c, 0x9f, 0xb2, 0xc5, + 0xd8, 0xeb, 0xfe, 0x11, 0x24, 0x37, 0x4a, 0x5d, + 0x70, 0x83, 0x96, 0xa9, 0xbc, 0xcf, 0xe2, 0xf5, + 0x08, 0x1b, 0x2e, 0x41, 0x54, 0x67, 0x7a, 0x8d, + 0xa0, 0xb3, 0xc6, 0xd9, 0xec, 0xff, 0x12, 0x25, + 0x38, 0x4b, 0x5e, 0x71, 0x84, 0x97, 0xaa, 0xbd, + 0xd0, 0xe3, 0xf6, 0x09, 0x1c, 0x2f, 0x42, 0x55, + 0x68, 0x7b, 0x8e, 0xa1, 0xb4, 0xc7, 0xda, 0xed, + 0x00, 0x15, 0x2a, 0x3f, 0x54, 0x69, 0x7e, 0x93, + 0xa8, 0xbd, 0xd2, 0xe7, 0xfc, 0x11, 0x26, 0x3b, + 0x50, 0x65, 0x7a, 0x8f, 0xa4, 0xb9, 0xce, 0xe3, + 0xf8, 0x0d, 0x22, 0x37, 0x4c, 0x61, 0x76, 0x8b, + 0xa0, 0xb5, 0xca, 0xdf, 0xf4, 0x09, 
0x1e, 0x33, + 0x48, 0x5d, 0x72, 0x87, 0x9c, 0xb1, 0xc6, 0xdb, + 0xf0, 0x05, 0x1a, 0x2f, 0x44, 0x59, 0x6e, 0x83, + 0x98, 0xad, 0xc2, 0xd7, 0xec, 0x01, 0x16, 0x2b, + 0x40, 0x55, 0x6a, 0x7f, 0x94, 0xa9, 0xbe, 0xd3, + 0xe8, 0xfd, 0x12, 0x27, 0x3c, 0x51, 0x66, 0x7b, + 0x90, 0xa5, 0xba, 0xcf, 0xe4, 0xf9, 0x0e, 0x23, + 0x38, 0x4d, 0x62, 0x77, 0x8c, 0xa1, 0xb6, 0xcb, + 0xe0, 0xf5, 0x0a, 0x1f, 0x34, 0x49, 0x5e, 0x73, + 0x88, 0x9d, 0xb2, 0xc7, 0xdc, 0xf1, 0x06, 0x1b, + 0x30, 0x45, 0x5a, 0x6f, 0x84, 0x99, 0xae, 0xc3, + 0xd8, 0xed, 0x02, 0x17, 0x2c, 0x41, 0x56, 0x6b, + 0x80, 0x95, 0xaa, 0xbf, 0xd4, 0xe9, 0xfe, 0x13, + 0x28, 0x3d, 0x52, 0x67, 0x7c, 0x91, 0xa6, 0xbb, + 0xd0, 0xe5, 0xfa, 0x0f, 0x24, 0x39, 0x4e, 0x63, + 0x78, 0x8d, 0xa2, 0xb7, 0xcc, 0xe1, 0xf6, 0x0b, + 0x20, 0x35, 0x4a, 0x5f, 0x74, 0x89, 0x9e, 0xb3, + 0xc8, 0xdd, 0xf2, 0x07, 0x1c, 0x31, 0x46, 0x5b, + 0x70, 0x85, 0x9a, 0xaf, 0xc4, 0xd9, 0xee, 0x03, + 0x18, 0x2d, 0x42, 0x57, 0x6c, 0x81, 0x96, 0xab, + 0xc0, 0xd5, 0xea, 0xff, 0x14, 0x29, 0x3e, 0x53, + 0x68, 0x7d, 0x92, 0xa7, 0xbc, 0xd1, 0xe6, 0xfb, + 0x10, 0x25, 0x3a, 0x4f, 0x64, 0x79, 0x8e, 0xa3, + 0xb8, 0xcd, 0xe2, 0xf7, 0x0c, 0x21, 0x36, 0x4b, + 0x60, 0x75, 0x8a, 0x9f, 0xb4, 0xc9, 0xde, 0xf3, + 0x08, 0x1d, 0x32, 0x47, 0x5c, 0x71, 0x86, 0x9b, + 0xb0, 0xc5, 0xda, 0xef, 0x04, 0x19, 0x2e, 0x43, + 0x58, 0x6d, 0x82, 0x97, 0xac, 0xc1, 0xd6, 0xeb, + 0x00, 0x17, 0x2e, 0x45, 0x5c, 0x73, 0x8a, 0xa1, + 0xb8, 0xcf, 0xe6, 0xfd, 0x14, 0x2b, 0x42, 0x59, + 0x70, 0x87, 0x9e, 0xb5, 0xcc, 0xe3, 0xfa, 0x11, + 0x28, 0x3f, 0x56, 0x6d, 0x84, 0x9b, 0xb2, 0xc9, + 0xe0, 0xf7, 0x0e, 0x25, 0x3c, 0x53, 0x6a, 0x81, + 0x98, 0xaf, 0xc6, 0xdd, 0xf4, 0x0b, 0x22, 0x39, + 0x50, 0x67, 0x7e, 0x95, 0xac, 0xc3, 0xda, 0xf1, + 0x08, 0x1f, 0x36, 0x4d, 0x64, 0x7b, 0x92, 0xa9, + 0xc0, 0xd7, 0xee, 0x05, 0x1c, 0x33, 0x4a, 0x61, + 0x78, 0x8f, 0xa6, 0xbd, 0xd4, 0xeb, 0x02, 0x19, + 0x30, 0x47, 0x5e, 0x75, 0x8c, 0xa3, 0xba, 0xd1, + 0xe8, 0xff, 0x16, 0x2d, 0x44, 0x5b, 0x72, 0x89, + 0xa0, 0xb7, 0xce, 0xe5, 0xfc, 0x13, 0x2a, 0x41, + 0x58, 0x6f, 0x86, 0x9d, 0xb4, 0xcb, 0xe2, 0xf9, + 0x10, 0x27, 0x3e, 0x55, 0x6c, 0x83, 0x9a, 0xb1, + 0xc8, 0xdf, 0xf6, 0x0d, 0x24, 0x3b, 0x52, 0x69, + 0x80, 0x97, 0xae, 0xc5, 0xdc, 0xf3, 0x0a, 0x21, + 0x38, 0x4f, 0x66, 0x7d, 0x94, 0xab, 0xc2, 0xd9, + 0xf0, 0x07, 0x1e, 0x35, 0x4c, 0x63, 0x7a, 0x91, + 0xa8, 0xbf, 0xd6, 0xed, 0x04, 0x1b, 0x32, 0x49, + 0x60, 0x77, 0x8e, 0xa5, 0xbc, 0xd3, 0xea, 0x01, + 0x18, 0x2f, 0x46, 0x5d, 0x74, 0x8b, 0xa2, 0xb9, + 0xd0, 0xe7, 0xfe, 0x15, 0x2c, 0x43, 0x5a, 0x71, + 0x88, 0x9f, 0xb6, 0xcd, 0xe4, 0xfb, 0x12, 0x29, + 0x40, 0x57, 0x6e, 0x85, 0x9c, 0xb3, 0xca, 0xe1, + 0xf8, 0x0f, 0x26, 0x3d, 0x54, 0x6b, 0x82, 0x99, + 0xb0, 0xc7, 0xde, 0xf5, 0x0c, 0x23, 0x3a, 0x51, + 0x68, 0x7f, 0x96, 0xad, 0xc4, 0xdb, 0xf2, 0x09, + 0x20, 0x37, 0x4e, 0x65, 0x7c, 0x93, 0xaa, 0xc1, + 0xd8, 0xef, 0x06, 0x1d, 0x34, 0x4b, 0x62, 0x79, + 0x90, 0xa7, 0xbe, 0xd5, 0xec, 0x03, 0x1a, 0x31, + 0x48, 0x5f, 0x76, 0x8d, 0xa4, 0xbb, 0xd2, 0xe9, + 0x00, 0x19, 0x32, 0x4b, 0x64, 0x7d, 0x96, 0xaf, + 0xc8, 0xe1, 0xfa, 0x13, 0x2c, 0x45, 0x5e, 0x77, + 0x90, 0xa9, 0xc2, 0xdb, 0xf4, 0x0d, 0x26, 0x3f, + 0x58, 0x71, 0x8a, 0xa3, 0xbc, 0xd5, 0xee, 0x07, + 0x20, 0x39, 0x52, 0x6b, 0x84, 0x9d, 0xb6, 0xcf, + 0xe8, 0x01, 0x1a, 0x33, 0x4c, 0x65, 0x7e, 0x97, + 0xb0, 0xc9, 0xe2, 0xfb, 0x14, 0x2d, 0x46, 0x5f, + 0x78, 0x91, 0xaa, 0xc3, 0xdc, 0xf5, 0x0e, 0x27, + 0x40, 0x59, 0x72, 0x8b, 0xa4, 0xbd, 0xd6, 0xef, + 0x08, 0x21, 0x3a, 0x53, 0x6c, 0x85, 0x9e, 0xb7, + 0xd0, 0xe9, 0x02, 0x1b, 0x34, 0x4d, 0x66, 0x7f, + 0x98, 0xb1, 0xca, 0xe3, 0xfc, 0x15, 
0x2e, 0x47, + 0x60, 0x79, 0x92, 0xab, 0xc4, 0xdd, 0xf6, 0x0f, + 0x28, 0x41, 0x5a, 0x73, 0x8c, 0xa5, 0xbe, 0xd7, + 0xf0, 0x09, 0x22, 0x3b, 0x54, 0x6d, 0x86, 0x9f, + 0xb8, 0xd1, 0xea, 0x03, 0x1c, 0x35, 0x4e, 0x67, + 0x80, 0x99, 0xb2, 0xcb, 0xe4, 0xfd, 0x16, 0x2f, + 0x48, 0x61, 0x7a, 0x93, 0xac, 0xc5, 0xde, 0xf7, + 0x10, 0x29, 0x42, 0x5b, 0x74, 0x8d, 0xa6, 0xbf, + 0xd8, 0xf1, 0x0a, 0x23, 0x3c, 0x55, 0x6e, 0x87, + 0xa0, 0xb9, 0xd2, 0xeb, 0x04, 0x1d, 0x36, 0x4f, + 0x68, 0x81, 0x9a, 0xb3, 0xcc, 0xe5, 0xfe, 0x17, + 0x30, 0x49, 0x62, 0x7b, 0x94, 0xad, 0xc6, 0xdf, + 0xf8, 0x11, 0x2a, 0x43, 0x5c, 0x75, 0x8e, 0xa7, + 0xc0, 0xd9, 0xf2, 0x0b, 0x24, 0x3d, 0x56, 0x6f, + 0x88, 0xa1, 0xba, 0xd3, 0xec, 0x05, 0x1e, 0x37, + 0x50, 0x69, 0x82, 0x9b, 0xb4, 0xcd, 0xe6, 0xff, + 0x18, 0x31, 0x4a, 0x63, 0x7c, 0x95, 0xae, 0xc7, + 0xe0, 0xf9, 0x12, 0x2b, 0x44, 0x5d, 0x76, 0x8f, + 0xa8, 0xc1, 0xda, 0xf3, 0x0c, 0x25, 0x3e, 0x57, + 0x70, 0x89, 0xa2, 0xbb, 0xd4, 0xed, 0x06, 0x1f, + 0x38, 0x51, 0x6a, 0x83, 0x9c, 0xb5, 0xce, 0xe7, + 0x00, 0x1b, 0x36, 0x51, 0x6c, 0x87, 0xa2, 0xbd, + 0xd8, 0xf3, 0x0e, 0x29, 0x44, 0x5f, 0x7a, 0x95, + 0xb0, 0xcb, 0xe6, 0x01, 0x1c, 0x37, 0x52, 0x6d, + 0x88, 0xa3, 0xbe, 0xd9, 0xf4, 0x0f, 0x2a, 0x45, + 0x60, 0x7b, 0x96, 0xb1, 0xcc, 0xe7, 0x02, 0x1d, + 0x38, 0x53, 0x6e, 0x89, 0xa4, 0xbf, 0xda, 0xf5, + 0x10, 0x2b, 0x46, 0x61, 0x7c, 0x97, 0xb2, 0xcd, + 0xe8, 0x03, 0x1e, 0x39, 0x54, 0x6f, 0x8a, 0xa5, + 0xc0, 0xdb, 0xf6, 0x11, 0x2c, 0x47, 0x62, 0x7d, + 0x98, 0xb3, 0xce, 0xe9, 0x04, 0x1f, 0x3a, 0x55, + 0x70, 0x8b, 0xa6, 0xc1, 0xdc, 0xf7, 0x12, 0x2d, + 0x48, 0x63, 0x7e, 0x99, 0xb4, 0xcf, 0xea, 0x05, + 0x20, 0x3b, 0x56, 0x71, 0x8c, 0xa7, 0xc2, 0xdd, + 0xf8, 0x13, 0x2e, 0x49, 0x64, 0x7f, 0x9a, 0xb5, + 0xd0, 0xeb, 0x06, 0x21, 0x3c, 0x57, 0x72, 0x8d, + 0xa8, 0xc3, 0xde, 0xf9, 0x14, 0x2f, 0x4a, 0x65, + 0x80, 0x9b, 0xb6, 0xd1, 0xec, 0x07, 0x22, 0x3d, + 0x58, 0x73, 0x8e, 0xa9, 0xc4, 0xdf, 0xfa, 0x15, + 0x30, 0x4b, 0x66, 0x81, 0x9c, 0xb7, 0xd2, 0xed, + 0x08, 0x23, 0x3e, 0x59, 0x74, 0x8f, 0xaa, 0xc5, + 0xe0, 0xfb, 0x16, 0x31, 0x4c, 0x67, 0x82, 0x9d, + 0xb8, 0xd3, 0xee, 0x09, 0x24, 0x3f, 0x5a, 0x75, + 0x90, 0xab, 0xc6, 0xe1, 0xfc, 0x17, 0x32, 0x4d, + 0x68, 0x83, 0x9e, 0xb9, 0xd4, 0xef, 0x0a, 0x25, + 0x40, 0x5b, 0x76, 0x91, 0xac, 0xc7, 0xe2, 0xfd, + 0x18, 0x33, 0x4e, 0x69, 0x84, 0x9f, 0xba, 0xd5, + 0xf0, 0x0b, 0x26, 0x41, 0x5c, 0x77, 0x92, 0xad, + 0xc8, 0xe3, 0xfe, 0x19, 0x34, 0x4f, 0x6a, 0x85, + 0xa0, 0xbb, 0xd6, 0xf1, 0x0c, 0x27, 0x42, 0x5d, + 0x78, 0x93, 0xae, 0xc9, 0xe4, 0xff, 0x1a, 0x35, + 0x50, 0x6b, 0x86, 0xa1, 0xbc, 0xd7, 0xf2, 0x0d, + 0x28, 0x43, 0x5e, 0x79, 0x94, 0xaf, 0xca, 0xe5, + 0x00, 0x1d, 0x3a, 0x57, 0x74, 0x91, 0xae, 0xcb, + 0xe8, 0x05, 0x22, 0x3f, 0x5c, 0x79, 0x96, 0xb3, + 0xd0, 0xed, 0x0a, 0x27, 0x44, 0x61, 0x7e, 0x9b, + 0xb8, 0xd5, 0xf2, 0x0f, 0x2c, 0x49, 0x66, 0x83, + 0xa0, 0xbd, 0xda, 0xf7, 0x14, 0x31, 0x4e, 0x6b, + 0x88, 0xa5, 0xc2, 0xdf, 0xfc, 0x19, 0x36, 0x53, + 0x70, 0x8d, 0xaa, 0xc7, 0xe4, 0x01, 0x1e, 0x3b, + 0x58, 0x75, 0x92, 0xaf, 0xcc, 0xe9, 0x06, 0x23, + 0x40, 0x5d, 0x7a, 0x97, 0xb4, 0xd1, 0xee, 0x0b, + 0x28, 0x45, 0x62, 0x7f, 0x9c, 0xb9, 0xd6, 0xf3, + 0x10, 0x2d, 0x4a, 0x67, 0x84, 0xa1, 0xbe, 0xdb, + 0xf8, 0x15, 0x32, 0x4f, 0x6c, 0x89, 0xa6, 0xc3, + 0xe0, 0xfd, 0x1a, 0x37, 0x54, 0x71, 0x8e, 0xab, + 0xc8, 0xe5, 0x02, 0x1f, 0x3c, 0x59, 0x76, 0x93, + 0xb0, 0xcd, 0xea, 0x07, 0x24, 0x41, 0x5e, 0x7b, + 0x98, 0xb5, 0xd2, 0xef, 0x0c, 0x29, 0x46, 0x63, + 0x80, 0x9d, 0xba, 0xd7, 0xf4, 0x11, 0x2e, 0x4b, + 0x68, 0x85, 0xa2, 0xbf, 0xdc, 0xf9, 0x16, 0x33, + 0x50, 0x6d, 0x8a, 0xa7, 0xc4, 0xe1, 
0xfe, 0x1b, + 0x38, 0x55, 0x72, 0x8f, 0xac, 0xc9, 0xe6, 0x03, + 0x20, 0x3d, 0x5a, 0x77, 0x94, 0xb1, 0xce, 0xeb, + 0x08, 0x25, 0x42, 0x5f, 0x7c, 0x99, 0xb6, 0xd3, + 0xf0, 0x0d, 0x2a, 0x47, 0x64, 0x81, 0x9e, 0xbb, + 0xd8, 0xf5, 0x12, 0x2f, 0x4c, 0x69, 0x86, 0xa3, + 0xc0, 0xdd, 0xfa, 0x17, 0x34, 0x51, 0x6e, 0x8b, + 0xa8, 0xc5, 0xe2, 0xff, 0x1c, 0x39, 0x56, 0x73, + 0x90, 0xad, 0xca, 0xe7, 0x04, 0x21, 0x3e, 0x5b, + 0x78, 0x95, 0xb2, 0xcf, 0xec, 0x09, 0x26, 0x43, + 0x60, 0x7d, 0x9a, 0xb7, 0xd4, 0xf1, 0x0e, 0x2b, + 0x48, 0x65, 0x82, 0x9f, 0xbc, 0xd9, 0xf6, 0x13, + 0x30, 0x4d, 0x6a, 0x87, 0xa4, 0xc1, 0xde, 0xfb, + 0x18, 0x35, 0x52, 0x6f, 0x8c, 0xa9, 0xc6, 0xe3, + 0x00, 0x1f, 0x3e, 0x5d, 0x7c, 0x9b, 0xba, 0xd9, + 0xf8, 0x17, 0x36, 0x55, 0x74, 0x93, 0xb2, 0xd1, + 0xf0, 0x0f, 0x2e, 0x4d, 0x6c, 0x8b, 0xaa, 0xc9, + 0xe8, 0x07, 0x26, 0x45, 0x64, 0x83, 0xa2, 0xc1, + 0xe0, 0xff, 0x1e, 0x3d, 0x5c, 0x7b, 0x9a, 0xb9, + 0xd8, 0xf7, 0x16, 0x35, 0x54, 0x73, 0x92, 0xb1, + 0xd0, 0xef, 0x0e, 0x2d, 0x4c, 0x6b, 0x8a, 0xa9, + 0xc8, 0xe7, 0x06, 0x25, 0x44, 0x63, 0x82, 0xa1, + 0xc0, 0xdf, 0xfe, 0x1d, 0x3c, 0x5b, 0x7a, 0x99, + 0xb8, 0xd7, 0xf6, 0x15, 0x34, 0x53, 0x72, 0x91, + 0xb0, 0xcf, 0xee, 0x0d, 0x2c, 0x4b, 0x6a, 0x89, + 0xa8, 0xc7, 0xe6, 0x05, 0x24, 0x43, 0x62, 0x81, + 0xa0, 0xbf, 0xde, 0xfd, 0x1c, 0x3b, 0x5a, 0x79, + 0x98, 0xb7, 0xd6, 0xf5, 0x14, 0x33, 0x52, 0x71, + 0x90, 0xaf, 0xce, 0xed, 0x0c, 0x2b, 0x4a, 0x69, + 0x88, 0xa7, 0xc6, 0xe5, 0x04, 0x23, 0x42, 0x61, + 0x80, 0x9f, 0xbe, 0xdd, 0xfc, 0x1b, 0x3a, 0x59, + 0x78, 0x97, 0xb6, 0xd5, 0xf4, 0x13, 0x32, 0x51, + 0x70, 0x8f, 0xae, 0xcd, 0xec, 0x0b, 0x2a, 0x49, + 0x68, 0x87, 0xa6, 0xc5, 0xe4, 0x03, 0x22, 0x41, + 0x60, 0x7f, 0x9e, 0xbd, 0xdc, 0xfb, 0x1a, 0x39, + 0x58, 0x77, 0x96, 0xb5, 0xd4, 0xf3, 0x12, 0x31, + 0x50, 0x6f, 0x8e, 0xad, 0xcc, 0xeb, 0x0a, 0x29, + 0x48, 0x67, 0x86, 0xa5, 0xc4, 0xe3, 0x02, 0x21, + 0x40, 0x5f, 0x7e, 0x9d, 0xbc, 0xdb, 0xfa, 0x19, + 0x38, 0x57, 0x76, 0x95, 0xb4, 0xd3, 0xf2, 0x11, + 0x30, 0x4f, 0x6e, 0x8d, 0xac, 0xcb, 0xea, 0x09, + 0x28, 0x47, 0x66, 0x85, 0xa4, 0xc3, 0xe2, 0x01, + 0x20, 0x3f, 0x5e, 0x7d, 0x9c, 0xbb, 0xda, 0xf9, + 0x18, 0x37, 0x56, 0x75, 0x94, 0xb3, 0xd2, 0xf1, + 0x10, 0x2f, 0x4e, 0x6d, 0x8c, 0xab, 0xca, 0xe9, + 0x08, 0x27, 0x46, 0x65, 0x84, 0xa3, 0xc2, 0xe1, + 0x00, 0x21, 0x42, 0x63, + }, + .ilen = 4100, + .result = { + 0xf0, 0x5c, 0x74, 0xad, 0x4e, 0xbc, 0x99, 0xe2, + 0xae, 0xff, 0x91, 0x3a, 0x44, 0xcf, 0x38, 0x32, + 0x1e, 0xad, 0xa7, 0xcd, 0xa1, 0x39, 0x95, 0xaa, + 0x10, 0xb1, 0xb3, 0x2e, 0x04, 0x31, 0x8f, 0x86, + 0xf2, 0x62, 0x74, 0x70, 0x0c, 0xa4, 0x46, 0x08, + 0xa8, 0xb7, 0x99, 0xa8, 0xe9, 0xd2, 0x73, 0x79, + 0x7e, 0x6e, 0xd4, 0x8f, 0x1e, 0xc7, 0x8e, 0x31, + 0x0b, 0xfa, 0x4b, 0xce, 0xfd, 0xf3, 0x57, 0x71, + 0xe9, 0x46, 0x03, 0xa5, 0x3d, 0x34, 0x00, 0xe2, + 0x18, 0xff, 0x75, 0x6d, 0x06, 0x2d, 0x00, 0xab, + 0xb9, 0x3e, 0x6c, 0x59, 0xc5, 0x84, 0x06, 0xb5, + 0x8b, 0xd0, 0x89, 0x9c, 0x4a, 0x79, 0x16, 0xc6, + 0x3d, 0x74, 0x54, 0xfa, 0x44, 0xcd, 0x23, 0x26, + 0x5c, 0xcf, 0x7e, 0x28, 0x92, 0x32, 0xbf, 0xdf, + 0xa7, 0x20, 0x3c, 0x74, 0x58, 0x2a, 0x9a, 0xde, + 0x61, 0x00, 0x1c, 0x4f, 0xff, 0x59, 0xc4, 0x22, + 0xac, 0x3c, 0xd0, 0xe8, 0x6c, 0xf9, 0x97, 0x1b, + 0x58, 0x9b, 0xad, 0x71, 0xe8, 0xa9, 0xb5, 0x0d, + 0xee, 0x2f, 0x04, 0x1f, 0x7f, 0xbc, 0x99, 0xee, + 0x84, 0xff, 0x42, 0x60, 0xdc, 0x3a, 0x18, 0xa5, + 0x81, 0xf9, 0xef, 0xdc, 0x7a, 0x0f, 0x65, 0x41, + 0x2f, 0xa3, 0xd3, 0xf9, 0xc2, 0xcb, 0xc0, 0x4d, + 0x8f, 0xd3, 0x76, 0x96, 0xad, 0x49, 0x6d, 0x38, + 0x3d, 0x39, 0x0b, 0x6c, 0x80, 0xb7, 0x54, 0x69, + 0xf0, 0x2c, 0x90, 0x02, 0x29, 
0x0d, 0x1c, 0x12, + 0xad, 0x55, 0xc3, 0x8b, 0x68, 0xd9, 0xcc, 0xb3, + 0xb2, 0x64, 0x33, 0x90, 0x5e, 0xca, 0x4b, 0xe2, + 0xfb, 0x75, 0xdc, 0x63, 0xf7, 0x9f, 0x82, 0x74, + 0xf0, 0xc9, 0xaa, 0x7f, 0xe9, 0x2a, 0x9b, 0x33, + 0xbc, 0x88, 0x00, 0x7f, 0xca, 0xb2, 0x1f, 0x14, + 0xdb, 0xc5, 0x8e, 0x7b, 0x11, 0x3c, 0x3e, 0x08, + 0xf3, 0x83, 0xe8, 0xe0, 0x94, 0x86, 0x2e, 0x92, + 0x78, 0x6b, 0x01, 0xc9, 0xc7, 0x83, 0xba, 0x21, + 0x6a, 0x25, 0x15, 0x33, 0x4e, 0x45, 0x08, 0xec, + 0x35, 0xdb, 0xe0, 0x6e, 0x31, 0x51, 0x79, 0xa9, + 0x42, 0x44, 0x65, 0xc1, 0xa0, 0xf1, 0xf9, 0x2a, + 0x70, 0xd5, 0xb6, 0xc6, 0xc1, 0x8c, 0x39, 0xfc, + 0x25, 0xa6, 0x55, 0xd9, 0xdd, 0x2d, 0x4c, 0xec, + 0x49, 0xc6, 0xeb, 0x0e, 0xa8, 0x25, 0x2a, 0x16, + 0x1b, 0x66, 0x84, 0xda, 0xe2, 0x92, 0xe5, 0xc0, + 0xc8, 0x53, 0x07, 0xaf, 0x80, 0x84, 0xec, 0xfd, + 0xcd, 0xd1, 0x6e, 0xcd, 0x6f, 0x6a, 0xf5, 0x36, + 0xc5, 0x15, 0xe5, 0x25, 0x7d, 0x77, 0xd1, 0x1a, + 0x93, 0x36, 0xa9, 0xcf, 0x7c, 0xa4, 0x54, 0x4a, + 0x06, 0x51, 0x48, 0x4e, 0xf6, 0x59, 0x87, 0xd2, + 0x04, 0x02, 0xef, 0xd3, 0x44, 0xde, 0x76, 0x31, + 0xb3, 0x34, 0x17, 0x1b, 0x9d, 0x66, 0x11, 0x9f, + 0x1e, 0xcc, 0x17, 0xe9, 0xc7, 0x3c, 0x1b, 0xe7, + 0xcb, 0x50, 0x08, 0xfc, 0xdc, 0x2b, 0x24, 0xdb, + 0x65, 0x83, 0xd0, 0x3b, 0xe3, 0x30, 0xea, 0x94, + 0x6c, 0xe7, 0xe8, 0x35, 0x32, 0xc7, 0xdb, 0x64, + 0xb4, 0x01, 0xab, 0x36, 0x2c, 0x77, 0x13, 0xaf, + 0xf8, 0x2b, 0x88, 0x3f, 0x54, 0x39, 0xc4, 0x44, + 0xfe, 0xef, 0x6f, 0x68, 0x34, 0xbe, 0x0f, 0x05, + 0x16, 0x6d, 0xf6, 0x0a, 0x30, 0xe7, 0xe3, 0xed, + 0xc4, 0xde, 0x3c, 0x1b, 0x13, 0xd8, 0xdb, 0xfe, + 0x41, 0x62, 0xe5, 0x28, 0xd4, 0x8d, 0xa3, 0xc7, + 0x93, 0x97, 0xc6, 0x48, 0x45, 0x1d, 0x9f, 0x83, + 0xdf, 0x4b, 0x40, 0x3e, 0x42, 0x25, 0x87, 0x80, + 0x4c, 0x7d, 0xa8, 0xd4, 0x98, 0x23, 0x95, 0x75, + 0x41, 0x8c, 0xda, 0x41, 0x9b, 0xd4, 0xa7, 0x06, + 0xb5, 0xf1, 0x71, 0x09, 0x53, 0xbe, 0xca, 0xbf, + 0x32, 0x03, 0xed, 0xf0, 0x50, 0x1c, 0x56, 0x39, + 0x5b, 0xa4, 0x75, 0x18, 0xf7, 0x9b, 0x58, 0xef, + 0x53, 0xfc, 0x2a, 0x38, 0x23, 0x15, 0x75, 0xcd, + 0x45, 0xe5, 0x5a, 0x82, 0x55, 0xba, 0x21, 0xfa, + 0xd4, 0xbd, 0xc6, 0x94, 0x7c, 0xc5, 0x80, 0x12, + 0xf7, 0x4b, 0x32, 0xc4, 0x9a, 0x82, 0xd8, 0x28, + 0x8f, 0xd9, 0xc2, 0x0f, 0x60, 0x03, 0xbe, 0x5e, + 0x21, 0xd6, 0x5f, 0x58, 0xbf, 0x5c, 0xb1, 0x32, + 0x82, 0x8d, 0xa9, 0xe5, 0xf2, 0x66, 0x1a, 0xc0, + 0xa0, 0xbc, 0x58, 0x2f, 0x71, 0xf5, 0x2f, 0xed, + 0xd1, 0x26, 0xb9, 0xd8, 0x49, 0x5a, 0x07, 0x19, + 0x01, 0x7c, 0x59, 0xb0, 0xf8, 0xa4, 0xb7, 0xd3, + 0x7b, 0x1a, 0x8c, 0x38, 0xf4, 0x50, 0xa4, 0x59, + 0xb0, 0xcc, 0x41, 0x0b, 0x88, 0x7f, 0xe5, 0x31, + 0xb3, 0x42, 0xba, 0xa2, 0x7e, 0xd4, 0x32, 0x71, + 0x45, 0x87, 0x48, 0xa9, 0xc2, 0xf2, 0x89, 0xb3, + 0xe4, 0xa7, 0x7e, 0x52, 0x15, 0x61, 0xfa, 0xfe, + 0xc9, 0xdd, 0x81, 0xeb, 0x13, 0xab, 0xab, 0xc3, + 0x98, 0x59, 0xd8, 0x16, 0x3d, 0x14, 0x7a, 0x1c, + 0x3c, 0x41, 0x9a, 0x16, 0x16, 0x9b, 0xd2, 0xd2, + 0x69, 0x3a, 0x29, 0x23, 0xac, 0x86, 0x32, 0xa5, + 0x48, 0x9c, 0x9e, 0xf3, 0x47, 0x77, 0x81, 0x70, + 0x24, 0xe8, 0x85, 0xd2, 0xf5, 0xb5, 0xfa, 0xff, + 0x59, 0x6a, 0xd3, 0x50, 0x59, 0x43, 0x59, 0xde, + 0xd9, 0xf1, 0x55, 0xa5, 0x0c, 0xc3, 0x1a, 0x1a, + 0x18, 0x34, 0x0d, 0x1a, 0x63, 0x33, 0xed, 0x10, + 0xe0, 0x1d, 0x2a, 0x18, 0xd2, 0xc0, 0x54, 0xa8, + 0xca, 0xb5, 0x9a, 0xd3, 0xdd, 0xca, 0x45, 0x84, + 0x50, 0xe7, 0x0f, 0xfe, 0xa4, 0x99, 0x5a, 0xbe, + 0x43, 0x2d, 0x9a, 0xcb, 0x92, 0x3f, 0x5a, 0x1d, + 0x85, 0xd8, 0xc9, 0xdf, 0x68, 0xc9, 0x12, 0x80, + 0x56, 0x0c, 0xdc, 0x00, 0xdc, 0x3a, 0x7d, 0x9d, + 0xa3, 0xa2, 0xe8, 0x4d, 0xbf, 0xf9, 0x70, 0xa0, + 0xa4, 0x13, 0x4f, 0x6b, 0xaf, 
0x0a, 0x89, 0x7f, + 0xda, 0xf0, 0xbf, 0x9b, 0xc8, 0x1d, 0xe5, 0xf8, + 0x2e, 0x8b, 0x07, 0xb5, 0x73, 0x1b, 0xcc, 0xa2, + 0xa6, 0xad, 0x30, 0xbc, 0x78, 0x3c, 0x5b, 0x10, + 0xfa, 0x5e, 0x62, 0x2d, 0x9e, 0x64, 0xb3, 0x33, + 0xce, 0xf9, 0x1f, 0x86, 0xe7, 0x8b, 0xa2, 0xb8, + 0xe8, 0x99, 0x57, 0x8c, 0x11, 0xed, 0x66, 0xd9, + 0x3c, 0x72, 0xb9, 0xc3, 0xe6, 0x4e, 0x17, 0x3a, + 0x6a, 0xcb, 0x42, 0x24, 0x06, 0xed, 0x3e, 0x4e, + 0xa3, 0xe8, 0x6a, 0x94, 0xda, 0x0d, 0x4e, 0xd5, + 0x14, 0x19, 0xcf, 0xb6, 0x26, 0xd8, 0x2e, 0xcc, + 0x64, 0x76, 0x38, 0x49, 0x4d, 0xfe, 0x30, 0x6d, + 0xe4, 0xc8, 0x8c, 0x7b, 0xc4, 0xe0, 0x35, 0xba, + 0x22, 0x6e, 0x76, 0xe1, 0x1a, 0xf2, 0x53, 0xc3, + 0x28, 0xa2, 0x82, 0x1f, 0x61, 0x69, 0xad, 0xc1, + 0x7b, 0x28, 0x4b, 0x1e, 0x6c, 0x85, 0x95, 0x9b, + 0x51, 0xb5, 0x17, 0x7f, 0x12, 0x69, 0x8c, 0x24, + 0xd5, 0xc7, 0x5a, 0x5a, 0x11, 0x54, 0xff, 0x5a, + 0xf7, 0x16, 0xc3, 0x91, 0xa6, 0xf0, 0xdc, 0x0a, + 0xb6, 0xa7, 0x4a, 0x0d, 0x7a, 0x58, 0xfe, 0xa5, + 0xf5, 0xcb, 0x8f, 0x7b, 0x0e, 0xea, 0x57, 0xe7, + 0xbd, 0x79, 0xd6, 0x1c, 0x88, 0x23, 0x6c, 0xf2, + 0x4d, 0x29, 0x77, 0x53, 0x35, 0x6a, 0x00, 0x8d, + 0xcd, 0xa3, 0x58, 0xbe, 0x77, 0x99, 0x18, 0xf8, + 0xe6, 0xe1, 0x8f, 0xe9, 0x37, 0x8f, 0xe3, 0xe2, + 0x5a, 0x8a, 0x93, 0x25, 0xaf, 0xf3, 0x78, 0x80, + 0xbe, 0xa6, 0x1b, 0xc6, 0xac, 0x8b, 0x1c, 0x91, + 0x58, 0xe1, 0x9f, 0x89, 0x35, 0x9d, 0x1d, 0x21, + 0x29, 0x9f, 0xf4, 0x99, 0x02, 0x27, 0x0f, 0xa8, + 0x4f, 0x79, 0x94, 0x2b, 0x33, 0x2c, 0xda, 0xa2, + 0x26, 0x39, 0x83, 0x94, 0xef, 0x27, 0xd8, 0x53, + 0x8f, 0x66, 0x0d, 0xe4, 0x41, 0x7d, 0x34, 0xcd, + 0x43, 0x7c, 0x95, 0x0a, 0x53, 0xef, 0x66, 0xda, + 0x7e, 0x9b, 0xf3, 0x93, 0xaf, 0xd0, 0x73, 0x71, + 0xba, 0x40, 0x9b, 0x74, 0xf8, 0xd7, 0xd7, 0x41, + 0x6d, 0xaf, 0x72, 0x9c, 0x8d, 0x21, 0x87, 0x3c, + 0xfd, 0x0a, 0x90, 0xa9, 0x47, 0x96, 0x9e, 0xd3, + 0x88, 0xee, 0x73, 0xcf, 0x66, 0x2f, 0x52, 0x56, + 0x6d, 0xa9, 0x80, 0x4c, 0xe2, 0x6f, 0x62, 0x88, + 0x3f, 0x0e, 0x54, 0x17, 0x48, 0x80, 0x5d, 0xd3, + 0xc3, 0xda, 0x25, 0x3d, 0xa1, 0xc8, 0xcb, 0x9f, + 0x9b, 0x70, 0xb3, 0xa1, 0xeb, 0x04, 0x52, 0xa1, + 0xf2, 0x22, 0x0f, 0xfc, 0xc8, 0x18, 0xfa, 0xf9, + 0x85, 0x9c, 0xf1, 0xac, 0xeb, 0x0c, 0x02, 0x46, + 0x75, 0xd2, 0xf5, 0x2c, 0xe3, 0xd2, 0x59, 0x94, + 0x12, 0xf3, 0x3c, 0xfc, 0xd7, 0x92, 0xfa, 0x36, + 0xba, 0x61, 0x34, 0x38, 0x7c, 0xda, 0x48, 0x3e, + 0x08, 0xc9, 0x39, 0x23, 0x5e, 0x02, 0x2c, 0x1a, + 0x18, 0x7e, 0xb4, 0xd9, 0xfd, 0x9e, 0x40, 0x02, + 0xb1, 0x33, 0x37, 0x32, 0xe7, 0xde, 0xd6, 0xd0, + 0x7c, 0x58, 0x65, 0x4b, 0xf8, 0x34, 0x27, 0x9c, + 0x44, 0xb4, 0xbd, 0xe9, 0xe9, 0x4c, 0x78, 0x7d, + 0x4b, 0x9f, 0xce, 0xb1, 0xcd, 0x47, 0xa5, 0x37, + 0xe5, 0x6d, 0xbd, 0xb9, 0x43, 0x94, 0x0a, 0xd4, + 0xd6, 0xf9, 0x04, 0x5f, 0xb5, 0x66, 0x6c, 0x1a, + 0x35, 0x12, 0xe3, 0x36, 0x28, 0x27, 0x36, 0x58, + 0x01, 0x2b, 0x79, 0xe4, 0xba, 0x6d, 0x10, 0x7d, + 0x65, 0xdf, 0x84, 0x95, 0xf4, 0xd5, 0xb6, 0x8f, + 0x2b, 0x9f, 0x96, 0x00, 0x86, 0x60, 0xf0, 0x21, + 0x76, 0xa8, 0x6a, 0x8c, 0x28, 0x1c, 0xb3, 0x6b, + 0x97, 0xd7, 0xb6, 0x53, 0x2a, 0xcc, 0xab, 0x40, + 0x9d, 0x62, 0x79, 0x58, 0x52, 0xe6, 0x65, 0xb7, + 0xab, 0x55, 0x67, 0x9c, 0x89, 0x7c, 0x03, 0xb0, + 0x73, 0x59, 0xc5, 0x81, 0xf5, 0x18, 0x17, 0x5c, + 0x89, 0xf3, 0x78, 0x35, 0x44, 0x62, 0x78, 0x72, + 0xd0, 0x96, 0xeb, 0x31, 0xe7, 0x87, 0x77, 0x14, + 0x99, 0x51, 0xf2, 0x59, 0x26, 0x9e, 0xb5, 0xa6, + 0x45, 0xfe, 0x6e, 0xbd, 0x07, 0x4c, 0x94, 0x5a, + 0xa5, 0x7d, 0xfc, 0xf1, 0x2b, 0x77, 0xe2, 0xfe, + 0x17, 0xd4, 0x84, 0xa0, 0xac, 0xb5, 0xc7, 0xda, + 0xa9, 0x1a, 0xb6, 0xf3, 0x74, 0x11, 0xb4, 0x9d, + 0xfb, 0x79, 0x2e, 0x04, 0x2d, 
0x50, 0x28, 0x83, + 0xbf, 0xc6, 0x52, 0xd3, 0x34, 0xd6, 0xe8, 0x7a, + 0xb6, 0xea, 0xe7, 0xa8, 0x6c, 0x15, 0x1e, 0x2c, + 0x57, 0xbc, 0x48, 0x4e, 0x5f, 0x5c, 0xb6, 0x92, + 0xd2, 0x49, 0x77, 0x81, 0x6d, 0x90, 0x70, 0xae, + 0x98, 0xa1, 0x03, 0x0d, 0x6b, 0xb9, 0x77, 0x14, + 0xf1, 0x4e, 0x23, 0xd3, 0xf8, 0x68, 0xbd, 0xc2, + 0xfe, 0x04, 0xb7, 0x5c, 0xc5, 0x17, 0x60, 0x8f, + 0x65, 0x54, 0xa4, 0x7a, 0x42, 0xdc, 0x18, 0x0d, + 0xb5, 0xcf, 0x0f, 0xd3, 0xc7, 0x91, 0x66, 0x1b, + 0x45, 0x42, 0x27, 0x75, 0x50, 0xe5, 0xee, 0xb8, + 0x7f, 0x33, 0x2c, 0xba, 0x4a, 0x92, 0x4d, 0x2c, + 0x3c, 0xe3, 0x0d, 0x80, 0x01, 0xba, 0x0d, 0x29, + 0xd8, 0x3c, 0xe9, 0x13, 0x16, 0x57, 0xe6, 0xea, + 0x94, 0x52, 0xe7, 0x00, 0x4d, 0x30, 0xb0, 0x0f, + 0x35, 0xb8, 0xb8, 0xa7, 0xb1, 0xb5, 0x3b, 0x44, + 0xe1, 0x2f, 0xfd, 0x88, 0xed, 0x43, 0xe7, 0x52, + 0x10, 0x93, 0xb3, 0x8a, 0x30, 0x6b, 0x0a, 0xf7, + 0x23, 0xc6, 0x50, 0x9d, 0x4a, 0xb0, 0xde, 0xc3, + 0xdc, 0x9b, 0x2f, 0x01, 0x56, 0x36, 0x09, 0xc5, + 0x2f, 0x6b, 0xfe, 0xf1, 0xd8, 0x27, 0x45, 0x03, + 0x30, 0x5e, 0x5c, 0x5b, 0xb4, 0x62, 0x0e, 0x1a, + 0xa9, 0x21, 0x2b, 0x92, 0x94, 0x87, 0x62, 0x57, + 0x4c, 0x10, 0x74, 0x1a, 0xf1, 0x0a, 0xc5, 0x84, + 0x3b, 0x9e, 0x72, 0x02, 0xd7, 0xcc, 0x09, 0x56, + 0xbd, 0x54, 0xc1, 0xf0, 0xc3, 0xe3, 0xb3, 0xf8, + 0xd2, 0x0d, 0x61, 0xcb, 0xef, 0xce, 0x0d, 0x05, + 0xb0, 0x98, 0xd9, 0x8e, 0x4f, 0xf9, 0xbc, 0x93, + 0xa6, 0xea, 0xc8, 0xcf, 0x10, 0x53, 0x4b, 0xf1, + 0xec, 0xfc, 0x89, 0xf9, 0x64, 0xb0, 0x22, 0xbf, + 0x9e, 0x55, 0x46, 0x9f, 0x7c, 0x50, 0x8e, 0x84, + 0x54, 0x20, 0x98, 0xd7, 0x6c, 0x40, 0x1e, 0xdb, + 0x69, 0x34, 0x78, 0x61, 0x24, 0x21, 0x9c, 0x8a, + 0xb3, 0x62, 0x31, 0x8b, 0x6e, 0xf5, 0x2a, 0x35, + 0x86, 0x13, 0xb1, 0x6c, 0x64, 0x2e, 0x41, 0xa5, + 0x05, 0xf2, 0x42, 0xba, 0xd2, 0x3a, 0x0d, 0x8e, + 0x8a, 0x59, 0x94, 0x3c, 0xcf, 0x36, 0x27, 0x82, + 0xc2, 0x45, 0xee, 0x58, 0xcd, 0x88, 0xb4, 0xec, + 0xde, 0xb2, 0x96, 0x0a, 0xaf, 0x38, 0x6f, 0x88, + 0xd7, 0xd8, 0xe1, 0xdf, 0xb9, 0x96, 0xa9, 0x0a, + 0xb1, 0x95, 0x28, 0x86, 0x20, 0xe9, 0x17, 0x49, + 0xa2, 0x29, 0x38, 0xaa, 0xa5, 0xe9, 0x6e, 0xf1, + 0x19, 0x27, 0xc0, 0xd5, 0x2a, 0x22, 0xc3, 0x0b, + 0xdb, 0x7c, 0x73, 0x10, 0xb9, 0xba, 0x89, 0x76, + 0x54, 0xae, 0x7d, 0x71, 0xb3, 0x93, 0xf6, 0x32, + 0xe6, 0x47, 0x43, 0x55, 0xac, 0xa0, 0x0d, 0xc2, + 0x93, 0x27, 0x4a, 0x8e, 0x0e, 0x74, 0x15, 0xc7, + 0x0b, 0x85, 0xd9, 0x0c, 0xa9, 0x30, 0x7a, 0x3e, + 0xea, 0x8f, 0x85, 0x6d, 0x3a, 0x12, 0x4f, 0x72, + 0x69, 0x58, 0x7a, 0x80, 0xbb, 0xb5, 0x97, 0xf3, + 0xcf, 0x70, 0xd2, 0x5d, 0xdd, 0x4d, 0x21, 0x79, + 0x54, 0x4d, 0xe4, 0x05, 0xe8, 0xbd, 0xc2, 0x62, + 0xb1, 0x3b, 0x77, 0x1c, 0xd6, 0x5c, 0xf3, 0xa0, + 0x79, 0x00, 0xa8, 0x6c, 0x29, 0xd9, 0x18, 0x24, + 0x36, 0xa2, 0x46, 0xc0, 0x96, 0x65, 0x7f, 0xbd, + 0x2a, 0xed, 0x36, 0x16, 0x0c, 0xaa, 0x9f, 0xf4, + 0xc5, 0xb4, 0xe2, 0x12, 0xed, 0x69, 0xed, 0x4f, + 0x26, 0x2c, 0x39, 0x52, 0x89, 0x98, 0xe7, 0x2c, + 0x99, 0xa4, 0x9e, 0xa3, 0x9b, 0x99, 0x46, 0x7a, + 0x3a, 0xdc, 0xa8, 0x59, 0xa3, 0xdb, 0xc3, 0x3b, + 0x95, 0x0d, 0x3b, 0x09, 0x6e, 0xee, 0x83, 0x5d, + 0x32, 0x4d, 0xed, 0xab, 0xfa, 0x98, 0x14, 0x4e, + 0xc3, 0x15, 0x45, 0x53, 0x61, 0xc4, 0x93, 0xbd, + 0x90, 0xf4, 0x99, 0x95, 0x4c, 0xe6, 0x76, 0x92, + 0x29, 0x90, 0x46, 0x30, 0x92, 0x69, 0x7d, 0x13, + 0xf2, 0xa5, 0xcd, 0x69, 0x49, 0x44, 0xb2, 0x0f, + 0x63, 0x40, 0x36, 0x5f, 0x09, 0xe2, 0x78, 0xf8, + 0x91, 0xe3, 0xe2, 0xfa, 0x10, 0xf7, 0xc8, 0x24, + 0xa8, 0x89, 0x32, 0x5c, 0x37, 0x25, 0x1d, 0xb2, + 0xea, 0x17, 0x8a, 0x0a, 0xa9, 0x64, 0xc3, 0x7c, + 0x3c, 0x7c, 0xbd, 0xc6, 0x79, 0x34, 0xe7, 0xe2, + 0x85, 0x8e, 0xbf, 0xf8, 0xde, 
0x92, 0xa0, 0xae, + 0x20, 0xc4, 0xf6, 0xbb, 0x1f, 0x38, 0x19, 0x0e, + 0xe8, 0x79, 0x9c, 0xa1, 0x23, 0xe9, 0x54, 0x7e, + 0x37, 0x2f, 0xe2, 0x94, 0x32, 0xaf, 0xa0, 0x23, + 0x49, 0xe4, 0xc0, 0xb3, 0xac, 0x00, 0x8f, 0x36, + 0x05, 0xc4, 0xa6, 0x96, 0xec, 0x05, 0x98, 0x4f, + 0x96, 0x67, 0x57, 0x1f, 0x20, 0x86, 0x1b, 0x2d, + 0x69, 0xe4, 0x29, 0x93, 0x66, 0x5f, 0xaf, 0x6b, + 0x88, 0x26, 0x2c, 0x67, 0x02, 0x4b, 0x52, 0xd0, + 0x83, 0x7a, 0x43, 0x1f, 0xc0, 0x71, 0x15, 0x25, + 0x77, 0x65, 0x08, 0x60, 0x11, 0x76, 0x4c, 0x8d, + 0xed, 0xa9, 0x27, 0xc6, 0xb1, 0x2a, 0x2c, 0x6a, + 0x4a, 0x97, 0xf5, 0xc6, 0xb7, 0x70, 0x42, 0xd3, + 0x03, 0xd1, 0x24, 0x95, 0xec, 0x6d, 0xab, 0x38, + 0x72, 0xce, 0xe2, 0x8b, 0x33, 0xd7, 0x51, 0x09, + 0xdc, 0x45, 0xe0, 0x09, 0x96, 0x32, 0xf3, 0xc4, + 0x84, 0xdc, 0x73, 0x73, 0x2d, 0x1b, 0x11, 0x98, + 0xc5, 0x0e, 0x69, 0x28, 0x94, 0xc7, 0xb5, 0x4d, + 0xc8, 0x8a, 0xd0, 0xaa, 0x13, 0x2e, 0x18, 0x74, + 0xdd, 0xd1, 0x1e, 0xf3, 0x90, 0xe8, 0xfc, 0x9a, + 0x72, 0x4a, 0x0e, 0xd1, 0xe4, 0xfb, 0x0d, 0x96, + 0xd1, 0x0c, 0x79, 0x85, 0x1b, 0x1c, 0xfe, 0xe1, + 0x62, 0x8f, 0x7a, 0x73, 0x32, 0xab, 0xc8, 0x18, + 0x69, 0xe3, 0x34, 0x30, 0xdf, 0x13, 0xa6, 0xe5, + 0xe8, 0x0e, 0x67, 0x7f, 0x81, 0x11, 0xb4, 0x60, + 0xc7, 0xbd, 0x79, 0x65, 0x50, 0xdc, 0xc4, 0x5b, + 0xde, 0x39, 0xa4, 0x01, 0x72, 0x63, 0xf3, 0xd1, + 0x64, 0x4e, 0xdf, 0xfc, 0x27, 0x92, 0x37, 0x0d, + 0x57, 0xcd, 0x11, 0x4f, 0x11, 0x04, 0x8e, 0x1d, + 0x16, 0xf7, 0xcd, 0x92, 0x9a, 0x99, 0x30, 0x14, + 0xf1, 0x7c, 0x67, 0x1b, 0x1f, 0x41, 0x0b, 0xe8, + 0x32, 0xe8, 0xb8, 0xc1, 0x4f, 0x54, 0x86, 0x4f, + 0xe5, 0x79, 0x81, 0x73, 0xcd, 0x43, 0x59, 0x68, + 0x73, 0x02, 0x3b, 0x78, 0x21, 0x72, 0x43, 0x00, + 0x49, 0x17, 0xf7, 0x00, 0xaf, 0x68, 0x24, 0x53, + 0x05, 0x0a, 0xc3, 0x33, 0xe0, 0x33, 0x3f, 0x69, + 0xd2, 0x84, 0x2f, 0x0b, 0xed, 0xde, 0x04, 0xf4, + 0x11, 0x94, 0x13, 0x69, 0x51, 0x09, 0x28, 0xde, + 0x57, 0x5c, 0xef, 0xdc, 0x9a, 0x49, 0x1c, 0x17, + 0x97, 0xf3, 0x96, 0xc1, 0x7f, 0x5d, 0x2e, 0x7d, + 0x55, 0xb8, 0xb3, 0x02, 0x09, 0xb3, 0x1f, 0xe7, + 0xc9, 0x8d, 0xa3, 0x36, 0x34, 0x8a, 0x77, 0x13, + 0x30, 0x63, 0x4c, 0xa5, 0xcd, 0xc3, 0xe0, 0x7e, + 0x05, 0xa1, 0x7b, 0x0c, 0xcb, 0x74, 0x47, 0x31, + 0x62, 0x03, 0x43, 0xf1, 0x87, 0xb4, 0xb0, 0x85, + 0x87, 0x8e, 0x4b, 0x25, 0xc7, 0xcf, 0xae, 0x4b, + 0x36, 0x46, 0x3e, 0x62, 0xbc, 0x6f, 0xeb, 0x5f, + 0x73, 0xac, 0xe6, 0x07, 0xee, 0xc1, 0xa1, 0xd6, + 0xc4, 0xab, 0xc9, 0xd6, 0x89, 0x45, 0xe1, 0xf1, + 0x04, 0x4e, 0x1a, 0x6f, 0xbb, 0x4f, 0x3a, 0xa3, + 0xa0, 0xcb, 0xa3, 0x0a, 0xd8, 0x71, 0x35, 0x55, + 0xe4, 0xbc, 0x2e, 0x04, 0x06, 0xe6, 0xff, 0x5b, + 0x1c, 0xc0, 0x11, 0x7c, 0xc5, 0x17, 0xf3, 0x38, + 0xcf, 0xe9, 0xba, 0x0f, 0x0e, 0xef, 0x02, 0xc2, + 0x8d, 0xc6, 0xbc, 0x4b, 0x67, 0x20, 0x95, 0xd7, + 0x2c, 0x45, 0x5b, 0x86, 0x44, 0x8c, 0x6f, 0x2e, + 0x7e, 0x9f, 0x1c, 0x77, 0xba, 0x6b, 0x0e, 0xa3, + 0x69, 0xdc, 0xab, 0x24, 0x57, 0x60, 0x47, 0xc1, + 0xd1, 0xa5, 0x9d, 0x23, 0xe6, 0xb1, 0x37, 0xfe, + 0x93, 0xd2, 0x4c, 0x46, 0xf9, 0x0c, 0xc6, 0xfb, + 0xd6, 0x9d, 0x99, 0x69, 0xab, 0x7a, 0x07, 0x0c, + 0x65, 0xe7, 0xc4, 0x08, 0x96, 0xe2, 0xa5, 0x01, + 0x3f, 0x46, 0x07, 0x05, 0x7e, 0xe8, 0x9a, 0x90, + 0x50, 0xdc, 0xe9, 0x7a, 0xea, 0xa1, 0x39, 0x6e, + 0x66, 0xe4, 0x6f, 0xa5, 0x5f, 0xb2, 0xd9, 0x5b, + 0xf5, 0xdb, 0x2a, 0x32, 0xf0, 0x11, 0x6f, 0x7c, + 0x26, 0x10, 0x8f, 0x3d, 0x80, 0xe9, 0x58, 0xf7, + 0xe0, 0xa8, 0x57, 0xf8, 0xdb, 0x0e, 0xce, 0x99, + 0x63, 0x19, 0x3d, 0xd5, 0xec, 0x1b, 0x77, 0x69, + 0x98, 0xf6, 0xe4, 0x5f, 0x67, 0x17, 0x4b, 0x09, + 0x85, 0x62, 0x82, 0x70, 0x18, 0xe2, 0x9a, 0x78, + 0xe2, 0x62, 0xbd, 0xb4, 0xf1, 
0x42, 0xc6, 0xfb, + 0x08, 0xd0, 0xbd, 0xeb, 0x4e, 0x09, 0xf2, 0xc8, + 0x1e, 0xdc, 0x3d, 0x32, 0x21, 0x56, 0x9c, 0x4f, + 0x35, 0xf3, 0x61, 0x06, 0x72, 0x84, 0xc4, 0x32, + 0xf2, 0xf1, 0xfa, 0x0b, 0x2f, 0xc3, 0xdb, 0x02, + 0x04, 0xc2, 0xde, 0x57, 0x64, 0x60, 0x8d, 0xcf, + 0xcb, 0x86, 0x5d, 0x97, 0x3e, 0xb1, 0x9c, 0x01, + 0xd6, 0x28, 0x8f, 0x99, 0xbc, 0x46, 0xeb, 0x05, + 0xaf, 0x7e, 0xb8, 0x21, 0x2a, 0x56, 0x85, 0x1c, + 0xb3, 0x71, 0xa0, 0xde, 0xca, 0x96, 0xf1, 0x78, + 0x49, 0xa2, 0x99, 0x81, 0x80, 0x5c, 0x01, 0xf5, + 0xa0, 0xa2, 0x56, 0x63, 0xe2, 0x70, 0x07, 0xa5, + 0x95, 0xd6, 0x85, 0xeb, 0x36, 0x9e, 0xa9, 0x51, + 0x66, 0x56, 0x5f, 0x1d, 0x02, 0x19, 0xe2, 0xf6, + 0x4f, 0x73, 0x38, 0x09, 0x75, 0x64, 0x48, 0xe0, + 0xf1, 0x7e, 0x0e, 0xe8, 0x9d, 0xf9, 0xed, 0x94, + 0xfe, 0x16, 0x26, 0x62, 0x49, 0x74, 0xf4, 0xb0, + 0xd4, 0xa9, 0x6c, 0xb0, 0xfd, 0x53, 0xe9, 0x81, + 0xe0, 0x7a, 0xbf, 0xcf, 0xb5, 0xc4, 0x01, 0x81, + 0x79, 0x99, 0x77, 0x01, 0x3b, 0xe9, 0xa2, 0xb6, + 0xe6, 0x6a, 0x8a, 0x9e, 0x56, 0x1c, 0x8d, 0x1e, + 0x8f, 0x06, 0x55, 0x2c, 0x6c, 0xdc, 0x92, 0x87, + 0x64, 0x3b, 0x4b, 0x19, 0xa1, 0x13, 0x64, 0x1d, + 0x4a, 0xe9, 0xc0, 0x00, 0xb8, 0x95, 0xef, 0x6b, + 0x1a, 0x86, 0x6d, 0x37, 0x52, 0x02, 0xc2, 0xe0, + 0xc8, 0xbb, 0x42, 0x0c, 0x02, 0x21, 0x4a, 0xc9, + 0xef, 0xa0, 0x54, 0xe4, 0x5e, 0x16, 0x53, 0x81, + 0x70, 0x62, 0x10, 0xaf, 0xde, 0xb8, 0xb5, 0xd3, + 0xe8, 0x5e, 0x6c, 0xc3, 0x8a, 0x3e, 0x18, 0x07, + 0xf2, 0x2f, 0x7d, 0xa7, 0xe1, 0x3d, 0x4e, 0xb4, + 0x26, 0xa7, 0xa3, 0x93, 0x86, 0xb2, 0x04, 0x1e, + 0x53, 0x5d, 0x86, 0xd6, 0xde, 0x65, 0xca, 0xe3, + 0x4e, 0xc1, 0xcf, 0xef, 0xc8, 0x70, 0x1b, 0x83, + 0x13, 0xdd, 0x18, 0x8b, 0x0d, 0x76, 0xd2, 0xf6, + 0x37, 0x7a, 0x93, 0x7a, 0x50, 0x11, 0x9f, 0x96, + 0x86, 0x25, 0xfd, 0xac, 0xdc, 0xbe, 0x18, 0x93, + 0x19, 0x6b, 0xec, 0x58, 0x4f, 0xb9, 0x75, 0xa7, + 0xdd, 0x3f, 0x2f, 0xec, 0xc8, 0x5a, 0x84, 0xab, + 0xd5, 0xe4, 0x8a, 0x07, 0xf6, 0x4d, 0x23, 0xd6, + 0x03, 0xfb, 0x03, 0x6a, 0xea, 0x66, 0xbf, 0xd4, + 0xb1, 0x34, 0xfb, 0x78, 0xe9, 0x55, 0xdc, 0x7c, + 0x3d, 0x9c, 0xe5, 0x9a, 0xac, 0xc3, 0x7a, 0x80, + 0x24, 0x6d, 0xa0, 0xef, 0x25, 0x7c, 0xb7, 0xea, + 0xce, 0x4d, 0x5f, 0x18, 0x60, 0xce, 0x87, 0x22, + 0x66, 0x2f, 0xd5, 0xdd, 0xdd, 0x02, 0x21, 0x75, + 0x82, 0xa0, 0x1f, 0x58, 0xc6, 0xd3, 0x62, 0xf7, + 0x32, 0xd8, 0xaf, 0x1e, 0x07, 0x77, 0x51, 0x96, + 0xd5, 0x6b, 0x1e, 0x7e, 0x80, 0x02, 0xe8, 0x67, + 0xea, 0x17, 0x0b, 0x10, 0xd2, 0x3f, 0x28, 0x25, + 0x4f, 0x05, 0x77, 0x02, 0x14, 0x69, 0xf0, 0x2c, + 0xbe, 0x0c, 0xf1, 0x74, 0x30, 0xd1, 0xb9, 0x9b, + 0xfc, 0x8c, 0xbb, 0x04, 0x16, 0xd9, 0xba, 0xc3, + 0xbc, 0x91, 0x8a, 0xc4, 0x30, 0xa4, 0xb0, 0x12, + 0x4c, 0x21, 0x87, 0xcb, 0xc9, 0x1d, 0x16, 0x96, + 0x07, 0x6f, 0x23, 0x54, 0xb9, 0x6f, 0x79, 0xe5, + 0x64, 0xc0, 0x64, 0xda, 0xb1, 0xae, 0xdd, 0x60, + 0x6c, 0x1a, 0x9d, 0xd3, 0x04, 0x8e, 0x45, 0xb0, + 0x92, 0x61, 0xd0, 0x48, 0x81, 0xed, 0x5e, 0x1d, + 0xa0, 0xc9, 0xa4, 0x33, 0xc7, 0x13, 0x51, 0x5d, + 0x7f, 0x83, 0x73, 0xb6, 0x70, 0x18, 0x65, 0x3e, + 0x2f, 0x0e, 0x7a, 0x12, 0x39, 0x98, 0xab, 0xd8, + 0x7e, 0x6f, 0xa3, 0xd1, 0xba, 0x56, 0xad, 0xbd, + 0xf0, 0x03, 0x01, 0x1c, 0x85, 0x35, 0x9f, 0xeb, + 0x19, 0x63, 0xa1, 0xaf, 0xfe, 0x2d, 0x35, 0x50, + 0x39, 0xa0, 0x65, 0x7c, 0x95, 0x7e, 0x6b, 0xfe, + 0xc1, 0xac, 0x07, 0x7c, 0x98, 0x4f, 0xbe, 0x57, + 0xa7, 0x22, 0xec, 0xe2, 0x7e, 0x29, 0x09, 0x53, + 0xe8, 0xbf, 0xb4, 0x7e, 0x3f, 0x8f, 0xfc, 0x14, + 0xce, 0x54, 0xf9, 0x18, 0x58, 0xb5, 0xff, 0x44, + 0x05, 0x9d, 0xce, 0x1b, 0xb6, 0x82, 0x23, 0xc8, + 0x2e, 0xbc, 0x69, 0xbb, 0x4a, 0x29, 0x0f, 0x65, + 0x94, 0xf0, 0x63, 0x06, 0x0e, 
0xef, 0x8c, 0xbd, + 0xff, 0xfd, 0xb0, 0x21, 0x6e, 0x57, 0x05, 0x75, + 0xda, 0xd5, 0xc4, 0xeb, 0x8d, 0x32, 0xf7, 0x50, + 0xd3, 0x6f, 0x22, 0xed, 0x5f, 0x8e, 0xa2, 0x5b, + 0x80, 0x8c, 0xc8, 0x78, 0x40, 0x24, 0x4b, 0x89, + 0x30, 0xce, 0x7a, 0x97, 0x0e, 0xc4, 0xaf, 0xef, + 0x9b, 0xb4, 0xcd, 0x66, 0x74, 0x14, 0x04, 0x2b, + 0xf7, 0xce, 0x0b, 0x1c, 0x6e, 0xc2, 0x78, 0x8c, + 0xca, 0xc5, 0xd0, 0x1c, 0x95, 0x4a, 0x91, 0x2d, + 0xa7, 0x20, 0xeb, 0x86, 0x52, 0xb7, 0x67, 0xd8, + 0x0c, 0xd6, 0x04, 0x14, 0xde, 0x51, 0x74, 0x75, + 0xe7, 0x11, 0xb4, 0x87, 0xa3, 0x3d, 0x2d, 0xad, + 0x4f, 0xef, 0xa0, 0x0f, 0x70, 0x00, 0x6d, 0x13, + 0x19, 0x1d, 0x41, 0x50, 0xe9, 0xd8, 0xf0, 0x32, + 0x71, 0xbc, 0xd3, 0x11, 0xf2, 0xac, 0xbe, 0xaf, + 0x75, 0x46, 0x65, 0x4e, 0x07, 0x34, 0x37, 0xa3, + 0x89, 0xfe, 0x75, 0xd4, 0x70, 0x4c, 0xc6, 0x3f, + 0x69, 0x24, 0x0e, 0x38, 0x67, 0x43, 0x8c, 0xde, + 0x06, 0xb5, 0xb8, 0xe7, 0xc4, 0xf0, 0x41, 0x8f, + 0xf0, 0xbd, 0x2f, 0x0b, 0xb9, 0x18, 0xf8, 0xde, + 0x64, 0xb1, 0xdb, 0xee, 0x00, 0x50, 0x77, 0xe1, + 0xc7, 0xff, 0xa6, 0xfa, 0xdd, 0x70, 0xf4, 0xe3, + 0x93, 0xe9, 0x77, 0x35, 0x3d, 0x4b, 0x2f, 0x2b, + 0x6d, 0x55, 0xf0, 0xfc, 0x88, 0x54, 0x4e, 0x89, + 0xc1, 0x8a, 0x23, 0x31, 0x2d, 0x14, 0x2a, 0xb8, + 0x1b, 0x15, 0xdd, 0x9e, 0x6e, 0x7b, 0xda, 0x05, + 0x91, 0x7d, 0x62, 0x64, 0x96, 0x72, 0xde, 0xfc, + 0xc1, 0xec, 0xf0, 0x23, 0x51, 0x6f, 0xdb, 0x5b, + 0x1d, 0x08, 0x57, 0xce, 0x09, 0xb8, 0xf6, 0xcd, + 0x8d, 0x95, 0xf2, 0x20, 0xbf, 0x0f, 0x20, 0x57, + 0x98, 0x81, 0x84, 0x4f, 0x15, 0x5c, 0x76, 0xe7, + 0x3e, 0x0a, 0x3a, 0x6c, 0xc4, 0x8a, 0xbe, 0x78, + 0x74, 0x77, 0xc3, 0x09, 0x4b, 0x5d, 0x48, 0xe4, + 0xc8, 0xcb, 0x0b, 0xea, 0x17, 0x28, 0xcf, 0xcf, + 0x31, 0x32, 0x44, 0xa4, 0xe5, 0x0e, 0x1a, 0x98, + 0x94, 0xc4, 0xf0, 0xff, 0xae, 0x3e, 0x44, 0xe8, + 0xa5, 0xb3, 0xb5, 0x37, 0x2f, 0xe8, 0xaf, 0x6f, + 0x28, 0xc1, 0x37, 0x5f, 0x31, 0xd2, 0xb9, 0x33, + 0xb1, 0xb2, 0x52, 0x94, 0x75, 0x2c, 0x29, 0x59, + 0x06, 0xc2, 0x25, 0xe8, 0x71, 0x65, 0x4e, 0xed, + 0xc0, 0x9c, 0xb1, 0xbb, 0x25, 0xdc, 0x6c, 0xe7, + 0x4b, 0xa5, 0x7a, 0x54, 0x7a, 0x60, 0xff, 0x7a, + 0xe0, 0x50, 0x40, 0x96, 0x35, 0x63, 0xe4, 0x0b, + 0x76, 0xbd, 0xa4, 0x65, 0x00, 0x1b, 0x57, 0x88, + 0xae, 0xed, 0x39, 0x88, 0x42, 0x11, 0x3c, 0xed, + 0x85, 0x67, 0x7d, 0xb9, 0x68, 0x82, 0xe9, 0x43, + 0x3c, 0x47, 0x53, 0xfa, 0xe8, 0xf8, 0x9f, 0x1f, + 0x9f, 0xef, 0x0f, 0xf7, 0x30, 0xd9, 0x30, 0x0e, + 0xb9, 0x9f, 0x69, 0x18, 0x2f, 0x7e, 0xf8, 0xf8, + 0xf8, 0x8c, 0x0f, 0xd4, 0x02, 0x4d, 0xea, 0xcd, + 0x0a, 0x9c, 0x6f, 0x71, 0x6d, 0x5a, 0x4c, 0x60, + 0xce, 0x20, 0x56, 0x32, 0xc6, 0xc5, 0x99, 0x1f, + 0x09, 0xe6, 0x4e, 0x18, 0x1a, 0x15, 0x13, 0xa8, + 0x7d, 0xb1, 0x6b, 0xc0, 0xb2, 0x6d, 0xf8, 0x26, + 0x66, 0xf8, 0x3d, 0x18, 0x74, 0x70, 0x66, 0x7a, + 0x34, 0x17, 0xde, 0xba, 0x47, 0xf1, 0x06, 0x18, + 0xcb, 0xaf, 0xeb, 0x4a, 0x1e, 0x8f, 0xa7, 0x77, + 0xe0, 0x3b, 0x78, 0x62, 0x66, 0xc9, 0x10, 0xea, + 0x1f, 0xb7, 0x29, 0x0a, 0x45, 0xa1, 0x1d, 0x1e, + 0x1d, 0xe2, 0x65, 0x61, 0x50, 0x9c, 0xd7, 0x05, + 0xf2, 0x0b, 0x5b, 0x12, 0x61, 0x02, 0xc8, 0xe5, + 0x63, 0x4f, 0x20, 0x0c, 0x07, 0x17, 0x33, 0x5e, + 0x03, 0x9a, 0x53, 0x0f, 0x2e, 0x55, 0xfe, 0x50, + 0x43, 0x7d, 0xd0, 0xb6, 0x7e, 0x5a, 0xda, 0xae, + 0x58, 0xef, 0x15, 0xa9, 0x83, 0xd9, 0x46, 0xb1, + 0x42, 0xaa, 0xf5, 0x02, 0x6c, 0xce, 0x92, 0x06, + 0x1b, 0xdb, 0x66, 0x45, 0x91, 0x79, 0xc2, 0x2d, + 0xe6, 0x53, 0xd3, 0x14, 0xfd, 0xbb, 0x44, 0x63, + 0xc6, 0xd7, 0x3d, 0x7a, 0x0c, 0x75, 0x78, 0x9d, + 0x5c, 0xa6, 0x39, 0xb3, 0xe5, 0x63, 0xca, 0x8b, + 0xfe, 0xd3, 0xef, 0x60, 0x83, 0xf6, 0x8e, 0x70, + 0xb6, 0x67, 0xc7, 0x77, 0xed, 
0x23, 0xef, 0x4c, + 0xf0, 0xed, 0x2d, 0x07, 0x59, 0x6f, 0xc1, 0x01, + 0x34, 0x37, 0x08, 0xab, 0xd9, 0x1f, 0x09, 0xb1, + 0xce, 0x5b, 0x17, 0xff, 0x74, 0xf8, 0x9c, 0xd5, + 0x2c, 0x56, 0x39, 0x79, 0x0f, 0x69, 0x44, 0x75, + 0x58, 0x27, 0x01, 0xc4, 0xbf, 0xa7, 0xa1, 0x1d, + 0x90, 0x17, 0x77, 0x86, 0x5a, 0x3f, 0xd9, 0xd1, + 0x0e, 0xa0, 0x10, 0xf8, 0xec, 0x1e, 0xa5, 0x7f, + 0x5e, 0x36, 0xd1, 0xe3, 0x04, 0x2c, 0x70, 0xf7, + 0x8e, 0xc0, 0x98, 0x2f, 0x6c, 0x94, 0x2b, 0x41, + 0xb7, 0x60, 0x00, 0xb7, 0x2e, 0xb8, 0x02, 0x8d, + 0xb8, 0xb0, 0xd3, 0x86, 0xba, 0x1d, 0xd7, 0x90, + 0xd6, 0xb6, 0xe1, 0xfc, 0xd7, 0xd8, 0x28, 0x06, + 0x63, 0x9b, 0xce, 0x61, 0x24, 0x79, 0xc0, 0x70, + 0x52, 0xd0, 0xb6, 0xd4, 0x28, 0x95, 0x24, 0x87, + 0x03, 0x1f, 0xb7, 0x9a, 0xda, 0xa3, 0xfb, 0x52, + 0x5b, 0x68, 0xe7, 0x4c, 0x8c, 0x24, 0xe1, 0x42, + 0xf7, 0xd5, 0xfd, 0xad, 0x06, 0x32, 0x9f, 0xba, + 0xc1, 0xfc, 0xdd, 0xc6, 0xfc, 0xfc, 0xb3, 0x38, + 0x74, 0x56, 0x58, 0x40, 0x02, 0x37, 0x52, 0x2c, + 0x55, 0xcc, 0xb3, 0x9e, 0x7a, 0xe9, 0xd4, 0x38, + 0x41, 0x5e, 0x0c, 0x35, 0xe2, 0x11, 0xd1, 0x13, + 0xf8, 0xb7, 0x8d, 0x72, 0x6b, 0x22, 0x2a, 0xb0, + 0xdb, 0x08, 0xba, 0x35, 0xb9, 0x3f, 0xc8, 0xd3, + 0x24, 0x90, 0xec, 0x58, 0xd2, 0x09, 0xc7, 0x2d, + 0xed, 0x38, 0x80, 0x36, 0x72, 0x43, 0x27, 0x49, + 0x4a, 0x80, 0x8a, 0xa2, 0xe8, 0xd3, 0xda, 0x30, + 0x7d, 0xb6, 0x82, 0x37, 0x86, 0x92, 0x86, 0x3e, + 0x08, 0xb2, 0x28, 0x5a, 0x55, 0x44, 0x24, 0x7d, + 0x40, 0x48, 0x8a, 0xb6, 0x89, 0x58, 0x08, 0xa0, + 0xd6, 0x6d, 0x3a, 0x17, 0xbf, 0xf6, 0x54, 0xa2, + 0xf5, 0xd3, 0x8c, 0x0f, 0x78, 0x12, 0x57, 0x8b, + 0xd5, 0xc2, 0xfd, 0x58, 0x5b, 0x7f, 0x38, 0xe3, + 0xcc, 0xb7, 0x7c, 0x48, 0xb3, 0x20, 0xe8, 0x81, + 0x14, 0x32, 0x45, 0x05, 0xe0, 0xdb, 0x9f, 0x75, + 0x85, 0xb4, 0x6a, 0xfc, 0x95, 0xe3, 0x54, 0x22, + 0x12, 0xee, 0x30, 0xfe, 0xd8, 0x30, 0xef, 0x34, + 0x50, 0xab, 0x46, 0x30, 0x98, 0x2f, 0xb7, 0xc0, + 0x15, 0xa2, 0x83, 0xb6, 0xf2, 0x06, 0x21, 0xa2, + 0xc3, 0x26, 0x37, 0x14, 0xd1, 0x4d, 0xb5, 0x10, + 0x52, 0x76, 0x4d, 0x6a, 0xee, 0xb5, 0x2b, 0x15, + 0xb7, 0xf9, 0x51, 0xe8, 0x2a, 0xaf, 0xc7, 0xfa, + 0x77, 0xaf, 0xb0, 0x05, 0x4d, 0xd1, 0x68, 0x8e, + 0x74, 0x05, 0x9f, 0x9d, 0x93, 0xa5, 0x3e, 0x7f, + 0x4e, 0x5f, 0x9d, 0xcb, 0x09, 0xc7, 0x83, 0xe3, + 0x02, 0x9d, 0x27, 0x1f, 0xef, 0x85, 0x05, 0x8d, + 0xec, 0x55, 0x88, 0x0f, 0x0d, 0x7c, 0x4c, 0xe8, + 0xa1, 0x75, 0xa0, 0xd8, 0x06, 0x47, 0x14, 0xef, + 0xaa, 0x61, 0xcf, 0x26, 0x15, 0xad, 0xd8, 0xa3, + 0xaa, 0x75, 0xf2, 0x78, 0x4a, 0x5a, 0x61, 0xdf, + 0x8b, 0xc7, 0x04, 0xbc, 0xb2, 0x32, 0xd2, 0x7e, + 0x42, 0xee, 0xb4, 0x2f, 0x51, 0xff, 0x7b, 0x2e, + 0xd3, 0x02, 0xe8, 0xdc, 0x5d, 0x0d, 0x50, 0xdc, + 0xae, 0xb7, 0x46, 0xf9, 0xa8, 0xe6, 0xd0, 0x16, + 0xcc, 0xe6, 0x2c, 0x81, 0xc7, 0xad, 0xe9, 0xf0, + 0x05, 0x72, 0x6d, 0x3d, 0x0a, 0x7a, 0xa9, 0x02, + 0xac, 0x82, 0x93, 0x6e, 0xb6, 0x1c, 0x28, 0xfc, + 0x44, 0x12, 0xfb, 0x73, 0x77, 0xd4, 0x13, 0x39, + 0x29, 0x88, 0x8a, 0xf3, 0x5c, 0xa6, 0x36, 0xa0, + 0x2a, 0xed, 0x7e, 0xb1, 0x1d, 0xd6, 0x4c, 0x6b, + 0x41, 0x01, 0x18, 0x5d, 0x5d, 0x07, 0x97, 0xa6, + 0x4b, 0xef, 0x31, 0x18, 0xea, 0xac, 0xb1, 0x84, + 0x21, 0xed, 0xda, 0x86, + }, + .rlen = 4100, }, }; -- cgit v1.1 From 9617d6ef6278edd04070ae404c871f65a466c6d2 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Fri, 30 Nov 2007 15:57:05 +1100 Subject: [CRYPTO] tcrypt: AES CBC test vectors from NIST SP800-38A Add test vectors to tcrypt for AES in CBC mode for key sizes 192 and 256. The test vectors are copied from NIST SP800-38A. 
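(Illustration only, not part of this patch: the sketch below shows roughly how tcrypt-style code drives one of these CBC vectors through the synchronous blkcipher API of this era, much as tcrypt's test_cipher() does. The helper name, the fixed 64-byte buffer and the trimmed error handling are assumptions made for brevity, and it presumes the aes and cbc algorithms are available.)

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Encrypt one test vector in place and compare with the expected result. */
static int check_one_cbc_vector(const u8 *key, unsigned int klen,
                                const u8 *iv, const u8 *input,
                                const u8 *result, unsigned int len)
{
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct scatterlist sg;
        u8 buf[64];                     /* the NIST vectors are 64 bytes */
        int err;

        if (len > sizeof(buf))
                return -EINVAL;

        tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        desc.tfm = tfm;
        desc.flags = 0;

        err = crypto_blkcipher_setkey(tfm, key, klen);
        if (err)
                goto out;

        crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

        memcpy(buf, input, len);
        sg_init_one(&sg, buf, len);

        err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
        if (!err && memcmp(buf, result, len))
                err = -EINVAL;          /* ciphertext mismatch */
out:
        crypto_free_blkcipher(tfm);
        return err;
}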
Signed-off-by: Jan Glauber Signed-off-by: Herbert Xu --- crypto/tcrypt.h | 106 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 104 insertions(+), 2 deletions(-) diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index d9d97a6..05aba22 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -2305,8 +2305,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = { */ #define AES_ENC_TEST_VECTORS 3 #define AES_DEC_TEST_VECTORS 3 -#define AES_CBC_ENC_TEST_VECTORS 2 -#define AES_CBC_DEC_TEST_VECTORS 2 +#define AES_CBC_ENC_TEST_VECTORS 4 +#define AES_CBC_DEC_TEST_VECTORS 4 #define AES_LRW_ENC_TEST_VECTORS 8 #define AES_LRW_DEC_TEST_VECTORS 8 #define AES_XTS_ENC_TEST_VECTORS 4 @@ -2418,6 +2418,57 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = { 0x75, 0x86, 0x60, 0x2d, 0x25, 0x3c, 0xff, 0xf9, 0x1b, 0x82, 0x66, 0xbe, 0xa6, 0xd6, 0x1a, 0xb1 }, .rlen = 32, + }, { /* From NIST SP800-38A */ + .key = { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52, + 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5, + 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b }, + .klen = 24, + .iv = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }, + .input = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, + 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a, + 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c, + 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51, + 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11, + 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef, + 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17, + 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 }, + .ilen = 64, + .result = { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d, + 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8, + 0xb4, 0xd9, 0xad, 0xa9, 0xad, 0x7d, 0xed, 0xf4, + 0xe5, 0xe7, 0x38, 0x76, 0x3f, 0x69, 0x14, 0x5a, + 0x57, 0x1b, 0x24, 0x20, 0x12, 0xfb, 0x7a, 0xe0, + 0x7f, 0xa9, 0xba, 0xac, 0x3d, 0xf1, 0x02, 0xe0, + 0x08, 0xb0, 0xe2, 0x79, 0x88, 0x59, 0x88, 0x81, + 0xd9, 0x20, 0xa9, 0xe6, 0x4f, 0x56, 0x15, 0xcd }, + .rlen = 64, + }, { + .key = { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, + 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81, + 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, + 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 }, + .klen = 32, + .iv = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }, + .input = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, + 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a, + 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c, + 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51, + 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11, + 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef, + 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17, + 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 }, + .ilen = 64, + .result = { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba, + 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6, + 0x9c, 0xfc, 0x4e, 0x96, 0x7e, 0xdb, 0x80, 0x8d, + 0x67, 0x9f, 0x77, 0x7b, 0xc6, 0x70, 0x2c, 0x7d, + 0x39, 0xf2, 0x33, 0x69, 0xa9, 0xd9, 0xba, 0xcf, + 0xa5, 0x30, 0xe2, 0x63, 0x04, 0x23, 0x14, 0x61, + 0xb2, 0xeb, 0x05, 0xe2, 0xc3, 0x9b, 0xe9, 0xfc, + 0xda, 0x6c, 0x19, 0x07, 0x8c, 0x6a, 0x9d, 0x1b }, + .rlen = 64, }, }; @@ -2449,6 +2500,57 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, .rlen = 32, + }, { /* From NIST SP800-38A */ + .key = { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52, + 
0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5, + 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b }, + .klen = 24, + .iv = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }, + .input = { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d, + 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8, + 0xb4, 0xd9, 0xad, 0xa9, 0xad, 0x7d, 0xed, 0xf4, + 0xe5, 0xe7, 0x38, 0x76, 0x3f, 0x69, 0x14, 0x5a, + 0x57, 0x1b, 0x24, 0x20, 0x12, 0xfb, 0x7a, 0xe0, + 0x7f, 0xa9, 0xba, 0xac, 0x3d, 0xf1, 0x02, 0xe0, + 0x08, 0xb0, 0xe2, 0x79, 0x88, 0x59, 0x88, 0x81, + 0xd9, 0x20, 0xa9, 0xe6, 0x4f, 0x56, 0x15, 0xcd }, + .ilen = 64, + .result = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, + 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a, + 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c, + 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51, + 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11, + 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef, + 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17, + 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 }, + .rlen = 64, + }, { + .key = { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, + 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81, + 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, + 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 }, + .klen = 32, + .iv = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }, + .input = { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba, + 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6, + 0x9c, 0xfc, 0x4e, 0x96, 0x7e, 0xdb, 0x80, 0x8d, + 0x67, 0x9f, 0x77, 0x7b, 0xc6, 0x70, 0x2c, 0x7d, + 0x39, 0xf2, 0x33, 0x69, 0xa9, 0xd9, 0xba, 0xcf, + 0xa5, 0x30, 0xe2, 0x63, 0x04, 0x23, 0x14, 0x61, + 0xb2, 0xeb, 0x05, 0xe2, 0xc3, 0x9b, 0xe9, 0xfc, + 0xda, 0x6c, 0x19, 0x07, 0x8c, 0x6a, 0x9d, 0x1b }, + .ilen = 64, + .result = { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, + 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a, + 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c, + 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51, + 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11, + 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef, + 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17, + 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 }, + .rlen = 64, }, }; -- cgit v1.1 From d2456c66236c15d6462f1ac751cdbd48a34e9704 Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Fri, 30 Nov 2007 16:36:57 +1100 Subject: [CRYPTO] geode: do not copy the IV too often There is no reason to keep the IV in the private structure.
Instead keep just a pointer to make the patch smaller :) This also removes a few memcpy()s. Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- drivers/crypto/geode-aes.c | 6 ++---- drivers/crypto/geode-aes.h | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 0ca92d4..68be7d08 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -315,7 +315,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc, blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); - memcpy(op->iv, walk.iv, AES_IV_LENGTH); + op->iv = walk.iv; while((nbytes = walk.nbytes)) { op->src = walk.src.virt.addr, @@ -330,7 +330,6 @@ geode_cbc_decrypt(struct blkcipher_desc *desc, err = blkcipher_walk_done(desc, &walk, nbytes); } - memcpy(walk.iv, op->iv, AES_IV_LENGTH); return err; } @@ -348,7 +347,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc, blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); - memcpy(op->iv, walk.iv, AES_IV_LENGTH); + op->iv = walk.iv; while((nbytes = walk.nbytes)) { op->src = walk.src.virt.addr, @@ -362,7 +361,6 @@ geode_cbc_encrypt(struct blkcipher_desc *desc, err = blkcipher_walk_done(desc, &walk, nbytes); } - memcpy(walk.iv, op->iv, AES_IV_LENGTH); return err; } diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h index 14cc763..f1855b5 100644 --- a/drivers/crypto/geode-aes.h +++ b/drivers/crypto/geode-aes.h @@ -65,7 +65,7 @@ struct geode_aes_op { int len; u8 key[AES_KEY_LENGTH]; - u8 iv[AES_IV_LENGTH]; + u8 *iv; union { struct crypto_blkcipher *blk; -- cgit v1.1 From a10e11946bb2d59516a5252b1b588e2963a13ebe Mon Sep 17 00:00:00 2001 From: Denis Cheng Date: Fri, 30 Nov 2007 16:59:30 +1100 Subject: [CRYPTO] tcrypt: Use print_hex_dump from linux/kernel.h These utilities implemented in lib/hexdump.c are handier; please use them. Signed-off-by: Denis Cheng Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 71dc02a..387d105 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -89,10 +89,9 @@ static char *check[] = { static void hexdump(unsigned char *buf, unsigned int len) { - while (len--) - printk("%02x", *buf++); - - printk("\n"); + print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, + 16, 1, + buf, len, false); } static void tcrypt_complete(struct crypto_async_request *req, int err) -- cgit v1.1 From b0c3e75d857f3785a4b274e26b1c0b2327580dda Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Sat, 1 Dec 2007 12:47:37 +1100 Subject: [CRYPTO] aes_s390: Add fallback driver Some CPUs support only 128 bit keys in HW. This patch adds SW fallback support for the other keys which may be required. The generic algorithm (and the block mode) must be available in case of a fallback. Signed-off-by: Sebastian Siewior Signed-off-by: Jan Glauber Signed-off-by: Herbert Xu --- arch/s390/crypto/aes_s390.c | 226 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 205 insertions(+), 21 deletions(-) diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 812511b..8524611 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -6,6 +6,7 @@ * s390 Version: * Copyright IBM Corp.
2005,2007 * Author(s): Jan Glauber (jang@de.ibm.com) + * Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback * * Derived from "crypto/aes_generic.c" * @@ -18,6 +19,7 @@ #include #include +#include #include #include #include "crypt_s390.h" @@ -34,45 +36,89 @@ struct s390_aes_ctx { long enc; long dec; int key_len; + union { + struct crypto_blkcipher *blk; + struct crypto_cipher *cip; + } fallback; }; -static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len) +/* + * Check if the key_len is supported by the HW. + * Returns 0 if it is, a positive number if it is not and software fallback is + * required or a negative number in case the key size is not valid + */ +static int need_fallback(unsigned int key_len) { - struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); - u32 *flags = &tfm->crt_flags; - switch (key_len) { case 16: if (!(keylen_flag & AES_KEYLEN_128)) - goto fail; + return 1; break; case 24: if (!(keylen_flag & AES_KEYLEN_192)) - goto fail; - + return 1; break; case 32: if (!(keylen_flag & AES_KEYLEN_256)) - goto fail; + return 1; break; default: - goto fail; + return -1; break; } + return 0; +} + +static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + int ret; + + sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; + sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags & + CRYPTO_TFM_REQ_MASK); + + ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len); + if (ret) { + tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; + tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags & + CRYPTO_TFM_RES_MASK); + } + return ret; +} + +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; + int ret; + + ret = need_fallback(key_len); + if (ret < 0) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } sctx->key_len = key_len; - memcpy(sctx->key, in_key, key_len); - return 0; -fail: - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; + if (!ret) { + memcpy(sctx->key, in_key, key_len); + return 0; + } + + return setkey_fallback_cip(tfm, in_key, key_len); } static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + if (unlikely(need_fallback(sctx->key_len))) { + crypto_cipher_encrypt_one(sctx->fallback.cip, out, in); + return; + } + switch (sctx->key_len) { case 16: crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, @@ -93,6 +139,11 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + if (unlikely(need_fallback(sctx->key_len))) { + crypto_cipher_decrypt_one(sctx->fallback.cip, out, in); + return; + } + switch (sctx->key_len) { case 16: crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, @@ -109,6 +160,29 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) } } +static int fallback_init_cip(struct crypto_tfm *tfm) +{ + const char *name = tfm->__crt_alg->cra_name; + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + + sctx->fallback.cip = crypto_alloc_cipher(name, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(sctx->fallback.cip)) { + printk(KERN_ERR "Error allocating fallback algo %s\n", name); + return PTR_ERR(sctx->fallback.cip); + } + + return 0; +} + +static void fallback_exit_cip(struct crypto_tfm *tfm) +{ + struct s390_aes_ctx *sctx
= crypto_tfm_ctx(tfm); + + crypto_free_cipher(sctx->fallback.cip); + sctx->fallback.cip = NULL; +} static struct crypto_alg aes_alg = { .cra_name = "aes", @@ -120,6 +194,8 @@ static struct crypto_alg aes_alg = { .cra_ctxsize = sizeof(struct s390_aes_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), + .cra_init = fallback_init_cip, + .cra_exit = fallback_exit_cip, .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, @@ -131,10 +207,76 @@ static struct crypto_alg aes_alg = { } }; +static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key, + unsigned int len) +{ + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + unsigned int ret; + + sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; + sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags & + CRYPTO_TFM_REQ_MASK); + + ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len); + if (ret) { + tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; + tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags & + CRYPTO_TFM_RES_MASK); + } + return ret; +} + +static int fallback_blk_dec(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + unsigned int ret; + struct crypto_blkcipher *tfm; + struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + + memcpy(crypto_blkcipher_crt(sctx->fallback.blk)->iv, desc->info, + AES_BLOCK_SIZE); + + tfm = desc->tfm; + desc->tfm = sctx->fallback.blk; + + ret = crypto_blkcipher_decrypt(desc, dst, src, nbytes); + + desc->tfm = tfm; + return ret; +} + +static int fallback_blk_enc(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + unsigned int ret; + struct crypto_blkcipher *tfm; + struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + + memcpy(crypto_blkcipher_crt(sctx->fallback.blk)->iv, desc->info, + AES_BLOCK_SIZE); + + tfm = desc->tfm; + desc->tfm = sctx->fallback.blk; + + ret = crypto_blkcipher_encrypt(desc, dst, src, nbytes); + + desc->tfm = tfm; + return ret; +} + static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + int ret; + + ret = need_fallback(key_len); + if (ret > 0) { + sctx->key_len = key_len; + return setkey_fallback_blk(tfm, in_key, key_len); + } switch (key_len) { case 16: @@ -183,6 +325,9 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc, struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; + if (unlikely(need_fallback(sctx->key_len))) + return fallback_blk_enc(desc, dst, src, nbytes); + blkcipher_walk_init(&walk, dst, src, nbytes); return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk); } @@ -194,10 +339,37 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc, struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; + if (unlikely(need_fallback(sctx->key_len))) + return fallback_blk_dec(desc, dst, src, nbytes); + blkcipher_walk_init(&walk, dst, src, nbytes); return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk); } +static int fallback_init_blk(struct crypto_tfm *tfm) +{ + const char *name = tfm->__crt_alg->cra_name; + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + + sctx->fallback.blk = crypto_alloc_blkcipher(name, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + + if (IS_ERR(sctx->fallback.blk)) { + printk(KERN_ERR "Error allocating fallback algo %s\n", name); + return PTR_ERR(sctx->fallback.blk); + } + + return 0; +} + +static void 
fallback_exit_blk(struct crypto_tfm *tfm) +{ + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + + crypto_free_blkcipher(sctx->fallback.blk); + sctx->fallback.blk = NULL; +} + static struct crypto_alg ecb_aes_alg = { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-s390", @@ -209,6 +381,8 @@ static struct crypto_alg ecb_aes_alg = { .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list), + .cra_init = fallback_init_blk, + .cra_exit = fallback_exit_blk, .cra_u = { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, @@ -224,6 +398,13 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + int ret; + + ret = need_fallback(key_len); + if (ret > 0) { + sctx->key_len = key_len; + return setkey_fallback_blk(tfm, in_key, key_len); + } switch (key_len) { case 16: @@ -278,6 +459,9 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; + if (unlikely(need_fallback(sctx->key_len))) + return fallback_blk_enc(desc, dst, src, nbytes); + blkcipher_walk_init(&walk, dst, src, nbytes); return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk); } @@ -289,6 +473,9 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; + if (unlikely(need_fallback(sctx->key_len))) + return fallback_blk_dec(desc, dst, src, nbytes); + blkcipher_walk_init(&walk, dst, src, nbytes); return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk); } @@ -304,6 +491,8 @@ static struct crypto_alg cbc_aes_alg = { .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list), + .cra_init = fallback_init_blk, + .cra_exit = fallback_exit_blk, .cra_u = { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, @@ -331,14 +520,10 @@ static int __init aes_init(void) return -EOPNOTSUPP; /* z9 109 and z9 BC/EC only support 128 bit key length */ - if (keylen_flag == AES_KEYLEN_128) { - aes_alg.cra_u.cipher.cia_max_keysize = AES_MIN_KEY_SIZE; - ecb_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE; - cbc_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE; + if (keylen_flag == AES_KEYLEN_128) printk(KERN_INFO "aes_s390: hardware acceleration only available for" "128 bit keys\n"); - } ret = crypto_register_alg(&aes_alg); if (ret) @@ -377,4 +562,3 @@ MODULE_ALIAS("aes"); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); MODULE_LICENSE("GPL"); - -- cgit v1.1 From 9aa6ad3e87af58b4fc87bd4beef1d4c6d9a4c5b7 Mon Sep 17 00:00:00 2001 From: Kamalesh Babulal Date: Sat, 1 Dec 2007 12:52:35 +1100 Subject: [HWRNG] pasemi: Add missing wait argument to data_present drivers/char/hw_random/pasemi-rng.c: In function `pasemi_rng_data_present': drivers/char/hw_random/pasemi-rng.c:53: error: `wait' undeclared (first use in this function) drivers/char/hw_random/pasemi-rng.c:53: error: (Each undeclared identifier is reported only once drivers/char/hw_random/pasemi-rng.c:53: error: for each function it appears in.) 
drivers/char/hw_random/pasemi-rng.c: At top level: drivers/char/hw_random/pasemi-rng.c:93: warning: initialization from incompatible pointer type Signed-off-by: Kamalesh Babulal Signed-off-by: Andrew Morton Signed-off-by: Herbert Xu --- drivers/char/hw_random/pasemi-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c index 621adf2..e2ea210 100644 --- a/drivers/char/hw_random/pasemi-rng.c +++ b/drivers/char/hw_random/pasemi-rng.c @@ -42,7 +42,7 @@ #define MODULE_NAME "pasemi_rng" -static int pasemi_rng_data_present(struct hwrng *rng) +static int pasemi_rng_data_present(struct hwrng *rng, int wait) { void __iomem *rng_regs = (void __iomem *)rng->priv; int data, i; -- cgit v1.1 From e29bc6ad0e84e3157e0f49130a15b278cb232c72 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 22 Nov 2007 22:46:40 +0800 Subject: [CRYPTO] authenc: Use or instead of max on alignment masks Since alignment masks are always one less than a power of two, we can use binary or to find their maximum; for example, 0x3 | 0x7 = 0x7, which is exactly max(0x3, 0x7). Signed-off-by: Herbert Xu --- crypto/authenc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/authenc.c b/crypto/authenc.c index bc4e608..66fb2aa 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -333,7 +333,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority; inst->alg.cra_blocksize = enc->cra_blocksize; - inst->alg.cra_alignmask = max(auth->cra_alignmask, enc->cra_alignmask); + inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask; inst->alg.cra_type = &crypto_aead_type; inst->alg.cra_aead.ivsize = enc->cra_blkcipher.ivsize; -- cgit v1.1 From 7ba683a6deba70251756aa5a021cdaa5c875a7a2 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 2 Dec 2007 18:49:21 +1100 Subject: [CRYPTO] aead: Make authsize a run-time parameter As it is, authsize is an algorithm parameter which cannot be changed at run-time. This is inconvenient because hardware that implements such algorithms would have to register each authsize it supports separately. Since authsize is a property common to all AEAD algorithms, we can add a function setauthsize that sets it at run-time, just like setkey. This patch does exactly that and also changes authenc so that authsize is no longer a parameter of its template.
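(Illustration only, not part of this patch: a minimal sketch of what the new call enables; "gcm(aes)" and the 12-byte ICV are example choices, not requirements.)

#include <linux/crypto.h>
#include <linux/err.h>

static struct crypto_aead *alloc_truncated_aead(void)
{
        struct crypto_aead *tfm;
        int err;

        tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (IS_ERR(tfm))
                return tfm;

        /* Pick a truncated ICV at run-time; this fails if the
         * requested size exceeds the algorithm's maxauthsize. */
        err = crypto_aead_setauthsize(tfm, 12);
        if (err) {
                crypto_free_aead(tfm);
                return ERR_PTR(err);
        }

        return tfm;
}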
Signed-off-by: Herbert Xu --- crypto/aead.c | 24 +++++++++++++++++++++--- crypto/authenc.c | 39 ++++++++++++--------------------------- crypto/gcm.c | 2 +- include/linux/crypto.h | 5 ++++- 4 files changed, 38 insertions(+), 32 deletions(-) diff --git a/crypto/aead.c b/crypto/aead.c index 84a3501..f23c2b0 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -53,6 +53,24 @@ static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) return aead->setkey(tfm, key, keylen); } +int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + int err; + + if (authsize > crypto_aead_alg(tfm)->maxauthsize) + return -EINVAL; + + if (crypto_aead_alg(tfm)->setauthsize) { + err = crypto_aead_alg(tfm)->setauthsize(tfm, authsize); + if (err) + return err; + } + + crypto_aead_crt(tfm)->authsize = authsize; + return 0; +} +EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); + static unsigned int crypto_aead_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) { @@ -64,14 +82,14 @@ static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask) struct aead_alg *alg = &tfm->__crt_alg->cra_aead; struct aead_tfm *crt = &tfm->crt_aead; - if (max(alg->authsize, alg->ivsize) > PAGE_SIZE / 8) + if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) return -EINVAL; crt->setkey = setkey; crt->encrypt = alg->encrypt; crt->decrypt = alg->decrypt; crt->ivsize = alg->ivsize; - crt->authsize = alg->authsize; + crt->authsize = alg->maxauthsize; return 0; } @@ -85,7 +103,7 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "type : aead\n"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "ivsize : %u\n", aead->ivsize); - seq_printf(m, "authsize : %u\n", aead->authsize); + seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); } const struct crypto_type crypto_aead_type = { diff --git a/crypto/authenc.c b/crypto/authenc.c index 66fb2aa..5df5fb1 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -24,7 +24,6 @@ struct authenc_instance_ctx { struct crypto_spawn auth; struct crypto_spawn enc; - unsigned int authsize; unsigned int enckeylen; }; @@ -76,8 +75,6 @@ out: static int crypto_authenc_hash(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); - struct authenc_instance_ctx *ictx = - crypto_instance_ctx(crypto_aead_alg_instance(authenc)); struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); struct crypto_hash *auth = ctx->auth; struct hash_desc desc = { @@ -111,7 +108,8 @@ auth_unlock: if (err) return err; - scatterwalk_map_and_copy(hash, dst, cryptlen, ictx->authsize, 1); + scatterwalk_map_and_copy(hash, dst, cryptlen, + crypto_aead_authsize(authenc), 1); return 0; } @@ -147,8 +145,6 @@ static int crypto_authenc_encrypt(struct aead_request *req) static int crypto_authenc_verify(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); - struct authenc_instance_ctx *ictx = - crypto_instance_ctx(crypto_aead_alg_instance(authenc)); struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); struct crypto_hash *auth = ctx->auth; struct hash_desc desc = { @@ -186,7 +182,7 @@ auth_unlock: if (err) return err; - authsize = ictx->authsize; + authsize = crypto_aead_authsize(authenc); scatterwalk_map_and_copy(ihash, src, cryptlen, authsize, 0); return memcmp(ihash, ohash, authsize) ? 
-EINVAL : 0; } @@ -224,18 +220,12 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_hash *auth; struct crypto_ablkcipher *enc; - unsigned int digestsize; int err; auth = crypto_spawn_hash(&ictx->auth); if (IS_ERR(auth)) return PTR_ERR(auth); - err = -EINVAL; - digestsize = crypto_hash_digestsize(auth); - if (ictx->authsize > digestsize) - goto err_free_hash; - enc = crypto_spawn_ablkcipher(&ictx->enc); err = PTR_ERR(enc); if (IS_ERR(enc)) @@ -246,7 +236,7 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) tfm->crt_aead.reqsize = max_t(unsigned int, (crypto_hash_alignmask(auth) & ~(crypto_tfm_ctx_alignment() - 1)) + - digestsize * 2, + crypto_hash_digestsize(auth) * 2, sizeof(struct ablkcipher_request) + crypto_ablkcipher_reqsize(enc)); @@ -273,7 +263,6 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) struct crypto_alg *auth; struct crypto_alg *enc; struct authenc_instance_ctx *ctx; - unsigned int authsize; unsigned int enckeylen; int err; @@ -286,18 +275,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) if (IS_ERR(auth)) return ERR_PTR(PTR_ERR(auth)); - err = crypto_attr_u32(tb[2], &authsize); - inst = ERR_PTR(err); - if (err) - goto out_put_auth; - - enc = crypto_attr_alg(tb[3], CRYPTO_ALG_TYPE_BLKCIPHER, + enc = crypto_attr_alg(tb[2], CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_BLKCIPHER_MASK); inst = ERR_PTR(PTR_ERR(enc)); if (IS_ERR(enc)) goto out_put_auth; - err = crypto_attr_u32(tb[4], &enckeylen); + err = crypto_attr_u32(tb[3], &enckeylen); if (err) goto out_put_enc; @@ -308,18 +292,17 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, - "authenc(%s,%u,%s,%u)", auth->cra_name, authsize, + "authenc(%s,%s,%u)", auth->cra_name, enc->cra_name, enckeylen) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "authenc(%s,%u,%s,%u)", auth->cra_driver_name, - authsize, enc->cra_driver_name, enckeylen) >= + "authenc(%s,%s,%u)", auth->cra_driver_name, + enc->cra_driver_name, enckeylen) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; ctx = crypto_instance_ctx(inst); - ctx->authsize = authsize; ctx->enckeylen = enckeylen; err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK); @@ -337,7 +320,9 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) inst->alg.cra_type = &crypto_aead_type; inst->alg.cra_aead.ivsize = enc->cra_blkcipher.ivsize; - inst->alg.cra_aead.authsize = authsize; + inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ? 
+ auth->cra_hash.digestsize : + auth->cra_digest.dia_digestsize; inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); diff --git a/crypto/gcm.c b/crypto/gcm.c index ad8b8b9..5681c79 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -414,7 +414,7 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) inst->alg.cra_alignmask = __alignof__(u32) - 1; inst->alg.cra_type = &crypto_aead_type; inst->alg.cra_aead.ivsize = 12; - inst->alg.cra_aead.authsize = 16; + inst->alg.cra_aead.maxauthsize = 16; inst->alg.cra_ctxsize = sizeof(struct crypto_gcm_ctx); inst->alg.cra_init = crypto_gcm_init_tfm; inst->alg.cra_exit = crypto_gcm_exit_tfm; diff --git a/include/linux/crypto.h b/include/linux/crypto.h index f56ae87..48aa595 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -187,11 +187,12 @@ struct ablkcipher_alg { struct aead_alg { int (*setkey)(struct crypto_aead *tfm, const u8 *key, unsigned int keylen); + int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); int (*encrypt)(struct aead_request *req); int (*decrypt)(struct aead_request *req); unsigned int ivsize; - unsigned int authsize; + unsigned int maxauthsize; }; struct blkcipher_alg { @@ -754,6 +755,8 @@ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, return crypto_aead_crt(tfm)->setkey(tfm, key, keylen); } +int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); + static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) { return __crypto_aead_cast(req->base.tfm); -- cgit v1.1 From e236d4a89a2ffbc8aa18064161f4f159c4d89b4a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 22 Nov 2007 23:11:53 +0800 Subject: [CRYPTO] authenc: Move enckeylen into key itself Having enckeylen as a template parameter makes it a pain for hardware devices that implement ciphers with many key sizes since each one would have to be registered separately. Since the authenc algorithm is mainly used for legacy purposes where its key is going to be constructed out of two separate keys, we can in fact embed this value into the key itself. This patch does this by prepending an rtnetlink header to the key that contains the encryption key length. 
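(Illustration only, not part of this patch: a sketch of how a caller assembles the combined key that crypto_authenc_setkey() below unpacks, using the definitions from the new include/crypto/authenc.h; the helper name is an assumption, and buf must be large enough for the whole blob.)

#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <crypto/authenc.h>

/* Layout: | rtattr | crypto_authenc_key_param | authkey | enckey |.
 * Returns the total length of the packed key. */
static unsigned int authenc_pack_key(u8 *buf, const u8 *authkey,
                                     unsigned int authkeylen,
                                     const u8 *enckey, unsigned int enckeylen)
{
        struct rtattr *rta = (struct rtattr *)buf;
        struct crypto_authenc_key_param *param;

        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        param->enckeylen = cpu_to_be32(enckeylen);      /* big endian, as parsed */

        memcpy(buf + RTA_ALIGN(rta->rta_len), authkey, authkeylen);
        memcpy(buf + RTA_ALIGN(rta->rta_len) + authkeylen, enckey, enckeylen);

        return RTA_ALIGN(rta->rta_len) + authkeylen + enckeylen;
}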
Signed-off-by: Herbert Xu --- crypto/authenc.c | 49 ++++++++++++++++++++++++++++-------------------- include/crypto/authenc.h | 27 ++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 20 deletions(-) create mode 100644 include/crypto/authenc.h diff --git a/crypto/authenc.c b/crypto/authenc.c index 5df5fb1..a61dea1 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -11,10 +11,12 @@ */ #include +#include #include #include #include #include +#include #include #include @@ -23,8 +25,6 @@ struct authenc_instance_ctx { struct crypto_spawn auth; struct crypto_spawn enc; - - unsigned int enckeylen; }; struct crypto_authenc_ctx { @@ -36,19 +36,31 @@ struct crypto_authenc_ctx { static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, unsigned int keylen) { - struct authenc_instance_ctx *ictx = - crypto_instance_ctx(crypto_aead_alg_instance(authenc)); - unsigned int enckeylen = ictx->enckeylen; unsigned int authkeylen; + unsigned int enckeylen; struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); struct crypto_hash *auth = ctx->auth; struct crypto_ablkcipher *enc = ctx->enc; + struct rtattr *rta = (void *)key; + struct crypto_authenc_key_param *param; int err = -EINVAL; - if (keylen < enckeylen) { - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); - goto out; - } + if (keylen < sizeof(*rta)) + goto badkey; + if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) + goto badkey; + if (RTA_PAYLOAD(rta) < sizeof(*param)) + goto badkey; + + param = RTA_DATA(rta); + enckeylen = be32_to_cpu(param->enckeylen); + + key += RTA_ALIGN(rta->rta_len); + keylen -= RTA_ALIGN(rta->rta_len); + + if (keylen < enckeylen) + goto badkey; + authkeylen = keylen - enckeylen; crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); @@ -70,6 +82,10 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, out: return err; + +badkey: + crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); + goto out; } static int crypto_authenc_hash(struct aead_request *req) @@ -263,7 +279,6 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) struct crypto_alg *auth; struct crypto_alg *enc; struct authenc_instance_ctx *ctx; - unsigned int enckeylen; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD); @@ -281,10 +296,6 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) if (IS_ERR(enc)) goto out_put_auth; - err = crypto_attr_u32(tb[3], &enckeylen); - if (err) - goto out_put_enc; - inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); err = -ENOMEM; if (!inst) @@ -292,18 +303,16 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, - "authenc(%s,%s,%u)", auth->cra_name, - enc->cra_name, enckeylen) >= CRYPTO_MAX_ALG_NAME) + "authenc(%s,%s)", auth->cra_name, enc->cra_name) >= + CRYPTO_MAX_ALG_NAME) goto err_free_inst; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "authenc(%s,%s,%u)", auth->cra_driver_name, - enc->cra_driver_name, enckeylen) >= - CRYPTO_MAX_ALG_NAME) + "authenc(%s,%s)", auth->cra_driver_name, + enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; ctx = crypto_instance_ctx(inst); - ctx->enckeylen = enckeylen; err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK); if (err) diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h new file mode 100644 index 0000000..e47b044 --- /dev/null +++ b/include/crypto/authenc.h @@ -0,0 +1,27 @@ +/* + * Authenc: Simple AEAD 
wrapper for IPsec + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_AUTHENC_H +#define _CRYPTO_AUTHENC_H + +#include + +enum { + CRYPTO_AUTHENC_KEYA_UNSPEC, + CRYPTO_AUTHENC_KEYA_PARAM, +}; + +struct crypto_authenc_key_param { + __be32 enckeylen; +}; + +#endif /* _CRYPTO_AUTHENC_H */ + -- cgit v1.1 From 481f34ae752ac74c4cbd88a9954dd4ed10e84f81 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 4 Dec 2007 20:04:21 +1100 Subject: [CRYPTO] authenc: Fix hash verification The previous code incorrectly included the hash in the verification which also meant that we'd crash and burn when it comes to actually verifying the hash since we'd go past the end of the SG list. This patch fixes that by subtracting authsize from cryptlen at the start. Signed-off-by: Herbert Xu --- crypto/authenc.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/crypto/authenc.c b/crypto/authenc.c index a61dea1..82e03ff 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -158,7 +158,8 @@ static int crypto_authenc_encrypt(struct aead_request *req) return crypto_authenc_hash(req); } -static int crypto_authenc_verify(struct aead_request *req) +static int crypto_authenc_verify(struct aead_request *req, + unsigned int cryptlen) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); @@ -170,7 +171,6 @@ static int crypto_authenc_verify(struct aead_request *req) u8 *ohash = aead_request_ctx(req); u8 *ihash; struct scatterlist *src = req->src; - unsigned int cryptlen = req->cryptlen; unsigned int authsize; int err; @@ -214,16 +214,22 @@ static int crypto_authenc_decrypt(struct aead_request *req) struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); struct ablkcipher_request *abreq = aead_request_ctx(req); + unsigned int cryptlen = req->cryptlen; + unsigned int authsize = crypto_aead_authsize(authenc); int err; - err = crypto_authenc_verify(req); + if (cryptlen < authsize) + return -EINVAL; + cryptlen -= authsize; + + err = crypto_authenc_verify(req, cryptlen); if (err) return err; ablkcipher_request_set_tfm(abreq, ctx->enc); ablkcipher_request_set_callback(abreq, aead_request_flags(req), crypto_authenc_decrypt_done, req); - ablkcipher_request_set_crypt(abreq, req->src, req->dst, req->cryptlen, + ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, req->iv); return crypto_ablkcipher_decrypt(abreq); -- cgit v1.1 From 8df213d9b520a4b58b7a8f7f2200324d4e40363d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 2 Dec 2007 14:55:47 +1100 Subject: [CRYPTO] tcrypt: Make gcm available as a standalone test Currently the gcm(aes) tests have to be taken together with all other ciphers. This patch makes it available by itself at number 35. 
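(Usage note, inferred from how tcrypt is normally driven rather than stated in the patch: the standalone test runs when the module is loaded with the matching mode parameter, e.g. "modprobe tcrypt mode=35", which exercises both the gcm(aes) encryption and decryption vectors.)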
Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 387d105..df93595 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1548,6 +1548,13 @@ static void do_test(void) SALSA20_STREAM_ENC_TEST_VECTORS); break; + case 35: + test_aead("gcm(aes)", ENCRYPT, aes_gcm_enc_tv_template, + AES_GCM_ENC_TEST_VECTORS); + test_aead("gcm(aes)", DECRYPT, aes_gcm_dec_tv_template, + AES_GCM_DEC_TEST_VECTORS); + break; + case 100: test_hash("hmac(md5)", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); -- cgit v1.1 From 6160b289929c0b622e64aa36106d8e6e53fcd826 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 4 Dec 2007 19:17:50 +1100 Subject: [CRYPTO] gcm: Fix ICV handling The crypto_aead convention for ICVs is to include it directly in the output. If we decided to change this in future then we would make the ICV (if the algorithm has an explicit one) available in the request itself. For now no algorithm needs this so this patch changes gcm to conform to this convention. It also adjusts the tcrypt aead tests to take this into account. Signed-off-by: Herbert Xu --- crypto/gcm.c | 68 +++++++++++++++++------------ crypto/tcrypt.c | 44 +++++++------------ crypto/tcrypt.h | 130 +++++++++++++++++++++++++------------------------------- 3 files changed, 113 insertions(+), 129 deletions(-) diff --git a/crypto/gcm.c b/crypto/gcm.c index 5681c79..ed8a626 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -36,6 +36,7 @@ struct crypto_gcm_ghash_ctx { struct crypto_gcm_req_priv_ctx { u8 auth_tag[16]; + u8 iauth_tag[16]; u8 counter[16]; struct crypto_gcm_ghash_ctx ghash; }; @@ -89,6 +90,9 @@ static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx, u8 *src; int n; + if (!len) + return; + scatterwalk_start(&walk, sg); while (len) { @@ -211,9 +215,10 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, } static int crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, - struct aead_request *req, - void (*done)(struct crypto_async_request *, - int)) + struct aead_request *req, + unsigned int cryptlen, + void (*done)(struct crypto_async_request *, + int)) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); @@ -228,7 +233,7 @@ static int crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, ablkcipher_request_set_callback(ablk_req, aead_request_flags(req), done, req); ablkcipher_request_set_crypt(ablk_req, req->src, req->dst, - req->cryptlen, counter); + cryptlen, counter); err = crypto_gcm_encrypt_counter(aead, auth_tag, 0, req->iv); if (err) @@ -239,18 +244,16 @@ static int crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, crypto_gcm_ghash_init(ghash, flags, ctx->gf128); - if (req->assoclen) { - crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen); - crypto_gcm_ghash_flush(ghash); - } + crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen); + crypto_gcm_ghash_flush(ghash); out: return err; } -static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) +static int crypto_gcm_hash(struct aead_request *req) { - struct aead_request *req = areq->data; + struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); u8 *auth_tag = pctx->auth_tag; struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; @@ -259,18 +262,28 @@ static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) crypto_gcm_ghash_final_xor(ghash, req->assoclen, 
req->cryptlen, auth_tag); + scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen, + crypto_aead_authsize(aead), 1); + return 0; +} + +static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) +{ + struct aead_request *req = areq->data; + + if (!err) + err = crypto_gcm_hash(req); + aead_request_complete(req, err); } static int crypto_gcm_encrypt(struct aead_request *req) { struct ablkcipher_request abreq; - struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); - u8 *auth_tag = pctx->auth_tag; - struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; int err = 0; - err = crypto_gcm_init_crypt(&abreq, req, crypto_gcm_encrypt_done); + err = crypto_gcm_init_crypt(&abreq, req, req->cryptlen, + crypto_gcm_encrypt_done); if (err) return err; @@ -278,14 +291,9 @@ static int crypto_gcm_encrypt(struct aead_request *req) err = crypto_ablkcipher_encrypt(&abreq); if (err) return err; - - crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen); } - crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen, - auth_tag); - - return err; + return crypto_gcm_hash(req); } static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err) @@ -296,25 +304,29 @@ static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err) static int crypto_gcm_decrypt(struct aead_request *req) { struct ablkcipher_request abreq; + struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); u8 *auth_tag = pctx->auth_tag; + u8 *iauth_tag = pctx->iauth_tag; struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; - u8 tag[16]; + unsigned int cryptlen = req->cryptlen; + unsigned int authsize = crypto_aead_authsize(aead); int err; - if (!req->cryptlen) + if (cryptlen < authsize) return -EINVAL; + cryptlen -= authsize; - memcpy(tag, auth_tag, 16); - err = crypto_gcm_init_crypt(&abreq, req, crypto_gcm_decrypt_done); + err = crypto_gcm_init_crypt(&abreq, req, cryptlen, + crypto_gcm_decrypt_done); if (err) return err; - crypto_gcm_ghash_update_sg(ghash, req->src, req->cryptlen); - crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen, - auth_tag); + crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen); + crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag); - if (memcmp(tag, auth_tag, 16)) + scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); + if (memcmp(iauth_tag, auth_tag, authsize)) return -EINVAL; return crypto_ablkcipher_decrypt(&abreq); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index df93595..a6d4160 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -235,6 +235,7 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, struct scatterlist asg[8]; const char *e; struct tcrypt_result result; + unsigned int authsize; if (enc == ENCRYPT) e = "encryption"; @@ -265,6 +266,8 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, return; } + authsize = crypto_aead_authsize(tfm); + req = aead_request_alloc(tfm, GFP_KERNEL); if (!req) { printk(KERN_INFO "failed to allocate request for %s\n", algo); @@ -296,7 +299,7 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, } sg_init_one(&sg[0], aead_tv[i].input, - aead_tv[i].ilen); + aead_tv[i].ilen + (enc ? 
authsize : 0)); sg_init_one(&asg[0], aead_tv[i].assoc, aead_tv[i].alen); @@ -307,13 +310,9 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, aead_request_set_assoc(req, asg, aead_tv[i].alen); - if (enc) { - ret = crypto_aead_encrypt(req); - } else { - memcpy(req->__ctx, aead_tv[i].tag, - aead_tv[i].tlen); - ret = crypto_aead_decrypt(req); - } + ret = enc ? + crypto_aead_encrypt(req) : + crypto_aead_decrypt(req); switch (ret) { case 0: @@ -335,16 +334,10 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, q = kmap(sg_page(&sg[0])) + sg[0].offset; hexdump(q, aead_tv[i].rlen); - printk(KERN_INFO "auth tag: "); - hexdump((unsigned char *)req->__ctx, aead_tv[i].tlen); printk(KERN_INFO "enc/dec: %s\n", memcmp(q, aead_tv[i].result, aead_tv[i].rlen) ? "fail" : "pass"); - - printk(KERN_INFO "auth tag: %s\n", - memcmp(req->__ctx, aead_tv[i].tag, - aead_tv[i].tlen) ? "fail" : "pass"); } } @@ -381,6 +374,9 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, aead_tv[i].tap[k]); } + if (enc) + sg[k - 1].length += authsize; + sg_init_table(asg, aead_tv[i].anp); for (k = 0, temp = 0; k < aead_tv[i].anp; k++) { memcpy(&axbuf[IDX[k]], @@ -397,13 +393,9 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, aead_request_set_assoc(req, asg, aead_tv[i].alen); - if (enc) { - ret = crypto_aead_encrypt(req); - } else { - memcpy(req->__ctx, aead_tv[i].tag, - aead_tv[i].tlen); - ret = crypto_aead_decrypt(req); - } + ret = enc ? + crypto_aead_encrypt(req) : + crypto_aead_decrypt(req); switch (ret) { case 0: @@ -429,17 +421,13 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, hexdump(q, aead_tv[i].tap[k]); printk(KERN_INFO "%s\n", memcmp(q, aead_tv[i].result + temp, - aead_tv[i].tap[k]) ? + aead_tv[i].tap[k] - + (k < aead_tv[i].np - 1 || enc ? + 0 : authsize)) ? "fail" : "pass"); temp += aead_tv[i].tap[k]; } - printk(KERN_INFO "auth tag: "); - hexdump((unsigned char *)req->__ctx, aead_tv[i].tlen); - - printk(KERN_INFO "auth tag: %s\n", - memcmp(req->__ctx, aead_tv[i].tag, - aead_tv[i].tlen) ? 
"fail" : "pass"); } } diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 05aba22..439e290 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -60,7 +60,6 @@ struct aead_testvec { char input[512]; char assoc[512]; char result[512]; - char tag[128]; unsigned char tap[MAX_TAP]; unsigned char atap[MAX_TAP]; int np; @@ -71,7 +70,6 @@ struct aead_testvec { unsigned short ilen; unsigned short alen; unsigned short rlen; - unsigned short tlen; }; struct cipher_speed { @@ -4682,18 +4680,17 @@ static struct cipher_testvec aes_ctr_dec_tv_template[] = { static struct aead_testvec aes_gcm_enc_tv_template[] = { { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ .klen = 16, - .tag = { 0x58, 0xe2, 0xfc, 0xce, 0xfa, 0x7e, 0x30, 0x61, + .result = { 0x58, 0xe2, 0xfc, 0xce, 0xfa, 0x7e, 0x30, 0x61, 0x36, 0x7f, 0x1d, 0x57, 0xa4, 0xe7, 0x45, 0x5a }, - .tlen = 16 + .rlen = 16, }, { .klen = 16, .ilen = 16, .result = { 0x03, 0x88, 0xda, 0xce, 0x60, 0xb6, 0xa3, 0x92, - 0xf3, 0x28, 0xc2, 0xb9, 0x71, 0xb2, 0xfe, 0x78 }, - .rlen = 16, - .tag = { 0xab, 0x6e, 0x47, 0xd4, 0x2c, 0xec, 0x13, 0xbd, + 0xf3, 0x28, 0xc2, 0xb9, 0x71, 0xb2, 0xfe, 0x78, + 0xab, 0x6e, 0x47, 0xd4, 0x2c, 0xec, 0x13, 0xbd, 0xf5, 0x3a, 0x67, 0xb2, 0x12, 0x57, 0xbd, 0xdf }, - .tlen = 16 + .rlen = 32, }, { .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 }, @@ -4716,11 +4713,10 @@ static struct aead_testvec aes_gcm_enc_tv_template[] = { 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c, 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05, 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97, - 0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85 }, - .rlen = 64, - .tag = { 0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6, + 0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85, + 0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6, 0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 }, - .tlen = 16 + .rlen = 80, }, { .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 }, @@ -4747,25 +4743,23 @@ static struct aead_testvec aes_gcm_enc_tv_template[] = { 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c, 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05, 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97, - 0x3d, 0x58, 0xe0, 0x91 }, - .rlen = 60, - .tag = { 0x5b, 0xc9, 0x4f, 0xbc, 0x32, 0x21, 0xa5, 0xdb, + 0x3d, 0x58, 0xe0, 0x91, + 0x5b, 0xc9, 0x4f, 0xbc, 0x32, 0x21, 0xa5, 0xdb, 0x94, 0xfa, 0xe9, 0x5a, 0xe7, 0x12, 0x1a, 0x47 }, - .tlen = 16 + .rlen = 76, }, { .klen = 24, - .tag = { 0xcd, 0x33, 0xb2, 0x8a, 0xc7, 0x73, 0xf7, 0x4b, + .result = { 0xcd, 0x33, 0xb2, 0x8a, 0xc7, 0x73, 0xf7, 0x4b, 0xa0, 0x0e, 0xd1, 0xf3, 0x12, 0x57, 0x24, 0x35 }, - .tlen = 16 + .rlen = 16, }, { .klen = 24, .ilen = 16, .result = { 0x98, 0xe7, 0x24, 0x7c, 0x07, 0xf0, 0xfe, 0x41, - 0x1c, 0x26, 0x7e, 0x43, 0x84, 0xb0, 0xf6, 0x00 }, - .rlen = 16, - .tag = { 0x2f, 0xf5, 0x8d, 0x80, 0x03, 0x39, 0x27, 0xab, + 0x1c, 0x26, 0x7e, 0x43, 0x84, 0xb0, 0xf6, 0x00, + 0x2f, 0xf5, 0x8d, 0x80, 0x03, 0x39, 0x27, 0xab, 0x8e, 0xf4, 0xd4, 0x58, 0x75, 0x14, 0xf0, 0xfb }, - .tlen = 16 + .rlen = 32, }, { .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, @@ -4789,11 +4783,10 @@ static struct aead_testvec aes_gcm_enc_tv_template[] = { 0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25, 0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47, 0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9, - 0xcc, 0xda, 0x27, 0x10, 0xac, 0xad, 0xe2, 0x56 }, - .rlen = 64, - .tag = { 0x99, 0x24, 0xa7, 0xc8, 
0x58, 0x73, 0x36, 0xbf, + 0xcc, 0xda, 0x27, 0x10, 0xac, 0xad, 0xe2, 0x56, + 0x99, 0x24, 0xa7, 0xc8, 0x58, 0x73, 0x36, 0xbf, 0xb1, 0x18, 0x02, 0x4d, 0xb8, 0x67, 0x4a, 0x14 }, - .tlen = 16 + .rlen = 80, }, { .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, @@ -4821,20 +4814,19 @@ static struct aead_testvec aes_gcm_enc_tv_template[] = { 0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25, 0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47, 0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9, - 0xcc, 0xda, 0x27, 0x10 }, - .rlen = 60, - .tag = { 0x25, 0x19, 0x49, 0x8e, 0x80, 0xf1, 0x47, 0x8f, + 0xcc, 0xda, 0x27, 0x10, + 0x25, 0x19, 0x49, 0x8e, 0x80, 0xf1, 0x47, 0x8f, 0x37, 0xba, 0x55, 0xbd, 0x6d, 0x27, 0x61, 0x8c }, - .tlen = 16, + .rlen = 76, .np = 2, .tap = { 32, 28 }, .anp = 2, .atap = { 8, 12 } }, { .klen = 32, - .tag = { 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, + .result = { 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, 0xa9, 0x63, 0xb4, 0xf1, 0xc4, 0xcb, 0x73, 0x8b }, - .tlen = 16 + .rlen = 16, } }; @@ -4842,12 +4834,11 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ .klen = 32, .input = { 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e, - 0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18 }, - .ilen = 16, - .rlen = 16, - .tag = { 0xd0, 0xd1, 0xc8, 0xa7, 0x99, 0x99, 0x6b, 0xf0, + 0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18, + 0xd0, 0xd1, 0xc8, 0xa7, 0x99, 0x99, 0x6b, 0xf0, 0x26, 0x5b, 0x98, 0xb5, 0xd4, 0x8a, 0xb9, 0x19 }, - .tlen = 16 + .ilen = 32, + .rlen = 16, }, { .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, @@ -4863,8 +4854,10 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { 0x8c, 0xb0, 0x8e, 0x48, 0x59, 0x0d, 0xbb, 0x3d, 0xa7, 0xb0, 0x8b, 0x10, 0x56, 0x82, 0x88, 0x38, 0xc5, 0xf6, 0x1e, 0x63, 0x93, 0xba, 0x7a, 0x0a, - 0xbc, 0xc9, 0xf6, 0x62, 0x89, 0x80, 0x15, 0xad }, - .ilen = 64, + 0xbc, 0xc9, 0xf6, 0x62, 0x89, 0x80, 0x15, 0xad, + 0xb0, 0x94, 0xda, 0xc5, 0xd9, 0x34, 0x71, 0xbd, + 0xec, 0x1a, 0x50, 0x22, 0x70, 0xe3, 0xcc, 0x6c }, + .ilen = 80, .result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, @@ -4874,9 +4867,6 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 }, .rlen = 64, - .tag = { 0xb0, 0x94, 0xda, 0xc5, 0xd9, 0x34, 0x71, 0xbd, - 0xec, 0x1a, 0x50, 0x22, 0x70, 0xe3, 0xcc, 0x6c }, - .tlen = 16 }, { .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, @@ -4892,8 +4882,10 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { 0x8c, 0xb0, 0x8e, 0x48, 0x59, 0x0d, 0xbb, 0x3d, 0xa7, 0xb0, 0x8b, 0x10, 0x56, 0x82, 0x88, 0x38, 0xc5, 0xf6, 0x1e, 0x63, 0x93, 0xba, 0x7a, 0x0a, - 0xbc, 0xc9, 0xf6, 0x62 }, - .ilen = 60, + 0xbc, 0xc9, 0xf6, 0x62, + 0x76, 0xfc, 0x6e, 0xce, 0x0f, 0x4e, 0x17, 0x68, + 0xcd, 0xdf, 0x88, 0x53, 0xbb, 0x2d, 0x55, 0x1b }, + .ilen = 76, .assoc = { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, 0xab, 0xad, 0xda, 0xd2 }, @@ -4907,11 +4899,8 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, 0xba, 0x63, 0x7b, 0x39 }, .rlen = 60, - .tag = { 0x76, 0xfc, 0x6e, 0xce, 0x0f, 0x4e, 0x17, 0x68, - 0xcd, 0xdf, 0x88, 
0x53, 0xbb, 0x2d, 0x55, 0x1b }, - .tlen = 16, .np = 2, - .tap = { 48, 12 }, + .tap = { 48, 28 }, .anp = 3, .atap = { 8, 8, 4 } }, { @@ -4927,8 +4916,10 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c, 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05, 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97, - 0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85 }, - .ilen = 64, + 0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85, + 0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6, + 0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 }, + .ilen = 80, .result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, @@ -4938,9 +4929,6 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 }, .rlen = 64, - .tag = { 0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6, - 0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 }, - .tlen = 16 }, { .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 }, @@ -4954,8 +4942,10 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c, 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05, 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97, - 0x3d, 0x58, 0xe0, 0x91 }, - .ilen = 60, + 0x3d, 0x58, 0xe0, 0x91, + 0x5b, 0xc9, 0x4f, 0xbc, 0x32, 0x21, 0xa5, 0xdb, + 0x94, 0xfa, 0xe9, 0x5a, 0xe7, 0x12, 0x1a, 0x47 }, + .ilen = 76, .assoc = { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, 0xab, 0xad, 0xda, 0xd2 }, @@ -4969,18 +4959,14 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, 0xba, 0x63, 0x7b, 0x39 }, .rlen = 60, - .tag = { 0x5b, 0xc9, 0x4f, 0xbc, 0x32, 0x21, 0xa5, 0xdb, - 0x94, 0xfa, 0xe9, 0x5a, 0xe7, 0x12, 0x1a, 0x47 }, - .tlen = 16 }, { .klen = 24, .input = { 0x98, 0xe7, 0x24, 0x7c, 0x07, 0xf0, 0xfe, 0x41, - 0x1c, 0x26, 0x7e, 0x43, 0x84, 0xb0, 0xf6, 0x00 }, - .ilen = 16, - .rlen = 16, - .tag = { 0x2f, 0xf5, 0x8d, 0x80, 0x03, 0x39, 0x27, 0xab, + 0x1c, 0x26, 0x7e, 0x43, 0x84, 0xb0, 0xf6, 0x00, + 0x2f, 0xf5, 0x8d, 0x80, 0x03, 0x39, 0x27, 0xab, 0x8e, 0xf4, 0xd4, 0x58, 0x75, 0x14, 0xf0, 0xfb }, - .tlen = 16 + .ilen = 32, + .rlen = 16, }, { .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, @@ -4995,8 +4981,10 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { 0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25, 0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47, 0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9, - 0xcc, 0xda, 0x27, 0x10, 0xac, 0xad, 0xe2, 0x56 }, - .ilen = 64, + 0xcc, 0xda, 0x27, 0x10, 0xac, 0xad, 0xe2, 0x56, + 0x99, 0x24, 0xa7, 0xc8, 0x58, 0x73, 0x36, 0xbf, + 0xb1, 0x18, 0x02, 0x4d, 0xb8, 0x67, 0x4a, 0x14 }, + .ilen = 80, .result = { 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5, 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a, 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda, @@ -5006,9 +4994,6 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 }, .rlen = 64, - .tag = { 0x99, 0x24, 0xa7, 0xc8, 0x58, 0x73, 0x36, 0xbf, - 0xb1, 0x18, 0x02, 0x4d, 0xb8, 0x67, 0x4a, 0x14 }, - .tlen = 16 }, { .key = { 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c, 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08, @@ 
-5023,8 +5008,10 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { 0x7d, 0x77, 0x3d, 0x00, 0xc1, 0x44, 0xc5, 0x25, 0xac, 0x61, 0x9d, 0x18, 0xc8, 0x4a, 0x3f, 0x47, 0x18, 0xe2, 0x44, 0x8b, 0x2f, 0xe3, 0x24, 0xd9, - 0xcc, 0xda, 0x27, 0x10 }, - .ilen = 60, + 0xcc, 0xda, 0x27, 0x10, + 0x25, 0x19, 0x49, 0x8e, 0x80, 0xf1, 0x47, 0x8f, + 0x37, 0xba, 0x55, 0xbd, 0x6d, 0x27, 0x61, 0x8c }, + .ilen = 76, .assoc = { 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, 0xab, 0xad, 0xda, 0xd2 }, @@ -5038,9 +5025,6 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57, 0xba, 0x63, 0x7b, 0x39 }, .rlen = 60, - .tag = { 0x25, 0x19, 0x49, 0x8e, 0x80, 0xf1, 0x47, 0x8f, - 0x37, 0xba, 0x55, 0xbd, 0x6d, 0x27, 0x61, 0x8c }, - .tlen = 16 } }; -- cgit v1.1 From fe70f5dfe1a7b5caab96531089dac3d8728c0ebd Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 4 Dec 2007 20:07:27 +1100 Subject: [CRYPTO] aead: Return EBADMSG for ICV mismatch This patch changes gcm/authenc to return EBADMSG instead of EINVAL for ICV mismatches. This convention has already been adopted by IPsec. Signed-off-by: Herbert Xu --- crypto/authenc.c | 2 +- crypto/gcm.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/authenc.c b/crypto/authenc.c index 82e03ff..6c9104e 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -200,7 +200,7 @@ auth_unlock: authsize = crypto_aead_authsize(authenc); scatterwalk_map_and_copy(ihash, src, cryptlen, authsize, 0); - return memcmp(ihash, ohash, authsize) ? -EINVAL : 0; + return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0; } static void crypto_authenc_decrypt_done(struct crypto_async_request *req, diff --git a/crypto/gcm.c b/crypto/gcm.c index ed8a626..d60c340 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -327,7 +327,7 @@ static int crypto_gcm_decrypt(struct aead_request *req) scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); if (memcmp(iauth_tag, auth_tag, authsize)) - return -EINVAL; + return -EBADMSG; return crypto_ablkcipher_decrypt(&abreq); } -- cgit v1.1 From 42c271c6c538857cb13c5ead5184d264d745f675 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 7 Dec 2007 18:52:49 +0800 Subject: [CRYPTO] scatterwalk: Move scatterwalk.h to linux/crypto The scatterwalk infrastructure is used by algorithms so it needs to move out of crypto for future users that may live in drivers/crypto or asm/*/crypto. 
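The hash-verification and ICV fixes earlier in this series both lean on scatterwalk_map_and_copy(), which this patch makes reachable from outside crypto/ via <crypto/scatterwalk.h>. A hedged sketch of the idiom; fetch_icv() is an illustrative name:

#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

/* Illustrative only: copy the trailing authsize bytes of the
 * ciphertext out of an SG list.  The final argument selects the
 * direction (0 = read from the SG list, 1 = write into it). */
static void fetch_icv(u8 *icv, struct scatterlist *sg,
		      unsigned int cryptlen, unsigned int authsize)
{
	scatterwalk_map_and_copy(icv, sg, cryptlen, authsize, 0);
}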
Signed-off-by: Herbert Xu --- crypto/authenc.c | 3 +- crypto/blkcipher.c | 2 +- crypto/digest.c | 3 +- crypto/gcm.c | 3 +- crypto/internal.h | 29 ------------ crypto/scatterwalk.c | 5 +- crypto/scatterwalk.h | 75 ------------------------------ crypto/xcbc.c | 2 +- include/crypto/scatterwalk.h | 107 +++++++++++++++++++++++++++++++++++++++++++ 9 files changed, 115 insertions(+), 114 deletions(-) delete mode 100644 crypto/scatterwalk.h create mode 100644 include/crypto/scatterwalk.h diff --git a/crypto/authenc.c b/crypto/authenc.c index 6c9104e..fbbc2b5 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -20,8 +21,6 @@ #include #include -#include "scatterwalk.h" - struct authenc_instance_ctx { struct crypto_spawn auth; struct crypto_spawn enc; diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 180d914..7939504 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -14,6 +14,7 @@ * */ +#include #include #include #include @@ -25,7 +26,6 @@ #include #include "internal.h" -#include "scatterwalk.h" enum { BLKCIPHER_WALK_PHYS = 1 << 0, diff --git a/crypto/digest.c b/crypto/digest.c index d3e827a..52845f5 100644 --- a/crypto/digest.c +++ b/crypto/digest.c @@ -12,6 +12,7 @@ * */ +#include #include #include #include @@ -20,8 +21,6 @@ #include #include -#include "internal.h" - static int init(struct hash_desc *desc) { struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); diff --git a/crypto/gcm.c b/crypto/gcm.c index d60c340..27483f3 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -10,13 +10,14 @@ #include #include +#include #include #include #include #include #include -#include "scatterwalk.h" +#include "internal.h" struct gcm_instance_ctx { struct crypto_spawn ctr; diff --git a/crypto/internal.h b/crypto/internal.h index abb01f7..cb13952 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -25,7 +25,6 @@ #include #include #include -#include /* Crypto notification events. */ enum { @@ -50,34 +49,6 @@ extern struct list_head crypto_alg_list; extern struct rw_semaphore crypto_alg_sem; extern struct blocking_notifier_head crypto_chain; -static inline enum km_type crypto_kmap_type(int out) -{ - enum km_type type; - - if (in_softirq()) - type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0; - else - type = out * (KM_USER1 - KM_USER0) + KM_USER0; - - return type; -} - -static inline void *crypto_kmap(struct page *page, int out) -{ - return kmap_atomic(page, crypto_kmap_type(out)); -} - -static inline void crypto_kunmap(void *vaddr, int out) -{ - kunmap_atomic(vaddr, crypto_kmap_type(out)); -} - -static inline void crypto_yield(u32 flags) -{ - if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) - cond_resched(); -} - #ifdef CONFIG_PROC_FS void __init crypto_init_proc(void); void __exit crypto_exit_proc(void); diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 206c39a..12d1901 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -13,6 +13,8 @@ * any later version. * */ + +#include #include #include #include @@ -20,9 +22,6 @@ #include #include -#include "internal.h" -#include "scatterwalk.h" - static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) { void *src = out ? buf : sgdata; diff --git a/crypto/scatterwalk.h b/crypto/scatterwalk.h deleted file mode 100644 index fd5517d..0000000 --- a/crypto/scatterwalk.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Cryptographic API. - * - * Copyright (c) 2002 James Morris - * Copyright (c) 2002 Adam J. 
Richter - * Copyright (c) 2004 Jean-Luc Cooke - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - */ - -#ifndef _CRYPTO_SCATTERWALK_H -#define _CRYPTO_SCATTERWALK_H - -#include -#include - -#include "internal.h" - -static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in, - struct scatter_walk *walk_out) -{ - return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) + - (int)(walk_in->offset - walk_out->offset)); -} - -static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) -{ - unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; - unsigned int len_this_page = offset_in_page(~walk->offset) + 1; - return len_this_page > len ? len : len_this_page; -} - -static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, - unsigned int nbytes) -{ - unsigned int len_this_page = scatterwalk_pagelen(walk); - return nbytes > len_this_page ? len_this_page : nbytes; -} - -static inline void scatterwalk_advance(struct scatter_walk *walk, - unsigned int nbytes) -{ - walk->offset += nbytes; -} - -static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, - unsigned int alignmask) -{ - return !(walk->offset & alignmask); -} - -static inline struct page *scatterwalk_page(struct scatter_walk *walk) -{ - return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); -} - -static inline void scatterwalk_unmap(void *vaddr, int out) -{ - crypto_kunmap(vaddr, out); -} - -void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); -void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, - size_t nbytes, int out); -void *scatterwalk_map(struct scatter_walk *walk, int out); -void scatterwalk_done(struct scatter_walk *walk, int out, int more); - -void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, - unsigned int start, unsigned int nbytes, int out); - -#endif /* _CRYPTO_SCATTERWALK_H */ diff --git a/crypto/xcbc.c b/crypto/xcbc.c index ac68f3b..789cdee 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -19,6 +19,7 @@ * Kazunori Miyazawa */ +#include #include #include #include @@ -27,7 +28,6 @@ #include #include #include -#include "internal.h" static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x02020202, 0x02020202, 0x02020202, 0x02020202, diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h new file mode 100644 index 0000000..07b6f17 --- /dev/null +++ b/include/crypto/scatterwalk.h @@ -0,0 +1,107 @@ +/* + * Cryptographic scatter and gather helpers. + * + * Copyright (c) 2002 James Morris + * Copyright (c) 2002 Adam J. Richter + * Copyright (c) 2004 Jean-Luc Cooke + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#ifndef _CRYPTO_SCATTERWALK_H +#define _CRYPTO_SCATTERWALK_H + +#include +#include +#include +#include +#include +#include +#include + +static inline enum km_type crypto_kmap_type(int out) +{ + enum km_type type; + + if (in_softirq()) + type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0; + else + type = out * (KM_USER1 - KM_USER0) + KM_USER0; + + return type; +} + +static inline void *crypto_kmap(struct page *page, int out) +{ + return kmap_atomic(page, crypto_kmap_type(out)); +} + +static inline void crypto_kunmap(void *vaddr, int out) +{ + kunmap_atomic(vaddr, crypto_kmap_type(out)); +} + +static inline void crypto_yield(u32 flags) +{ + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) + cond_resched(); +} + +static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in, + struct scatter_walk *walk_out) +{ + return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) + + (int)(walk_in->offset - walk_out->offset)); +} + +static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) +{ + unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; + unsigned int len_this_page = offset_in_page(~walk->offset) + 1; + return len_this_page > len ? len : len_this_page; +} + +static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, + unsigned int nbytes) +{ + unsigned int len_this_page = scatterwalk_pagelen(walk); + return nbytes > len_this_page ? len_this_page : nbytes; +} + +static inline void scatterwalk_advance(struct scatter_walk *walk, + unsigned int nbytes) +{ + walk->offset += nbytes; +} + +static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, + unsigned int alignmask) +{ + return !(walk->offset & alignmask); +} + +static inline struct page *scatterwalk_page(struct scatter_walk *walk) +{ + return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); +} + +static inline void scatterwalk_unmap(void *vaddr, int out) +{ + crypto_kunmap(vaddr, out); +} + +void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); +void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, + size_t nbytes, int out); +void *scatterwalk_map(struct scatter_walk *walk, int out); +void scatterwalk_done(struct scatter_walk *walk, int out, int more); + +void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes, int out); + +#endif /* _CRYPTO_SCATTERWALK_H */ -- cgit v1.1 From b2ab4a57b018aafbba35bff088218f5cc3d2142e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 5 Dec 2007 20:59:25 +1100 Subject: [CRYPTO] scatterwalk: Restore custom sg chaining for now Unfortunately the generic chaining hasn't been ported to all architectures yet, and notably not s390. So this patch restores the chaining that we were using previously, which does work everywhere.
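The restored helpers encode the chain link as a fake page pointer in the final slot: scatterwalk_sg_chain() stores it, and scatterwalk_sg_next() follows it when it steps onto an entry whose length is zero. A minimal sketch of the hmac-style usage from the diff that follows; chain_prefix() is an illustrative name:

#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

/* Illustrative only: prepend one buffer to an existing SG list using
 * the custom chaining, as hmac_digest() does with its ipad block. */
static void chain_prefix(struct scatterlist sg1[2], void *buf,
			 unsigned int len, struct scatterlist *rest)
{
	sg_init_table(sg1, 2);
	sg_set_buf(sg1, buf, len);
	scatterwalk_sg_chain(sg1, 2, rest);	/* last entry links to rest */
}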
Signed-off-by: Herbert Xu --- crypto/digest.c | 2 +- crypto/gcm.c | 2 +- crypto/hmac.c | 3 ++- crypto/scatterwalk.c | 4 ++-- include/crypto/scatterwalk.h | 11 +++++++++++ 5 files changed, 17 insertions(+), 5 deletions(-) diff --git a/crypto/digest.c b/crypto/digest.c index 52845f5..6fd43bd 100644 --- a/crypto/digest.c +++ b/crypto/digest.c @@ -75,7 +75,7 @@ static int update2(struct hash_desc *desc, if (!nbytes) break; - sg = sg_next(sg); + sg = scatterwalk_sg_next(sg); } return 0; diff --git a/crypto/gcm.c b/crypto/gcm.c index 27483f3..502da92 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -100,7 +100,7 @@ static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx, n = scatterwalk_clamp(&walk, len); if (!n) { - scatterwalk_start(&walk, sg_next(walk.sg)); + scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg)); n = scatterwalk_clamp(&walk, len); } diff --git a/crypto/hmac.c b/crypto/hmac.c index 34c3706..a1d016a 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -17,6 +17,7 @@ */ #include +#include #include #include #include @@ -160,7 +161,7 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg, sg_init_table(sg1, 2); sg_set_buf(sg1, ipad, bs); - sg_chain(sg1, 2, sg); + scatterwalk_sg_chain(sg1, 2, sg); sg_init_table(sg2, 1); sg_set_buf(sg2, opad, bs + ds); diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 12d1901..297e19d 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -61,7 +61,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, walk->offset += PAGE_SIZE - 1; walk->offset &= PAGE_MASK; if (walk->offset >= walk->sg->offset + walk->sg->length) - scatterwalk_start(walk, sg_next(walk->sg)); + scatterwalk_start(walk, scatterwalk_sg_next(walk->sg)); } } @@ -112,7 +112,7 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, break; offset += sg->length; - sg = sg_next(sg); + sg = scatterwalk_sg_next(sg); } scatterwalk_advance(&walk, start - offset); diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 07b6f17..bd62431 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -52,6 +52,17 @@ static inline void crypto_yield(u32 flags) cond_resched(); } +static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num, + struct scatterlist *sg2) +{ + sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0); +} + +static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) +{ + return (++sg)->length ? sg : (void *)sg_page(sg); +} + static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in, struct scatter_walk *walk_out) { -- cgit v1.1 From 7f6813786a6521380e1756ca5b4336bc63c5bf7d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 6 Dec 2007 14:59:53 +0800 Subject: [CRYPTO] gcm: Put abreq in private context instead of on stack The abreq structure is currently allocated on the stack. This is broken if the underlying algorithm is asynchronous. This patch changes it so that it's taken from the private context instead which has been enlarged accordingly. 
Signed-off-by: Herbert Xu --- crypto/gcm.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/crypto/gcm.c b/crypto/gcm.c index 502da92..73565d6 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -40,6 +40,7 @@ struct crypto_gcm_req_priv_ctx { u8 iauth_tag[16]; u8 counter[16]; struct crypto_gcm_ghash_ctx ghash; + struct ablkcipher_request abreq; }; static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags, @@ -280,16 +281,17 @@ static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) static int crypto_gcm_encrypt(struct aead_request *req) { - struct ablkcipher_request abreq; + struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); + struct ablkcipher_request *abreq = &pctx->abreq; int err = 0; - err = crypto_gcm_init_crypt(&abreq, req, req->cryptlen, + err = crypto_gcm_init_crypt(abreq, req, req->cryptlen, crypto_gcm_encrypt_done); if (err) return err; if (req->cryptlen) { - err = crypto_ablkcipher_encrypt(&abreq); + err = crypto_ablkcipher_encrypt(abreq); if (err) return err; } @@ -304,9 +306,9 @@ static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err) static int crypto_gcm_decrypt(struct aead_request *req) { - struct ablkcipher_request abreq; struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); + struct ablkcipher_request *abreq = &pctx->abreq; u8 *auth_tag = pctx->auth_tag; u8 *iauth_tag = pctx->iauth_tag; struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; @@ -318,7 +320,7 @@ static int crypto_gcm_decrypt(struct aead_request *req) return -EINVAL; cryptlen -= authsize; - err = crypto_gcm_init_crypt(&abreq, req, cryptlen, + err = crypto_gcm_init_crypt(abreq, req, cryptlen, crypto_gcm_decrypt_done); if (err) return err; @@ -330,7 +332,7 @@ static int crypto_gcm_decrypt(struct aead_request *req) if (memcmp(iauth_tag, auth_tag, authsize)) return -EBADMSG; - return crypto_ablkcipher_decrypt(&abreq); + return crypto_ablkcipher_decrypt(abreq); } static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) @@ -353,7 +355,9 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) align = max_t(unsigned long, crypto_ablkcipher_alignmask(ctr), __alignof__(u32) - 1); align &= ~(crypto_tfm_ctx_alignment() - 1); - tfm->crt_aead.reqsize = align + sizeof(struct crypto_gcm_req_priv_ctx); + tfm->crt_aead.reqsize = align + + sizeof(struct crypto_gcm_req_priv_ctx) + + crypto_ablkcipher_reqsize(ctr); return 0; } -- cgit v1.1 From eb6f13eb9f812f5812ed5d14f241309da369dee6 Mon Sep 17 00:00:00 2001 From: Tan Swee Heng Date: Fri, 7 Dec 2007 16:38:45 +0800 Subject: [CRYPTO] salsa20_generic: Fix multi-page processing This patch fixes the multi-page processing bug that affects large test vectors (the same bug that previously affected ctr.c). There is an optimization for the case walk.nbytes == nbytes. Also, we now use crypto_xor() instead of ad hoc XOR routines.
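For contrast with the diff that follows, this is the general shape of the corrected walk, with the Salsa20-specific work reduced to comments. It assumes the 2007-era blkcipher walk API from <crypto/algapi.h>: blkcipher_walk_virt_block() guarantees at least 64 bytes per step (except at the very end), so full keystream blocks never straddle a page boundary, and crypto_xor() covers the byte-wise tail:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>

/* Illustrative skeleton only, cipher details elided. */
static int stream_walk(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 64);

	if (likely(walk.nbytes == nbytes)) {
		/* virtually contiguous: encrypt everything in one shot */
		return blkcipher_walk_done(desc, &walk, 0);
	}

	while (walk.nbytes >= 64) {
		/* encrypt walk.nbytes - (walk.nbytes % 64) bytes here,
		 * then hand the sub-block remainder back to the walker */
		err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
	}

	if (walk.nbytes) {
		/* final partial block, XORed in via crypto_xor() */
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}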
Signed-off-by: Tan Swee Heng Signed-off-by: Herbert Xu --- crypto/salsa20_generic.c | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index b49328a..1fa4e4d 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -143,7 +143,6 @@ static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst, const u8 *src, unsigned int bytes) { u8 buf[64]; - int i; if (dst != src) memcpy(dst, src, bytes); @@ -156,15 +155,11 @@ static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst, ctx->input[9] = PLUSONE(ctx->input[9]); if (bytes <= 64) { - for (i = 0; i < bytes/4; ++i) - ((u32*)dst)[i] ^= ((u32*)buf)[i]; - for (i = bytes - bytes % 4; i < bytes; ++i) - dst[i] ^= buf[i]; + crypto_xor(dst, buf, bytes); return; } - for (i = 0; i < 64/4; ++i) - ((u32*)dst)[i] ^= ((u32*)buf)[i]; + crypto_xor(dst, buf, 64); bytes -= 64; dst += 64; } @@ -192,13 +187,30 @@ static int encrypt(struct blkcipher_desc *desc, int err; blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); + err = blkcipher_walk_virt_block(desc, &walk, 64); salsa20_ivsetup(ctx, walk.iv); - salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, - walk.src.virt.addr, nbytes); - err = blkcipher_walk_done(desc, &walk, 0); + if (likely(walk.nbytes == nbytes)) + { + salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, + walk.src.virt.addr, nbytes); + return blkcipher_walk_done(desc, &walk, 0); + } + + while (walk.nbytes >= 64) { + salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, + walk.src.virt.addr, + walk.nbytes - (walk.nbytes % 64)); + err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64); + } + + if (walk.nbytes) { + salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, + walk.src.virt.addr, walk.nbytes); + err = blkcipher_walk_done(desc, &walk, 0); + } + return err; } -- cgit v1.1 From 8bff664cdf8797564fb6b59b7be028846fab8c27 Mon Sep 17 00:00:00 2001 From: Tan Swee Heng Date: Fri, 7 Dec 2007 16:41:29 +0800 Subject: [CRYPTO] tcrypt: Salsa20 large test vector This is a large test vector for Salsa20 that crosses the 4096-bytes page boundary. Signed-off-by: Tan Swee Heng Signed-off-by: Herbert Xu --- crypto/tcrypt.h | 1048 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 1046 insertions(+), 2 deletions(-) diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 439e290..d3c380f 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -6165,7 +6165,7 @@ static struct cipher_testvec seed_dec_tv_template[] = { } }; -#define SALSA20_STREAM_ENC_TEST_VECTORS 4 +#define SALSA20_STREAM_ENC_TEST_VECTORS 5 static struct cipher_testvec salsa20_stream_enc_tv_template[] = { /* * Testvectors from verified.test-vectors submitted to ECRYPT. 
@@ -6323,7 +6323,1051 @@ static struct cipher_testvec salsa20_stream_enc_tv_template[] = { 0x5A, }, .rlen = 129, - } + }, { /* large test vector generated using Crypto++ */ + .key = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }, + .klen = 32, + .iv = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + .input = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, + 0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x15, + 0x18, 0x1b, 0x1e, 0x21, 0x24, 0x27, 0x2a, 0x2d, + 0x30, 0x33, 0x36, 0x39, 0x3c, 0x3f, 0x42, 0x45, + 0x48, 0x4b, 0x4e, 0x51, 0x54, 0x57, 0x5a, 0x5d, + 0x60, 0x63, 0x66, 0x69, 0x6c, 0x6f, 0x72, 0x75, + 0x78, 0x7b, 0x7e, 0x81, 0x84, 0x87, 0x8a, 0x8d, + 0x90, 0x93, 0x96, 0x99, 0x9c, 0x9f, 0xa2, 0xa5, + 0xa8, 0xab, 0xae, 0xb1, 0xb4, 0xb7, 0xba, 0xbd, + 0xc0, 0xc3, 0xc6, 0xc9, 0xcc, 0xcf, 0xd2, 0xd5, + 0xd8, 0xdb, 0xde, 0xe1, 0xe4, 0xe7, 0xea, 0xed, + 0xf0, 0xf3, 0xf6, 0xf9, 0xfc, 0xff, 0x02, 0x05, + 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17, 0x1a, 0x1d, + 0x20, 0x23, 0x26, 0x29, 0x2c, 0x2f, 0x32, 0x35, + 0x38, 0x3b, 0x3e, 0x41, 0x44, 0x47, 0x4a, 0x4d, + 0x50, 0x53, 0x56, 0x59, 0x5c, 0x5f, 0x62, 0x65, + 0x68, 0x6b, 0x6e, 0x71, 0x74, 0x77, 0x7a, 0x7d, + 0x80, 0x83, 0x86, 0x89, 0x8c, 0x8f, 0x92, 0x95, + 0x98, 0x9b, 0x9e, 0xa1, 0xa4, 0xa7, 0xaa, 0xad, + 0xb0, 0xb3, 0xb6, 0xb9, 0xbc, 0xbf, 0xc2, 0xc5, + 0xc8, 0xcb, 0xce, 0xd1, 0xd4, 0xd7, 0xda, 0xdd, + 0xe0, 0xe3, 0xe6, 0xe9, 0xec, 0xef, 0xf2, 0xf5, + 0xf8, 0xfb, 0xfe, 0x01, 0x04, 0x07, 0x0a, 0x0d, + 0x10, 0x13, 0x16, 0x19, 0x1c, 0x1f, 0x22, 0x25, + 0x28, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3a, 0x3d, + 0x40, 0x43, 0x46, 0x49, 0x4c, 0x4f, 0x52, 0x55, + 0x58, 0x5b, 0x5e, 0x61, 0x64, 0x67, 0x6a, 0x6d, + 0x70, 0x73, 0x76, 0x79, 0x7c, 0x7f, 0x82, 0x85, + 0x88, 0x8b, 0x8e, 0x91, 0x94, 0x97, 0x9a, 0x9d, + 0xa0, 0xa3, 0xa6, 
0xa9, 0xac, 0xaf, 0xb2, 0xb5, + 0xb8, 0xbb, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd, + 0xd0, 0xd3, 0xd6, 0xd9, 0xdc, 0xdf, 0xe2, 0xe5, + 0xe8, 0xeb, 0xee, 0xf1, 0xf4, 0xf7, 0xfa, 0xfd, + 0x00, 0x05, 0x0a, 0x0f, 0x14, 0x19, 0x1e, 0x23, + 0x28, 0x2d, 0x32, 0x37, 0x3c, 0x41, 0x46, 0x4b, + 0x50, 0x55, 0x5a, 0x5f, 0x64, 0x69, 0x6e, 0x73, + 0x78, 0x7d, 0x82, 0x87, 0x8c, 0x91, 0x96, 0x9b, + 0xa0, 0xa5, 0xaa, 0xaf, 0xb4, 0xb9, 0xbe, 0xc3, + 0xc8, 0xcd, 0xd2, 0xd7, 0xdc, 0xe1, 0xe6, 0xeb, + 0xf0, 0xf5, 0xfa, 0xff, 0x04, 0x09, 0x0e, 0x13, + 0x18, 0x1d, 0x22, 0x27, 0x2c, 0x31, 0x36, 0x3b, + 0x40, 0x45, 0x4a, 0x4f, 0x54, 0x59, 0x5e, 0x63, + 0x68, 0x6d, 0x72, 0x77, 0x7c, 0x81, 0x86, 0x8b, + 0x90, 0x95, 0x9a, 0x9f, 0xa4, 0xa9, 0xae, 0xb3, + 0xb8, 0xbd, 0xc2, 0xc7, 0xcc, 0xd1, 0xd6, 0xdb, + 0xe0, 0xe5, 0xea, 0xef, 0xf4, 0xf9, 0xfe, 0x03, + 0x08, 0x0d, 0x12, 0x17, 0x1c, 0x21, 0x26, 0x2b, + 0x30, 0x35, 0x3a, 0x3f, 0x44, 0x49, 0x4e, 0x53, + 0x58, 0x5d, 0x62, 0x67, 0x6c, 0x71, 0x76, 0x7b, + 0x80, 0x85, 0x8a, 0x8f, 0x94, 0x99, 0x9e, 0xa3, + 0xa8, 0xad, 0xb2, 0xb7, 0xbc, 0xc1, 0xc6, 0xcb, + 0xd0, 0xd5, 0xda, 0xdf, 0xe4, 0xe9, 0xee, 0xf3, + 0xf8, 0xfd, 0x02, 0x07, 0x0c, 0x11, 0x16, 0x1b, + 0x20, 0x25, 0x2a, 0x2f, 0x34, 0x39, 0x3e, 0x43, + 0x48, 0x4d, 0x52, 0x57, 0x5c, 0x61, 0x66, 0x6b, + 0x70, 0x75, 0x7a, 0x7f, 0x84, 0x89, 0x8e, 0x93, + 0x98, 0x9d, 0xa2, 0xa7, 0xac, 0xb1, 0xb6, 0xbb, + 0xc0, 0xc5, 0xca, 0xcf, 0xd4, 0xd9, 0xde, 0xe3, + 0xe8, 0xed, 0xf2, 0xf7, 0xfc, 0x01, 0x06, 0x0b, + 0x10, 0x15, 0x1a, 0x1f, 0x24, 0x29, 0x2e, 0x33, + 0x38, 0x3d, 0x42, 0x47, 0x4c, 0x51, 0x56, 0x5b, + 0x60, 0x65, 0x6a, 0x6f, 0x74, 0x79, 0x7e, 0x83, + 0x88, 0x8d, 0x92, 0x97, 0x9c, 0xa1, 0xa6, 0xab, + 0xb0, 0xb5, 0xba, 0xbf, 0xc4, 0xc9, 0xce, 0xd3, + 0xd8, 0xdd, 0xe2, 0xe7, 0xec, 0xf1, 0xf6, 0xfb, + 0x00, 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31, + 0x38, 0x3f, 0x46, 0x4d, 0x54, 0x5b, 0x62, 0x69, + 0x70, 0x77, 0x7e, 0x85, 0x8c, 0x93, 0x9a, 0xa1, + 0xa8, 0xaf, 0xb6, 0xbd, 0xc4, 0xcb, 0xd2, 0xd9, + 0xe0, 0xe7, 0xee, 0xf5, 0xfc, 0x03, 0x0a, 0x11, + 0x18, 0x1f, 0x26, 0x2d, 0x34, 0x3b, 0x42, 0x49, + 0x50, 0x57, 0x5e, 0x65, 0x6c, 0x73, 0x7a, 0x81, + 0x88, 0x8f, 0x96, 0x9d, 0xa4, 0xab, 0xb2, 0xb9, + 0xc0, 0xc7, 0xce, 0xd5, 0xdc, 0xe3, 0xea, 0xf1, + 0xf8, 0xff, 0x06, 0x0d, 0x14, 0x1b, 0x22, 0x29, + 0x30, 0x37, 0x3e, 0x45, 0x4c, 0x53, 0x5a, 0x61, + 0x68, 0x6f, 0x76, 0x7d, 0x84, 0x8b, 0x92, 0x99, + 0xa0, 0xa7, 0xae, 0xb5, 0xbc, 0xc3, 0xca, 0xd1, + 0xd8, 0xdf, 0xe6, 0xed, 0xf4, 0xfb, 0x02, 0x09, + 0x10, 0x17, 0x1e, 0x25, 0x2c, 0x33, 0x3a, 0x41, + 0x48, 0x4f, 0x56, 0x5d, 0x64, 0x6b, 0x72, 0x79, + 0x80, 0x87, 0x8e, 0x95, 0x9c, 0xa3, 0xaa, 0xb1, + 0xb8, 0xbf, 0xc6, 0xcd, 0xd4, 0xdb, 0xe2, 0xe9, + 0xf0, 0xf7, 0xfe, 0x05, 0x0c, 0x13, 0x1a, 0x21, + 0x28, 0x2f, 0x36, 0x3d, 0x44, 0x4b, 0x52, 0x59, + 0x60, 0x67, 0x6e, 0x75, 0x7c, 0x83, 0x8a, 0x91, + 0x98, 0x9f, 0xa6, 0xad, 0xb4, 0xbb, 0xc2, 0xc9, + 0xd0, 0xd7, 0xde, 0xe5, 0xec, 0xf3, 0xfa, 0x01, + 0x08, 0x0f, 0x16, 0x1d, 0x24, 0x2b, 0x32, 0x39, + 0x40, 0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71, + 0x78, 0x7f, 0x86, 0x8d, 0x94, 0x9b, 0xa2, 0xa9, + 0xb0, 0xb7, 0xbe, 0xc5, 0xcc, 0xd3, 0xda, 0xe1, + 0xe8, 0xef, 0xf6, 0xfd, 0x04, 0x0b, 0x12, 0x19, + 0x20, 0x27, 0x2e, 0x35, 0x3c, 0x43, 0x4a, 0x51, + 0x58, 0x5f, 0x66, 0x6d, 0x74, 0x7b, 0x82, 0x89, + 0x90, 0x97, 0x9e, 0xa5, 0xac, 0xb3, 0xba, 0xc1, + 0xc8, 0xcf, 0xd6, 0xdd, 0xe4, 0xeb, 0xf2, 0xf9, + 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, + 0x48, 0x51, 0x5a, 0x63, 0x6c, 0x75, 0x7e, 0x87, + 0x90, 0x99, 0xa2, 0xab, 0xb4, 0xbd, 0xc6, 0xcf, + 0xd8, 0xe1, 0xea, 
0xf3, 0xfc, 0x05, 0x0e, 0x17, + 0x20, 0x29, 0x32, 0x3b, 0x44, 0x4d, 0x56, 0x5f, + 0x68, 0x71, 0x7a, 0x83, 0x8c, 0x95, 0x9e, 0xa7, + 0xb0, 0xb9, 0xc2, 0xcb, 0xd4, 0xdd, 0xe6, 0xef, + 0xf8, 0x01, 0x0a, 0x13, 0x1c, 0x25, 0x2e, 0x37, + 0x40, 0x49, 0x52, 0x5b, 0x64, 0x6d, 0x76, 0x7f, + 0x88, 0x91, 0x9a, 0xa3, 0xac, 0xb5, 0xbe, 0xc7, + 0xd0, 0xd9, 0xe2, 0xeb, 0xf4, 0xfd, 0x06, 0x0f, + 0x18, 0x21, 0x2a, 0x33, 0x3c, 0x45, 0x4e, 0x57, + 0x60, 0x69, 0x72, 0x7b, 0x84, 0x8d, 0x96, 0x9f, + 0xa8, 0xb1, 0xba, 0xc3, 0xcc, 0xd5, 0xde, 0xe7, + 0xf0, 0xf9, 0x02, 0x0b, 0x14, 0x1d, 0x26, 0x2f, + 0x38, 0x41, 0x4a, 0x53, 0x5c, 0x65, 0x6e, 0x77, + 0x80, 0x89, 0x92, 0x9b, 0xa4, 0xad, 0xb6, 0xbf, + 0xc8, 0xd1, 0xda, 0xe3, 0xec, 0xf5, 0xfe, 0x07, + 0x10, 0x19, 0x22, 0x2b, 0x34, 0x3d, 0x46, 0x4f, + 0x58, 0x61, 0x6a, 0x73, 0x7c, 0x85, 0x8e, 0x97, + 0xa0, 0xa9, 0xb2, 0xbb, 0xc4, 0xcd, 0xd6, 0xdf, + 0xe8, 0xf1, 0xfa, 0x03, 0x0c, 0x15, 0x1e, 0x27, + 0x30, 0x39, 0x42, 0x4b, 0x54, 0x5d, 0x66, 0x6f, + 0x78, 0x81, 0x8a, 0x93, 0x9c, 0xa5, 0xae, 0xb7, + 0xc0, 0xc9, 0xd2, 0xdb, 0xe4, 0xed, 0xf6, 0xff, + 0x08, 0x11, 0x1a, 0x23, 0x2c, 0x35, 0x3e, 0x47, + 0x50, 0x59, 0x62, 0x6b, 0x74, 0x7d, 0x86, 0x8f, + 0x98, 0xa1, 0xaa, 0xb3, 0xbc, 0xc5, 0xce, 0xd7, + 0xe0, 0xe9, 0xf2, 0xfb, 0x04, 0x0d, 0x16, 0x1f, + 0x28, 0x31, 0x3a, 0x43, 0x4c, 0x55, 0x5e, 0x67, + 0x70, 0x79, 0x82, 0x8b, 0x94, 0x9d, 0xa6, 0xaf, + 0xb8, 0xc1, 0xca, 0xd3, 0xdc, 0xe5, 0xee, 0xf7, + 0x00, 0x0b, 0x16, 0x21, 0x2c, 0x37, 0x42, 0x4d, + 0x58, 0x63, 0x6e, 0x79, 0x84, 0x8f, 0x9a, 0xa5, + 0xb0, 0xbb, 0xc6, 0xd1, 0xdc, 0xe7, 0xf2, 0xfd, + 0x08, 0x13, 0x1e, 0x29, 0x34, 0x3f, 0x4a, 0x55, + 0x60, 0x6b, 0x76, 0x81, 0x8c, 0x97, 0xa2, 0xad, + 0xb8, 0xc3, 0xce, 0xd9, 0xe4, 0xef, 0xfa, 0x05, + 0x10, 0x1b, 0x26, 0x31, 0x3c, 0x47, 0x52, 0x5d, + 0x68, 0x73, 0x7e, 0x89, 0x94, 0x9f, 0xaa, 0xb5, + 0xc0, 0xcb, 0xd6, 0xe1, 0xec, 0xf7, 0x02, 0x0d, + 0x18, 0x23, 0x2e, 0x39, 0x44, 0x4f, 0x5a, 0x65, + 0x70, 0x7b, 0x86, 0x91, 0x9c, 0xa7, 0xb2, 0xbd, + 0xc8, 0xd3, 0xde, 0xe9, 0xf4, 0xff, 0x0a, 0x15, + 0x20, 0x2b, 0x36, 0x41, 0x4c, 0x57, 0x62, 0x6d, + 0x78, 0x83, 0x8e, 0x99, 0xa4, 0xaf, 0xba, 0xc5, + 0xd0, 0xdb, 0xe6, 0xf1, 0xfc, 0x07, 0x12, 0x1d, + 0x28, 0x33, 0x3e, 0x49, 0x54, 0x5f, 0x6a, 0x75, + 0x80, 0x8b, 0x96, 0xa1, 0xac, 0xb7, 0xc2, 0xcd, + 0xd8, 0xe3, 0xee, 0xf9, 0x04, 0x0f, 0x1a, 0x25, + 0x30, 0x3b, 0x46, 0x51, 0x5c, 0x67, 0x72, 0x7d, + 0x88, 0x93, 0x9e, 0xa9, 0xb4, 0xbf, 0xca, 0xd5, + 0xe0, 0xeb, 0xf6, 0x01, 0x0c, 0x17, 0x22, 0x2d, + 0x38, 0x43, 0x4e, 0x59, 0x64, 0x6f, 0x7a, 0x85, + 0x90, 0x9b, 0xa6, 0xb1, 0xbc, 0xc7, 0xd2, 0xdd, + 0xe8, 0xf3, 0xfe, 0x09, 0x14, 0x1f, 0x2a, 0x35, + 0x40, 0x4b, 0x56, 0x61, 0x6c, 0x77, 0x82, 0x8d, + 0x98, 0xa3, 0xae, 0xb9, 0xc4, 0xcf, 0xda, 0xe5, + 0xf0, 0xfb, 0x06, 0x11, 0x1c, 0x27, 0x32, 0x3d, + 0x48, 0x53, 0x5e, 0x69, 0x74, 0x7f, 0x8a, 0x95, + 0xa0, 0xab, 0xb6, 0xc1, 0xcc, 0xd7, 0xe2, 0xed, + 0xf8, 0x03, 0x0e, 0x19, 0x24, 0x2f, 0x3a, 0x45, + 0x50, 0x5b, 0x66, 0x71, 0x7c, 0x87, 0x92, 0x9d, + 0xa8, 0xb3, 0xbe, 0xc9, 0xd4, 0xdf, 0xea, 0xf5, + 0x00, 0x0d, 0x1a, 0x27, 0x34, 0x41, 0x4e, 0x5b, + 0x68, 0x75, 0x82, 0x8f, 0x9c, 0xa9, 0xb6, 0xc3, + 0xd0, 0xdd, 0xea, 0xf7, 0x04, 0x11, 0x1e, 0x2b, + 0x38, 0x45, 0x52, 0x5f, 0x6c, 0x79, 0x86, 0x93, + 0xa0, 0xad, 0xba, 0xc7, 0xd4, 0xe1, 0xee, 0xfb, + 0x08, 0x15, 0x22, 0x2f, 0x3c, 0x49, 0x56, 0x63, + 0x70, 0x7d, 0x8a, 0x97, 0xa4, 0xb1, 0xbe, 0xcb, + 0xd8, 0xe5, 0xf2, 0xff, 0x0c, 0x19, 0x26, 0x33, + 0x40, 0x4d, 0x5a, 0x67, 0x74, 0x81, 0x8e, 0x9b, + 0xa8, 0xb5, 0xc2, 0xcf, 0xdc, 0xe9, 0xf6, 0x03, + 0x10, 0x1d, 0x2a, 
0x37, 0x44, 0x51, 0x5e, 0x6b, + 0x78, 0x85, 0x92, 0x9f, 0xac, 0xb9, 0xc6, 0xd3, + 0xe0, 0xed, 0xfa, 0x07, 0x14, 0x21, 0x2e, 0x3b, + 0x48, 0x55, 0x62, 0x6f, 0x7c, 0x89, 0x96, 0xa3, + 0xb0, 0xbd, 0xca, 0xd7, 0xe4, 0xf1, 0xfe, 0x0b, + 0x18, 0x25, 0x32, 0x3f, 0x4c, 0x59, 0x66, 0x73, + 0x80, 0x8d, 0x9a, 0xa7, 0xb4, 0xc1, 0xce, 0xdb, + 0xe8, 0xf5, 0x02, 0x0f, 0x1c, 0x29, 0x36, 0x43, + 0x50, 0x5d, 0x6a, 0x77, 0x84, 0x91, 0x9e, 0xab, + 0xb8, 0xc5, 0xd2, 0xdf, 0xec, 0xf9, 0x06, 0x13, + 0x20, 0x2d, 0x3a, 0x47, 0x54, 0x61, 0x6e, 0x7b, + 0x88, 0x95, 0xa2, 0xaf, 0xbc, 0xc9, 0xd6, 0xe3, + 0xf0, 0xfd, 0x0a, 0x17, 0x24, 0x31, 0x3e, 0x4b, + 0x58, 0x65, 0x72, 0x7f, 0x8c, 0x99, 0xa6, 0xb3, + 0xc0, 0xcd, 0xda, 0xe7, 0xf4, 0x01, 0x0e, 0x1b, + 0x28, 0x35, 0x42, 0x4f, 0x5c, 0x69, 0x76, 0x83, + 0x90, 0x9d, 0xaa, 0xb7, 0xc4, 0xd1, 0xde, 0xeb, + 0xf8, 0x05, 0x12, 0x1f, 0x2c, 0x39, 0x46, 0x53, + 0x60, 0x6d, 0x7a, 0x87, 0x94, 0xa1, 0xae, 0xbb, + 0xc8, 0xd5, 0xe2, 0xef, 0xfc, 0x09, 0x16, 0x23, + 0x30, 0x3d, 0x4a, 0x57, 0x64, 0x71, 0x7e, 0x8b, + 0x98, 0xa5, 0xb2, 0xbf, 0xcc, 0xd9, 0xe6, 0xf3, + 0x00, 0x0f, 0x1e, 0x2d, 0x3c, 0x4b, 0x5a, 0x69, + 0x78, 0x87, 0x96, 0xa5, 0xb4, 0xc3, 0xd2, 0xe1, + 0xf0, 0xff, 0x0e, 0x1d, 0x2c, 0x3b, 0x4a, 0x59, + 0x68, 0x77, 0x86, 0x95, 0xa4, 0xb3, 0xc2, 0xd1, + 0xe0, 0xef, 0xfe, 0x0d, 0x1c, 0x2b, 0x3a, 0x49, + 0x58, 0x67, 0x76, 0x85, 0x94, 0xa3, 0xb2, 0xc1, + 0xd0, 0xdf, 0xee, 0xfd, 0x0c, 0x1b, 0x2a, 0x39, + 0x48, 0x57, 0x66, 0x75, 0x84, 0x93, 0xa2, 0xb1, + 0xc0, 0xcf, 0xde, 0xed, 0xfc, 0x0b, 0x1a, 0x29, + 0x38, 0x47, 0x56, 0x65, 0x74, 0x83, 0x92, 0xa1, + 0xb0, 0xbf, 0xce, 0xdd, 0xec, 0xfb, 0x0a, 0x19, + 0x28, 0x37, 0x46, 0x55, 0x64, 0x73, 0x82, 0x91, + 0xa0, 0xaf, 0xbe, 0xcd, 0xdc, 0xeb, 0xfa, 0x09, + 0x18, 0x27, 0x36, 0x45, 0x54, 0x63, 0x72, 0x81, + 0x90, 0x9f, 0xae, 0xbd, 0xcc, 0xdb, 0xea, 0xf9, + 0x08, 0x17, 0x26, 0x35, 0x44, 0x53, 0x62, 0x71, + 0x80, 0x8f, 0x9e, 0xad, 0xbc, 0xcb, 0xda, 0xe9, + 0xf8, 0x07, 0x16, 0x25, 0x34, 0x43, 0x52, 0x61, + 0x70, 0x7f, 0x8e, 0x9d, 0xac, 0xbb, 0xca, 0xd9, + 0xe8, 0xf7, 0x06, 0x15, 0x24, 0x33, 0x42, 0x51, + 0x60, 0x6f, 0x7e, 0x8d, 0x9c, 0xab, 0xba, 0xc9, + 0xd8, 0xe7, 0xf6, 0x05, 0x14, 0x23, 0x32, 0x41, + 0x50, 0x5f, 0x6e, 0x7d, 0x8c, 0x9b, 0xaa, 0xb9, + 0xc8, 0xd7, 0xe6, 0xf5, 0x04, 0x13, 0x22, 0x31, + 0x40, 0x4f, 0x5e, 0x6d, 0x7c, 0x8b, 0x9a, 0xa9, + 0xb8, 0xc7, 0xd6, 0xe5, 0xf4, 0x03, 0x12, 0x21, + 0x30, 0x3f, 0x4e, 0x5d, 0x6c, 0x7b, 0x8a, 0x99, + 0xa8, 0xb7, 0xc6, 0xd5, 0xe4, 0xf3, 0x02, 0x11, + 0x20, 0x2f, 0x3e, 0x4d, 0x5c, 0x6b, 0x7a, 0x89, + 0x98, 0xa7, 0xb6, 0xc5, 0xd4, 0xe3, 0xf2, 0x01, + 0x10, 0x1f, 0x2e, 0x3d, 0x4c, 0x5b, 0x6a, 0x79, + 0x88, 0x97, 0xa6, 0xb5, 0xc4, 0xd3, 0xe2, 0xf1, + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, + 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, + 0x10, 0x21, 0x32, 0x43, 0x54, 0x65, 0x76, 0x87, + 0x98, 0xa9, 0xba, 0xcb, 0xdc, 0xed, 0xfe, 0x0f, + 0x20, 0x31, 0x42, 0x53, 0x64, 0x75, 0x86, 0x97, + 0xa8, 0xb9, 0xca, 0xdb, 0xec, 0xfd, 0x0e, 0x1f, + 0x30, 0x41, 0x52, 0x63, 0x74, 0x85, 0x96, 0xa7, + 0xb8, 0xc9, 0xda, 0xeb, 0xfc, 0x0d, 0x1e, 0x2f, + 0x40, 0x51, 0x62, 0x73, 0x84, 0x95, 0xa6, 0xb7, + 0xc8, 0xd9, 0xea, 0xfb, 0x0c, 0x1d, 0x2e, 0x3f, + 0x50, 0x61, 0x72, 0x83, 0x94, 0xa5, 0xb6, 0xc7, + 0xd8, 0xe9, 0xfa, 0x0b, 0x1c, 0x2d, 0x3e, 0x4f, + 0x60, 0x71, 0x82, 0x93, 0xa4, 0xb5, 0xc6, 0xd7, + 0xe8, 0xf9, 0x0a, 0x1b, 0x2c, 0x3d, 0x4e, 0x5f, + 0x70, 0x81, 0x92, 0xa3, 0xb4, 0xc5, 0xd6, 0xe7, + 0xf8, 0x09, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e, 0x6f, + 0x80, 0x91, 0xa2, 0xb3, 0xc4, 0xd5, 0xe6, 0xf7, + 0x08, 0x19, 0x2a, 
0x3b, 0x4c, 0x5d, 0x6e, 0x7f, + 0x90, 0xa1, 0xb2, 0xc3, 0xd4, 0xe5, 0xf6, 0x07, + 0x18, 0x29, 0x3a, 0x4b, 0x5c, 0x6d, 0x7e, 0x8f, + 0xa0, 0xb1, 0xc2, 0xd3, 0xe4, 0xf5, 0x06, 0x17, + 0x28, 0x39, 0x4a, 0x5b, 0x6c, 0x7d, 0x8e, 0x9f, + 0xb0, 0xc1, 0xd2, 0xe3, 0xf4, 0x05, 0x16, 0x27, + 0x38, 0x49, 0x5a, 0x6b, 0x7c, 0x8d, 0x9e, 0xaf, + 0xc0, 0xd1, 0xe2, 0xf3, 0x04, 0x15, 0x26, 0x37, + 0x48, 0x59, 0x6a, 0x7b, 0x8c, 0x9d, 0xae, 0xbf, + 0xd0, 0xe1, 0xf2, 0x03, 0x14, 0x25, 0x36, 0x47, + 0x58, 0x69, 0x7a, 0x8b, 0x9c, 0xad, 0xbe, 0xcf, + 0xe0, 0xf1, 0x02, 0x13, 0x24, 0x35, 0x46, 0x57, + 0x68, 0x79, 0x8a, 0x9b, 0xac, 0xbd, 0xce, 0xdf, + 0xf0, 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67, + 0x78, 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef, + 0x00, 0x13, 0x26, 0x39, 0x4c, 0x5f, 0x72, 0x85, + 0x98, 0xab, 0xbe, 0xd1, 0xe4, 0xf7, 0x0a, 0x1d, + 0x30, 0x43, 0x56, 0x69, 0x7c, 0x8f, 0xa2, 0xb5, + 0xc8, 0xdb, 0xee, 0x01, 0x14, 0x27, 0x3a, 0x4d, + 0x60, 0x73, 0x86, 0x99, 0xac, 0xbf, 0xd2, 0xe5, + 0xf8, 0x0b, 0x1e, 0x31, 0x44, 0x57, 0x6a, 0x7d, + 0x90, 0xa3, 0xb6, 0xc9, 0xdc, 0xef, 0x02, 0x15, + 0x28, 0x3b, 0x4e, 0x61, 0x74, 0x87, 0x9a, 0xad, + 0xc0, 0xd3, 0xe6, 0xf9, 0x0c, 0x1f, 0x32, 0x45, + 0x58, 0x6b, 0x7e, 0x91, 0xa4, 0xb7, 0xca, 0xdd, + 0xf0, 0x03, 0x16, 0x29, 0x3c, 0x4f, 0x62, 0x75, + 0x88, 0x9b, 0xae, 0xc1, 0xd4, 0xe7, 0xfa, 0x0d, + 0x20, 0x33, 0x46, 0x59, 0x6c, 0x7f, 0x92, 0xa5, + 0xb8, 0xcb, 0xde, 0xf1, 0x04, 0x17, 0x2a, 0x3d, + 0x50, 0x63, 0x76, 0x89, 0x9c, 0xaf, 0xc2, 0xd5, + 0xe8, 0xfb, 0x0e, 0x21, 0x34, 0x47, 0x5a, 0x6d, + 0x80, 0x93, 0xa6, 0xb9, 0xcc, 0xdf, 0xf2, 0x05, + 0x18, 0x2b, 0x3e, 0x51, 0x64, 0x77, 0x8a, 0x9d, + 0xb0, 0xc3, 0xd6, 0xe9, 0xfc, 0x0f, 0x22, 0x35, + 0x48, 0x5b, 0x6e, 0x81, 0x94, 0xa7, 0xba, 0xcd, + 0xe0, 0xf3, 0x06, 0x19, 0x2c, 0x3f, 0x52, 0x65, + 0x78, 0x8b, 0x9e, 0xb1, 0xc4, 0xd7, 0xea, 0xfd, + 0x10, 0x23, 0x36, 0x49, 0x5c, 0x6f, 0x82, 0x95, + 0xa8, 0xbb, 0xce, 0xe1, 0xf4, 0x07, 0x1a, 0x2d, + 0x40, 0x53, 0x66, 0x79, 0x8c, 0x9f, 0xb2, 0xc5, + 0xd8, 0xeb, 0xfe, 0x11, 0x24, 0x37, 0x4a, 0x5d, + 0x70, 0x83, 0x96, 0xa9, 0xbc, 0xcf, 0xe2, 0xf5, + 0x08, 0x1b, 0x2e, 0x41, 0x54, 0x67, 0x7a, 0x8d, + 0xa0, 0xb3, 0xc6, 0xd9, 0xec, 0xff, 0x12, 0x25, + 0x38, 0x4b, 0x5e, 0x71, 0x84, 0x97, 0xaa, 0xbd, + 0xd0, 0xe3, 0xf6, 0x09, 0x1c, 0x2f, 0x42, 0x55, + 0x68, 0x7b, 0x8e, 0xa1, 0xb4, 0xc7, 0xda, 0xed, + 0x00, 0x15, 0x2a, 0x3f, 0x54, 0x69, 0x7e, 0x93, + 0xa8, 0xbd, 0xd2, 0xe7, 0xfc, 0x11, 0x26, 0x3b, + 0x50, 0x65, 0x7a, 0x8f, 0xa4, 0xb9, 0xce, 0xe3, + 0xf8, 0x0d, 0x22, 0x37, 0x4c, 0x61, 0x76, 0x8b, + 0xa0, 0xb5, 0xca, 0xdf, 0xf4, 0x09, 0x1e, 0x33, + 0x48, 0x5d, 0x72, 0x87, 0x9c, 0xb1, 0xc6, 0xdb, + 0xf0, 0x05, 0x1a, 0x2f, 0x44, 0x59, 0x6e, 0x83, + 0x98, 0xad, 0xc2, 0xd7, 0xec, 0x01, 0x16, 0x2b, + 0x40, 0x55, 0x6a, 0x7f, 0x94, 0xa9, 0xbe, 0xd3, + 0xe8, 0xfd, 0x12, 0x27, 0x3c, 0x51, 0x66, 0x7b, + 0x90, 0xa5, 0xba, 0xcf, 0xe4, 0xf9, 0x0e, 0x23, + 0x38, 0x4d, 0x62, 0x77, 0x8c, 0xa1, 0xb6, 0xcb, + 0xe0, 0xf5, 0x0a, 0x1f, 0x34, 0x49, 0x5e, 0x73, + 0x88, 0x9d, 0xb2, 0xc7, 0xdc, 0xf1, 0x06, 0x1b, + 0x30, 0x45, 0x5a, 0x6f, 0x84, 0x99, 0xae, 0xc3, + 0xd8, 0xed, 0x02, 0x17, 0x2c, 0x41, 0x56, 0x6b, + 0x80, 0x95, 0xaa, 0xbf, 0xd4, 0xe9, 0xfe, 0x13, + 0x28, 0x3d, 0x52, 0x67, 0x7c, 0x91, 0xa6, 0xbb, + 0xd0, 0xe5, 0xfa, 0x0f, 0x24, 0x39, 0x4e, 0x63, + 0x78, 0x8d, 0xa2, 0xb7, 0xcc, 0xe1, 0xf6, 0x0b, + 0x20, 0x35, 0x4a, 0x5f, 0x74, 0x89, 0x9e, 0xb3, + 0xc8, 0xdd, 0xf2, 0x07, 0x1c, 0x31, 0x46, 0x5b, + 0x70, 0x85, 0x9a, 0xaf, 0xc4, 0xd9, 0xee, 0x03, + 0x18, 0x2d, 0x42, 0x57, 0x6c, 0x81, 0x96, 0xab, + 0xc0, 0xd5, 0xea, 
0xff, 0x14, 0x29, 0x3e, 0x53, + 0x68, 0x7d, 0x92, 0xa7, 0xbc, 0xd1, 0xe6, 0xfb, + 0x10, 0x25, 0x3a, 0x4f, 0x64, 0x79, 0x8e, 0xa3, + 0xb8, 0xcd, 0xe2, 0xf7, 0x0c, 0x21, 0x36, 0x4b, + 0x60, 0x75, 0x8a, 0x9f, 0xb4, 0xc9, 0xde, 0xf3, + 0x08, 0x1d, 0x32, 0x47, 0x5c, 0x71, 0x86, 0x9b, + 0xb0, 0xc5, 0xda, 0xef, 0x04, 0x19, 0x2e, 0x43, + 0x58, 0x6d, 0x82, 0x97, 0xac, 0xc1, 0xd6, 0xeb, + 0x00, 0x17, 0x2e, 0x45, 0x5c, 0x73, 0x8a, 0xa1, + 0xb8, 0xcf, 0xe6, 0xfd, 0x14, 0x2b, 0x42, 0x59, + 0x70, 0x87, 0x9e, 0xb5, 0xcc, 0xe3, 0xfa, 0x11, + 0x28, 0x3f, 0x56, 0x6d, 0x84, 0x9b, 0xb2, 0xc9, + 0xe0, 0xf7, 0x0e, 0x25, 0x3c, 0x53, 0x6a, 0x81, + 0x98, 0xaf, 0xc6, 0xdd, 0xf4, 0x0b, 0x22, 0x39, + 0x50, 0x67, 0x7e, 0x95, 0xac, 0xc3, 0xda, 0xf1, + 0x08, 0x1f, 0x36, 0x4d, 0x64, 0x7b, 0x92, 0xa9, + 0xc0, 0xd7, 0xee, 0x05, 0x1c, 0x33, 0x4a, 0x61, + 0x78, 0x8f, 0xa6, 0xbd, 0xd4, 0xeb, 0x02, 0x19, + 0x30, 0x47, 0x5e, 0x75, 0x8c, 0xa3, 0xba, 0xd1, + 0xe8, 0xff, 0x16, 0x2d, 0x44, 0x5b, 0x72, 0x89, + 0xa0, 0xb7, 0xce, 0xe5, 0xfc, 0x13, 0x2a, 0x41, + 0x58, 0x6f, 0x86, 0x9d, 0xb4, 0xcb, 0xe2, 0xf9, + 0x10, 0x27, 0x3e, 0x55, 0x6c, 0x83, 0x9a, 0xb1, + 0xc8, 0xdf, 0xf6, 0x0d, 0x24, 0x3b, 0x52, 0x69, + 0x80, 0x97, 0xae, 0xc5, 0xdc, 0xf3, 0x0a, 0x21, + 0x38, 0x4f, 0x66, 0x7d, 0x94, 0xab, 0xc2, 0xd9, + 0xf0, 0x07, 0x1e, 0x35, 0x4c, 0x63, 0x7a, 0x91, + 0xa8, 0xbf, 0xd6, 0xed, 0x04, 0x1b, 0x32, 0x49, + 0x60, 0x77, 0x8e, 0xa5, 0xbc, 0xd3, 0xea, 0x01, + 0x18, 0x2f, 0x46, 0x5d, 0x74, 0x8b, 0xa2, 0xb9, + 0xd0, 0xe7, 0xfe, 0x15, 0x2c, 0x43, 0x5a, 0x71, + 0x88, 0x9f, 0xb6, 0xcd, 0xe4, 0xfb, 0x12, 0x29, + 0x40, 0x57, 0x6e, 0x85, 0x9c, 0xb3, 0xca, 0xe1, + 0xf8, 0x0f, 0x26, 0x3d, 0x54, 0x6b, 0x82, 0x99, + 0xb0, 0xc7, 0xde, 0xf5, 0x0c, 0x23, 0x3a, 0x51, + 0x68, 0x7f, 0x96, 0xad, 0xc4, 0xdb, 0xf2, 0x09, + 0x20, 0x37, 0x4e, 0x65, 0x7c, 0x93, 0xaa, 0xc1, + 0xd8, 0xef, 0x06, 0x1d, 0x34, 0x4b, 0x62, 0x79, + 0x90, 0xa7, 0xbe, 0xd5, 0xec, 0x03, 0x1a, 0x31, + 0x48, 0x5f, 0x76, 0x8d, 0xa4, 0xbb, 0xd2, 0xe9, + 0x00, 0x19, 0x32, 0x4b, 0x64, 0x7d, 0x96, 0xaf, + 0xc8, 0xe1, 0xfa, 0x13, 0x2c, 0x45, 0x5e, 0x77, + 0x90, 0xa9, 0xc2, 0xdb, 0xf4, 0x0d, 0x26, 0x3f, + 0x58, 0x71, 0x8a, 0xa3, 0xbc, 0xd5, 0xee, 0x07, + 0x20, 0x39, 0x52, 0x6b, 0x84, 0x9d, 0xb6, 0xcf, + 0xe8, 0x01, 0x1a, 0x33, 0x4c, 0x65, 0x7e, 0x97, + 0xb0, 0xc9, 0xe2, 0xfb, 0x14, 0x2d, 0x46, 0x5f, + 0x78, 0x91, 0xaa, 0xc3, 0xdc, 0xf5, 0x0e, 0x27, + 0x40, 0x59, 0x72, 0x8b, 0xa4, 0xbd, 0xd6, 0xef, + 0x08, 0x21, 0x3a, 0x53, 0x6c, 0x85, 0x9e, 0xb7, + 0xd0, 0xe9, 0x02, 0x1b, 0x34, 0x4d, 0x66, 0x7f, + 0x98, 0xb1, 0xca, 0xe3, 0xfc, 0x15, 0x2e, 0x47, + 0x60, 0x79, 0x92, 0xab, 0xc4, 0xdd, 0xf6, 0x0f, + 0x28, 0x41, 0x5a, 0x73, 0x8c, 0xa5, 0xbe, 0xd7, + 0xf0, 0x09, 0x22, 0x3b, 0x54, 0x6d, 0x86, 0x9f, + 0xb8, 0xd1, 0xea, 0x03, 0x1c, 0x35, 0x4e, 0x67, + 0x80, 0x99, 0xb2, 0xcb, 0xe4, 0xfd, 0x16, 0x2f, + 0x48, 0x61, 0x7a, 0x93, 0xac, 0xc5, 0xde, 0xf7, + 0x10, 0x29, 0x42, 0x5b, 0x74, 0x8d, 0xa6, 0xbf, + 0xd8, 0xf1, 0x0a, 0x23, 0x3c, 0x55, 0x6e, 0x87, + 0xa0, 0xb9, 0xd2, 0xeb, 0x04, 0x1d, 0x36, 0x4f, + 0x68, 0x81, 0x9a, 0xb3, 0xcc, 0xe5, 0xfe, 0x17, + 0x30, 0x49, 0x62, 0x7b, 0x94, 0xad, 0xc6, 0xdf, + 0xf8, 0x11, 0x2a, 0x43, 0x5c, 0x75, 0x8e, 0xa7, + 0xc0, 0xd9, 0xf2, 0x0b, 0x24, 0x3d, 0x56, 0x6f, + 0x88, 0xa1, 0xba, 0xd3, 0xec, 0x05, 0x1e, 0x37, + 0x50, 0x69, 0x82, 0x9b, 0xb4, 0xcd, 0xe6, 0xff, + 0x18, 0x31, 0x4a, 0x63, 0x7c, 0x95, 0xae, 0xc7, + 0xe0, 0xf9, 0x12, 0x2b, 0x44, 0x5d, 0x76, 0x8f, + 0xa8, 0xc1, 0xda, 0xf3, 0x0c, 0x25, 0x3e, 0x57, + 0x70, 0x89, 0xa2, 0xbb, 0xd4, 0xed, 0x06, 0x1f, + 0x38, 0x51, 0x6a, 
0x83, 0x9c, 0xb5, 0xce, 0xe7, + 0x00, 0x1b, 0x36, 0x51, 0x6c, 0x87, 0xa2, 0xbd, + 0xd8, 0xf3, 0x0e, 0x29, 0x44, 0x5f, 0x7a, 0x95, + 0xb0, 0xcb, 0xe6, 0x01, 0x1c, 0x37, 0x52, 0x6d, + 0x88, 0xa3, 0xbe, 0xd9, 0xf4, 0x0f, 0x2a, 0x45, + 0x60, 0x7b, 0x96, 0xb1, 0xcc, 0xe7, 0x02, 0x1d, + 0x38, 0x53, 0x6e, 0x89, 0xa4, 0xbf, 0xda, 0xf5, + 0x10, 0x2b, 0x46, 0x61, 0x7c, 0x97, 0xb2, 0xcd, + 0xe8, 0x03, 0x1e, 0x39, 0x54, 0x6f, 0x8a, 0xa5, + 0xc0, 0xdb, 0xf6, 0x11, 0x2c, 0x47, 0x62, 0x7d, + 0x98, 0xb3, 0xce, 0xe9, 0x04, 0x1f, 0x3a, 0x55, + 0x70, 0x8b, 0xa6, 0xc1, 0xdc, 0xf7, 0x12, 0x2d, + 0x48, 0x63, 0x7e, 0x99, 0xb4, 0xcf, 0xea, 0x05, + 0x20, 0x3b, 0x56, 0x71, 0x8c, 0xa7, 0xc2, 0xdd, + 0xf8, 0x13, 0x2e, 0x49, 0x64, 0x7f, 0x9a, 0xb5, + 0xd0, 0xeb, 0x06, 0x21, 0x3c, 0x57, 0x72, 0x8d, + 0xa8, 0xc3, 0xde, 0xf9, 0x14, 0x2f, 0x4a, 0x65, + 0x80, 0x9b, 0xb6, 0xd1, 0xec, 0x07, 0x22, 0x3d, + 0x58, 0x73, 0x8e, 0xa9, 0xc4, 0xdf, 0xfa, 0x15, + 0x30, 0x4b, 0x66, 0x81, 0x9c, 0xb7, 0xd2, 0xed, + 0x08, 0x23, 0x3e, 0x59, 0x74, 0x8f, 0xaa, 0xc5, + 0xe0, 0xfb, 0x16, 0x31, 0x4c, 0x67, 0x82, 0x9d, + 0xb8, 0xd3, 0xee, 0x09, 0x24, 0x3f, 0x5a, 0x75, + 0x90, 0xab, 0xc6, 0xe1, 0xfc, 0x17, 0x32, 0x4d, + 0x68, 0x83, 0x9e, 0xb9, 0xd4, 0xef, 0x0a, 0x25, + 0x40, 0x5b, 0x76, 0x91, 0xac, 0xc7, 0xe2, 0xfd, + 0x18, 0x33, 0x4e, 0x69, 0x84, 0x9f, 0xba, 0xd5, + 0xf0, 0x0b, 0x26, 0x41, 0x5c, 0x77, 0x92, 0xad, + 0xc8, 0xe3, 0xfe, 0x19, 0x34, 0x4f, 0x6a, 0x85, + 0xa0, 0xbb, 0xd6, 0xf1, 0x0c, 0x27, 0x42, 0x5d, + 0x78, 0x93, 0xae, 0xc9, 0xe4, 0xff, 0x1a, 0x35, + 0x50, 0x6b, 0x86, 0xa1, 0xbc, 0xd7, 0xf2, 0x0d, + 0x28, 0x43, 0x5e, 0x79, 0x94, 0xaf, 0xca, 0xe5, + 0x00, 0x1d, 0x3a, 0x57, 0x74, 0x91, 0xae, 0xcb, + 0xe8, 0x05, 0x22, 0x3f, 0x5c, 0x79, 0x96, 0xb3, + 0xd0, 0xed, 0x0a, 0x27, 0x44, 0x61, 0x7e, 0x9b, + 0xb8, 0xd5, 0xf2, 0x0f, 0x2c, 0x49, 0x66, 0x83, + 0xa0, 0xbd, 0xda, 0xf7, 0x14, 0x31, 0x4e, 0x6b, + 0x88, 0xa5, 0xc2, 0xdf, 0xfc, 0x19, 0x36, 0x53, + 0x70, 0x8d, 0xaa, 0xc7, 0xe4, 0x01, 0x1e, 0x3b, + 0x58, 0x75, 0x92, 0xaf, 0xcc, 0xe9, 0x06, 0x23, + 0x40, 0x5d, 0x7a, 0x97, 0xb4, 0xd1, 0xee, 0x0b, + 0x28, 0x45, 0x62, 0x7f, 0x9c, 0xb9, 0xd6, 0xf3, + 0x10, 0x2d, 0x4a, 0x67, 0x84, 0xa1, 0xbe, 0xdb, + 0xf8, 0x15, 0x32, 0x4f, 0x6c, 0x89, 0xa6, 0xc3, + 0xe0, 0xfd, 0x1a, 0x37, 0x54, 0x71, 0x8e, 0xab, + 0xc8, 0xe5, 0x02, 0x1f, 0x3c, 0x59, 0x76, 0x93, + 0xb0, 0xcd, 0xea, 0x07, 0x24, 0x41, 0x5e, 0x7b, + 0x98, 0xb5, 0xd2, 0xef, 0x0c, 0x29, 0x46, 0x63, + 0x80, 0x9d, 0xba, 0xd7, 0xf4, 0x11, 0x2e, 0x4b, + 0x68, 0x85, 0xa2, 0xbf, 0xdc, 0xf9, 0x16, 0x33, + 0x50, 0x6d, 0x8a, 0xa7, 0xc4, 0xe1, 0xfe, 0x1b, + 0x38, 0x55, 0x72, 0x8f, 0xac, 0xc9, 0xe6, 0x03, + 0x20, 0x3d, 0x5a, 0x77, 0x94, 0xb1, 0xce, 0xeb, + 0x08, 0x25, 0x42, 0x5f, 0x7c, 0x99, 0xb6, 0xd3, + 0xf0, 0x0d, 0x2a, 0x47, 0x64, 0x81, 0x9e, 0xbb, + 0xd8, 0xf5, 0x12, 0x2f, 0x4c, 0x69, 0x86, 0xa3, + 0xc0, 0xdd, 0xfa, 0x17, 0x34, 0x51, 0x6e, 0x8b, + 0xa8, 0xc5, 0xe2, 0xff, 0x1c, 0x39, 0x56, 0x73, + 0x90, 0xad, 0xca, 0xe7, 0x04, 0x21, 0x3e, 0x5b, + 0x78, 0x95, 0xb2, 0xcf, 0xec, 0x09, 0x26, 0x43, + 0x60, 0x7d, 0x9a, 0xb7, 0xd4, 0xf1, 0x0e, 0x2b, + 0x48, 0x65, 0x82, 0x9f, 0xbc, 0xd9, 0xf6, 0x13, + 0x30, 0x4d, 0x6a, 0x87, 0xa4, 0xc1, 0xde, 0xfb, + 0x18, 0x35, 0x52, 0x6f, 0x8c, 0xa9, 0xc6, 0xe3, + 0x00, 0x1f, 0x3e, 0x5d, 0x7c, 0x9b, 0xba, 0xd9, + 0xf8, 0x17, 0x36, 0x55, 0x74, 0x93, 0xb2, 0xd1, + 0xf0, 0x0f, 0x2e, 0x4d, 0x6c, 0x8b, 0xaa, 0xc9, + 0xe8, 0x07, 0x26, 0x45, 0x64, 0x83, 0xa2, 0xc1, + 0xe0, 0xff, 0x1e, 0x3d, 0x5c, 0x7b, 0x9a, 0xb9, + 0xd8, 0xf7, 0x16, 0x35, 0x54, 0x73, 0x92, 0xb1, + 0xd0, 0xef, 0x0e, 
0x2d, 0x4c, 0x6b, 0x8a, 0xa9, + 0xc8, 0xe7, 0x06, 0x25, 0x44, 0x63, 0x82, 0xa1, + 0xc0, 0xdf, 0xfe, 0x1d, 0x3c, 0x5b, 0x7a, 0x99, + 0xb8, 0xd7, 0xf6, 0x15, 0x34, 0x53, 0x72, 0x91, + 0xb0, 0xcf, 0xee, 0x0d, 0x2c, 0x4b, 0x6a, 0x89, + 0xa8, 0xc7, 0xe6, 0x05, 0x24, 0x43, 0x62, 0x81, + 0xa0, 0xbf, 0xde, 0xfd, 0x1c, 0x3b, 0x5a, 0x79, + 0x98, 0xb7, 0xd6, 0xf5, 0x14, 0x33, 0x52, 0x71, + 0x90, 0xaf, 0xce, 0xed, 0x0c, 0x2b, 0x4a, 0x69, + 0x88, 0xa7, 0xc6, 0xe5, 0x04, 0x23, 0x42, 0x61, + 0x80, 0x9f, 0xbe, 0xdd, 0xfc, 0x1b, 0x3a, 0x59, + 0x78, 0x97, 0xb6, 0xd5, 0xf4, 0x13, 0x32, 0x51, + 0x70, 0x8f, 0xae, 0xcd, 0xec, 0x0b, 0x2a, 0x49, + 0x68, 0x87, 0xa6, 0xc5, 0xe4, 0x03, 0x22, 0x41, + 0x60, 0x7f, 0x9e, 0xbd, 0xdc, 0xfb, 0x1a, 0x39, + 0x58, 0x77, 0x96, 0xb5, 0xd4, 0xf3, 0x12, 0x31, + 0x50, 0x6f, 0x8e, 0xad, 0xcc, 0xeb, 0x0a, 0x29, + 0x48, 0x67, 0x86, 0xa5, 0xc4, 0xe3, 0x02, 0x21, + 0x40, 0x5f, 0x7e, 0x9d, 0xbc, 0xdb, 0xfa, 0x19, + 0x38, 0x57, 0x76, 0x95, 0xb4, 0xd3, 0xf2, 0x11, + 0x30, 0x4f, 0x6e, 0x8d, 0xac, 0xcb, 0xea, 0x09, + 0x28, 0x47, 0x66, 0x85, 0xa4, 0xc3, 0xe2, 0x01, + 0x20, 0x3f, 0x5e, 0x7d, 0x9c, 0xbb, 0xda, 0xf9, + 0x18, 0x37, 0x56, 0x75, 0x94, 0xb3, 0xd2, 0xf1, + 0x10, 0x2f, 0x4e, 0x6d, 0x8c, 0xab, 0xca, 0xe9, + 0x08, 0x27, 0x46, 0x65, 0x84, 0xa3, 0xc2, 0xe1, + 0x00, 0x21, 0x42, 0x63, + }, + .ilen = 4100, + .result = { + 0xb5, 0x81, 0xf5, 0x64, 0x18, 0x73, 0xe3, 0xf0, + 0x4c, 0x13, 0xf2, 0x77, 0x18, 0x60, 0x65, 0x5e, + 0x29, 0x01, 0xce, 0x98, 0x55, 0x53, 0xf9, 0x0c, + 0x2a, 0x08, 0xd5, 0x09, 0xb3, 0x57, 0x55, 0x56, + 0xc5, 0xe9, 0x56, 0x90, 0xcb, 0x6a, 0xa3, 0xc0, + 0xff, 0xc4, 0x79, 0xb4, 0xd2, 0x97, 0x5d, 0xc4, + 0x43, 0xd1, 0xfe, 0x94, 0x7b, 0x88, 0x06, 0x5a, + 0xb2, 0x9e, 0x2c, 0xfc, 0x44, 0x03, 0xb7, 0x90, + 0xa0, 0xc1, 0xba, 0x6a, 0x33, 0xb8, 0xc7, 0xb2, + 0x9d, 0xe1, 0x12, 0x4f, 0xc0, 0x64, 0xd4, 0x01, + 0xfe, 0x8c, 0x7a, 0x66, 0xf7, 0xe6, 0x5a, 0x91, + 0xbb, 0xde, 0x56, 0x86, 0xab, 0x65, 0x21, 0x30, + 0x00, 0x84, 0x65, 0x24, 0xa5, 0x7d, 0x85, 0xb4, + 0xe3, 0x17, 0xed, 0x3a, 0xb7, 0x6f, 0xb4, 0x0b, + 0x0b, 0xaf, 0x15, 0xae, 0x5a, 0x8f, 0xf2, 0x0c, + 0x2f, 0x27, 0xf4, 0x09, 0xd8, 0xd2, 0x96, 0xb7, + 0x71, 0xf2, 0xc5, 0x99, 0x4d, 0x7e, 0x7f, 0x75, + 0x77, 0x89, 0x30, 0x8b, 0x59, 0xdb, 0xa2, 0xb2, + 0xa0, 0xf3, 0x19, 0x39, 0x2b, 0xc5, 0x7e, 0x3f, + 0x4f, 0xd9, 0xd3, 0x56, 0x28, 0x97, 0x44, 0xdc, + 0xc0, 0x8b, 0x77, 0x24, 0xd9, 0x52, 0xe7, 0xc5, + 0xaf, 0xf6, 0x7d, 0x59, 0xb2, 0x44, 0x05, 0x1d, + 0xb1, 0xb0, 0x11, 0xa5, 0x0f, 0xec, 0x33, 0xe1, + 0x6d, 0x1b, 0x4e, 0x1f, 0xff, 0x57, 0x91, 0xb4, + 0x5b, 0x9a, 0x96, 0xc5, 0x53, 0xbc, 0xae, 0x20, + 0x3c, 0xbb, 0x14, 0xe2, 0xe8, 0x22, 0x33, 0xc1, + 0x5e, 0x76, 0x9e, 0x46, 0x99, 0xf6, 0x2a, 0x15, + 0xc6, 0x97, 0x02, 0xa0, 0x66, 0x43, 0xd1, 0xa6, + 0x31, 0xa6, 0x9f, 0xfb, 0xf4, 0xd3, 0x69, 0xe5, + 0xcd, 0x76, 0x95, 0xb8, 0x7a, 0x82, 0x7f, 0x21, + 0x45, 0xff, 0x3f, 0xce, 0x55, 0xf6, 0x95, 0x10, + 0x08, 0x77, 0x10, 0x43, 0xc6, 0xf3, 0x09, 0xe5, + 0x68, 0xe7, 0x3c, 0xad, 0x00, 0x52, 0x45, 0x0d, + 0xfe, 0x2d, 0xc6, 0xc2, 0x94, 0x8c, 0x12, 0x1d, + 0xe6, 0x25, 0xae, 0x98, 0x12, 0x8e, 0x19, 0x9c, + 0x81, 0x68, 0xb1, 0x11, 0xf6, 0x69, 0xda, 0xe3, + 0x62, 0x08, 0x18, 0x7a, 0x25, 0x49, 0x28, 0xac, + 0xba, 0x71, 0x12, 0x0b, 0xe4, 0xa2, 0xe5, 0xc7, + 0x5d, 0x8e, 0xec, 0x49, 0x40, 0x21, 0xbf, 0x5a, + 0x98, 0xf3, 0x02, 0x68, 0x55, 0x03, 0x7f, 0x8a, + 0xe5, 0x94, 0x0c, 0x32, 0x5c, 0x07, 0x82, 0x63, + 0xaf, 0x6f, 0x91, 0x40, 0x84, 0x8e, 0x52, 0x25, + 0xd0, 0xb0, 0x29, 0x53, 0x05, 0xe2, 0x50, 0x7a, + 0x34, 0xeb, 0xc9, 0x46, 0x20, 0xa8, 0x3d, 0xde, + 0x7f, 0x16, 
0x5f, 0x36, 0xc5, 0x2e, 0xdc, 0xd1, + 0x15, 0x47, 0xc7, 0x50, 0x40, 0x6d, 0x91, 0xc5, + 0xe7, 0x93, 0x95, 0x1a, 0xd3, 0x57, 0xbc, 0x52, + 0x33, 0xee, 0x14, 0x19, 0x22, 0x52, 0x89, 0xa7, + 0x4a, 0x25, 0x56, 0x77, 0x4b, 0xca, 0xcf, 0x0a, + 0xe1, 0xf5, 0x35, 0x85, 0x30, 0x7e, 0x59, 0x4a, + 0xbd, 0x14, 0x5b, 0xdf, 0xe3, 0x46, 0xcb, 0xac, + 0x1f, 0x6c, 0x96, 0x0e, 0xf4, 0x81, 0xd1, 0x99, + 0xca, 0x88, 0x63, 0x3d, 0x02, 0x58, 0x6b, 0xa9, + 0xe5, 0x9f, 0xb3, 0x00, 0xb2, 0x54, 0xc6, 0x74, + 0x1c, 0xbf, 0x46, 0xab, 0x97, 0xcc, 0xf8, 0x54, + 0x04, 0x07, 0x08, 0x52, 0xe6, 0xc0, 0xda, 0x93, + 0x74, 0x7d, 0x93, 0x99, 0x5d, 0x78, 0x68, 0xa6, + 0x2e, 0x6b, 0xd3, 0x6a, 0x69, 0xcc, 0x12, 0x6b, + 0xd4, 0xc7, 0xa5, 0xc6, 0xe7, 0xf6, 0x03, 0x04, + 0x5d, 0xcd, 0x61, 0x5e, 0x17, 0x40, 0xdc, 0xd1, + 0x5c, 0xf5, 0x08, 0xdf, 0x5c, 0x90, 0x85, 0xa4, + 0xaf, 0xf6, 0x78, 0xbb, 0x0d, 0xf1, 0xf4, 0xa4, + 0x54, 0x26, 0x72, 0x9e, 0x61, 0xfa, 0x86, 0xcf, + 0xe8, 0x9e, 0xa1, 0xe0, 0xc7, 0x48, 0x23, 0xae, + 0x5a, 0x90, 0xae, 0x75, 0x0a, 0x74, 0x18, 0x89, + 0x05, 0xb1, 0x92, 0xb2, 0x7f, 0xd0, 0x1b, 0xa6, + 0x62, 0x07, 0x25, 0x01, 0xc7, 0xc2, 0x4f, 0xf9, + 0xe8, 0xfe, 0x63, 0x95, 0x80, 0x07, 0xb4, 0x26, + 0xcc, 0xd1, 0x26, 0xb6, 0xc4, 0x3f, 0x9e, 0xcb, + 0x8e, 0x3b, 0x2e, 0x44, 0x16, 0xd3, 0x10, 0x9a, + 0x95, 0x08, 0xeb, 0xc8, 0xcb, 0xeb, 0xbf, 0x6f, + 0x0b, 0xcd, 0x1f, 0xc8, 0xca, 0x86, 0xaa, 0xec, + 0x33, 0xe6, 0x69, 0xf4, 0x45, 0x25, 0x86, 0x3a, + 0x22, 0x94, 0x4f, 0x00, 0x23, 0x6a, 0x44, 0xc2, + 0x49, 0x97, 0x33, 0xab, 0x36, 0x14, 0x0a, 0x70, + 0x24, 0xc3, 0xbe, 0x04, 0x3b, 0x79, 0xa0, 0xf9, + 0xb8, 0xe7, 0x76, 0x29, 0x22, 0x83, 0xd7, 0xf2, + 0x94, 0xf4, 0x41, 0x49, 0xba, 0x5f, 0x7b, 0x07, + 0xb5, 0xfb, 0xdb, 0x03, 0x1a, 0x9f, 0xb6, 0x4c, + 0xc2, 0x2e, 0x37, 0x40, 0x49, 0xc3, 0x38, 0x16, + 0xe2, 0x4f, 0x77, 0x82, 0xb0, 0x68, 0x4c, 0x71, + 0x1d, 0x57, 0x61, 0x9c, 0xd9, 0x4e, 0x54, 0x99, + 0x47, 0x13, 0x28, 0x73, 0x3c, 0xbb, 0x00, 0x90, + 0xf3, 0x4d, 0xc9, 0x0e, 0xfd, 0xe7, 0xb1, 0x71, + 0xd3, 0x15, 0x79, 0xbf, 0xcc, 0x26, 0x2f, 0xbd, + 0xad, 0x6c, 0x50, 0x69, 0x6c, 0x3e, 0x6d, 0x80, + 0x9a, 0xea, 0x78, 0xaf, 0x19, 0xb2, 0x0d, 0x4d, + 0xad, 0x04, 0x07, 0xae, 0x22, 0x90, 0x4a, 0x93, + 0x32, 0x0e, 0x36, 0x9b, 0x1b, 0x46, 0xba, 0x3b, + 0xb4, 0xac, 0xc6, 0xd1, 0xa2, 0x31, 0x53, 0x3b, + 0x2a, 0x3d, 0x45, 0xfe, 0x03, 0x61, 0x10, 0x85, + 0x17, 0x69, 0xa6, 0x78, 0xcc, 0x6c, 0x87, 0x49, + 0x53, 0xf9, 0x80, 0x10, 0xde, 0x80, 0xa2, 0x41, + 0x6a, 0xc3, 0x32, 0x02, 0xad, 0x6d, 0x3c, 0x56, + 0x00, 0x71, 0x51, 0x06, 0xa7, 0xbd, 0xfb, 0xef, + 0x3c, 0xb5, 0x9f, 0xfc, 0x48, 0x7d, 0x53, 0x7c, + 0x66, 0xb0, 0x49, 0x23, 0xc4, 0x47, 0x10, 0x0e, + 0xe5, 0x6c, 0x74, 0x13, 0xe6, 0xc5, 0x3f, 0xaa, + 0xde, 0xff, 0x07, 0x44, 0xdd, 0x56, 0x1b, 0xad, + 0x09, 0x77, 0xfb, 0x5b, 0x12, 0xb8, 0x0d, 0x38, + 0x17, 0x37, 0x35, 0x7b, 0x9b, 0xbc, 0xfe, 0xd4, + 0x7e, 0x8b, 0xda, 0x7e, 0x5b, 0x04, 0xa7, 0x22, + 0xa7, 0x31, 0xa1, 0x20, 0x86, 0xc7, 0x1b, 0x99, + 0xdb, 0xd1, 0x89, 0xf4, 0x94, 0xa3, 0x53, 0x69, + 0x8d, 0xe7, 0xe8, 0x74, 0x11, 0x8d, 0x74, 0xd6, + 0x07, 0x37, 0x91, 0x9f, 0xfd, 0x67, 0x50, 0x3a, + 0xc9, 0xe1, 0xf4, 0x36, 0xd5, 0xa0, 0x47, 0xd1, + 0xf9, 0xe5, 0x39, 0xa3, 0x31, 0xac, 0x07, 0x36, + 0x23, 0xf8, 0x66, 0x18, 0x14, 0x28, 0x34, 0x0f, + 0xb8, 0xd0, 0xe7, 0x29, 0xb3, 0x04, 0x4b, 0x55, + 0x01, 0x41, 0xb2, 0x75, 0x8d, 0xcb, 0x96, 0x85, + 0x3a, 0xfb, 0xab, 0x2b, 0x9e, 0xfa, 0x58, 0x20, + 0x44, 0x1f, 0xc0, 0x14, 0x22, 0x75, 0x61, 0xe8, + 0xaa, 0x19, 0xcf, 0xf1, 0x82, 0x56, 0xf4, 0xd7, + 0x78, 0x7b, 0x3d, 0x5f, 0xb3, 0x9e, 0x0b, 0x8a, + 0x57, 0x50, 
0xdb, 0x17, 0x41, 0x65, 0x4d, 0xa3, + 0x02, 0xc9, 0x9c, 0x9c, 0x53, 0xfb, 0x39, 0x39, + 0x9b, 0x1d, 0x72, 0x24, 0xda, 0xb7, 0x39, 0xbe, + 0x13, 0x3b, 0xfa, 0x29, 0xda, 0x9e, 0x54, 0x64, + 0x6e, 0xba, 0xd8, 0xa1, 0xcb, 0xb3, 0x36, 0xfa, + 0xcb, 0x47, 0x85, 0xe9, 0x61, 0x38, 0xbc, 0xbe, + 0xc5, 0x00, 0x38, 0x2a, 0x54, 0xf7, 0xc4, 0xb9, + 0xb3, 0xd3, 0x7b, 0xa0, 0xa0, 0xf8, 0x72, 0x7f, + 0x8c, 0x8e, 0x82, 0x0e, 0xc6, 0x1c, 0x75, 0x9d, + 0xca, 0x8e, 0x61, 0x87, 0xde, 0xad, 0x80, 0xd2, + 0xf5, 0xf9, 0x80, 0xef, 0x15, 0x75, 0xaf, 0xf5, + 0x80, 0xfb, 0xff, 0x6d, 0x1e, 0x25, 0xb7, 0x40, + 0x61, 0x6a, 0x39, 0x5a, 0x6a, 0xb5, 0x31, 0xab, + 0x97, 0x8a, 0x19, 0x89, 0x44, 0x40, 0xc0, 0xa6, + 0xb4, 0x4e, 0x30, 0x32, 0x7b, 0x13, 0xe7, 0x67, + 0xa9, 0x8b, 0x57, 0x04, 0xc2, 0x01, 0xa6, 0xf4, + 0x28, 0x99, 0xad, 0x2c, 0x76, 0xa3, 0x78, 0xc2, + 0x4a, 0xe6, 0xca, 0x5c, 0x50, 0x6a, 0xc1, 0xb0, + 0x62, 0x4b, 0x10, 0x8e, 0x7c, 0x17, 0x43, 0xb3, + 0x17, 0x66, 0x1c, 0x3e, 0x8d, 0x69, 0xf0, 0x5a, + 0x71, 0xf5, 0x97, 0xdc, 0xd1, 0x45, 0xdd, 0x28, + 0xf3, 0x5d, 0xdf, 0x53, 0x7b, 0x11, 0xe5, 0xbc, + 0x4c, 0xdb, 0x1b, 0x51, 0x6b, 0xe9, 0xfb, 0x3d, + 0xc1, 0xc3, 0x2c, 0xb9, 0x71, 0xf5, 0xb6, 0xb2, + 0x13, 0x36, 0x79, 0x80, 0x53, 0xe8, 0xd3, 0xa6, + 0x0a, 0xaf, 0xfd, 0x56, 0x97, 0xf7, 0x40, 0x8e, + 0x45, 0xce, 0xf8, 0xb0, 0x9e, 0x5c, 0x33, 0x82, + 0xb0, 0x44, 0x56, 0xfc, 0x05, 0x09, 0xe9, 0x2a, + 0xac, 0x26, 0x80, 0x14, 0x1d, 0xc8, 0x3a, 0x35, + 0x4c, 0x82, 0x97, 0xfd, 0x76, 0xb7, 0xa9, 0x0a, + 0x35, 0x58, 0x79, 0x8e, 0x0f, 0x66, 0xea, 0xaf, + 0x51, 0x6c, 0x09, 0xa9, 0x6e, 0x9b, 0xcb, 0x9a, + 0x31, 0x47, 0xa0, 0x2f, 0x7c, 0x71, 0xb4, 0x4a, + 0x11, 0xaa, 0x8c, 0x66, 0xc5, 0x64, 0xe6, 0x3a, + 0x54, 0xda, 0x24, 0x6a, 0xc4, 0x41, 0x65, 0x46, + 0x82, 0xa0, 0x0a, 0x0f, 0x5f, 0xfb, 0x25, 0xd0, + 0x2c, 0x91, 0xa7, 0xee, 0xc4, 0x81, 0x07, 0x86, + 0x75, 0x5e, 0x33, 0x69, 0x97, 0xe4, 0x2c, 0xa8, + 0x9d, 0x9f, 0x0b, 0x6a, 0xbe, 0xad, 0x98, 0xda, + 0x6d, 0x94, 0x41, 0xda, 0x2c, 0x1e, 0x89, 0xc4, + 0xc2, 0xaf, 0x1e, 0x00, 0x05, 0x0b, 0x83, 0x60, + 0xbd, 0x43, 0xea, 0x15, 0x23, 0x7f, 0xb9, 0xac, + 0xee, 0x4f, 0x2c, 0xaf, 0x2a, 0xf3, 0xdf, 0xd0, + 0xf3, 0x19, 0x31, 0xbb, 0x4a, 0x74, 0x84, 0x17, + 0x52, 0x32, 0x2c, 0x7d, 0x61, 0xe4, 0xcb, 0xeb, + 0x80, 0x38, 0x15, 0x52, 0xcb, 0x6f, 0xea, 0xe5, + 0x73, 0x9c, 0xd9, 0x24, 0x69, 0xc6, 0x95, 0x32, + 0x21, 0xc8, 0x11, 0xe4, 0xdc, 0x36, 0xd7, 0x93, + 0x38, 0x66, 0xfb, 0xb2, 0x7f, 0x3a, 0xb9, 0xaf, + 0x31, 0xdd, 0x93, 0x75, 0x78, 0x8a, 0x2c, 0x94, + 0x87, 0x1a, 0x58, 0xec, 0x9e, 0x7d, 0x4d, 0xba, + 0xe1, 0xe5, 0x4d, 0xfc, 0xbc, 0xa4, 0x2a, 0x14, + 0xef, 0xcc, 0xa7, 0xec, 0xab, 0x43, 0x09, 0x18, + 0xd3, 0xab, 0x68, 0xd1, 0x07, 0x99, 0x44, 0x47, + 0xd6, 0x83, 0x85, 0x3b, 0x30, 0xea, 0xa9, 0x6b, + 0x63, 0xea, 0xc4, 0x07, 0xfb, 0x43, 0x2f, 0xa4, + 0xaa, 0xb0, 0xab, 0x03, 0x89, 0xce, 0x3f, 0x8c, + 0x02, 0x7c, 0x86, 0x54, 0xbc, 0x88, 0xaf, 0x75, + 0xd2, 0xdc, 0x63, 0x17, 0xd3, 0x26, 0xf6, 0x96, + 0xa9, 0x3c, 0xf1, 0x61, 0x8c, 0x11, 0x18, 0xcc, + 0xd6, 0xea, 0x5b, 0xe2, 0xcd, 0xf0, 0xf1, 0xb2, + 0xe5, 0x35, 0x90, 0x1f, 0x85, 0x4c, 0x76, 0x5b, + 0x66, 0xce, 0x44, 0xa4, 0x32, 0x9f, 0xe6, 0x7b, + 0x71, 0x6e, 0x9f, 0x58, 0x15, 0x67, 0x72, 0x87, + 0x64, 0x8e, 0x3a, 0x44, 0x45, 0xd4, 0x76, 0xfa, + 0xc2, 0xf6, 0xef, 0x85, 0x05, 0x18, 0x7a, 0x9b, + 0xba, 0x41, 0x54, 0xac, 0xf0, 0xfc, 0x59, 0x12, + 0x3f, 0xdf, 0xa0, 0xe5, 0x8a, 0x65, 0xfd, 0x3a, + 0x62, 0x8d, 0x83, 0x2c, 0x03, 0xbe, 0x05, 0x76, + 0x2e, 0x53, 0x49, 0x97, 0x94, 0x33, 0xae, 0x40, + 0x81, 0x15, 0xdb, 0x6e, 0xad, 0xaa, 0xf5, 0x4b, + 0xe3, 0x98, 
0x70, 0xdf, 0xe0, 0x7c, 0xcd, 0xdb, + 0x02, 0xd4, 0x7d, 0x2f, 0xc1, 0xe6, 0xb4, 0xf3, + 0xd7, 0x0d, 0x7a, 0xd9, 0x23, 0x9e, 0x87, 0x2d, + 0xce, 0x87, 0xad, 0xcc, 0x72, 0x05, 0x00, 0x29, + 0xdc, 0x73, 0x7f, 0x64, 0xc1, 0x15, 0x0e, 0xc2, + 0xdf, 0xa7, 0x5f, 0xeb, 0x41, 0xa1, 0xcd, 0xef, + 0x5c, 0x50, 0x79, 0x2a, 0x56, 0x56, 0x71, 0x8c, + 0xac, 0xc0, 0x79, 0x50, 0x69, 0xca, 0x59, 0x32, + 0x65, 0xf2, 0x54, 0xe4, 0x52, 0x38, 0x76, 0xd1, + 0x5e, 0xde, 0x26, 0x9e, 0xfb, 0x75, 0x2e, 0x11, + 0xb5, 0x10, 0xf4, 0x17, 0x73, 0xf5, 0x89, 0xc7, + 0x4f, 0x43, 0x5c, 0x8e, 0x7c, 0xb9, 0x05, 0x52, + 0x24, 0x40, 0x99, 0xfe, 0x9b, 0x85, 0x0b, 0x6c, + 0x22, 0x3e, 0x8b, 0xae, 0x86, 0xa1, 0xd2, 0x79, + 0x05, 0x68, 0x6b, 0xab, 0xe3, 0x41, 0x49, 0xed, + 0x15, 0xa1, 0x8d, 0x40, 0x2d, 0x61, 0xdf, 0x1a, + 0x59, 0xc9, 0x26, 0x8b, 0xef, 0x30, 0x4c, 0x88, + 0x4b, 0x10, 0xf8, 0x8d, 0xa6, 0x92, 0x9f, 0x4b, + 0xf3, 0xc4, 0x53, 0x0b, 0x89, 0x5d, 0x28, 0x92, + 0xcf, 0x78, 0xb2, 0xc0, 0x5d, 0xed, 0x7e, 0xfc, + 0xc0, 0x12, 0x23, 0x5f, 0x5a, 0x78, 0x86, 0x43, + 0x6e, 0x27, 0xf7, 0x5a, 0xa7, 0x6a, 0xed, 0x19, + 0x04, 0xf0, 0xb3, 0x12, 0xd1, 0xbd, 0x0e, 0x89, + 0x6e, 0xbc, 0x96, 0xa8, 0xd8, 0x49, 0x39, 0x9f, + 0x7e, 0x67, 0xf0, 0x2e, 0x3e, 0x01, 0xa9, 0xba, + 0xec, 0x8b, 0x62, 0x8e, 0xcb, 0x4a, 0x70, 0x43, + 0xc7, 0xc2, 0xc4, 0xca, 0x82, 0x03, 0x73, 0xe9, + 0x11, 0xdf, 0xcf, 0x54, 0xea, 0xc9, 0xb0, 0x95, + 0x51, 0xc0, 0x13, 0x3d, 0x92, 0x05, 0xfa, 0xf4, + 0xa9, 0x34, 0xc8, 0xce, 0x6c, 0x3d, 0x54, 0xcc, + 0xc4, 0xaf, 0xf1, 0xdc, 0x11, 0x44, 0x26, 0xa2, + 0xaf, 0xf1, 0x85, 0x75, 0x7d, 0x03, 0x61, 0x68, + 0x4e, 0x78, 0xc6, 0x92, 0x7d, 0x86, 0x7d, 0x77, + 0xdc, 0x71, 0x72, 0xdb, 0xc6, 0xae, 0xa1, 0xcb, + 0x70, 0x9a, 0x0b, 0x19, 0xbe, 0x4a, 0x6c, 0x2a, + 0xe2, 0xba, 0x6c, 0x64, 0x9a, 0x13, 0x28, 0xdf, + 0x85, 0x75, 0xe6, 0x43, 0xf6, 0x87, 0x08, 0x68, + 0x6e, 0xba, 0x6e, 0x79, 0x9f, 0x04, 0xbc, 0x23, + 0x50, 0xf6, 0x33, 0x5c, 0x1f, 0x24, 0x25, 0xbe, + 0x33, 0x47, 0x80, 0x45, 0x56, 0xa3, 0xa7, 0xd7, + 0x7a, 0xb1, 0x34, 0x0b, 0x90, 0x3c, 0x9c, 0xad, + 0x44, 0x5f, 0x9e, 0x0e, 0x9d, 0xd4, 0xbd, 0x93, + 0x5e, 0xfa, 0x3c, 0xe0, 0xb0, 0xd9, 0xed, 0xf3, + 0xd6, 0x2e, 0xff, 0x24, 0xd8, 0x71, 0x6c, 0xed, + 0xaf, 0x55, 0xeb, 0x22, 0xac, 0x93, 0x68, 0x32, + 0x05, 0x5b, 0x47, 0xdd, 0xc6, 0x4a, 0xcb, 0xc7, + 0x10, 0xe1, 0x3c, 0x92, 0x1a, 0xf3, 0x23, 0x78, + 0x2b, 0xa1, 0xd2, 0x80, 0xf4, 0x12, 0xb1, 0x20, + 0x8f, 0xff, 0x26, 0x35, 0xdd, 0xfb, 0xc7, 0x4e, + 0x78, 0xf1, 0x2d, 0x50, 0x12, 0x77, 0xa8, 0x60, + 0x7c, 0x0f, 0xf5, 0x16, 0x2f, 0x63, 0x70, 0x2a, + 0xc0, 0x96, 0x80, 0x4e, 0x0a, 0xb4, 0x93, 0x35, + 0x5d, 0x1d, 0x3f, 0x56, 0xf7, 0x2f, 0xbb, 0x90, + 0x11, 0x16, 0x8f, 0xa2, 0xec, 0x47, 0xbe, 0xac, + 0x56, 0x01, 0x26, 0x56, 0xb1, 0x8c, 0xb2, 0x10, + 0xf9, 0x1a, 0xca, 0xf5, 0xd1, 0xb7, 0x39, 0x20, + 0x63, 0xf1, 0x69, 0x20, 0x4f, 0x13, 0x12, 0x1f, + 0x5b, 0x65, 0xfc, 0x98, 0xf7, 0xc4, 0x7a, 0xbe, + 0xf7, 0x26, 0x4d, 0x2b, 0x84, 0x7b, 0x42, 0xad, + 0xd8, 0x7a, 0x0a, 0xb4, 0xd8, 0x74, 0xbf, 0xc1, + 0xf0, 0x6e, 0xb4, 0x29, 0xa3, 0xbb, 0xca, 0x46, + 0x67, 0x70, 0x6a, 0x2d, 0xce, 0x0e, 0xa2, 0x8a, + 0xa9, 0x87, 0xbf, 0x05, 0xc4, 0xc1, 0x04, 0xa3, + 0xab, 0xd4, 0x45, 0x43, 0x8c, 0xb6, 0x02, 0xb0, + 0x41, 0xc8, 0xfc, 0x44, 0x3d, 0x59, 0xaa, 0x2e, + 0x44, 0x21, 0x2a, 0x8d, 0x88, 0x9d, 0x57, 0xf4, + 0xa0, 0x02, 0x77, 0xb8, 0xa6, 0xa0, 0xe6, 0x75, + 0x5c, 0x82, 0x65, 0x3e, 0x03, 0x5c, 0x29, 0x8f, + 0x38, 0x55, 0xab, 0x33, 0x26, 0xef, 0x9f, 0x43, + 0x52, 0xfd, 0x68, 0xaf, 0x36, 0xb4, 0xbb, 0x9a, + 0x58, 0x09, 0x09, 0x1b, 0xc3, 0x65, 0x46, 0x46, + 0x1d, 0xa7, 
0x94, 0x18, 0x23, 0x50, 0x2c, 0xca, + 0x2c, 0x55, 0x19, 0x97, 0x01, 0x9d, 0x93, 0x3b, + 0x63, 0x86, 0xf2, 0x03, 0x67, 0x45, 0xd2, 0x72, + 0x28, 0x52, 0x6c, 0xf4, 0xe3, 0x1c, 0xb5, 0x11, + 0x13, 0xf1, 0xeb, 0x21, 0xc7, 0xd9, 0x56, 0x82, + 0x2b, 0x82, 0x39, 0xbd, 0x69, 0x54, 0xed, 0x62, + 0xc3, 0xe2, 0xde, 0x73, 0xd4, 0x6a, 0x12, 0xae, + 0x13, 0x21, 0x7f, 0x4b, 0x5b, 0xfc, 0xbf, 0xe8, + 0x2b, 0xbe, 0x56, 0xba, 0x68, 0x8b, 0x9a, 0xb1, + 0x6e, 0xfa, 0xbf, 0x7e, 0x5a, 0x4b, 0xf1, 0xac, + 0x98, 0x65, 0x85, 0xd1, 0x93, 0x53, 0xd3, 0x7b, + 0x09, 0xdd, 0x4b, 0x10, 0x6d, 0x84, 0xb0, 0x13, + 0x65, 0xbd, 0xcf, 0x52, 0x09, 0xc4, 0x85, 0xe2, + 0x84, 0x74, 0x15, 0x65, 0xb7, 0xf7, 0x51, 0xaf, + 0x55, 0xad, 0xa4, 0xd1, 0x22, 0x54, 0x70, 0x94, + 0xa0, 0x1c, 0x90, 0x41, 0xfd, 0x99, 0xd7, 0x5a, + 0x31, 0xef, 0xaa, 0x25, 0xd0, 0x7f, 0x4f, 0xea, + 0x1d, 0x55, 0x42, 0xe5, 0x49, 0xb0, 0xd0, 0x46, + 0x62, 0x36, 0x43, 0xb2, 0x82, 0x15, 0x75, 0x50, + 0xa4, 0x72, 0xeb, 0x54, 0x27, 0x1f, 0x8a, 0xe4, + 0x7d, 0xe9, 0x66, 0xc5, 0xf1, 0x53, 0xa4, 0xd1, + 0x0c, 0xeb, 0xb8, 0xf8, 0xbc, 0xd4, 0xe2, 0xe7, + 0xe1, 0xf8, 0x4b, 0xcb, 0xa9, 0xa1, 0xaf, 0x15, + 0x83, 0xcb, 0x72, 0xd0, 0x33, 0x79, 0x00, 0x2d, + 0x9f, 0xd7, 0xf1, 0x2e, 0x1e, 0x10, 0xe4, 0x45, + 0xc0, 0x75, 0x3a, 0x39, 0xea, 0x68, 0xf7, 0x5d, + 0x1b, 0x73, 0x8f, 0xe9, 0x8e, 0x0f, 0x72, 0x47, + 0xae, 0x35, 0x0a, 0x31, 0x7a, 0x14, 0x4d, 0x4a, + 0x6f, 0x47, 0xf7, 0x7e, 0x91, 0x6e, 0x74, 0x8b, + 0x26, 0x47, 0xf9, 0xc3, 0xf9, 0xde, 0x70, 0xf5, + 0x61, 0xab, 0xa9, 0x27, 0x9f, 0x82, 0xe4, 0x9c, + 0x89, 0x91, 0x3f, 0x2e, 0x6a, 0xfd, 0xb5, 0x49, + 0xe9, 0xfd, 0x59, 0x14, 0x36, 0x49, 0x40, 0x6d, + 0x32, 0xd8, 0x85, 0x42, 0xf3, 0xa5, 0xdf, 0x0c, + 0xa8, 0x27, 0xd7, 0x54, 0xe2, 0x63, 0x2f, 0xf2, + 0x7e, 0x8b, 0x8b, 0xe7, 0xf1, 0x9a, 0x95, 0x35, + 0x43, 0xdc, 0x3a, 0xe4, 0xb6, 0xf4, 0xd0, 0xdf, + 0x9c, 0xcb, 0x94, 0xf3, 0x21, 0xa0, 0x77, 0x50, + 0xe2, 0xc6, 0xc4, 0xc6, 0x5f, 0x09, 0x64, 0x5b, + 0x92, 0x90, 0xd8, 0xe1, 0xd1, 0xed, 0x4b, 0x42, + 0xd7, 0x37, 0xaf, 0x65, 0x3d, 0x11, 0x39, 0xb6, + 0x24, 0x8a, 0x60, 0xae, 0xd6, 0x1e, 0xbf, 0x0e, + 0x0d, 0xd7, 0xdc, 0x96, 0x0e, 0x65, 0x75, 0x4e, + 0x29, 0x06, 0x9d, 0xa4, 0x51, 0x3a, 0x10, 0x63, + 0x8f, 0x17, 0x07, 0xd5, 0x8e, 0x3c, 0xf4, 0x28, + 0x00, 0x5a, 0x5b, 0x05, 0x19, 0xd8, 0xc0, 0x6c, + 0xe5, 0x15, 0xe4, 0x9c, 0x9d, 0x71, 0x9d, 0x5e, + 0x94, 0x29, 0x1a, 0xa7, 0x80, 0xfa, 0x0e, 0x33, + 0x03, 0xdd, 0xb7, 0x3e, 0x9a, 0xa9, 0x26, 0x18, + 0x37, 0xa9, 0x64, 0x08, 0x4d, 0x94, 0x5a, 0x88, + 0xca, 0x35, 0xce, 0x81, 0x02, 0xe3, 0x1f, 0x1b, + 0x89, 0x1a, 0x77, 0x85, 0xe3, 0x41, 0x6d, 0x32, + 0x42, 0x19, 0x23, 0x7d, 0xc8, 0x73, 0xee, 0x25, + 0x85, 0x0d, 0xf8, 0x31, 0x25, 0x79, 0x1b, 0x6f, + 0x79, 0x25, 0xd2, 0xd8, 0xd4, 0x23, 0xfd, 0xf7, + 0x82, 0x36, 0x6a, 0x0c, 0x46, 0x22, 0x15, 0xe9, + 0xff, 0x72, 0x41, 0x91, 0x91, 0x7d, 0x3a, 0xb7, + 0xdd, 0x65, 0x99, 0x70, 0xf6, 0x8d, 0x84, 0xf8, + 0x67, 0x15, 0x20, 0x11, 0xd6, 0xb2, 0x55, 0x7b, + 0xdb, 0x87, 0xee, 0xef, 0x55, 0x89, 0x2a, 0x59, + 0x2b, 0x07, 0x8f, 0x43, 0x8a, 0x59, 0x3c, 0x01, + 0x8b, 0x65, 0x54, 0xa1, 0x66, 0xd5, 0x38, 0xbd, + 0xc6, 0x30, 0xa9, 0xcc, 0x49, 0xb6, 0xa8, 0x1b, + 0xb8, 0xc0, 0x0e, 0xe3, 0x45, 0x28, 0xe2, 0xff, + 0x41, 0x9f, 0x7e, 0x7c, 0xd1, 0xae, 0x9e, 0x25, + 0x3f, 0x4c, 0x7c, 0x7c, 0xf4, 0xa8, 0x26, 0x4d, + 0x5c, 0xfd, 0x4b, 0x27, 0x18, 0xf9, 0x61, 0x76, + 0x48, 0xba, 0x0c, 0x6b, 0xa9, 0x4d, 0xfc, 0xf5, + 0x3b, 0x35, 0x7e, 0x2f, 0x4a, 0xa9, 0xc2, 0x9a, + 0xae, 0xab, 0x86, 0x09, 0x89, 0xc9, 0xc2, 0x40, + 0x39, 0x2c, 0x81, 0xb3, 0xb8, 0x17, 0x67, 0xc2, + 0x0d, 0x32, 
0x4a, 0x3a, 0x67, 0x81, 0xd7, 0x1a, + 0x34, 0x52, 0xc5, 0xdb, 0x0a, 0xf5, 0x63, 0x39, + 0xea, 0x1f, 0xe1, 0x7c, 0xa1, 0x9e, 0xc1, 0x35, + 0xe3, 0xb1, 0x18, 0x45, 0x67, 0xf9, 0x22, 0x38, + 0x95, 0xd9, 0x34, 0x34, 0x86, 0xc6, 0x41, 0x94, + 0x15, 0xf9, 0x5b, 0x41, 0xa6, 0x87, 0x8b, 0xf8, + 0xd5, 0xe1, 0x1b, 0xe2, 0x5b, 0xf3, 0x86, 0x10, + 0xff, 0xe6, 0xae, 0x69, 0x76, 0xbc, 0x0d, 0xb4, + 0x09, 0x90, 0x0c, 0xa2, 0x65, 0x0c, 0xad, 0x74, + 0xf5, 0xd7, 0xff, 0xda, 0xc1, 0xce, 0x85, 0xbe, + 0x00, 0xa7, 0xff, 0x4d, 0x2f, 0x65, 0xd3, 0x8c, + 0x86, 0x2d, 0x05, 0xe8, 0xed, 0x3e, 0x6b, 0x8b, + 0x0f, 0x3d, 0x83, 0x8c, 0xf1, 0x1d, 0x5b, 0x96, + 0x2e, 0xb1, 0x9c, 0xc2, 0x98, 0xe1, 0x70, 0xb9, + 0xba, 0x5c, 0x8a, 0x43, 0xd6, 0x34, 0xa7, 0x2d, + 0xc9, 0x92, 0xae, 0xf2, 0xa5, 0x7b, 0x05, 0x49, + 0xa7, 0x33, 0x34, 0x86, 0xca, 0xe4, 0x96, 0x23, + 0x76, 0x5b, 0xf2, 0xc6, 0xf1, 0x51, 0x28, 0x42, + 0x7b, 0xcc, 0x76, 0x8f, 0xfa, 0xa2, 0xad, 0x31, + 0xd4, 0xd6, 0x7a, 0x6d, 0x25, 0x25, 0x54, 0xe4, + 0x3f, 0x50, 0x59, 0xe1, 0x5c, 0x05, 0xb7, 0x27, + 0x48, 0xbf, 0x07, 0xec, 0x1b, 0x13, 0xbe, 0x2b, + 0xa1, 0x57, 0x2b, 0xd5, 0xab, 0xd7, 0xd0, 0x4c, + 0x1e, 0xcb, 0x71, 0x9b, 0xc5, 0x90, 0x85, 0xd3, + 0xde, 0x59, 0xec, 0x71, 0xeb, 0x89, 0xbb, 0xd0, + 0x09, 0x50, 0xe1, 0x16, 0x3f, 0xfd, 0x1c, 0x34, + 0xc3, 0x1c, 0xa1, 0x10, 0x77, 0x53, 0x98, 0xef, + 0xf2, 0xfd, 0xa5, 0x01, 0x59, 0xc2, 0x9b, 0x26, + 0xc7, 0x42, 0xd9, 0x49, 0xda, 0x58, 0x2b, 0x6e, + 0x9f, 0x53, 0x19, 0x76, 0x7e, 0xd9, 0xc9, 0x0e, + 0x68, 0xc8, 0x7f, 0x51, 0x22, 0x42, 0xef, 0x49, + 0xa4, 0x55, 0xb6, 0x36, 0xac, 0x09, 0xc7, 0x31, + 0x88, 0x15, 0x4b, 0x2e, 0x8f, 0x3a, 0x08, 0xf7, + 0xd8, 0xf7, 0xa8, 0xc5, 0xa9, 0x33, 0xa6, 0x45, + 0xe4, 0xc4, 0x94, 0x76, 0xf3, 0x0d, 0x8f, 0x7e, + 0xc8, 0xf6, 0xbc, 0x23, 0x0a, 0xb6, 0x4c, 0xd3, + 0x6a, 0xcd, 0x36, 0xc2, 0x90, 0x5c, 0x5c, 0x3c, + 0x65, 0x7b, 0xc2, 0xd6, 0xcc, 0xe6, 0x0d, 0x87, + 0x73, 0x2e, 0x71, 0x79, 0x16, 0x06, 0x63, 0x28, + 0x09, 0x15, 0xd8, 0x89, 0x38, 0x38, 0x3d, 0xb5, + 0x42, 0x1c, 0x08, 0x24, 0xf7, 0x2a, 0xd2, 0x9d, + 0xc8, 0xca, 0xef, 0xf9, 0x27, 0xd8, 0x07, 0x86, + 0xf7, 0x43, 0x0b, 0x55, 0x15, 0x3f, 0x9f, 0x83, + 0xef, 0xdc, 0x49, 0x9d, 0x2a, 0xc1, 0x54, 0x62, + 0xbd, 0x9b, 0x66, 0x55, 0x9f, 0xb7, 0x12, 0xf3, + 0x1b, 0x4d, 0x9d, 0x2a, 0x5c, 0xed, 0x87, 0x75, + 0x87, 0x26, 0xec, 0x61, 0x2c, 0xb4, 0x0f, 0x89, + 0xb0, 0xfb, 0x2e, 0x68, 0x5d, 0x15, 0xc7, 0x8d, + 0x2e, 0xc0, 0xd9, 0xec, 0xaf, 0x4f, 0xd2, 0x25, + 0x29, 0xe8, 0xd2, 0x26, 0x2b, 0x67, 0xe9, 0xfc, + 0x2b, 0xa8, 0x67, 0x96, 0x12, 0x1f, 0x5b, 0x96, + 0xc6, 0x14, 0x53, 0xaf, 0x44, 0xea, 0xd6, 0xe2, + 0x94, 0x98, 0xe4, 0x12, 0x93, 0x4c, 0x92, 0xe0, + 0x18, 0xa5, 0x8d, 0x2d, 0xe4, 0x71, 0x3c, 0x47, + 0x4c, 0xf7, 0xe6, 0x47, 0x9e, 0xc0, 0x68, 0xdf, + 0xd4, 0xf5, 0x5a, 0x74, 0xb1, 0x2b, 0x29, 0x03, + 0x19, 0x07, 0xaf, 0x90, 0x62, 0x5c, 0x68, 0x98, + 0x48, 0x16, 0x11, 0x02, 0x9d, 0xee, 0xb4, 0x9b, + 0xe5, 0x42, 0x7f, 0x08, 0xfd, 0x16, 0x32, 0x0b, + 0xd0, 0xb3, 0xfa, 0x2b, 0xb7, 0x99, 0xf9, 0x29, + 0xcd, 0x20, 0x45, 0x9f, 0xb3, 0x1a, 0x5d, 0xa2, + 0xaf, 0x4d, 0xe0, 0xbd, 0x42, 0x0d, 0xbc, 0x74, + 0x99, 0x9c, 0x8e, 0x53, 0x1a, 0xb4, 0x3e, 0xbd, + 0xa2, 0x9a, 0x2d, 0xf7, 0xf8, 0x39, 0x0f, 0x67, + 0x63, 0xfc, 0x6b, 0xc0, 0xaf, 0xb3, 0x4b, 0x4f, + 0x55, 0xc4, 0xcf, 0xa7, 0xc8, 0x04, 0x11, 0x3e, + 0x14, 0x32, 0xbb, 0x1b, 0x38, 0x77, 0xd6, 0x7f, + 0x54, 0x4c, 0xdf, 0x75, 0xf3, 0x07, 0x2d, 0x33, + 0x9b, 0xa8, 0x20, 0xe1, 0x7b, 0x12, 0xb5, 0xf3, + 0xef, 0x2f, 0xce, 0x72, 0xe5, 0x24, 0x60, 0xc1, + 0x30, 0xe2, 0xab, 0xa1, 0x8e, 0x11, 0x09, 0xa8, + 0x21, 0x33, 
0x44, 0xfe, 0x7f, 0x35, 0x32, 0x93, + 0x39, 0xa7, 0xad, 0x8b, 0x79, 0x06, 0xb2, 0xcb, + 0x4e, 0xa9, 0x5f, 0xc7, 0xba, 0x74, 0x29, 0xec, + 0x93, 0xa0, 0x4e, 0x54, 0x93, 0xc0, 0xbc, 0x55, + 0x64, 0xf0, 0x48, 0xe5, 0x57, 0x99, 0xee, 0x75, + 0xd6, 0x79, 0x0f, 0x66, 0xb7, 0xc6, 0x57, 0x76, + 0xf7, 0xb7, 0xf3, 0x9c, 0xc5, 0x60, 0xe8, 0x7f, + 0x83, 0x76, 0xd6, 0x0e, 0xaa, 0xe6, 0x90, 0x39, + 0x1d, 0xa6, 0x32, 0x6a, 0x34, 0xe3, 0x55, 0xf8, + 0x58, 0xa0, 0x58, 0x7d, 0x33, 0xe0, 0x22, 0x39, + 0x44, 0x64, 0x87, 0x86, 0x5a, 0x2f, 0xa7, 0x7e, + 0x0f, 0x38, 0xea, 0xb0, 0x30, 0xcc, 0x61, 0xa5, + 0x6a, 0x32, 0xae, 0x1e, 0xf7, 0xe9, 0xd0, 0xa9, + 0x0c, 0x32, 0x4b, 0xb5, 0x49, 0x28, 0xab, 0x85, + 0x2f, 0x8e, 0x01, 0x36, 0x38, 0x52, 0xd0, 0xba, + 0xd6, 0x02, 0x78, 0xf8, 0x0e, 0x3e, 0x9c, 0x8b, + 0x6b, 0x45, 0x99, 0x3f, 0x5c, 0xfe, 0x58, 0xf1, + 0x5c, 0x94, 0x04, 0xe1, 0xf5, 0x18, 0x6d, 0x51, + 0xb2, 0x5d, 0x18, 0x20, 0xb6, 0xc2, 0x9a, 0x42, + 0x1d, 0xb3, 0xab, 0x3c, 0xb6, 0x3a, 0x13, 0x03, + 0xb2, 0x46, 0x82, 0x4f, 0xfc, 0x64, 0xbc, 0x4f, + 0xca, 0xfa, 0x9c, 0xc0, 0xd5, 0xa7, 0xbd, 0x11, + 0xb7, 0xe4, 0x5a, 0xf6, 0x6f, 0x4d, 0x4d, 0x54, + 0xea, 0xa4, 0x98, 0x66, 0xd4, 0x22, 0x3b, 0xd3, + 0x8f, 0x34, 0x47, 0xd9, 0x7c, 0xf4, 0x72, 0x3b, + 0x4d, 0x02, 0x77, 0xf6, 0xd6, 0xdd, 0x08, 0x0a, + 0x81, 0xe1, 0x86, 0x89, 0x3e, 0x56, 0x10, 0x3c, + 0xba, 0xd7, 0x81, 0x8c, 0x08, 0xbc, 0x8b, 0xe2, + 0x53, 0xec, 0xa7, 0x89, 0xee, 0xc8, 0x56, 0xb5, + 0x36, 0x2c, 0xb2, 0x03, 0xba, 0x99, 0xdd, 0x7c, + 0x48, 0xa0, 0xb0, 0xbc, 0x91, 0x33, 0xe9, 0xa8, + 0xcb, 0xcd, 0xcf, 0x59, 0x5f, 0x1f, 0x15, 0xe2, + 0x56, 0xf5, 0x4e, 0x01, 0x35, 0x27, 0x45, 0x77, + 0x47, 0xc8, 0xbc, 0xcb, 0x7e, 0x39, 0xc1, 0x97, + 0x28, 0xd3, 0x84, 0xfc, 0x2c, 0x3e, 0xc8, 0xad, + 0x9c, 0xf8, 0x8a, 0x61, 0x9c, 0x28, 0xaa, 0xc5, + 0x99, 0x20, 0x43, 0x85, 0x9d, 0xa5, 0xe2, 0x8b, + 0xb8, 0xae, 0xeb, 0xd0, 0x32, 0x0d, 0x52, 0x78, + 0x09, 0x56, 0x3f, 0xc7, 0xd8, 0x7e, 0x26, 0xfc, + 0x37, 0xfb, 0x6f, 0x04, 0xfc, 0xfa, 0x92, 0x10, + 0xac, 0xf8, 0x3e, 0x21, 0xdc, 0x8c, 0x21, 0x16, + 0x7d, 0x67, 0x6e, 0xf6, 0xcd, 0xda, 0xb6, 0x98, + 0x23, 0xab, 0x23, 0x3c, 0xb2, 0x10, 0xa0, 0x53, + 0x5a, 0x56, 0x9f, 0xc5, 0xd0, 0xff, 0xbb, 0xe4, + 0x98, 0x3c, 0x69, 0x1e, 0xdb, 0x38, 0x8f, 0x7e, + 0x0f, 0xd2, 0x98, 0x88, 0x81, 0x8b, 0x45, 0x67, + 0xea, 0x33, 0xf1, 0xeb, 0xe9, 0x97, 0x55, 0x2e, + 0xd9, 0xaa, 0xeb, 0x5a, 0xec, 0xda, 0xe1, 0x68, + 0xa8, 0x9d, 0x3c, 0x84, 0x7c, 0x05, 0x3d, 0x62, + 0x87, 0x8f, 0x03, 0x21, 0x28, 0x95, 0x0c, 0x89, + 0x25, 0x22, 0x4a, 0xb0, 0x93, 0xa9, 0x50, 0xa2, + 0x2f, 0x57, 0x6e, 0x18, 0x42, 0x19, 0x54, 0x0c, + 0x55, 0x67, 0xc6, 0x11, 0x49, 0xf4, 0x5c, 0xd2, + 0xe9, 0x3d, 0xdd, 0x8b, 0x48, 0x71, 0x21, 0x00, + 0xc3, 0x9a, 0x6c, 0x85, 0x74, 0x28, 0x83, 0x4a, + 0x1b, 0x31, 0x05, 0xe1, 0x06, 0x92, 0xe7, 0xda, + 0x85, 0x73, 0x78, 0x45, 0x20, 0x7f, 0xae, 0x13, + 0x7c, 0x33, 0x06, 0x22, 0xf4, 0x83, 0xf9, 0x35, + 0x3f, 0x6c, 0x71, 0xa8, 0x4e, 0x48, 0xbe, 0x9b, + 0xce, 0x8a, 0xba, 0xda, 0xbe, 0x28, 0x08, 0xf7, + 0xe2, 0x14, 0x8c, 0x71, 0xea, 0x72, 0xf9, 0x33, + 0xf2, 0x88, 0x3f, 0xd7, 0xbb, 0x69, 0x6c, 0x29, + 0x19, 0xdc, 0x84, 0xce, 0x1f, 0x12, 0x4f, 0xc8, + 0xaf, 0xa5, 0x04, 0xba, 0x5a, 0xab, 0xb0, 0xd9, + 0x14, 0x1f, 0x6c, 0x68, 0x98, 0x39, 0x89, 0x7a, + 0xd9, 0xd8, 0x2f, 0xdf, 0xa8, 0x47, 0x4a, 0x25, + 0xe2, 0xfb, 0x33, 0xf4, 0x59, 0x78, 0xe1, 0x68, + 0x85, 0xcf, 0xfe, 0x59, 0x20, 0xd4, 0x05, 0x1d, + 0x80, 0x99, 0xae, 0xbc, 0xca, 0xae, 0x0f, 0x2f, + 0x65, 0x43, 0x34, 0x8e, 0x7e, 0xac, 0xd3, 0x93, + 0x2f, 0xac, 0x6d, 0x14, 0x3d, 0x02, 0x07, 0x70, + 0x9d, 0xa4, 
0xf3, 0x1b, 0x5c, 0x36, 0xfc, 0x01, + 0x73, 0x34, 0x85, 0x0c, 0x6c, 0xd6, 0xf1, 0xbd, + 0x3f, 0xdf, 0xee, 0xf5, 0xd9, 0xba, 0x56, 0xef, + 0xf4, 0x9b, 0x6b, 0xee, 0x9f, 0x5a, 0x78, 0x6d, + 0x32, 0x19, 0xf4, 0xf7, 0xf8, 0x4c, 0x69, 0x0b, + 0x4b, 0xbc, 0xbb, 0xb7, 0xf2, 0x85, 0xaf, 0x70, + 0x75, 0x24, 0x6c, 0x54, 0xa7, 0x0e, 0x4d, 0x1d, + 0x01, 0xbf, 0x08, 0xac, 0xcf, 0x7f, 0x2c, 0xe3, + 0x14, 0x89, 0x5e, 0x70, 0x5a, 0x99, 0x92, 0xcd, + 0x01, 0x84, 0xc8, 0xd2, 0xab, 0xe5, 0x4f, 0x58, + 0xe7, 0x0f, 0x2f, 0x0e, 0xff, 0x68, 0xea, 0xfd, + 0x15, 0xb3, 0x17, 0xe6, 0xb0, 0xe7, 0x85, 0xd8, + 0x23, 0x2e, 0x05, 0xc7, 0xc9, 0xc4, 0x46, 0x1f, + 0xe1, 0x9e, 0x49, 0x20, 0x23, 0x24, 0x4d, 0x7e, + 0x29, 0x65, 0xff, 0xf4, 0xb6, 0xfd, 0x1a, 0x85, + 0xc4, 0x16, 0xec, 0xfc, 0xea, 0x7b, 0xd6, 0x2c, + 0x43, 0xf8, 0xb7, 0xbf, 0x79, 0xc0, 0x85, 0xcd, + 0xef, 0xe1, 0x98, 0xd3, 0xa5, 0xf7, 0x90, 0x8c, + 0xe9, 0x7f, 0x80, 0x6b, 0xd2, 0xac, 0x4c, 0x30, + 0xa7, 0xc6, 0x61, 0x6c, 0xd2, 0xf9, 0x2c, 0xff, + 0x30, 0xbc, 0x22, 0x81, 0x7d, 0x93, 0x12, 0xe4, + 0x0a, 0xcd, 0xaf, 0xdd, 0xe8, 0xab, 0x0a, 0x1e, + 0x13, 0xa4, 0x27, 0xc3, 0x5f, 0xf7, 0x4b, 0xbb, + 0x37, 0x09, 0x4b, 0x91, 0x6f, 0x92, 0x4f, 0xaf, + 0x52, 0xee, 0xdf, 0xef, 0x09, 0x6f, 0xf7, 0x5c, + 0x6e, 0x12, 0x17, 0x72, 0x63, 0x57, 0xc7, 0xba, + 0x3b, 0x6b, 0x38, 0x32, 0x73, 0x1b, 0x9c, 0x80, + 0xc1, 0x7a, 0xc6, 0xcf, 0xcd, 0x35, 0xc0, 0x6b, + 0x31, 0x1a, 0x6b, 0xe9, 0xd8, 0x2c, 0x29, 0x3f, + 0x96, 0xfb, 0xb6, 0xcd, 0x13, 0x91, 0x3b, 0xc2, + 0xd2, 0xa3, 0x31, 0x8d, 0xa4, 0xcd, 0x57, 0xcd, + 0x13, 0x3d, 0x64, 0xfd, 0x06, 0xce, 0xe6, 0xdc, + 0x0c, 0x24, 0x43, 0x31, 0x40, 0x57, 0xf1, 0x72, + 0x17, 0xe3, 0x3a, 0x63, 0x6d, 0x35, 0xcf, 0x5d, + 0x97, 0x40, 0x59, 0xdd, 0xf7, 0x3c, 0x02, 0xf7, + 0x1c, 0x7e, 0x05, 0xbb, 0xa9, 0x0d, 0x01, 0xb1, + 0x8e, 0xc0, 0x30, 0xa9, 0x53, 0x24, 0xc9, 0x89, + 0x84, 0x6d, 0xaa, 0xd0, 0xcd, 0x91, 0xc2, 0x4d, + 0x91, 0xb0, 0x89, 0xe2, 0xbf, 0x83, 0x44, 0xaa, + 0x28, 0x72, 0x23, 0xa0, 0xc2, 0xad, 0xad, 0x1c, + 0xfc, 0x3f, 0x09, 0x7a, 0x0b, 0xdc, 0xc5, 0x1b, + 0x87, 0x13, 0xc6, 0x5b, 0x59, 0x8d, 0xf2, 0xc8, + 0xaf, 0xdf, 0x11, 0x95, + }, + .rlen = 4100, + }, }; /* -- cgit v1.1 From 91755a921c4af51c355bcb74a98b717d5c1818b6 Mon Sep 17 00:00:00 2001 From: Zoltan Sogor Date: Fri, 7 Dec 2007 16:48:11 +0800 Subject: [CRYPTO] tcrypt: Add common compression tester function Add common compression tester function Modify deflate test case to use the common compressor test function Signed-off-by: Zoltan Sogor Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index a6d4160..c8d3e60 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1019,7 +1019,8 @@ out: crypto_free_hash(tfm); } -static void test_deflate(void) +static void test_comp(char *algo, struct comp_testvec *ctemplate, + struct comp_testvec *dtemplate, int ctcount, int dtcount) { unsigned int i; char result[COMP_BUF_SIZE]; @@ -1027,25 +1028,26 @@ static void test_deflate(void) struct comp_testvec *tv; unsigned int tsize; - printk("\ntesting deflate compression\n"); + printk("\ntesting %s compression\n", algo); - tsize = sizeof (deflate_comp_tv_template); + tsize = sizeof(struct comp_testvec); + tsize *= ctcount; if (tsize > TVMEMSIZE) { printk("template (%u) too big for tvmem (%u)\n", tsize, TVMEMSIZE); return; } - memcpy(tvmem, deflate_comp_tv_template, tsize); + memcpy(tvmem, ctemplate, tsize); tv = (void *)tvmem; - tfm = crypto_alloc_comp("deflate", 0, 
CRYPTO_ALG_ASYNC); + tfm = crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { - printk("failed to load transform for deflate\n"); + printk("failed to load transform for %s\n", algo); return; } - for (i = 0; i < DEFLATE_COMP_TEST_VECTORS; i++) { + for (i = 0; i < ctcount; i++) { int ilen, ret, dlen = COMP_BUF_SIZE; printk("test %u:\n", i + 1); @@ -1064,19 +1066,20 @@ static void test_deflate(void) ilen, dlen); } - printk("\ntesting deflate decompression\n"); + printk("\ntesting %s decompression\n", algo); - tsize = sizeof (deflate_decomp_tv_template); + tsize = sizeof(struct comp_testvec); + tsize *= dtcount; if (tsize > TVMEMSIZE) { printk("template (%u) too big for tvmem (%u)\n", tsize, TVMEMSIZE); goto out; } - memcpy(tvmem, deflate_decomp_tv_template, tsize); + memcpy(tvmem, dtemplate, tsize); tv = (void *)tvmem; - for (i = 0; i < DEFLATE_DECOMP_TEST_VECTORS; i++) { + for (i = 0; i < dtcount; i++) { int ilen, ret, dlen = COMP_BUF_SIZE; printk("test %u:\n", i + 1); @@ -1286,7 +1289,9 @@ static void do_test(void) test_hash("tgr192", tgr192_tv_template, TGR192_TEST_VECTORS); test_hash("tgr160", tgr160_tv_template, TGR160_TEST_VECTORS); test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS); - test_deflate(); + test_comp("deflate", deflate_comp_tv_template, + deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS, + DEFLATE_DECOMP_TEST_VECTORS); test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS); test_hash("hmac(md5)", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); @@ -1402,7 +1407,9 @@ static void do_test(void) break; case 13: - test_deflate(); + test_comp("deflate", deflate_comp_tv_template, + deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS, + DEFLATE_DECOMP_TEST_VECTORS); break; case 14: -- cgit v1.1 From 0b77abb3b2d0c2eee1da79a3f3bd4312a0edb156 Mon Sep 17 00:00:00 2001 From: Zoltan Sogor Date: Fri, 7 Dec 2007 16:53:23 +0800 Subject: [CRYPTO] lzo: Add LZO compression algorithm support Add LZO compression algorithm support Signed-off-by: Zoltan Sogor Signed-off-by: Herbert Xu --- crypto/Kconfig | 8 +++++ crypto/Makefile | 1 + crypto/lzo.c | 106 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/tcrypt.c | 9 ++++- crypto/tcrypt.h | 82 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 205 insertions(+), 1 deletion(-) create mode 100644 crypto/lzo.c diff --git a/crypto/Kconfig b/crypto/Kconfig index 40ae92c..4fd14e4 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -536,6 +536,14 @@ config CRYPTO_AUTHENC Authenc: Combined mode wrapper for IPsec. This is required for IPSec. +config CRYPTO_LZO + tristate "LZO compression algorithm" + select CRYPTO_ALGAPI + select LZO_COMPRESS + select LZO_DECOMPRESS + help + This is the LZO algorithm. + source "drivers/crypto/Kconfig" endif # if CRYPTO diff --git a/crypto/Makefile b/crypto/Makefile index 957343c..83532ac 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -55,6 +55,7 @@ obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o +obj-$(CONFIG_CRYPTO_LZO) += lzo.o obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o diff --git a/crypto/lzo.c b/crypto/lzo.c new file mode 100644 index 0000000..48c3288 --- /dev/null +++ b/crypto/lzo.c @@ -0,0 +1,106 @@ +/* + * Cryptographic API. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/crypto.h> +#include <linux/vmalloc.h> +#include <linux/lzo.h> + +struct lzo_ctx { + void *lzo_comp_mem; +}; + +static int lzo_init(struct crypto_tfm *tfm) +{ + struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS); + if (!ctx->lzo_comp_mem) + return -ENOMEM; + + return 0; +} + +static void lzo_exit(struct crypto_tfm *tfm) +{ + struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); + + vfree(ctx->lzo_comp_mem); +} + +static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) +{ + struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); + size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ + int err; + + err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem); + + if (err != LZO_E_OK) + return -EINVAL; + + *dlen = tmp_len; + return 0; +} + +static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) +{ + int err; + size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ + + err = lzo1x_decompress_safe(src, slen, dst, &tmp_len); + + if (err != LZO_E_OK) + return -EINVAL; + + *dlen = tmp_len; + return 0; + +} + +static struct crypto_alg alg = { + .cra_name = "lzo", + .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, + .cra_ctxsize = sizeof(struct lzo_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_init = lzo_init, + .cra_exit = lzo_exit, + .cra_u = { .compress = { + .coa_compress = lzo_compress, + .coa_decompress = lzo_decompress } } +}; + +static int __init init(void) +{ + return crypto_register_alg(&alg); +} + +static void __exit fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(init); +module_exit(fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("LZO Compression Algorithm"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index c8d3e60..943a514 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -84,7 +84,7 @@ static char *check[] = { "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", - "camellia", "seed", "salsa20", NULL + "camellia", "seed", "salsa20", "lzo", NULL }; static void hexdump(unsigned char *buf, unsigned int len) @@ -1292,6 +1292,8 @@ static void do_test(void) test_comp("deflate", deflate_comp_tv_template, deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS, DEFLATE_DECOMP_TEST_VECTORS); + test_comp("lzo", lzo_comp_tv_template, lzo_decomp_tv_template, + LZO_COMP_TEST_VECTORS, LZO_DECOMP_TEST_VECTORS); test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS); test_hash("hmac(md5)", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); @@ -1550,6 +1552,11 @@ static void do_test(void) AES_GCM_DEC_TEST_VECTORS); break; + case 36: + test_comp("lzo", lzo_comp_tv_template, lzo_decomp_tv_template, + LZO_COMP_TEST_VECTORS, LZO_DECOMP_TEST_VECTORS); + break; + case 100: test_hash("hmac(md5)", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index
d3c380f..175f26a 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -7461,6 +7461,88 @@ static struct comp_testvec deflate_decomp_tv_template[] = { }; /* + * LZO test vectors (null-terminated strings). + */ +#define LZO_COMP_TEST_VECTORS 2 +#define LZO_DECOMP_TEST_VECTORS 2 + +static struct comp_testvec lzo_comp_tv_template[] = { + { + .inlen = 70, + .outlen = 46, + .input = "Join us now and share the software " + "Join us now and share the software ", + .output = { 0x00, 0x0d, 0x4a, 0x6f, 0x69, 0x6e, 0x20, 0x75, + 0x73, 0x20, 0x6e, 0x6f, 0x77, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x73, 0x68, 0x61, 0x72, 0x65, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x73, 0x6f, 0x66, 0x74, + 0x77, 0x70, 0x01, 0x01, 0x4a, 0x6f, 0x69, 0x6e, + 0x3d, 0x88, 0x00, 0x11, 0x00, 0x00 }, + }, { + .inlen = 159, + .outlen = 133, + .input = "This document describes a compression method based on the LZO " + "compression algorithm. This document defines the application of " + "the LZO algorithm used in UBIFS.", + .output = { 0x00, 0x2b, 0x54, 0x68, 0x69, 0x73, 0x20, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x20, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6d, 0x70, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x20, 0x62, + 0x61, 0x73, 0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x4c, 0x5a, 0x4f, 0x2b, + 0x8c, 0x00, 0x0d, 0x61, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x2e, 0x20, 0x20, 0x54, + 0x68, 0x69, 0x73, 0x2a, 0x54, 0x01, 0x02, 0x66, + 0x69, 0x6e, 0x65, 0x73, 0x94, 0x06, 0x05, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x76, + 0x0a, 0x6f, 0x66, 0x88, 0x02, 0x60, 0x09, 0x27, + 0xf0, 0x00, 0x0c, 0x20, 0x75, 0x73, 0x65, 0x64, + 0x20, 0x69, 0x6e, 0x20, 0x55, 0x42, 0x49, 0x46, + 0x53, 0x2e, 0x11, 0x00, 0x00 }, + }, +}; + +static struct comp_testvec lzo_decomp_tv_template[] = { + { + .inlen = 133, + .outlen = 159, + .input = { 0x00, 0x2b, 0x54, 0x68, 0x69, 0x73, 0x20, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x20, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6d, 0x70, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x20, 0x62, + 0x61, 0x73, 0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x4c, 0x5a, 0x4f, 0x2b, + 0x8c, 0x00, 0x0d, 0x61, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x2e, 0x20, 0x20, 0x54, + 0x68, 0x69, 0x73, 0x2a, 0x54, 0x01, 0x02, 0x66, + 0x69, 0x6e, 0x65, 0x73, 0x94, 0x06, 0x05, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x76, + 0x0a, 0x6f, 0x66, 0x88, 0x02, 0x60, 0x09, 0x27, + 0xf0, 0x00, 0x0c, 0x20, 0x75, 0x73, 0x65, 0x64, + 0x20, 0x69, 0x6e, 0x20, 0x55, 0x42, 0x49, 0x46, + 0x53, 0x2e, 0x11, 0x00, 0x00 }, + .output = "This document describes a compression method based on the LZO " + "compression algorithm. 
This document defines the application of " + "the LZO algorithm used in UBIFS.", + }, { + .inlen = 46, + .outlen = 70, + .input = { 0x00, 0x0d, 0x4a, 0x6f, 0x69, 0x6e, 0x20, 0x75, + 0x73, 0x20, 0x6e, 0x6f, 0x77, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x73, 0x68, 0x61, 0x72, 0x65, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x73, 0x6f, 0x66, 0x74, + 0x77, 0x70, 0x01, 0x01, 0x4a, 0x6f, 0x69, 0x6e, + 0x3d, 0x88, 0x00, 0x11, 0x00, 0x00 }, + .output = "Join us now and share the software " + "Join us now and share the software ", + }, +}; + +/* * Michael MIC test vectors from IEEE 802.11i */ #define MICHAEL_MIC_TEST_VECTORS 6 -- cgit v1.1 From 5de8f1b562e87ae9d93a4e0897e54c18a5e82915 Mon Sep 17 00:00:00 2001 From: Tan Swee Heng Date: Fri, 7 Dec 2007 17:17:43 +0800 Subject: [CRYPTO] tcrypt: Added salsa20 speed test This patch adds a simple speed test for salsa20. Usage: modprobe tcrypt mode=206 Signed-of-by: Tan Swee Heng Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 5 +++++ crypto/tcrypt.h | 16 ++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 943a514..0cfb8eb 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1668,6 +1668,11 @@ static void do_test(void) camellia_speed_template); break; + case 206: + test_cipher_speed("salsa20", ENCRYPT, sec, NULL, 0, + salsa20_speed_template); + break; + case 300: /* fall through */ diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 175f26a..5b86509 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -7947,4 +7947,20 @@ static struct cipher_speed camellia_speed_template[] = { { .klen = 0, .blen = 0, } }; +static struct cipher_speed salsa20_speed_template[] = { + { .klen = 16, .blen = 16, }, + { .klen = 16, .blen = 64, }, + { .klen = 16, .blen = 256, }, + { .klen = 16, .blen = 1024, }, + { .klen = 16, .blen = 8192, }, + { .klen = 32, .blen = 16, }, + { .klen = 32, .blen = 64, }, + { .klen = 32, .blen = 256, }, + { .klen = 32, .blen = 1024, }, + { .klen = 32, .blen = 8192, }, + + /* End marker */ + { .klen = 0, .blen = 0, } +}; + #endif /* _CRYPTO_TCRYPT_H */ -- cgit v1.1 From fdc520aa693d462f4958339534a3b596f95795b7 Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Mon, 10 Dec 2007 15:48:17 +0800 Subject: [CRYPTO] geode: Use correct encrypt/decrypt function in fallback crypto_blkcipher_decrypt is wrong because it does not care about the IV. Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- drivers/crypto/geode-aes.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 68be7d08..581b003 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -190,7 +190,7 @@ static int fallback_blk_dec(struct blkcipher_desc *desc, tfm = desc->tfm; desc->tfm = op->fallback.blk; - ret = crypto_blkcipher_decrypt(desc, dst, src, nbytes); + ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); desc->tfm = tfm; return ret; @@ -206,7 +206,7 @@ static int fallback_blk_enc(struct blkcipher_desc *desc, tfm = desc->tfm; desc->tfm = op->fallback.blk; - ret = crypto_blkcipher_encrypt(desc, dst, src, nbytes); + ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); desc->tfm = tfm; return ret; -- cgit v1.1 From 2d74d405fc5ea78b20a4a2efd24201db424e07b1 Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Mon, 10 Dec 2007 15:49:41 +0800 Subject: [CRYPTO] s390-aes: Use correct encrypt/decrypt function in fallback crypto_blkcipher_decrypt is wrong because it does not care about the IV. 
Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- arch/s390/crypto/aes_s390.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 8524611..46c97058 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -234,13 +234,10 @@ static int fallback_blk_dec(struct blkcipher_desc *desc, struct crypto_blkcipher *tfm; struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); - memcpy(crypto_blkcipher_crt(sctx->fallback.blk)->iv, desc->info, - AES_BLOCK_SIZE); - tfm = desc->tfm; desc->tfm = sctx->fallback.blk; - ret = crypto_blkcipher_decrypt(desc, dst, src, nbytes); + ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); desc->tfm = tfm; return ret; @@ -254,13 +251,10 @@ static int fallback_blk_enc(struct blkcipher_desc *desc, struct crypto_blkcipher *tfm; struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); - memcpy(crypto_blkcipher_crt(sctx->fallback.blk)->iv, desc->info, - AES_BLOCK_SIZE); - tfm = desc->tfm; desc->tfm = sctx->fallback.blk; - ret = crypto_blkcipher_encrypt(desc, dst, src, nbytes); + ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); desc->tfm = tfm; return ret; -- cgit v1.1 From c2c61f513db395ddd8d67690bf3301ebe1e8155a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 10 Dec 2007 10:54:44 +0800 Subject: [CRYPTO] authenc: Fix typo in ivsize The ivsize should be fetched from ablkcipher, not blkcipher. Signed-off-by: Herbert Xu --- crypto/authenc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/authenc.c b/crypto/authenc.c index fbbc2b5..80d9d0b 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -333,7 +333,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask; inst->alg.cra_type = &crypto_aead_type; - inst->alg.cra_aead.ivsize = enc->cra_blkcipher.ivsize; + inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ? auth->cra_hash.digestsize : auth->cra_digest.dia_digestsize; -- cgit v1.1 From 12dc5e62b4f93f1d399fd81e35be3f9ea0027712 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 10 Dec 2007 10:55:21 +0800 Subject: [CRYPTO] authenc: Use RTA_OK to check length This patch changes setkey to use RTA_OK to check the validity of the setkey request. Signed-off-by: Herbert Xu --- crypto/authenc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/authenc.c b/crypto/authenc.c index 80d9d0b..aa442de 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -44,7 +44,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, struct crypto_authenc_key_param *param; int err = -EINVAL; - if (keylen < sizeof(*rta)) + if (!RTA_OK(rta, keylen)) goto badkey; if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) goto badkey; -- cgit v1.1 From 7c3d703fa81db42f9766325cebd6bfc1c5eac838 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 10 Dec 2007 16:15:41 +0800 Subject: [CRYPTO] authenc: Merge common hashing code This patch merges the common hashing code between encryption and decryption. 
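The shape of the refactor, reduced to a toy sketch (the names below are invented, not authenc.c code): the shared hashing helper reports failure through ERR_PTR() on its u8 * return value, which is what lets both the ICV-generation path and the verification path call the same helper and branch on IS_ERR():

#include <linux/err.h>
#include <linux/types.h>

/* toy stand-in for the shared helper: returns the digest buffer on
 * success, an ERR_PTR-encoded errno on failure */
static u8 *shared_hash_sketch(u8 *digest, int err)
{
	return err ? ERR_PTR(err) : digest;
}

/* caller pattern shared by the generate and verify paths */
static int hash_user_sketch(u8 *digest, int err)
{
	u8 *hash = shared_hash_sketch(digest, err);

	if (IS_ERR(hash))
		return PTR_ERR(hash);

	return 0;
}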
Signed-off-by: Herbert Xu --- crypto/authenc.c | 66 ++++++++++++++++++++++++-------------------------------- 1 file changed, 28 insertions(+), 38 deletions(-) diff --git a/crypto/authenc.c b/crypto/authenc.c index aa442de..394e733 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -87,17 +87,18 @@ badkey: goto out; } -static int crypto_authenc_hash(struct aead_request *req) +static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags, + struct scatterlist *cipher, + unsigned int cryptlen) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); struct crypto_hash *auth = ctx->auth; struct hash_desc desc = { .tfm = auth, + .flags = aead_request_flags(req) & flags, }; u8 *hash = aead_request_ctx(req); - struct scatterlist *dst = req->dst; - unsigned int cryptlen = req->cryptlen; int err; hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth), @@ -112,7 +113,7 @@ static int crypto_authenc_hash(struct aead_request *req) if (err) goto auth_unlock; - err = crypto_hash_update(&desc, dst, cryptlen); + err = crypto_hash_update(&desc, cipher, cryptlen); if (err) goto auth_unlock; @@ -121,7 +122,21 @@ auth_unlock: spin_unlock_bh(&ctx->auth_lock); if (err) - return err; + return ERR_PTR(err); + + return hash; +} + +static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags) +{ + struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct scatterlist *dst = req->dst; + unsigned int cryptlen = req->cryptlen; + u8 *hash; + + hash = crypto_authenc_hash(req, flags, dst, cryptlen); + if (IS_ERR(hash)) + return PTR_ERR(hash); scatterwalk_map_and_copy(hash, dst, cryptlen, crypto_aead_authsize(authenc), 1); @@ -132,7 +147,7 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req, int err) { if (!err) - err = crypto_authenc_hash(req->data); + err = crypto_authenc_genicv(req->data, 0); aead_request_complete(req->data, err); } @@ -154,50 +169,25 @@ static int crypto_authenc_encrypt(struct aead_request *req) if (err) return err; - return crypto_authenc_hash(req); + return crypto_authenc_genicv(req, CRYPTO_TFM_REQ_MAY_SLEEP); } static int crypto_authenc_verify(struct aead_request *req, unsigned int cryptlen) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); - struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); - struct crypto_hash *auth = ctx->auth; - struct hash_desc desc = { - .tfm = auth, - .flags = aead_request_flags(req), - }; - u8 *ohash = aead_request_ctx(req); + u8 *ohash; u8 *ihash; struct scatterlist *src = req->src; unsigned int authsize; - int err; - ohash = (u8 *)ALIGN((unsigned long)ohash + crypto_hash_alignmask(auth), - crypto_hash_alignmask(auth) + 1); - ihash = ohash + crypto_hash_digestsize(auth); - - spin_lock_bh(&ctx->auth_lock); - err = crypto_hash_init(&desc); - if (err) - goto auth_unlock; - - err = crypto_hash_update(&desc, req->assoc, req->assoclen); - if (err) - goto auth_unlock; - - err = crypto_hash_update(&desc, src, cryptlen); - if (err) - goto auth_unlock; - - err = crypto_hash_final(&desc, ohash); -auth_unlock: - spin_unlock_bh(&ctx->auth_lock); - - if (err) - return err; + ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, src, + cryptlen); + if (IS_ERR(ohash)) + return PTR_ERR(ohash); authsize = crypto_aead_authsize(authenc); + ihash = ohash + authsize; scatterwalk_map_and_copy(ihash, src, cryptlen, authsize, 0); return memcmp(ihash, ohash, authsize) ? 
-EBADMSG: 0; } -- cgit v1.1 From f17922bc75d6261dd6e0e2d687ff43b96e91e04a Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Wed, 12 Dec 2007 10:42:41 +0800 Subject: [CRYPTO] geode: Add __dev{init,exit} annotations This patch adds __dev{init,exit} annotations. Signed-off-by: Adrian Bunk Signed-off-by: Herbert Xu --- drivers/crypto/geode-aes.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 581b003..48011629 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -499,7 +499,7 @@ static struct crypto_alg geode_ecb_alg = { } }; -static void +static void __devexit geode_aes_remove(struct pci_dev *dev) { crypto_unregister_alg(&geode_alg); @@ -514,7 +514,7 @@ geode_aes_remove(struct pci_dev *dev) } -static int +static int __devinit geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) { int ret; -- cgit v1.1 From 5e553110f27ff77591ec7305c6216ad6949f7a95 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 14 Dec 2007 16:43:32 +0800 Subject: [CRYPTO] authenc: Select HASH in Kconfig i get here: ---- LD vmlinux SYSMAP System.map SYSMAP .tmp_System.map Building modules, stage 2. MODPOST 226 modules ERROR: "crypto_hash_type" [crypto/authenc.ko] undefined! make[1]: *** [__modpost] Error 1 make: *** [modules] Error 2 --- which fails because crypto_hash_type is declared in crypto/hash.c. You might wanna fix it like so: Signed-off-by: Borislav Petkov Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/crypto/Kconfig b/crypto/Kconfig index 4fd14e4..1eb4bcd 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -532,6 +532,7 @@ config CRYPTO_AUTHENC tristate "Authenc support" select CRYPTO_AEAD select CRYPTO_MANAGER + select CRYPTO_HASH help Authenc: Combined mode wrapper for IPsec. This is required for IPSec. -- cgit v1.1 From 551a09a7a954f720067f207657bbbd26a3fe156a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 1 Dec 2007 21:47:07 +1100 Subject: [CRYPTO] api: Sanitise mask when allocating ablkcipher/hash When allocating ablkcipher/hash objects, we use a mask that's wider than the usual type mask. This patch sanitises the mask supplied by the user so we don't end up using a narrower mask which may lead to unintended results. 
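The bit manipulation in isolation (a sketch assuming the flag layout of this era; sanitised_lookup_mask() is not a real kernel function): CRYPTO_ALG_TYPE_BLKCIPHER_MASK deliberately leaves the low type bits unset so that both the sync and async blkcipher types match. If the caller's mask still contains all of CRYPTO_ALG_TYPE_MASK, OR-ing alone would keep the exact-type match and the async implementations would never qualify:

static inline u32 sanitised_lookup_mask(u32 mask)
{
	mask &= ~CRYPTO_ALG_TYPE_MASK;		/* discard caller's type bits */
	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;	/* intended, wider match */
	return mask;
}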
Signed-off-by: Herbert Xu --- include/linux/crypto.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 48aa595..ef7642e 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -532,6 +532,7 @@ static inline struct crypto_ablkcipher *crypto_alloc_ablkcipher( const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; + mask &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; @@ -554,6 +555,7 @@ static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; + mask &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; @@ -1086,6 +1088,7 @@ static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; + mask &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_HASH; mask |= CRYPTO_ALG_TYPE_HASH_MASK; @@ -1105,6 +1108,7 @@ static inline void crypto_free_hash(struct crypto_hash *tfm) static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; + mask &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_HASH; mask |= CRYPTO_ALG_TYPE_HASH_MASK; -- cgit v1.1 From 68b6c7d6919be7c732fc6229c55e35d0166e9258 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 7 Dec 2007 20:18:17 +0800 Subject: [CRYPTO] api: Add crypto_attr_alg_name This patch adds a new helper crypto_attr_alg_name which is basically the first half of crypto_attr_alg. That is, it returns an algorithm name parameter as a string without looking it up. The caller can then look it up immediately or defer it until later. Signed-off-by: Herbert Xu --- crypto/algapi.c | 18 ++++++++++++++++-- include/crypto/algapi.h | 1 + 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index 08eca6d..e65cb50 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -472,7 +472,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type) } EXPORT_SYMBOL_GPL(crypto_check_attr_type); -struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask) +const char *crypto_attr_alg_name(struct rtattr *rta) { struct crypto_attr_alg *alga; @@ -486,7 +486,21 @@ struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask) alga = RTA_DATA(rta); alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0; - return crypto_alg_mod_lookup(alga->name, type, mask); + return alga->name; +} +EXPORT_SYMBOL_GPL(crypto_attr_alg_name); + +struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask) +{ + const char *name; + int err; + + name = crypto_attr_alg_name(rta); + err = PTR_ERR(name); + if (IS_ERR(name)) + return ERR_PTR(err); + + return crypto_alg_mod_lookup(name, type, mask); } EXPORT_SYMBOL_GPL(crypto_attr_alg); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 2cdb227..726a765 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -113,6 +113,7 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); int crypto_check_attr_type(struct rtattr **tb, u32 type); +const char *crypto_attr_alg_name(struct rtattr *rta); struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask); int crypto_attr_u32(struct rtattr *rta, u32 *num); struct crypto_instance *crypto_alloc_instance(const char *name, -- cgit v1.1 From 2589469d7bc69bdfad4e05d88a0d2748f92ef0f3 Mon 
Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 12 Dec 2007 19:16:38 +0800 Subject: [CRYPTO] gcm: Fix request context alignment This patch fixes the request context alignment so that it is actually aligned to the value required by the algorithm. Signed-off-by: Herbert Xu --- crypto/gcm.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/crypto/gcm.c b/crypto/gcm.c index 73565d6..0818317 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -43,6 +43,14 @@ struct crypto_gcm_req_priv_ctx { struct ablkcipher_request abreq; }; +static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( + struct aead_request *req) +{ + unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); + + return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); +} + static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags, struct gf128mul_4k *gf128) { @@ -224,7 +232,7 @@ static int crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); - struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); u32 flags = req->base.tfm->crt_flags; u8 *auth_tag = pctx->auth_tag; u8 *counter = pctx->counter; @@ -256,7 +264,7 @@ static int crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, static int crypto_gcm_hash(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); u8 *auth_tag = pctx->auth_tag; struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; @@ -281,7 +289,7 @@ static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) static int crypto_gcm_encrypt(struct aead_request *req) { - struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct ablkcipher_request *abreq = &pctx->abreq; int err = 0; @@ -307,7 +315,7 @@ static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err) static int crypto_gcm_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct crypto_gcm_req_priv_ctx *pctx = aead_request_ctx(req); + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct ablkcipher_request *abreq = &pctx->abreq; u8 *auth_tag = pctx->auth_tag; u8 *iauth_tag = pctx->iauth_tag; @@ -352,8 +360,7 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) ctx->ctr = ctr; ctx->gf128 = NULL; - align = max_t(unsigned long, crypto_ablkcipher_alignmask(ctr), - __alignof__(u32) - 1); + align = crypto_tfm_alg_alignmask(tfm); align &= ~(crypto_tfm_ctx_alignment() - 1); tfm->crt_aead.reqsize = align + sizeof(struct crypto_gcm_req_priv_ctx) + @@ -428,7 +435,7 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; inst->alg.cra_priority = ctr->cra_priority; inst->alg.cra_blocksize = 16; - inst->alg.cra_alignmask = __alignof__(u32) - 1; + inst->alg.cra_alignmask = ctr->cra_alignmask | (__alignof__(u64) - 1); inst->alg.cra_type = &crypto_aead_type; inst->alg.cra_aead.ivsize = 12; inst->alg.cra_aead.maxauthsize = 16; -- cgit v1.1 From 653ebd9c8510a7d647ed23e66e1338f848ebdbab Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 27 Nov 2007 19:48:27 +0800 Subject: [CRYPTO] blkcipher: Merge ablkcipher and 
blkcipher into one option/module With the impending addition of the givcipher type, both blkcipher and ablkcipher algorithms will use it to create givcipher objects. As such it no longer makes sense to split the system between ablkcipher and blkcipher. In particular, both ablkcipher.c and blkcipher.c would need to use the givcipher type which has to reside in ablkcipher.c since it shares much code with it. This patch merges the two Kconfig options as well as the modules into one. Signed-off-by: Herbert Xu --- crypto/Kconfig | 6 +----- crypto/Makefile | 6 ++++-- drivers/crypto/Kconfig | 2 +- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 1eb4bcd..c4b6c91 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -24,10 +24,6 @@ config CRYPTO_ALGAPI help This option provides the API for cryptographic algorithms. -config CRYPTO_ABLKCIPHER - tristate - select CRYPTO_BLKCIPHER - config CRYPTO_AEAD tristate select CRYPTO_ALGAPI @@ -217,7 +213,7 @@ config CRYPTO_GCM config CRYPTO_CRYPTD tristate "Software async crypto daemon" - select CRYPTO_ABLKCIPHER + select CRYPTO_BLKCIPHER select CRYPTO_MANAGER help This is a generic software asynchronous crypto daemon that diff --git a/crypto/Makefile b/crypto/Makefile index 83532ac..2a1883f 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -8,9 +8,11 @@ crypto_algapi-$(CONFIG_PROC_FS) += proc.o crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y) obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o -obj-$(CONFIG_CRYPTO_ABLKCIPHER) += ablkcipher.o obj-$(CONFIG_CRYPTO_AEAD) += aead.o -obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o + +crypto_blkcipher-objs := ablkcipher.o +crypto_blkcipher-objs += blkcipher.o +obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o crypto_hash-objs := hash.o obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index d848e1b..d8c7040 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -87,7 +87,7 @@ config CRYPTO_DEV_HIFN_795X tristate "Driver HIFN 795x crypto accelerator chips" select CRYPTO_DES select CRYPTO_ALGAPI - select CRYPTO_ABLKCIPHER + select CRYPTO_BLKCIPHER depends on PCI help This option allows you to have support for HIFN 795x crypto adapters. -- cgit v1.1 From 5311f248b7764ba8b59e6d477355f766e5609686 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 17 Dec 2007 21:34:32 +0800 Subject: [CRYPTO] ctr: Refactor into ctr and rfc3686 As discussed previously, this patch moves the basic CTR functionality into a chainable algorithm called ctr. The IPsec-specific variant of it is now placed on top with the name rfc3686. So ctr(aes) gives a chainable cipher with IV size 16 while the IPsec variant will be called rfc3686(ctr(aes)). This patch also adjusts gcm accordingly. 
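For reference, the counter block that rfc3686 hands to the inner ctr algorithm keeps the RFC 3686 layout: a 4-byte nonce taken from the tail of the key, the 8-byte per-packet IV, and a 4-byte big-endian block counter starting at 1. A minimal sketch of that construction (the helper is hypothetical; the CTR_RFC3686_* constants are the ones added in include/crypto/ctr.h below):

#include <crypto/ctr.h>         /* CTR_RFC3686_* sizes */
#include <linux/string.h>       /* memcpy */
#include <linux/types.h>        /* u8, __be32 */
#include <asm/byteorder.h>      /* cpu_to_be32 */

/* Hypothetical illustration of the 16-byte counter block built inside
 * crypto_rfc3686_crypt: nonce | IV | counter. */
static void rfc3686_build_ctrblk(u8 *ctrblk, const u8 *nonce, const u8 *iv)
{
        memcpy(ctrblk, nonce, CTR_RFC3686_NONCE_SIZE);
        memcpy(ctrblk + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);

        /* RFC 3686 requires the counter portion to begin at 1 */
        *(__be32 *)(ctrblk + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
                cpu_to_be32(1);
}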
Signed-off-by: Herbert Xu --- crypto/ctr.c | 334 +++++++++++++++++++++++++++++++-------------------- crypto/gcm.c | 7 +- crypto/tcrypt.c | 8 +- include/crypto/ctr.h | 20 +++ 4 files changed, 233 insertions(+), 136 deletions(-) create mode 100644 include/crypto/ctr.h diff --git a/crypto/ctr.c b/crypto/ctr.c index 57da7d0..1052b31 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -19,16 +20,13 @@ #include #include -struct ctr_instance_ctx { - struct crypto_spawn alg; - unsigned int noncesize; - unsigned int ivsize; - unsigned int countersize; -}; - struct crypto_ctr_ctx { struct crypto_cipher *child; - u8 *nonce; +}; + +struct crypto_rfc3686_ctx { + struct crypto_blkcipher *child; + u8 nonce[CTR_RFC3686_NONCE_SIZE]; }; static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, @@ -36,18 +34,7 @@ static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, { struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent); struct crypto_cipher *child = ctx->child; - struct ctr_instance_ctx *ictx = - crypto_instance_ctx(crypto_tfm_alg_instance(parent)); - unsigned int noncelen = ictx->noncesize; - int err = 0; - - /* the nonce is stored in bytes at end of key */ - if (keylen < noncelen) - return -EINVAL; - - memcpy(ctx->nonce, key + (keylen - noncelen), noncelen); - - keylen -= noncelen; + int err; crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & @@ -60,11 +47,13 @@ static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, } static void crypto_ctr_crypt_final(struct blkcipher_walk *walk, - struct crypto_cipher *tfm, u8 *ctrblk, - unsigned int countersize) + struct crypto_cipher *tfm) { unsigned int bsize = crypto_cipher_blocksize(tfm); - u8 *keystream = ctrblk + bsize; + unsigned long alignmask = crypto_cipher_alignmask(tfm); + u8 *ctrblk = walk->iv; + u8 tmp[bsize + alignmask]; + u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; @@ -72,15 +61,17 @@ static void crypto_ctr_crypt_final(struct blkcipher_walk *walk, crypto_cipher_encrypt_one(tfm, keystream, ctrblk); crypto_xor(keystream, src, nbytes); memcpy(dst, keystream, nbytes); + + crypto_inc(ctrblk, bsize); } static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, - struct crypto_cipher *tfm, u8 *ctrblk, - unsigned int countersize) + struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; unsigned int bsize = crypto_cipher_blocksize(tfm); + u8 *ctrblk = walk->iv; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; @@ -91,7 +82,7 @@ static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, crypto_xor(dst, src, bsize); /* increment counter in counterblock */ - crypto_inc(ctrblk + bsize - countersize, countersize); + crypto_inc(ctrblk, bsize); src += bsize; dst += bsize; @@ -101,15 +92,17 @@ static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, } static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk, - struct crypto_cipher *tfm, u8 *ctrblk, - unsigned int countersize) + struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = crypto_cipher_alg(tfm)->cia_encrypt; unsigned int bsize = crypto_cipher_blocksize(tfm); + unsigned long alignmask = crypto_cipher_alignmask(tfm); unsigned int nbytes = walk->nbytes; + u8 *ctrblk = walk->iv; 
u8 *src = walk->src.virt.addr; - u8 *keystream = ctrblk + bsize; + u8 tmp[bsize + alignmask]; + u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); do { /* create keystream */ @@ -117,7 +110,7 @@ static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk, crypto_xor(src, keystream, bsize); /* increment counter in counterblock */ - crypto_inc(ctrblk + bsize - countersize, countersize); + crypto_inc(ctrblk, bsize); src += bsize; } while ((nbytes -= bsize) >= bsize); @@ -134,41 +127,22 @@ static int crypto_ctr_crypt(struct blkcipher_desc *desc, struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm); struct crypto_cipher *child = ctx->child; unsigned int bsize = crypto_cipher_blocksize(child); - struct ctr_instance_ctx *ictx = - crypto_instance_ctx(crypto_tfm_alg_instance(&tfm->base)); - unsigned long alignmask = crypto_cipher_alignmask(child) | - (__alignof__(u32) - 1); - u8 cblk[bsize * 2 + alignmask]; - u8 *counterblk = (u8 *)ALIGN((unsigned long)cblk, alignmask + 1); int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt_block(desc, &walk, bsize); - /* set up counter block */ - memset(counterblk, 0 , bsize); - memcpy(counterblk, ctx->nonce, ictx->noncesize); - memcpy(counterblk + ictx->noncesize, walk.iv, ictx->ivsize); - - /* initialize counter portion of counter block */ - crypto_inc(counterblk + bsize - ictx->countersize, ictx->countersize); - while (walk.nbytes >= bsize) { if (walk.src.virt.addr == walk.dst.virt.addr) - nbytes = crypto_ctr_crypt_inplace(&walk, child, - counterblk, - ictx->countersize); + nbytes = crypto_ctr_crypt_inplace(&walk, child); else - nbytes = crypto_ctr_crypt_segment(&walk, child, - counterblk, - ictx->countersize); + nbytes = crypto_ctr_crypt_segment(&walk, child); err = blkcipher_walk_done(desc, &walk, nbytes); } if (walk.nbytes) { - crypto_ctr_crypt_final(&walk, child, counterblk, - ictx->countersize); + crypto_ctr_crypt_final(&walk, child); err = blkcipher_walk_done(desc, &walk, 0); } @@ -178,15 +152,11 @@ static int crypto_ctr_crypt(struct blkcipher_desc *desc, static int crypto_ctr_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct ctr_instance_ctx *ictx = crypto_instance_ctx(inst); + struct crypto_spawn *spawn = crypto_instance_ctx(inst); struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_cipher *cipher; - ctx->nonce = kzalloc(ictx->noncesize, GFP_KERNEL); - if (!ctx->nonce) - return -ENOMEM; - - cipher = crypto_spawn_cipher(&ictx->alg); + cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); @@ -199,7 +169,6 @@ static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm) { struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm); - kfree(ctx->nonce); crypto_free_cipher(ctx->child); } @@ -207,10 +176,6 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; - struct ctr_instance_ctx *ictx; - unsigned int noncesize; - unsigned int ivsize; - unsigned int countersize; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); @@ -222,71 +187,28 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); - err = crypto_attr_u32(tb[2], &noncesize); - if (err) - goto out_put_alg; - - err = crypto_attr_u32(tb[3], &ivsize); - if (err) - goto out_put_alg; - - err = crypto_attr_u32(tb[4], &countersize); - if (err) - goto out_put_alg; - - /* verify size of nonce + iv + counter - * counter must be >= 4 bytes. 
- */ + /* Block size must be >= 4 bytes. */ err = -EINVAL; - if (((noncesize + ivsize + countersize) < alg->cra_blocksize) || - ((noncesize + ivsize) > alg->cra_blocksize) || - (countersize > alg->cra_blocksize) || (countersize < 4)) + if (alg->cra_blocksize < 4) goto out_put_alg; /* If this is false we'd fail the alignment of crypto_inc. */ - if ((alg->cra_blocksize - countersize) % 4) + if (alg->cra_blocksize % 4) goto out_put_alg; - inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); - err = -ENOMEM; - if (!inst) - goto out_put_alg; - - err = -ENAMETOOLONG; - if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, - "ctr(%s,%u,%u,%u)", alg->cra_name, noncesize, - ivsize, countersize) >= CRYPTO_MAX_ALG_NAME) { - goto err_free_inst; - } - - if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "ctr(%s,%u,%u,%u)", alg->cra_driver_name, noncesize, - ivsize, countersize) >= CRYPTO_MAX_ALG_NAME) { - goto err_free_inst; - } - - ictx = crypto_instance_ctx(inst); - ictx->noncesize = noncesize; - ictx->ivsize = ivsize; - ictx->countersize = countersize; - - err = crypto_init_spawn(&ictx->alg, alg, inst, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); - if (err) - goto err_free_inst; + inst = crypto_alloc_instance("ctr", alg); + if (IS_ERR(inst)) + goto out; - err = 0; inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = 1; inst->alg.cra_alignmask = alg->cra_alignmask | (__alignof__(u32) - 1); inst->alg.cra_type = &crypto_blkcipher_type; - inst->alg.cra_blkcipher.ivsize = ivsize; - inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize - + noncesize; - inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize - + noncesize; + inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; + inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; + inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx); @@ -297,24 +219,18 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; -err_free_inst: - if (err) - kfree(inst); - -out_put_alg: +out: crypto_mod_put(alg); - - if (err) - inst = ERR_PTR(err); - return inst; + +out_put_alg: + inst = ERR_PTR(err); + goto out; } static void crypto_ctr_free(struct crypto_instance *inst) { - struct ctr_instance_ctx *ictx = crypto_instance_ctx(inst); - - crypto_drop_spawn(&ictx->alg); + crypto_drop_spawn(crypto_instance_ctx(inst)); kfree(inst); } @@ -325,13 +241,174 @@ static struct crypto_template crypto_ctr_tmpl = { .module = THIS_MODULE, }; +static int crypto_rfc3686_setkey(struct crypto_tfm *parent, const u8 *key, + unsigned int keylen) +{ + struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(parent); + struct crypto_blkcipher *child = ctx->child; + int err; + + /* the nonce is stored in bytes at end of key */ + if (keylen < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE), + CTR_RFC3686_NONCE_SIZE); + + keylen -= CTR_RFC3686_NONCE_SIZE; + + crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_blkcipher_setkey(child, key, keylen); + crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); + + return err; +} + +static int crypto_rfc3686_crypt(struct blkcipher_desc *desc, + struct 
scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct crypto_blkcipher *tfm = desc->tfm; + struct crypto_rfc3686_ctx *ctx = crypto_blkcipher_ctx(tfm); + struct crypto_blkcipher *child = ctx->child; + unsigned long alignmask = crypto_blkcipher_alignmask(tfm); + u8 ivblk[CTR_RFC3686_BLOCK_SIZE + alignmask]; + u8 *iv = PTR_ALIGN(ivblk + 0, alignmask + 1); + u8 *info = desc->info; + int err; + + /* set up counter block */ + memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); + memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE); + + /* initialize counter portion of counter block */ + *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = + cpu_to_be32(1); + + desc->tfm = child; + desc->info = iv; + err = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); + desc->tfm = tfm; + desc->info = info; + + return err; +} + +static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_blkcipher *cipher; + + cipher = crypto_spawn_blkcipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->child = cipher; + + return 0; +} + +static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_blkcipher(ctx->child); +} + +static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb) +{ + struct crypto_instance *inst; + struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); + + alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER, + CRYPTO_ALG_TYPE_MASK); + err = PTR_ERR(alg); + if (IS_ERR(alg)) + return ERR_PTR(err); + + /* We only support 16-byte blocks. */ + err = -EINVAL; + if (alg->cra_blkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE) + goto out_put_alg; + + /* Not a stream cipher? 
*/ + if (alg->cra_blocksize != 1) + goto out_put_alg; + + inst = crypto_alloc_instance("rfc3686", alg); + if (IS_ERR(inst)) + goto out; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; + inst->alg.cra_priority = alg->cra_priority; + inst->alg.cra_blocksize = 1; + inst->alg.cra_alignmask = alg->cra_alignmask; + inst->alg.cra_type = &crypto_blkcipher_type; + + inst->alg.cra_blkcipher.ivsize = CTR_RFC3686_IV_SIZE; + inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize + + CTR_RFC3686_NONCE_SIZE; + inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize + + CTR_RFC3686_NONCE_SIZE; + + inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx); + + inst->alg.cra_init = crypto_rfc3686_init_tfm; + inst->alg.cra_exit = crypto_rfc3686_exit_tfm; + + inst->alg.cra_blkcipher.setkey = crypto_rfc3686_setkey; + inst->alg.cra_blkcipher.encrypt = crypto_rfc3686_crypt; + inst->alg.cra_blkcipher.decrypt = crypto_rfc3686_crypt; + +out: + crypto_mod_put(alg); + return inst; + +out_put_alg: + inst = ERR_PTR(err); + goto out; +} + +static struct crypto_template crypto_rfc3686_tmpl = { + .name = "rfc3686", + .alloc = crypto_rfc3686_alloc, + .free = crypto_ctr_free, + .module = THIS_MODULE, +}; + static int __init crypto_ctr_module_init(void) { - return crypto_register_template(&crypto_ctr_tmpl); + int err; + + err = crypto_register_template(&crypto_ctr_tmpl); + if (err) + goto out; + + err = crypto_register_template(&crypto_rfc3686_tmpl); + if (err) + goto out_drop_ctr; + +out: + return err; + +out_drop_ctr: + crypto_unregister_template(&crypto_ctr_tmpl); + goto out; } static void __exit crypto_ctr_module_exit(void) { + crypto_unregister_template(&crypto_rfc3686_tmpl); crypto_unregister_template(&crypto_ctr_tmpl); } @@ -340,3 +417,4 @@ module_exit(crypto_ctr_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CTR Counter block mode"); +MODULE_ALIAS("rfc3686"); diff --git a/crypto/gcm.c b/crypto/gcm.c index 0818317..c54d478 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -160,7 +160,7 @@ static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx, static inline void crypto_gcm_set_counter(u8 *counterblock, u32 value) { - *((u32 *)&counterblock[12]) = cpu_to_be32(value); + *((u32 *)&counterblock[12]) = cpu_to_be32(value + 1); } static int crypto_gcm_encrypt_counter(struct crypto_aead *aead, u8 *block, @@ -400,9 +400,8 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) return inst; inst = ERR_PTR(ENAMETOOLONG); - if (snprintf( - ctr_name, CRYPTO_MAX_ALG_NAME, - "ctr(%s,0,16,4)", cipher->cra_name) >= CRYPTO_MAX_ALG_NAME) + if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", + cipher->cra_name) >= CRYPTO_MAX_ALG_NAME) return inst; ctr = crypto_alg_mod_lookup(ctr_name, CRYPTO_ALG_TYPE_BLKCIPHER, diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 0cfb8eb..1142b49 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1193,9 +1193,9 @@ static void do_test(void) AES_XTS_ENC_TEST_VECTORS); test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template, AES_XTS_DEC_TEST_VECTORS); - test_cipher("ctr(aes,4,8,4)", ENCRYPT, aes_ctr_enc_tv_template, + test_cipher("rfc3686(ctr(aes))", ENCRYPT, aes_ctr_enc_tv_template, AES_CTR_ENC_TEST_VECTORS); - test_cipher("ctr(aes,4,8,4)", DECRYPT, aes_ctr_dec_tv_template, + test_cipher("rfc3686(ctr(aes))", DECRYPT, aes_ctr_dec_tv_template, AES_CTR_DEC_TEST_VECTORS); test_aead("gcm(aes)", ENCRYPT, aes_gcm_enc_tv_template, AES_GCM_ENC_TEST_VECTORS); @@ -1394,9 +1394,9 @@ static void do_test(void) AES_XTS_ENC_TEST_VECTORS); 
test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template, AES_XTS_DEC_TEST_VECTORS); - test_cipher("ctr(aes,4,8,4)", ENCRYPT, aes_ctr_enc_tv_template, + test_cipher("rfc3686(ctr(aes))", ENCRYPT, aes_ctr_enc_tv_template, AES_CTR_ENC_TEST_VECTORS); - test_cipher("ctr(aes,4,8,4)", DECRYPT, aes_ctr_dec_tv_template, + test_cipher("rfc3686(ctr(aes))", DECRYPT, aes_ctr_dec_tv_template, AES_CTR_DEC_TEST_VECTORS); break; diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h new file mode 100644 index 0000000..4180fc0 --- /dev/null +++ b/include/crypto/ctr.h @@ -0,0 +1,20 @@ +/* + * CTR: Counter mode + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_CTR_H +#define _CRYPTO_CTR_H + +#define CTR_RFC3686_NONCE_SIZE 4 +#define CTR_RFC3686_IV_SIZE 8 +#define CTR_RFC3686_BLOCK_SIZE 16 + +#endif /* _CRYPTO_CTR_H */ -- cgit v1.1 From 84c911523020a2e39b307a2da26ee1886b7214fe Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 17 Dec 2007 21:42:08 +0800 Subject: [CRYPTO] gcm: Add support for async ciphers This patch adds the necessary changes for GCM to be used with async ciphers. This would allow it to be used with hardware devices that support CTR. Signed-off-by: Herbert Xu --- crypto/gcm.c | 190 +++++++++++++++++++++++++++++++++++------------------------ 1 file changed, 112 insertions(+), 78 deletions(-) diff --git a/crypto/gcm.c b/crypto/gcm.c index c54d478..f6bee6f 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -38,11 +39,17 @@ struct crypto_gcm_ghash_ctx { struct crypto_gcm_req_priv_ctx { u8 auth_tag[16]; u8 iauth_tag[16]; - u8 counter[16]; + struct scatterlist src[2]; + struct scatterlist dst[2]; struct crypto_gcm_ghash_ctx ghash; struct ablkcipher_request abreq; }; +struct crypto_gcm_setkey_result { + int err; + struct completion completion; +}; + static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( struct aead_request *req) { @@ -158,33 +165,15 @@ static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx, crypto_xor(dst, buf, 16); } -static inline void crypto_gcm_set_counter(u8 *counterblock, u32 value) +static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) { - *((u32 *)&counterblock[12]) = cpu_to_be32(value + 1); -} + struct crypto_gcm_setkey_result *result = req->data; -static int crypto_gcm_encrypt_counter(struct crypto_aead *aead, u8 *block, - u32 value, const u8 *iv) -{ - struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); - struct crypto_ablkcipher *ctr = ctx->ctr; - struct ablkcipher_request req; - struct scatterlist sg; - u8 counterblock[16]; - - if (iv == NULL) - memset(counterblock, 0, 12); - else - memcpy(counterblock, iv, 12); - - crypto_gcm_set_counter(counterblock, value); - - sg_init_one(&sg, block, 16); - ablkcipher_request_set_tfm(&req, ctr); - ablkcipher_request_set_crypt(&req, &sg, &sg, 16, counterblock); - ablkcipher_request_set_callback(&req, 0, NULL, NULL); - memset(block, 0, 16); - return crypto_ablkcipher_encrypt(&req); + if (err == -EINPROGRESS) + return; + + result->err = err; + complete(&result->completion); } static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, @@ -192,10 +181,16 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, 
{ struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_ablkcipher *ctr = ctx->ctr; - int alignmask = crypto_ablkcipher_alignmask(ctr); - u8 alignbuf[16+alignmask]; - u8 *hash = (u8 *)ALIGN((unsigned long)alignbuf, alignmask+1); - int err = 0; + struct { + be128 hash; + u8 iv[8]; + + struct crypto_gcm_setkey_result result; + + struct scatterlist sg[1]; + struct ablkcipher_request req; + } *data; + int err; crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) & @@ -203,62 +198,86 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, err = crypto_ablkcipher_setkey(ctr, key, keylen); if (err) - goto out; + return err; crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) & CRYPTO_TFM_RES_MASK); - err = crypto_gcm_encrypt_counter(aead, hash, -1, NULL); + data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + init_completion(&data->result.completion); + sg_init_one(data->sg, &data->hash, sizeof(data->hash)); + ablkcipher_request_set_tfm(&data->req, ctr); + ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | + CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_gcm_setkey_done, + &data->result); + ablkcipher_request_set_crypt(&data->req, data->sg, data->sg, + sizeof(data->hash), data->iv); + + err = crypto_ablkcipher_encrypt(&data->req); + if (err == -EINPROGRESS || err == -EBUSY) { + err = wait_for_completion_interruptible( + &data->result.completion); + if (!err) + err = data->result.err; + } + if (err) goto out; if (ctx->gf128 != NULL) gf128mul_free_4k(ctx->gf128); - ctx->gf128 = gf128mul_init_4k_lle((be128 *)hash); + ctx->gf128 = gf128mul_init_4k_lle(&data->hash); if (ctx->gf128 == NULL) err = -ENOMEM; - out: +out: + kfree(data); return err; } -static int crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, - struct aead_request *req, - unsigned int cryptlen, - void (*done)(struct crypto_async_request *, - int)) +static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, + struct aead_request *req, + unsigned int cryptlen) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); u32 flags = req->base.tfm->crt_flags; - u8 *auth_tag = pctx->auth_tag; - u8 *counter = pctx->counter; struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; - int err = 0; + struct scatterlist *dst; + __be32 counter = cpu_to_be32(1); + + memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag)); + memcpy(req->iv + 12, &counter, 4); + + sg_init_table(pctx->src, 2); + sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag)); + scatterwalk_sg_chain(pctx->src, 2, req->src); + + dst = pctx->src; + if (req->src != req->dst) { + sg_init_table(pctx->dst, 2); + sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag)); + scatterwalk_sg_chain(pctx->dst, 2, req->dst); + dst = pctx->dst; + } ablkcipher_request_set_tfm(ablk_req, ctx->ctr); - ablkcipher_request_set_callback(ablk_req, aead_request_flags(req), - done, req); - ablkcipher_request_set_crypt(ablk_req, req->src, req->dst, - cryptlen, counter); - - err = crypto_gcm_encrypt_counter(aead, auth_tag, 0, req->iv); - if (err) - goto out; - - memcpy(counter, req->iv, 12); - crypto_gcm_set_counter(counter, 1); + ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, + cryptlen + sizeof(pctx->auth_tag), + req->iv); crypto_gcm_ghash_init(ghash, flags, ctx->gf128); 
crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen); crypto_gcm_ghash_flush(ghash); - - out: - return err; } static int crypto_gcm_hash(struct aead_request *req) @@ -291,25 +310,44 @@ static int crypto_gcm_encrypt(struct aead_request *req) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct ablkcipher_request *abreq = &pctx->abreq; - int err = 0; + int err; + + crypto_gcm_init_crypt(abreq, req, req->cryptlen); + ablkcipher_request_set_callback(abreq, aead_request_flags(req), + crypto_gcm_encrypt_done, req); - err = crypto_gcm_init_crypt(abreq, req, req->cryptlen, - crypto_gcm_encrypt_done); + err = crypto_ablkcipher_encrypt(abreq); if (err) return err; - if (req->cryptlen) { - err = crypto_ablkcipher_encrypt(abreq); - if (err) - return err; - } - return crypto_gcm_hash(req); } +static int crypto_gcm_verify(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); + struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; + u8 *auth_tag = pctx->auth_tag; + u8 *iauth_tag = pctx->iauth_tag; + unsigned int authsize = crypto_aead_authsize(aead); + unsigned int cryptlen = req->cryptlen - authsize; + + crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag); + + authsize = crypto_aead_authsize(aead); + scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); + return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; +} + static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err) { - aead_request_complete(areq->data, err); + struct aead_request *req = areq->data; + + if (!err) + err = crypto_gcm_verify(req); + + aead_request_complete(req, err); } static int crypto_gcm_decrypt(struct aead_request *req) @@ -317,8 +355,6 @@ static int crypto_gcm_decrypt(struct aead_request *req) struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct ablkcipher_request *abreq = &pctx->abreq; - u8 *auth_tag = pctx->auth_tag; - u8 *iauth_tag = pctx->iauth_tag; struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; unsigned int cryptlen = req->cryptlen; unsigned int authsize = crypto_aead_authsize(aead); @@ -328,19 +364,17 @@ static int crypto_gcm_decrypt(struct aead_request *req) return -EINVAL; cryptlen -= authsize; - err = crypto_gcm_init_crypt(abreq, req, cryptlen, - crypto_gcm_decrypt_done); - if (err) - return err; + crypto_gcm_init_crypt(abreq, req, cryptlen); + ablkcipher_request_set_callback(abreq, aead_request_flags(req), + crypto_gcm_decrypt_done, req); crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen); - crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag); - scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); - if (memcmp(iauth_tag, auth_tag, authsize)) - return -EBADMSG; + err = crypto_ablkcipher_decrypt(abreq); + if (err) + return err; - return crypto_ablkcipher_decrypt(abreq); + return crypto_gcm_verify(req); } static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) @@ -436,7 +470,7 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) inst->alg.cra_blocksize = 16; inst->alg.cra_alignmask = ctr->cra_alignmask | (__alignof__(u64) - 1); inst->alg.cra_type = &crypto_aead_type; - inst->alg.cra_aead.ivsize = 12; + inst->alg.cra_aead.ivsize = 16; inst->alg.cra_aead.maxauthsize = 16; inst->alg.cra_ctxsize = sizeof(struct crypto_gcm_ctx); inst->alg.cra_init = crypto_gcm_init_tfm; -- cgit v1.1 From 378f4f51f9fdd8df80ea875320e2bf1d7c6e6e77 
Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 17 Dec 2007 20:07:31 +0800 Subject: [CRYPTO] skcipher: Add crypto_grab_skcipher interface Note: From now on the collective of ablkcipher/blkcipher/givcipher will be known as skcipher, i.e., symmetric key cipher. The name blkcipher has always been something of a misnomer since it supports stream ciphers too. This patch adds the function crypto_grab_skcipher as a new way of getting an ablkcipher spawn. The problem is that previously we did this in two steps, first getting the algorithm and then calling crypto_init_spawn. This meant that each spawn user had to be aware of what type and mask to use for these two steps. This is difficult and also presents a problem when the type/mask changes as they're about to be for IV generators. The new interface does both steps together just like crypto_alloc_ablkcipher. As a side-effect this also allows us to be stronger on type enforcement for spawns. For now this is only done for ablkcipher but it's trivial to extend for other types. This patch also moves the type/mask logic for skcipher into the helpers crypto_skcipher_type and crypto_skcipher_mask. Finally this patch introduces the function crypto_requires_sync to determine whether the user is specifically requesting a sync algorithm. Signed-off-by: Herbert Xu --- crypto/ablkcipher.c | 25 +++++++++++++++++-- include/crypto/algapi.h | 22 +++++++++++++--- include/crypto/internal/skcipher.h | 51 ++++++++++++++++++++++++++++++++++++++ include/linux/crypto.h | 26 +++++++++++-------- 4 files changed, 108 insertions(+), 16 deletions(-) create mode 100644 include/crypto/internal/skcipher.h diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 2731acb..0083140 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -13,14 +13,16 @@ * */ -#include -#include +#include +#include #include #include #include #include #include +#include "internal.h" + static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { @@ -105,5 +107,24 @@ const struct crypto_type crypto_ablkcipher_type = { }; EXPORT_SYMBOL_GPL(crypto_ablkcipher_type); +int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, + u32 type, u32 mask) +{ + struct crypto_alg *alg; + int err; + + type = crypto_skcipher_type(type); + mask = crypto_skcipher_mask(mask); + + alg = crypto_alg_mod_lookup(name, type, mask); + if (IS_ERR(alg)) + return PTR_ERR(alg); + + err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); + crypto_mod_put(alg); + return err; +} +EXPORT_SYMBOL_GPL(crypto_grab_skcipher); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous block chaining cipher type"); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 726a765..fda1759 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -111,6 +111,12 @@ void crypto_drop_spawn(struct crypto_spawn *spawn); struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, u32 mask); +static inline void crypto_set_spawn(struct crypto_spawn *spawn, + struct crypto_instance *inst) +{ + spawn->inst = inst; +} + struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); int crypto_check_attr_type(struct rtattr **tb, u32 type); const char *crypto_attr_alg_name(struct rtattr *rta); @@ -195,10 +201,9 @@ static inline struct crypto_instance *crypto_aead_alg_instance( static inline struct crypto_ablkcipher *crypto_spawn_ablkcipher( struct crypto_spawn *spawn) { - u32 type = CRYPTO_ALG_TYPE_BLKCIPHER; - u32 mask =
CRYPTO_ALG_TYPE_BLKCIPHER_MASK; - - return __crypto_ablkcipher_cast(crypto_spawn_tfm(spawn, type, mask)); + return __crypto_ablkcipher_cast( + crypto_spawn_tfm(spawn, crypto_skcipher_type(0), + crypto_skcipher_mask(0))); } static inline struct crypto_blkcipher *crypto_spawn_blkcipher( @@ -308,5 +313,14 @@ static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, return crypto_attr_alg(tb[1], type, mask); } +/* + * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. + * Otherwise returns zero. + */ +static inline int crypto_requires_sync(u32 type, u32 mask) +{ + return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; +} + #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h new file mode 100644 index 0000000..87879e6 --- /dev/null +++ b/include/crypto/internal/skcipher.h @@ -0,0 +1,51 @@ +/* + * Symmetric key ciphers. + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_INTERNAL_SKCIPHER_H +#define _CRYPTO_INTERNAL_SKCIPHER_H + +#include + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +static inline void crypto_set_skcipher_spawn( + struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct crypto_alg *crypto_skcipher_spawn_alg( + struct crypto_skcipher_spawn *spawn) +{ + return spawn->base.alg; +} + +static inline struct crypto_ablkcipher *crypto_spawn_skcipher( + struct crypto_skcipher_spawn *spawn) +{ + return __crypto_ablkcipher_cast( + crypto_spawn_tfm(&spawn->base, crypto_skcipher_type(0), + crypto_skcipher_mask(0))); +} + +#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ + diff --git a/include/linux/crypto.h b/include/linux/crypto.h index ef7642e..d6962b4 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -528,16 +528,26 @@ static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast( return (struct crypto_ablkcipher *)tfm; } -static inline struct crypto_ablkcipher *crypto_alloc_ablkcipher( - const char *alg_name, u32 type, u32 mask) +static inline u32 crypto_skcipher_type(u32 type) { type &= ~CRYPTO_ALG_TYPE_MASK; - mask &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; + return type; +} + +static inline u32 crypto_skcipher_mask(u32 mask) +{ + mask &= ~CRYPTO_ALG_TYPE_MASK; mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; + return mask; +} +static inline struct crypto_ablkcipher *crypto_alloc_ablkcipher( + const char *alg_name, u32 type, u32 mask) +{ return __crypto_ablkcipher_cast( - crypto_alloc_base(alg_name, type, mask)); + crypto_alloc_base(alg_name, crypto_skcipher_type(type), + crypto_skcipher_mask(mask))); } static inline struct crypto_tfm *crypto_ablkcipher_tfm( @@ -554,12 +564,8 @@ static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, u32 mask) { - type &= ~CRYPTO_ALG_TYPE_MASK; - mask &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_BLKCIPHER; - mask |= 
CRYPTO_ALG_TYPE_BLKCIPHER_MASK; - - return crypto_has_alg(alg_name, type, mask); + return crypto_has_alg(alg_name, crypto_skcipher_type(type), + crypto_skcipher_mask(mask)); } static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( -- cgit v1.1 From 61da88e2b800eed2b03834a73c46cc89ad48716d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 17 Dec 2007 21:51:27 +0800 Subject: [CRYPTO] skcipher: Add givcrypt operations and givcipher type Different block cipher modes have different requirements for initialisation vectors. For example, CBC can use a simple randomly generated IV while modes such as CTR must use an IV generation mechanism that gives a stronger guarantee on the lack of collisions. Furthermore, disk encryption modes have their own IV generation algorithms. Up until now IV generation has been left to the users of the symmetric key cipher API. This is inconvenient as the number of block cipher modes increases because the user needs to be aware of which mode is supposed to be paired with which IV generation algorithm. Therefore it makes sense to integrate the IV generation into the crypto API. This patch takes the first step in that direction by creating two new ablkcipher operations, givencrypt and givdecrypt, which generate an IV before performing the actual encryption or decryption. The operations are currently not exposed to the user. That will be done once the underlying functionality has actually been implemented. It also creates the underlying givcipher type. Algorithms that directly generate IVs would use it instead of ablkcipher. All other algorithms (including all existing ones) would generate a givcipher algorithm upon registration. This givcipher algorithm will be constructed from the geniv string that's stored in every algorithm. That string will locate a template which is instantiated by the blkcipher/ablkcipher algorithm in question to give a givcipher algorithm.
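To make the shape of the new hooks concrete, an algorithm that generates its own IVs would eventually be registered along the following lines (entirely illustrative; only the type and field names come from this patch):

#include <crypto/internal/skcipher.h>

/* Illustrative only: a real implementation would derive req->giv from
 * req->seq and then perform the embedded ablkcipher request req->creq. */
static int example_givencrypt(struct skcipher_givcrypt_request *req)
{
        return -ENOSYS; /* placeholder */
}

static struct crypto_alg example_giv_alg = {
        .cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_ASYNC,
        .cra_type = &crypto_givcipher_type,
        .cra_u.ablkcipher = {
                .givencrypt = example_givencrypt,
                /* an absent givdecrypt is substituted with no_givdecrypt,
                 * which simply fails with -ENOSYS */
        },
};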
Signed-off-by: Herbert Xu --- crypto/ablkcipher.c | 46 ++++++++++++++++++++++++++++++++++++++ include/crypto/internal/skcipher.h | 9 ++++++++ include/crypto/skcipher.h | 38 +++++++++++++++++++++++++++++++ include/linux/crypto.h | 7 ++++++ 4 files changed, 100 insertions(+) create mode 100644 include/crypto/skcipher.h diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 0083140..e403d81 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -107,6 +107,52 @@ const struct crypto_type crypto_ablkcipher_type = { }; EXPORT_SYMBOL_GPL(crypto_ablkcipher_type); +static int no_givdecrypt(struct skcipher_givcrypt_request *req) +{ + return -ENOSYS; +} + +static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type, + u32 mask) +{ + struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; + struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher; + + if (alg->ivsize > PAGE_SIZE / 8) + return -EINVAL; + + crt->setkey = setkey; + crt->encrypt = alg->encrypt; + crt->decrypt = alg->decrypt; + crt->givencrypt = alg->givencrypt; + crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt; + crt->ivsize = alg->ivsize; + + return 0; +} + +static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); +static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg) +{ + struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher; + + seq_printf(m, "type : givcipher\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); + seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); + seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize); +} + +const struct crypto_type crypto_givcipher_type = { + .ctxsize = crypto_ablkcipher_ctxsize, + .init = crypto_init_givcipher_ops, +#ifdef CONFIG_PROC_FS + .show = crypto_givcipher_show, +#endif +}; +EXPORT_SYMBOL_GPL(crypto_givcipher_type); + int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, u32 type, u32 mask) { diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 87879e6..c9402dd 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -14,11 +14,14 @@ #define _CRYPTO_INTERNAL_SKCIPHER_H #include +#include struct crypto_skcipher_spawn { struct crypto_spawn base; }; +extern const struct crypto_type crypto_givcipher_type; + static inline void crypto_set_skcipher_spawn( struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) { @@ -47,5 +50,11 @@ static inline struct crypto_ablkcipher *crypto_spawn_skcipher( crypto_skcipher_mask(0))); } +static inline void *skcipher_givcrypt_reqctx( + struct skcipher_givcrypt_request *req) +{ + return ablkcipher_request_ctx(&req->creq); +} + #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h new file mode 100644 index 0000000..c283fab --- /dev/null +++ b/include/crypto/skcipher.h @@ -0,0 +1,38 @@ +/* + * Symmetric key ciphers. + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#ifndef _CRYPTO_SKCIPHER_H +#define _CRYPTO_SKCIPHER_H + +#include + +/** + * struct skcipher_givcrypt_request - Crypto request with IV generation + * @seq: Sequence number for IV generation + * @giv: Space for generated IV + * @creq: The crypto request itself + */ +struct skcipher_givcrypt_request { + u64 seq; + u8 *giv; + + struct ablkcipher_request creq; +}; + +static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm( + struct skcipher_givcrypt_request *req) +{ + return crypto_ablkcipher_reqtfm(&req->creq); +} + +#endif /* _CRYPTO_SKCIPHER_H */ + diff --git a/include/linux/crypto.h b/include/linux/crypto.h index d6962b4..3656a24e 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -34,6 +34,7 @@ #define CRYPTO_ALG_TYPE_HASH 0x00000003 #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 +#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 #define CRYPTO_ALG_TYPE_COMPRESS 0x00000008 #define CRYPTO_ALG_TYPE_AEAD 0x00000009 @@ -99,6 +100,7 @@ struct crypto_blkcipher; struct crypto_hash; struct crypto_tfm; struct crypto_type; +struct skcipher_givcrypt_request; typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); @@ -178,6 +180,8 @@ struct ablkcipher_alg { unsigned int keylen); int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct ablkcipher_request *req); + int (*givencrypt)(struct skcipher_givcrypt_request *req); + int (*givdecrypt)(struct skcipher_givcrypt_request *req); unsigned int min_keysize; unsigned int max_keysize; @@ -320,6 +324,9 @@ struct ablkcipher_tfm { unsigned int keylen); int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct ablkcipher_request *req); + int (*givencrypt)(struct skcipher_givcrypt_request *req); + int (*givdecrypt)(struct skcipher_givcrypt_request *req); + unsigned int ivsize; unsigned int reqsize; }; -- cgit v1.1 From 23508e11ab3bb405dca66bf4d77e488bf2b07b0c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 27 Nov 2007 21:33:24 +0800 Subject: [CRYPTO] skcipher: Added geniv field This patch introduces the geniv field which indicates the default IV generator for each algorithm. It should point to a string that is not freed as long as the algorithm is registered. 
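For example, an algorithm definition now just names its generator at registration time (a hypothetical algorithm; "chainiv" is the generator added at the end of this series):

#include <crypto/algapi.h>

/* Illustrative only: a CBC-style blkcipher declaring its default IV
 * generator. The geniv string must stay valid for as long as the
 * algorithm is registered, so a string literal is the natural choice. */
static struct crypto_alg example_cbc_alg = {
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_type = &crypto_blkcipher_type,
        .cra_u.blkcipher = {
                .geniv = "chainiv",
                .ivsize = 16,
        },
};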
Signed-off-by: Herbert Xu --- crypto/ablkcipher.c | 2 ++ crypto/blkcipher.c | 2 ++ include/linux/crypto.h | 4 ++++ 3 files changed, 8 insertions(+) diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index e403d81..8f48b4d 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -96,6 +96,7 @@ static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize); + seq_printf(m, "geniv : %s\n", ablkcipher->geniv ?: ""); } const struct crypto_type crypto_ablkcipher_type = { @@ -142,6 +143,7 @@ static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize); + seq_printf(m, "geniv : %s\n", ablkcipher->geniv ?: ""); } const struct crypto_type crypto_givcipher_type = { diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 7939504..7a3e1a3 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -496,6 +496,8 @@ static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize); seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize); seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize); + seq_printf(m, "geniv : %s\n", alg->cra_blkcipher.geniv ?: + ""); } const struct crypto_type crypto_blkcipher_type = { diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 3656a24e..facafa1 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -183,6 +183,8 @@ struct ablkcipher_alg { int (*givencrypt)(struct skcipher_givcrypt_request *req); int (*givdecrypt)(struct skcipher_givcrypt_request *req); + const char *geniv; + unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; @@ -209,6 +211,8 @@ struct blkcipher_alg { struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes); + const char *geniv; + unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; -- cgit v1.1 From 927eead52c958829ef62c8aa5da2751033a2cf98 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 27 Nov 2007 21:15:31 +0800 Subject: [CRYPTO] cryptd: Use geniv of the underlying algorithm If the underlying algorithm specifies a specific geniv algorithm then we should use it for the cryptd version as well. Signed-off-by: Herbert Xu --- crypto/cryptd.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 1a5c45b..074298f 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -243,6 +243,8 @@ static struct crypto_instance *cryptd_alloc_blkcipher( inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize; inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize; + inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv; + inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx); inst->alg.cra_init = cryptd_blkcipher_init_tfm; -- cgit v1.1 From ecfc43292f68566c144afca966b46b371c26d56c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 5 Dec 2007 21:08:36 +1100 Subject: [CRYPTO] skcipher: Add skcipher_geniv_alloc/skcipher_geniv_free This patch creates the infrastructure to help the construction of givcipher templates that wrap around existing blkcipher/ablkcipher algorithms by adding an IV generator to them. 
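Putting the helpers together, a concrete IV-generator template would be built roughly as follows (the template name and the omitted givencrypt wiring are hypothetical; the skcipher_geniv_* helpers are the ones added by this patch):

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/module.h>

static struct crypto_template example_geniv_tmpl;

static struct crypto_instance *example_geniv_alloc(struct rtattr **tb)
{
        struct crypto_instance *inst;

        inst = skcipher_geniv_alloc(&example_geniv_tmpl, tb, 0, 0);
        if (IS_ERR(inst))
                return inst;

        /* a real template would also set cra_ablkcipher.givencrypt,
         * givdecrypt and the request context size here */
        inst->alg.cra_init = skcipher_geniv_init;
        inst->alg.cra_exit = skcipher_geniv_exit;

        return inst;
}

static struct crypto_template example_geniv_tmpl = {
        .name = "example_geniv",
        .alloc = example_geniv_alloc,
        .free = skcipher_geniv_free,
        .module = THIS_MODULE,
};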
Signed-off-by: Herbert Xu --- crypto/ablkcipher.c | 10 +- crypto/blkcipher.c | 185 ++++++++++++++++++++++++++++++++++++- include/crypto/internal/skcipher.h | 18 ++++ include/linux/crypto.h | 18 +++- 4 files changed, 225 insertions(+), 6 deletions(-) diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 8f48b4d..092d965 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -80,6 +80,7 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, crt->setkey = setkey; crt->encrypt = alg->encrypt; crt->decrypt = alg->decrypt; + crt->base = __crypto_ablkcipher_cast(tfm); crt->ivsize = alg->ivsize; return 0; @@ -122,11 +123,13 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type, if (alg->ivsize > PAGE_SIZE / 8) return -EINVAL; - crt->setkey = setkey; + crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ? + alg->setkey : setkey; crt->encrypt = alg->encrypt; crt->decrypt = alg->decrypt; crt->givencrypt = alg->givencrypt; crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt; + crt->base = __crypto_ablkcipher_cast(tfm); crt->ivsize = alg->ivsize; return 0; @@ -155,6 +158,11 @@ const struct crypto_type crypto_givcipher_type = { }; EXPORT_SYMBOL_GPL(crypto_givcipher_type); +const char *crypto_default_geniv(const struct crypto_alg *alg) +{ + return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv"; +} + int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, u32 type, u32 mask) { diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 7a3e1a3..ca6ef06 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -14,8 +14,8 @@ * */ +#include #include -#include #include #include #include @@ -450,6 +450,7 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm) crt->setkey = async_setkey; crt->encrypt = async_encrypt; crt->decrypt = async_decrypt; + crt->base = __crypto_ablkcipher_cast(tfm); crt->ivsize = alg->ivsize; return 0; @@ -509,5 +510,187 @@ const struct crypto_type crypto_blkcipher_type = { }; EXPORT_SYMBOL_GPL(crypto_blkcipher_type); +static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn, + const char *name, u32 type, u32 mask) +{ + struct crypto_alg *alg; + int err; + + type = crypto_skcipher_type(type); + mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV; + + alg = crypto_alg_mod_lookup(name, type, mask); + if (IS_ERR(alg)) + return PTR_ERR(alg); + + err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); + crypto_mod_put(alg); + return err; +} + +struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl, + struct rtattr **tb, u32 type, + u32 mask) +{ + struct { + int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct ablkcipher_request *req); + int (*decrypt)(struct ablkcipher_request *req); + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + + const char *geniv; + } balg; + const char *name; + struct crypto_skcipher_spawn *spawn; + struct crypto_attr_type *algt; + struct crypto_instance *inst; + struct crypto_alg *alg; + int err; + + algt = crypto_get_attr_type(tb); + err = PTR_ERR(algt); + if (IS_ERR(algt)) + return ERR_PTR(err); + + if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) & + algt->mask) + return ERR_PTR(-EINVAL); + + name = crypto_attr_alg_name(tb[1]); + err = PTR_ERR(name); + if (IS_ERR(name)) + return ERR_PTR(err); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + + spawn = 
crypto_instance_ctx(inst); + + /* Ignore async algorithms if necessary. */ + mask |= crypto_requires_sync(algt->type, algt->mask); + + crypto_set_skcipher_spawn(spawn, inst); + err = crypto_grab_nivcipher(spawn, name, type, mask); + if (err) + goto err_free_inst; + + alg = crypto_skcipher_spawn_alg(spawn); + + if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) { + balg.ivsize = alg->cra_blkcipher.ivsize; + balg.min_keysize = alg->cra_blkcipher.min_keysize; + balg.max_keysize = alg->cra_blkcipher.max_keysize; + + balg.setkey = async_setkey; + balg.encrypt = async_encrypt; + balg.decrypt = async_decrypt; + + balg.geniv = alg->cra_blkcipher.geniv; + } else { + balg.ivsize = alg->cra_ablkcipher.ivsize; + balg.min_keysize = alg->cra_ablkcipher.min_keysize; + balg.max_keysize = alg->cra_ablkcipher.max_keysize; + + balg.setkey = alg->cra_ablkcipher.setkey; + balg.encrypt = alg->cra_ablkcipher.encrypt; + balg.decrypt = alg->cra_ablkcipher.decrypt; + + balg.geniv = alg->cra_ablkcipher.geniv; + } + + err = -EINVAL; + if (!balg.ivsize) + goto err_drop_alg; + + /* + * This is only true if we're constructing an algorithm with its + * default IV generator. For the default generator we elide the + * template name and double-check the IV generator. + */ + if (algt->mask & CRYPTO_ALG_GENIV) { + if (!balg.geniv) + balg.geniv = crypto_default_geniv(alg); + err = -EAGAIN; + if (strcmp(tmpl->name, balg.geniv)) + goto err_drop_alg; + + memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); + memcpy(inst->alg.cra_driver_name, alg->cra_driver_name, + CRYPTO_MAX_ALG_NAME); + } else { + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, alg->cra_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_drop_alg; + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, alg->cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_drop_alg; + } + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV; + inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.cra_priority = alg->cra_priority; + inst->alg.cra_blocksize = alg->cra_blocksize; + inst->alg.cra_alignmask = alg->cra_alignmask; + inst->alg.cra_type = &crypto_givcipher_type; + + inst->alg.cra_ablkcipher.ivsize = balg.ivsize; + inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize; + inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize; + inst->alg.cra_ablkcipher.geniv = balg.geniv; + + inst->alg.cra_ablkcipher.setkey = balg.setkey; + inst->alg.cra_ablkcipher.encrypt = balg.encrypt; + inst->alg.cra_ablkcipher.decrypt = balg.decrypt; + +out: + return inst; + +err_drop_alg: + crypto_drop_skcipher(spawn); +err_free_inst: + kfree(inst); + inst = ERR_PTR(err); + goto out; +} +EXPORT_SYMBOL_GPL(skcipher_geniv_alloc); + +void skcipher_geniv_free(struct crypto_instance *inst) +{ + crypto_drop_skcipher(crypto_instance_ctx(inst)); + kfree(inst); +} +EXPORT_SYMBOL_GPL(skcipher_geniv_free); + +int skcipher_geniv_init(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_ablkcipher *cipher; + + cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst)); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + tfm->crt_ablkcipher.base = cipher; + tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher); + + return 0; +} +EXPORT_SYMBOL_GPL(skcipher_geniv_init); + +void skcipher_geniv_exit(struct crypto_tfm *tfm) +{ + crypto_free_ablkcipher(tfm->crt_ablkcipher.base); +} 
+EXPORT_SYMBOL_GPL(skcipher_geniv_exit); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Generic block chaining cipher type"); diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index c9402dd..07e7c82 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -15,6 +15,9 @@ #include #include +#include + +struct rtattr; struct crypto_skcipher_spawn { struct crypto_spawn base; @@ -50,6 +53,21 @@ static inline struct crypto_ablkcipher *crypto_spawn_skcipher( crypto_skcipher_mask(0))); } +const char *crypto_default_geniv(const struct crypto_alg *alg); + +struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl, + struct rtattr **tb, u32 type, + u32 mask); +void skcipher_geniv_free(struct crypto_instance *inst); +int skcipher_geniv_init(struct crypto_tfm *tfm); +void skcipher_geniv_exit(struct crypto_tfm *tfm); + +static inline struct crypto_ablkcipher *skcipher_geniv_cipher( + struct crypto_ablkcipher *geniv) +{ + return crypto_ablkcipher_crt(geniv)->base; +} + static inline void *skcipher_givcrypt_reqctx( struct skcipher_givcrypt_request *req) { diff --git a/include/linux/crypto.h b/include/linux/crypto.h index facafa1..fa7afa9 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -53,6 +53,12 @@ #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 /* + * This bit is set for symmetric key ciphers that have already been wrapped + * with a generic IV generator to prevent them from being wrapped again. + */ +#define CRYPTO_ALG_GENIV 0x00000200 + +/* * Transform masks and values (for crt_flags). */ #define CRYPTO_TFM_REQ_MASK 0x000fff00 @@ -331,6 +337,8 @@ struct ablkcipher_tfm { int (*givencrypt)(struct skcipher_givcrypt_request *req); int (*givdecrypt)(struct skcipher_givcrypt_request *req); + struct crypto_ablkcipher *base; + unsigned int ivsize; unsigned int reqsize; }; @@ -541,14 +549,14 @@ static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast( static inline u32 crypto_skcipher_type(u32 type) { - type &= ~CRYPTO_ALG_TYPE_MASK; + type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); type |= CRYPTO_ALG_TYPE_BLKCIPHER; return type; } static inline u32 crypto_skcipher_mask(u32 mask) { - mask &= ~CRYPTO_ALG_TYPE_MASK; + mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; return mask; } @@ -623,7 +631,9 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { - return crypto_ablkcipher_crt(tfm)->setkey(tfm, key, keylen); + struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm); + + return crt->setkey(crt->base, key, keylen); } static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( @@ -655,7 +665,7 @@ static inline unsigned int crypto_ablkcipher_reqsize( static inline void ablkcipher_request_set_tfm( struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) { - req->base.tfm = crypto_ablkcipher_tfm(tfm); + req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base); } static inline struct ablkcipher_request *ablkcipher_request_cast( -- cgit v1.1 From 7f47073911f0e4384d38a0827d28305a177c8816 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 27 Nov 2007 23:17:23 +0800 Subject: [CRYPTO] chainiv: Add chain IV generator The chain IV generator is the one we've been using in the IPsec stack. 
It simply starts out with a random IV, then uses the last block of each encrypted packet's cipher text as the IV for the next packet. It can only be used by synchronous ciphers since we have to make sure that we don't start the encryption of the next packet until the last one has completed. It does have the advantage of using very little CPU time since it doesn't have to generate anything at all. Signed-off-by: Herbert Xu --- crypto/Makefile | 1 + crypto/chainiv.c | 139 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 140 insertions(+) create mode 100644 crypto/chainiv.c diff --git a/crypto/Makefile b/crypto/Makefile index 2a1883f..4b2e0c3 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_CRYPTO_AEAD) += aead.o crypto_blkcipher-objs := ablkcipher.o crypto_blkcipher-objs += blkcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o +obj-$(CONFIG_CRYPTO_BLKCIPHER) += chainiv.o crypto_hash-objs := hash.o obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o diff --git a/crypto/chainiv.c b/crypto/chainiv.c new file mode 100644 index 0000000..38868d1 --- /dev/null +++ b/crypto/chainiv.c @@ -0,0 +1,139 @@ +/* + * chainiv: Chain IV Generator + * + * Generate IVs simply by using the last block of the previous encryption. + * This is mainly useful for CBC with a synchronous algorithm. + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +struct chainiv_ctx { + spinlock_t lock; + char iv[]; +}; + +static int chainiv_givencrypt(struct skcipher_givcrypt_request *req) +{ + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); + struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); + unsigned int ivsize; + int err; + + ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); + ablkcipher_request_set_callback(subreq, req->creq.base.flags & + ~CRYPTO_TFM_REQ_MAY_SLEEP, + req->creq.base.complete, + req->creq.base.data); + ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, + req->creq.nbytes, req->creq.info); + + spin_lock_bh(&ctx->lock); + + ivsize = crypto_ablkcipher_ivsize(geniv); + + memcpy(req->giv, ctx->iv, ivsize); + memcpy(subreq->info, ctx->iv, ivsize); + + err = crypto_ablkcipher_encrypt(subreq); + if (err) + goto unlock; + + memcpy(ctx->iv, subreq->info, ivsize); + +unlock: + spin_unlock_bh(&ctx->lock); + + return err; +} + +static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req) +{ + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); + struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + + spin_lock_bh(&ctx->lock); + if (crypto_ablkcipher_crt(geniv)->givencrypt != + chainiv_givencrypt_first) + goto unlock; + + crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt; + get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv)); + +unlock: + spin_unlock_bh(&ctx->lock); + + return chainiv_givencrypt(req); +} + +static int chainiv_init(struct crypto_tfm *tfm) +{ + struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); + struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + + spin_lock_init(&ctx->lock); + + tfm->crt_ablkcipher.reqsize = sizeof(struct 
ablkcipher_request); + + return skcipher_geniv_init(tfm); +} + +static struct crypto_template chainiv_tmpl; + +static struct crypto_instance *chainiv_alloc(struct rtattr **tb) +{ + struct crypto_instance *inst; + + inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(inst)) + goto out; + + inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first; + + inst->alg.cra_init = chainiv_init; + inst->alg.cra_exit = skcipher_geniv_exit; + + inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx) + + inst->alg.cra_ablkcipher.ivsize; + +out: + return inst; +} + +static struct crypto_template chainiv_tmpl = { + .name = "chainiv", + .alloc = chainiv_alloc, + .free = skcipher_geniv_free, + .module = THIS_MODULE, +}; + +static int __init chainiv_module_init(void) +{ + return crypto_register_template(&chainiv_tmpl); +} + +static void __exit chainiv_module_exit(void) +{ + crypto_unregister_template(&chainiv_tmpl); +} + +module_init(chainiv_module_init); +module_exit(chainiv_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Chain IV Generator"); -- cgit v1.1 From 15c67286685cddce207b646306e8819ec8268ede Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 30 Nov 2007 20:17:28 +1100 Subject: [CRYPTO] skcipher: Added skcipher_givcrypt_complete This patch adds the helper skcipher_givcrypt_complete which should be called when an ablkcipher algorithm has completed a givcrypt request. Signed-off-by: Herbert Xu --- include/crypto/internal/skcipher.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 07e7c82..80c5bfb 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -74,5 +74,17 @@ static inline void *skcipher_givcrypt_reqctx( return ablkcipher_request_ctx(&req->creq); } +static inline void ablkcipher_request_complete(struct ablkcipher_request *req, + int err) +{ + req->base.complete(&req->base, err); +} + +static inline void skcipher_givcrypt_complete( + struct skcipher_givcrypt_request *req, int err) +{ + ablkcipher_request_complete(&req->creq, err); +} + #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ -- cgit v1.1 From 806d183aa6cc565d0f6bd2fb7fc6bfb175cc4813 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 5 Dec 2007 12:10:53 +1100 Subject: [CRYPTO] eseqiv: Add Encrypted Sequence Number IV Generator This generator generates an IV based on a sequence number by xoring it with a salt and then encrypting it with the same key as used to encrypt the plain text. This algorithm requires that the block size be equal to the IV size. It is mainly useful for CBC. It has one noteworthy property that for IPsec the IV happens to lie just before the plain text so the IV generation simply increases the number of encrypted blocks by one. Therefore the cost of this generator is entirely dependent on the speed of the underlying cipher. 
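In outline, for CBC with block size equal to the IV size (a conceptual sketch, not the code below): the generator writes the zero-padded big-endian sequence number into one extra block B[0] chained in front of the plaintext, sets the CBC IV to the salt, and runs a single encryption pass:

	C[0] = E_K(salt ^ B[0])		/* copied back as the generated IV */
	C[i] = E_K(C[i-1] ^ P[i])	/* the ciphertext proper */

so the visible IV is effectively E_K(salt ^ seq), and the only extra work is that one additional block.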
Signed-off-by: Herbert Xu --- crypto/Makefile | 1 + crypto/eseqiv.c | 264 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 265 insertions(+) create mode 100644 crypto/eseqiv.c diff --git a/crypto/Makefile b/crypto/Makefile index 4b2e0c3..968b796 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -14,6 +14,7 @@ crypto_blkcipher-objs := ablkcipher.o crypto_blkcipher-objs += blkcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER) += chainiv.o +obj-$(CONFIG_CRYPTO_BLKCIPHER) += eseqiv.o crypto_hash-objs := hash.o obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c new file mode 100644 index 0000000..eb90d27 --- /dev/null +++ b/crypto/eseqiv.c @@ -0,0 +1,264 @@ +/* + * eseqiv: Encrypted Sequence Number IV Generator + * + * This generator generates an IV based on a sequence number by xoring it + * with a salt and then encrypting it with the same key as used to encrypt + * the plain text. This algorithm requires that the block size be equal + * to the IV size. It is mainly useful for CBC. + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct eseqiv_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[]; +}; + +struct eseqiv_ctx { + spinlock_t lock; + unsigned int reqoff; + char salt[]; +}; + +static void eseqiv_complete2(struct skcipher_givcrypt_request *req) +{ + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); + struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); + + memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail, + crypto_ablkcipher_alignmask(geniv) + 1), + crypto_ablkcipher_ivsize(geniv)); +} + +static void eseqiv_complete(struct crypto_async_request *base, int err) +{ + struct skcipher_givcrypt_request *req = base->data; + + if (err) + goto out; + + eseqiv_complete2(req); + +out: + skcipher_givcrypt_complete(req, err); +} + +static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg, + int chain) +{ + if (chain) { + head->length += sg->length; + sg = scatterwalk_sg_next(sg); + } + + if (sg) + scatterwalk_sg_chain(head, 2, sg); + else + sg_mark_end(head); +} + +static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) +{ + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); + struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); + struct ablkcipher_request *subreq; + crypto_completion_t complete; + void *data; + struct scatterlist *osrc, *odst; + struct scatterlist *dst; + struct page *srcp; + struct page *dstp; + u8 *giv; + u8 *vsrc; + u8 *vdst; + __be64 seq; + unsigned int ivsize; + unsigned int len; + int err; + + subreq = (void *)(reqctx->tail + ctx->reqoff); + ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); + + giv = req->giv; + complete = req->creq.base.complete; + data = req->creq.base.data; + + osrc = req->creq.src; + odst = req->creq.dst; + srcp = sg_page(osrc); + dstp = sg_page(odst); + vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset; + vdst = PageHighMem(dstp) ? 
NULL : page_address(dstp) + odst->offset; + + ivsize = crypto_ablkcipher_ivsize(geniv); + + if (vsrc != giv + ivsize && vdst != giv + ivsize) { + giv = PTR_ALIGN((u8 *)reqctx->tail, + crypto_ablkcipher_alignmask(geniv) + 1); + complete = eseqiv_complete; + data = req; + } + + ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete, + data); + + sg_init_table(reqctx->src, 2); + sg_set_buf(reqctx->src, giv, ivsize); + eseqiv_chain(reqctx->src, osrc, vsrc == giv + ivsize); + + dst = reqctx->src; + if (osrc != odst) { + sg_init_table(reqctx->dst, 2); + sg_set_buf(reqctx->dst, giv, ivsize); + eseqiv_chain(reqctx->dst, odst, vdst == giv + ivsize); + + dst = reqctx->dst; + } + + ablkcipher_request_set_crypt(subreq, reqctx->src, dst, + req->creq.nbytes, req->creq.info); + + memcpy(req->creq.info, ctx->salt, ivsize); + + len = ivsize; + if (ivsize > sizeof(u64)) { + memset(req->giv, 0, ivsize - sizeof(u64)); + len = sizeof(u64); + } + seq = cpu_to_be64(req->seq); + memcpy(req->giv + ivsize - len, &seq, len); + + err = crypto_ablkcipher_encrypt(subreq); + if (err) + goto out; + + eseqiv_complete2(req); + +out: + return err; +} + +static int eseqiv_givencrypt_first(struct skcipher_givcrypt_request *req) +{ + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); + struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + + spin_lock_bh(&ctx->lock); + if (crypto_ablkcipher_crt(geniv)->givencrypt != eseqiv_givencrypt_first) + goto unlock; + + crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt; + get_random_bytes(ctx->salt, crypto_ablkcipher_ivsize(geniv)); + +unlock: + spin_unlock_bh(&ctx->lock); + + return eseqiv_givencrypt(req); +} + +static int eseqiv_init(struct crypto_tfm *tfm) +{ + struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); + struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + unsigned long alignmask; + unsigned int reqsize; + + spin_lock_init(&ctx->lock); + + alignmask = crypto_tfm_ctx_alignment() - 1; + reqsize = sizeof(struct eseqiv_request_ctx); + + if (alignmask & reqsize) { + alignmask &= reqsize; + alignmask--; + } + + alignmask = ~alignmask; + alignmask &= crypto_ablkcipher_alignmask(geniv); + + reqsize += alignmask; + reqsize += crypto_ablkcipher_ivsize(geniv); + reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment()); + + ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx); + + tfm->crt_ablkcipher.reqsize = reqsize + + sizeof(struct ablkcipher_request); + + return skcipher_geniv_init(tfm); +} + +static struct crypto_template eseqiv_tmpl; + +static struct crypto_instance *eseqiv_alloc(struct rtattr **tb) +{ + struct crypto_instance *inst; + int err; + + inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0); + if (IS_ERR(inst)) + goto out; + + err = -EINVAL; + if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize) + goto free_inst; + + inst->alg.cra_ablkcipher.givencrypt = eseqiv_givencrypt_first; + + inst->alg.cra_init = eseqiv_init; + inst->alg.cra_exit = skcipher_geniv_exit; + + inst->alg.cra_ctxsize = sizeof(struct eseqiv_ctx); + inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; + +out: + return inst; + +free_inst: + skcipher_geniv_free(inst); + inst = ERR_PTR(err); + goto out; +} + +static struct crypto_template eseqiv_tmpl = { + .name = "eseqiv", + .alloc = eseqiv_alloc, + .free = skcipher_geniv_free, + .module = THIS_MODULE, +}; + +static int __init eseqiv_module_init(void) +{ + return crypto_register_template(&eseqiv_tmpl); +} + +static void __exit eseqiv_module_exit(void) +{ + 
crypto_unregister_template(&eseqiv_tmpl); +} + +module_init(eseqiv_module_init); +module_exit(eseqiv_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator"); -- cgit v1.1 From b9c55aa475599183d0eab6833ea23e70c52dd24b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 4 Dec 2007 12:46:48 +1100 Subject: [CRYPTO] skcipher: Create default givcipher instances This patch makes crypto_alloc_ablkcipher/crypto_grab_skcipher always return algorithms that are capable of generating their own IVs through givencrypt and givdecrypt. Each algorithm may specify its default IV generator through the geniv field. For algorithms that do not set the geniv field, the blkcipher layer will pick a default. Currently it's chainiv for synchronous algorithms and eseqiv for asynchronous algorithms. Note that if these wrappers do not work on an algorithm then that algorithm must specify its own geniv or it can't be used at all. Signed-off-by: Herbert Xu --- crypto/ablkcipher.c | 158 ++++++++++++++++++++++++++++++++++++- crypto/api.c | 19 +++-- crypto/blkcipher.c | 4 + crypto/internal.h | 2 + include/crypto/internal/skcipher.h | 2 + include/linux/crypto.h | 9 +-- 6 files changed, 181 insertions(+), 13 deletions(-) diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 092d965..d1df528 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include @@ -68,6 +70,16 @@ static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type, return alg->cra_ctxsize; } +int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req) +{ + return crypto_ablkcipher_encrypt(&req->creq); +} + +int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req) +{ + return crypto_ablkcipher_decrypt(&req->creq); +} + static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) { @@ -80,6 +92,10 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, crt->setkey = setkey; crt->encrypt = alg->encrypt; crt->decrypt = alg->decrypt; + if (!alg->ivsize) { + crt->givencrypt = skcipher_null_givencrypt; + crt->givdecrypt = skcipher_null_givdecrypt; + } crt->base = __crypto_ablkcipher_cast(tfm); crt->ivsize = alg->ivsize; @@ -163,6 +179,108 @@ const char *crypto_default_geniv(const struct crypto_alg *alg) return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv"; } +static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) +{ + struct rtattr *tb[3]; + struct { + struct rtattr attr; + struct crypto_attr_type data; + } ptype; + struct { + struct rtattr attr; + struct crypto_attr_alg data; + } palg; + struct crypto_template *tmpl; + struct crypto_instance *inst; + struct crypto_alg *larval; + const char *geniv; + int err; + + larval = crypto_larval_lookup(alg->cra_driver_name, + CRYPTO_ALG_TYPE_GIVCIPHER, + CRYPTO_ALG_TYPE_MASK); + err = PTR_ERR(larval); + if (IS_ERR(larval)) + goto out; + + err = -EAGAIN; + if (!crypto_is_larval(larval)) + goto drop_larval; + + ptype.attr.rta_len = sizeof(ptype); + ptype.attr.rta_type = CRYPTOA_TYPE; + ptype.data.type = type | CRYPTO_ALG_GENIV; + /* GENIV tells the template that we're making a default geniv. */ + ptype.data.mask = mask | CRYPTO_ALG_GENIV; + tb[0] = &ptype.attr; + + palg.attr.rta_len = sizeof(palg); + palg.attr.rta_type = CRYPTOA_ALG; + /* Must use the exact name to locate ourselves. 
*/ + memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME); + tb[1] = &palg.attr; + + tb[2] = NULL; + + if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + geniv = alg->cra_blkcipher.geniv; + else + geniv = alg->cra_ablkcipher.geniv; + + if (!geniv) + geniv = crypto_default_geniv(alg); + + tmpl = crypto_lookup_template(geniv); + err = -ENOENT; + if (!tmpl) + goto kill_larval; + + inst = tmpl->alloc(tb); + err = PTR_ERR(inst); + if (IS_ERR(inst)) + goto put_tmpl; + + if ((err = crypto_register_instance(tmpl, inst))) { + tmpl->free(inst); + goto put_tmpl; + } + + /* Redo the lookup to use the instance we just registered. */ + err = -EAGAIN; + +put_tmpl: + crypto_tmpl_put(tmpl); +kill_larval: + crypto_larval_kill(larval); +drop_larval: + crypto_mod_put(larval); +out: + crypto_mod_put(alg); + return err; +} + +static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, + u32 mask) +{ + struct crypto_alg *alg; + + alg = crypto_alg_mod_lookup(name, type, mask); + if (IS_ERR(alg)) + return alg; + + if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_GIVCIPHER) + return alg; + + if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize : + alg->cra_ablkcipher.ivsize)) + return alg; + + return ERR_PTR(crypto_givcipher_default(alg, type, mask)); +} + int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, u32 type, u32 mask) { @@ -172,7 +290,7 @@ int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, type = crypto_skcipher_type(type); mask = crypto_skcipher_mask(mask); - alg = crypto_alg_mod_lookup(name, type, mask); + alg = crypto_lookup_skcipher(name, type, mask); if (IS_ERR(alg)) return PTR_ERR(alg); @@ -182,5 +300,43 @@ int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, } EXPORT_SYMBOL_GPL(crypto_grab_skcipher); +struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, + u32 type, u32 mask) +{ + struct crypto_tfm *tfm; + int err; + + type = crypto_skcipher_type(type); + mask = crypto_skcipher_mask(mask); + + for (;;) { + struct crypto_alg *alg; + + alg = crypto_lookup_skcipher(alg_name, type, mask); + if (IS_ERR(alg)) { + err = PTR_ERR(alg); + goto err; + } + + tfm = __crypto_alloc_tfm(alg, type, mask); + if (!IS_ERR(tfm)) + return __crypto_ablkcipher_cast(tfm); + + crypto_mod_put(alg); + err = PTR_ERR(tfm); + +err: + if (err != -EAGAIN) + break; + if (signal_pending(current)) { + err = -EINTR; + break; + } + } + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous block chaining cipher type"); diff --git a/crypto/api.c b/crypto/api.c index 1f5c724..a2496d1 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -137,7 +137,7 @@ static struct crypto_alg *crypto_larval_alloc(const char *name, u32 type, return alg; } -static void crypto_larval_kill(struct crypto_alg *alg) +void crypto_larval_kill(struct crypto_alg *alg) { struct crypto_larval *larval = (void *)alg; @@ -147,6 +147,7 @@ static void crypto_larval_kill(struct crypto_alg *alg) complete_all(&larval->completion); crypto_alg_put(alg); } +EXPORT_SYMBOL_GPL(crypto_larval_kill); static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) { @@ -176,11 +177,9 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, return alg; } -struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) +struct crypto_alg 
*crypto_larval_lookup(const char *name, u32 type, u32 mask) { struct crypto_alg *alg; - struct crypto_alg *larval; - int ok; if (!name) return ERR_PTR(-ENOENT); @@ -193,7 +192,17 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) if (alg) return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg; - larval = crypto_larval_alloc(name, type, mask); + return crypto_larval_alloc(name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_larval_lookup); + +struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) +{ + struct crypto_alg *alg; + struct crypto_alg *larval; + int ok; + + larval = crypto_larval_lookup(name, type, mask); if (IS_ERR(larval) || !crypto_is_larval(larval)) return larval; diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index ca6ef06..4a7e65c 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -450,6 +450,10 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm) crt->setkey = async_setkey; crt->encrypt = async_encrypt; crt->decrypt = async_decrypt; + if (!alg->ivsize) { + crt->givencrypt = skcipher_null_givencrypt; + crt->givdecrypt = skcipher_null_givdecrypt; + } crt->base = __crypto_ablkcipher_cast(tfm); crt->ivsize = alg->ivsize; diff --git a/crypto/internal.h b/crypto/internal.h index cb13952..32f4c21 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -93,6 +93,8 @@ void crypto_exit_digest_ops(struct crypto_tfm *tfm); void crypto_exit_cipher_ops(struct crypto_tfm *tfm); void crypto_exit_compress_ops(struct crypto_tfm *tfm); +void crypto_larval_kill(struct crypto_alg *alg); +struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); void crypto_larval_error(const char *name, u32 type, u32 mask); void crypto_shoot_alg(struct crypto_alg *alg); diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 80c5bfb..2071999 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -53,6 +53,8 @@ static inline struct crypto_ablkcipher *crypto_spawn_skcipher( crypto_skcipher_mask(0))); } +int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req); +int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req); const char *crypto_default_geniv(const struct crypto_alg *alg); struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl, diff --git a/include/linux/crypto.h b/include/linux/crypto.h index fa7afa9..835dcaf 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -561,13 +561,8 @@ static inline u32 crypto_skcipher_mask(u32 mask) return mask; } -static inline struct crypto_ablkcipher *crypto_alloc_ablkcipher( - const char *alg_name, u32 type, u32 mask) -{ - return __crypto_ablkcipher_cast( - crypto_alloc_base(alg_name, crypto_skcipher_type(type), - crypto_skcipher_mask(mask))); -} +struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, + u32 type, u32 mask); static inline struct crypto_tfm *crypto_ablkcipher_tfm( struct crypto_ablkcipher *tfm) -- cgit v1.1 From 9ffde35a8edd3486cd7c80af931c15cec99a1a0d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 17 Dec 2007 20:12:49 +0800 Subject: [CRYPTO] authenc: Use crypto_grab_skcipher This patch converts the authenc algorithm over to crypto_grab_skcipher which is a prerequisite for IV generation. This patch also changes authenc to set its ASYNC status depending on the ASYNC status of the underlying skcipher. 
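In outline, the allocation path now grabs the skcipher by name through a spawn and inherits its ASYNC flag (a condensed excerpt of the diff below; the elided parts are name construction and error handling):

	crypto_set_skcipher_spawn(&ctx->enc, inst);
	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
	...
	enc = crypto_skcipher_spawn_alg(&ctx->enc);
	...
	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
	inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;

crypto_requires_sync makes the grab demand a synchronous implementation whenever the user asked for a synchronous authenc.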
Signed-off-by: Herbert Xu --- crypto/authenc.c | 65 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 27 deletions(-) diff --git a/crypto/authenc.c b/crypto/authenc.c index 394e733..2d609b7 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -10,7 +10,7 @@ * */ -#include +#include #include #include #include @@ -23,7 +23,7 @@ struct authenc_instance_ctx { struct crypto_spawn auth; - struct crypto_spawn enc; + struct crypto_skcipher_spawn enc; }; struct crypto_authenc_ctx { @@ -237,7 +237,7 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) if (IS_ERR(auth)) return PTR_ERR(auth); - enc = crypto_spawn_ablkcipher(&ictx->enc); + enc = crypto_spawn_skcipher(&ictx->enc); err = PTR_ERR(enc); if (IS_ERR(enc)) goto err_free_hash; @@ -270,42 +270,36 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm) static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) { + struct crypto_attr_type *algt; struct crypto_instance *inst; struct crypto_alg *auth; struct crypto_alg *enc; struct authenc_instance_ctx *ctx; + const char *enc_name; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD); - if (err) + algt = crypto_get_attr_type(tb); + err = PTR_ERR(algt); + if (IS_ERR(algt)) return ERR_PTR(err); + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) + return ERR_PTR(-EINVAL); + auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK); if (IS_ERR(auth)) return ERR_PTR(PTR_ERR(auth)); - enc = crypto_attr_alg(tb[2], CRYPTO_ALG_TYPE_BLKCIPHER, - CRYPTO_ALG_TYPE_BLKCIPHER_MASK); - inst = ERR_PTR(PTR_ERR(enc)); - if (IS_ERR(enc)) + enc_name = crypto_attr_alg_name(tb[2]); + err = PTR_ERR(enc_name); + if (IS_ERR(enc_name)) goto out_put_auth; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); err = -ENOMEM; if (!inst) - goto out_put_enc; - - err = -ENAMETOOLONG; - if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, - "authenc(%s,%s)", auth->cra_name, enc->cra_name) >= - CRYPTO_MAX_ALG_NAME) - goto err_free_inst; - - if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "authenc(%s,%s)", auth->cra_driver_name, - enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto err_free_inst; + goto out_put_auth; ctx = crypto_instance_ctx(inst); @@ -313,11 +307,28 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) if (err) goto err_free_inst; - err = crypto_init_spawn(&ctx->enc, enc, inst, CRYPTO_ALG_TYPE_MASK); + crypto_set_skcipher_spawn(&ctx->enc, inst); + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); if (err) goto err_drop_auth; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; + enc = crypto_skcipher_spawn_alg(&ctx->enc); + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, + "authenc(%s,%s)", auth->cra_name, enc->cra_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_drop_enc; + + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "authenc(%s,%s)", auth->cra_driver_name, + enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_drop_enc; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; + inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority; inst->alg.cra_blocksize = enc->cra_blocksize; inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask; @@ -338,16 +349,16 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) inst->alg.cra_aead.decrypt = crypto_authenc_decrypt; 
out: - crypto_mod_put(enc); -out_put_auth: crypto_mod_put(auth); return inst; +err_drop_enc: + crypto_drop_skcipher(&ctx->enc); err_drop_auth: crypto_drop_spawn(&ctx->auth); err_free_inst: kfree(inst); -out_put_enc: +out_put_auth: inst = ERR_PTR(err); goto out; } @@ -356,7 +367,7 @@ static void crypto_authenc_free(struct crypto_instance *inst) { struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); - crypto_drop_spawn(&ctx->enc); + crypto_drop_skcipher(&ctx->enc); crypto_drop_spawn(&ctx->auth); kfree(inst); } -- cgit v1.1 From d00aa19b507b39ee9a680d0d2ac2ae483686453a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 7 Dec 2007 20:31:10 +0800 Subject: [CRYPTO] gcm: Allow block cipher parameter This patch adds the gcm_base template which takes a block cipher parameter instead of cipher. This allows the user to specify a specific CTR implementation. This also fixes a leak of the cipher algorithm that was previously looked up but never freed. Signed-off-by: Herbert Xu --- crypto/gcm.c | 113 ++++++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 89 insertions(+), 24 deletions(-) diff --git a/crypto/gcm.c b/crypto/gcm.c index f6bee6f..e87e5de 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -413,30 +413,23 @@ static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) crypto_free_ablkcipher(ctx->ctr); } -static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) +static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, + const char *full_name, + const char *ctr_name) { + struct crypto_attr_type *algt; struct crypto_instance *inst; struct crypto_alg *ctr; - struct crypto_alg *cipher; struct gcm_instance_ctx *ctx; int err; - char ctr_name[CRYPTO_MAX_ALG_NAME]; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD); - if (err) + algt = crypto_get_attr_type(tb); + err = PTR_ERR(algt); + if (IS_ERR(algt)) return ERR_PTR(err); - cipher = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK); - - inst = ERR_PTR(PTR_ERR(cipher)); - if (IS_ERR(cipher)) - return inst; - - inst = ERR_PTR(ENAMETOOLONG); - if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", - cipher->cra_name) >= CRYPTO_MAX_ALG_NAME) - return inst; + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) + return ERR_PTR(-EINVAL); ctr = crypto_alg_mod_lookup(ctr_name, CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK); @@ -444,7 +437,14 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) if (IS_ERR(ctr)) return ERR_PTR(PTR_ERR(ctr)); - if (cipher->cra_blocksize != 16) + /* We only support 16-byte blocks. */ + if ((ctr->cra_type == &crypto_blkcipher_type ? + ctr->cra_blkcipher.ivsize : ctr->cra_ablkcipher.ivsize) != 16) + goto out_put_ctr; + + /* Not a stream cipher? 
*/ + err = -EINVAL; + if (ctr->cra_blocksize != 1) goto out_put_ctr; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); @@ -453,21 +453,21 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) goto out_put_ctr; err = -ENAMETOOLONG; - if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, - "gcm(%s)", cipher->cra_name) >= CRYPTO_MAX_ALG_NAME || - snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "gcm(%s)", cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "gcm_base(%s)", ctr->cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) goto err_free_inst; - ctx = crypto_instance_ctx(inst); err = crypto_init_spawn(&ctx->ctr, ctr, inst, CRYPTO_ALG_TYPE_MASK); if (err) goto err_free_inst; + memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME); + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; inst->alg.cra_priority = ctr->cra_priority; - inst->alg.cra_blocksize = 16; + inst->alg.cra_blocksize = 1; inst->alg.cra_alignmask = ctr->cra_alignmask | (__alignof__(u64) - 1); inst->alg.cra_type = &crypto_aead_type; inst->alg.cra_aead.ivsize = 16; @@ -489,6 +489,29 @@ out_put_ctr: goto out; } +static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) +{ + int err; + const char *cipher_name; + char ctr_name[CRYPTO_MAX_ALG_NAME]; + char full_name[CRYPTO_MAX_ALG_NAME]; + + cipher_name = crypto_attr_alg_name(tb[1]); + err = PTR_ERR(cipher_name); + if (IS_ERR(cipher_name)) + return ERR_PTR(err); + + if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >= + CRYPTO_MAX_ALG_NAME) + return ERR_PTR(-ENAMETOOLONG); + + if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >= + CRYPTO_MAX_ALG_NAME) + return ERR_PTR(-ENAMETOOLONG); + + return crypto_gcm_alloc_common(tb, full_name, ctr_name); +} + static void crypto_gcm_free(struct crypto_instance *inst) { struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); @@ -504,14 +527,55 @@ static struct crypto_template crypto_gcm_tmpl = { .module = THIS_MODULE, }; +static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) +{ + int err; + const char *ctr_name; + char full_name[CRYPTO_MAX_ALG_NAME]; + + ctr_name = crypto_attr_alg_name(tb[1]); + err = PTR_ERR(ctr_name); + if (IS_ERR(ctr_name)) + return ERR_PTR(err); + + if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)", + ctr_name) >= CRYPTO_MAX_ALG_NAME) + return ERR_PTR(-ENAMETOOLONG); + + return crypto_gcm_alloc_common(tb, full_name, ctr_name); +} + +static struct crypto_template crypto_gcm_base_tmpl = { + .name = "gcm_base", + .alloc = crypto_gcm_base_alloc, + .free = crypto_gcm_free, + .module = THIS_MODULE, +}; + static int __init crypto_gcm_module_init(void) { - return crypto_register_template(&crypto_gcm_tmpl); + int err; + + err = crypto_register_template(&crypto_gcm_base_tmpl); + if (err) + goto out; + + err = crypto_register_template(&crypto_gcm_tmpl); + if (err) + goto out_undo_base; + +out: + return err; + +out_undo_base: + crypto_unregister_template(&crypto_gcm_base_tmpl); + goto out; } static void __exit crypto_gcm_module_exit(void) { crypto_unregister_template(&crypto_gcm_tmpl); + crypto_unregister_template(&crypto_gcm_base_tmpl); } module_init(crypto_gcm_module_init); @@ -520,3 +584,4 @@ module_exit(crypto_gcm_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Galois/Counter Mode"); MODULE_AUTHOR("Mikko Herranen "); +MODULE_ALIAS("gcm_base"); -- cgit v1.1 From 1472e5ebaac14dafbc0f978b5e951f1e9ca0b251 Mon Sep 17 00:00:00 2001 From: Herbert Xu 
Date: Fri, 7 Dec 2007 19:26:11 +0800 Subject: [CRYPTO] gcm: Use crypto_grab_skcipher This patch converts the gcm algorithm over to crypto_grab_skcipher which is a prerequisite for IV generation. Signed-off-by: Herbert Xu --- crypto/gcm.c | 49 +++++++++++++++++++++++-------------------------- 1 file changed, 23 insertions(+), 26 deletions(-) diff --git a/crypto/gcm.c b/crypto/gcm.c index e87e5de..d539f5e 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -8,8 +8,8 @@ * by the Free Software Foundation. */ -#include #include +#include #include #include #include @@ -18,10 +18,8 @@ #include #include -#include "internal.h" - struct gcm_instance_ctx { - struct crypto_spawn ctr; + struct crypto_skcipher_spawn ctr; }; struct crypto_gcm_ctx { @@ -386,7 +384,7 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) unsigned long align; int err; - ctr = crypto_spawn_ablkcipher(&ictx->ctr); + ctr = crypto_spawn_skcipher(&ictx->ctr); err = PTR_ERR(ctr); if (IS_ERR(ctr)) return err; @@ -431,15 +429,22 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); - ctr = crypto_alg_mod_lookup(ctr_name, CRYPTO_ALG_TYPE_BLKCIPHER, - CRYPTO_ALG_TYPE_MASK); + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); - if (IS_ERR(ctr)) - return ERR_PTR(PTR_ERR(ctr)); + ctx = crypto_instance_ctx(inst); + crypto_set_skcipher_spawn(&ctx->ctr, inst); + err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); + if (err) + goto err_free_inst; + + ctr = crypto_skcipher_spawn_alg(&ctx->ctr); /* We only support 16-byte blocks. */ - if ((ctr->cra_type == &crypto_blkcipher_type ? - ctr->cra_blkcipher.ivsize : ctr->cra_ablkcipher.ivsize) != 16) + if (ctr->cra_ablkcipher.ivsize != 16) goto out_put_ctr; /* Not a stream cipher? 
*/ @@ -447,25 +452,16 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, if (ctr->cra_blocksize != 1) goto out_put_ctr; - inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); - err = -ENOMEM; - if (!inst) - goto out_put_ctr; - err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)", ctr->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto err_free_inst; - - ctx = crypto_instance_ctx(inst); - err = crypto_init_spawn(&ctx->ctr, ctr, inst, CRYPTO_ALG_TYPE_MASK); - if (err) - goto err_free_inst; + goto out_put_ctr; memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME); - inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; + inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC; inst->alg.cra_priority = ctr->cra_priority; inst->alg.cra_blocksize = 1; inst->alg.cra_alignmask = ctr->cra_alignmask | (__alignof__(u64) - 1); @@ -480,11 +476,12 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; out: - crypto_mod_put(ctr); return inst; + +out_put_ctr: + crypto_drop_skcipher(&ctx->ctr); err_free_inst: kfree(inst); -out_put_ctr: inst = ERR_PTR(err); goto out; } @@ -516,7 +513,7 @@ static void crypto_gcm_free(struct crypto_instance *inst) { struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); - crypto_drop_spawn(&ctx->ctr); + crypto_drop_skcipher(&ctx->ctr); kfree(inst); } -- cgit v1.1 From 45d44eb56ad197cfccb8f84b5df64abff8b7cb96 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 2 Dec 2007 21:21:02 +1100 Subject: [CRYPTO] skcipher: Remove crypto_spawn_ablkcipher Now that gcm and authenc have been converted to crypto_spawn_skcipher, this patch removes the obsolete crypto_spawn_ablkcipher function. Signed-off-by: Herbert Xu --- include/crypto/algapi.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index fda1759..60d06e7 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -198,14 +198,6 @@ static inline struct crypto_instance *crypto_aead_alg_instance( return crypto_tfm_alg_instance(&aead->base); } -static inline struct crypto_ablkcipher *crypto_spawn_ablkcipher( - struct crypto_spawn *spawn) -{ - return __crypto_ablkcipher_cast( - crypto_spawn_tfm(spawn, crypto_skcipher_type(0), - crypto_skcipher_mask(0))); -} - static inline struct crypto_blkcipher *crypto_spawn_blkcipher( struct crypto_spawn *spawn) { -- cgit v1.1 From 0a270321dbf948963aeb0e8382fe17d2c2eb3771 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 30 Nov 2007 21:38:37 +1100 Subject: [CRYPTO] seqiv: Add Sequence Number IV Generator This generator generates an IV based on a sequence number by xoring it with a salt. This algorithm is mainly useful for CTR and similar modes. This patch also sets it as the default IV generator for ctr. Signed-off-by: Herbert Xu --- crypto/Kconfig | 9 +++ crypto/Makefile | 1 + crypto/ctr.c | 2 + crypto/seqiv.c | 186 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 198 insertions(+) create mode 100644 crypto/seqiv.c diff --git a/crypto/Kconfig b/crypto/Kconfig index c4b6c91..7ad9711 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -32,6 +32,14 @@ config CRYPTO_BLKCIPHER tristate select CRYPTO_ALGAPI +config CRYPTO_SEQIV + tristate "Sequence Number IV Generator" + select CRYPTO_BLKCIPHER + help + This IV generator generates an IV based on a sequence number by + xoring it with a salt. 
This algorithm is mainly useful for CTR + and similar modes. + config CRYPTO_HASH tristate select CRYPTO_ALGAPI @@ -197,6 +205,7 @@ config CRYPTO_XTS config CRYPTO_CTR tristate "CTR support" select CRYPTO_BLKCIPHER + select CRYPTO_SEQIV select CRYPTO_MANAGER help CTR: Counter mode diff --git a/crypto/Makefile b/crypto/Makefile index 968b796..1b99b3a 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -15,6 +15,7 @@ crypto_blkcipher-objs += blkcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER) += chainiv.o obj-$(CONFIG_CRYPTO_BLKCIPHER) += eseqiv.o +obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o crypto_hash-objs := hash.o obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o diff --git a/crypto/ctr.c b/crypto/ctr.c index 1052b31..2d7425f 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -361,6 +361,8 @@ static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb) inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize + CTR_RFC3686_NONCE_SIZE; + inst->alg.cra_blkcipher.geniv = "seqiv"; + inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx); inst->alg.cra_init = crypto_rfc3686_init_tfm; diff --git a/crypto/seqiv.c b/crypto/seqiv.c new file mode 100644 index 0000000..9c2d80d --- /dev/null +++ b/crypto/seqiv.c @@ -0,0 +1,186 @@ +/* + * seqiv: Sequence Number IV Generator + * + * This generator generates an IV based on a sequence number by xoring it + * with a salt. This algorithm is mainly useful for CTR and similar modes. + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct seqiv_ctx { + spinlock_t lock; + u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); +}; + +static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err) +{ + struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); + struct crypto_ablkcipher *geniv; + + if (err == -EINPROGRESS) + return; + + if (err) + goto out; + + geniv = skcipher_givcrypt_reqtfm(req); + memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv)); + +out: + kfree(subreq->info); +} + +static void seqiv_complete(struct crypto_async_request *base, int err) +{ + struct skcipher_givcrypt_request *req = base->data; + + seqiv_complete2(req, err); + skcipher_givcrypt_complete(req, err); +} + +static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) +{ + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); + struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); + crypto_completion_t complete; + void *data; + u8 *info; + __be64 seq; + unsigned int ivsize; + unsigned int len; + int err; + + ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); + + complete = req->creq.base.complete; + data = req->creq.base.data; + info = req->creq.info; + + ivsize = crypto_ablkcipher_ivsize(geniv); + + if (unlikely(!IS_ALIGNED((unsigned long)info, + crypto_ablkcipher_alignmask(geniv) + 1))) { + info = kmalloc(ivsize, req->creq.base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL: + GFP_ATOMIC); + if (!info) + return -ENOMEM; + + complete = seqiv_complete; + data = req; + } + + ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete, + data); + ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, + req->creq.nbytes, info); + + len = ivsize; + if (ivsize > sizeof(u64)) { + memset(info, 0, ivsize - sizeof(u64)); + len = sizeof(u64); + } + seq = cpu_to_be64(req->seq); + memcpy(info + ivsize - len, &seq, len); + crypto_xor(info, ctx->salt, ivsize); + + memcpy(req->giv, info, ivsize); + + err = crypto_ablkcipher_encrypt(subreq); + if (unlikely(info != req->creq.info)) + seqiv_complete2(req, err); + return err; +} + +static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req) +{ + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); + struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + + spin_lock_bh(&ctx->lock); + if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first) + goto unlock; + + crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt; + get_random_bytes(ctx->salt, crypto_ablkcipher_ivsize(geniv)); + +unlock: + spin_unlock_bh(&ctx->lock); + + return seqiv_givencrypt(req); +} + +static int seqiv_init(struct crypto_tfm *tfm) +{ + struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); + struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + + spin_lock_init(&ctx->lock); + + tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); + + return skcipher_geniv_init(tfm); +} + +static struct crypto_template seqiv_tmpl; + +static struct crypto_instance *seqiv_alloc(struct rtattr **tb) +{ + struct crypto_instance *inst; + + inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0); + if (IS_ERR(inst)) + goto out; + + inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first; + + inst->alg.cra_init = seqiv_init; + inst->alg.cra_exit = skcipher_geniv_exit; + + inst->alg.cra_alignmask |= __alignof__(u32) - 1; + + inst->alg.cra_ctxsize = sizeof(struct seqiv_ctx); + inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; + +out: + return inst; +} + +static struct crypto_template seqiv_tmpl = { + .name = "seqiv", + .alloc = seqiv_alloc, + .free = skcipher_geniv_free, + .module = THIS_MODULE, +}; + +static int __init seqiv_module_init(void) +{ + return crypto_register_template(&seqiv_tmpl); +} + +static void __exit seqiv_module_exit(void) +{ + crypto_unregister_template(&seqiv_tmpl); +} + +module_init(seqiv_module_init); +module_exit(seqiv_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Sequence Number IV Generator"); -- cgit v1.1 From 03bf712fb4defc7831c727d1e32d0269f7f96de0 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 1 Dec 2007 18:35:38 +1100 Subject: [CRYPTO] skcipher: Add top-level givencrypt/givdecrypt calls This patch finally makes the givencrypt/givdecrypt operations available to users by adding crypto_skcipher_givencrypt and crypto_skcipher_givdecrypt. A suite of helpers to allocate and fill in the request is also available. 
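For example, a synchronous caller might use them roughly as follows. This is an illustrative sketch rather than in-tree code: "cbc(aes)", the 16-byte working IV and the one-shot tfm allocation are simplifications, and error handling is abbreviated:

	static int example_givencrypt(struct scatterlist *src,
				      struct scatterlist *dst,
				      unsigned int nbytes, const u8 *key,
				      unsigned int keylen, u64 seq, u8 *giv)
	{
		struct crypto_ablkcipher *tfm;
		struct skcipher_givcrypt_request *req;
		u8 iv[16];		/* working IV storage (creq.info) */
		int err;

		/* Masking out ASYNC yields a synchronous tfm, so the
		 * default geniv will be chainiv. */
		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_ablkcipher_setkey(tfm, key, keylen);
		if (err)
			goto out_free_tfm;

		req = skcipher_givcrypt_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		skcipher_givcrypt_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					       NULL, NULL);
		skcipher_givcrypt_set_crypt(req, src, dst, nbytes, iv);
		skcipher_givcrypt_set_giv(req, giv, seq);

		/* On success giv holds the IV that must accompany the
		 * ciphertext. */
		err = crypto_skcipher_givencrypt(req);

		skcipher_givcrypt_free(req);
	out_free_tfm:
		crypto_free_ablkcipher(tfm);
		return err;
	}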
Signed-off-by: Herbert Xu --- include/crypto/skcipher.h | 72 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index c283fab..25fd612 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -14,6 +14,8 @@ #define _CRYPTO_SKCIPHER_H #include +#include +#include /** * struct skcipher_givcrypt_request - Crypto request with IV generation @@ -34,5 +36,75 @@ static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm( return crypto_ablkcipher_reqtfm(&req->creq); } +static inline int crypto_skcipher_givencrypt( + struct skcipher_givcrypt_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); + return crt->givencrypt(req); +}; + +static inline int crypto_skcipher_givdecrypt( + struct skcipher_givcrypt_request *req) +{ + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); + return crt->givdecrypt(req); +}; + +static inline void skcipher_givcrypt_set_tfm( + struct skcipher_givcrypt_request *req, struct crypto_ablkcipher *tfm) +{ + req->creq.base.tfm = crypto_ablkcipher_tfm(tfm); +} + +static inline struct skcipher_givcrypt_request *skcipher_givcrypt_cast( + struct crypto_async_request *req) +{ + return container_of(ablkcipher_request_cast(req), + struct skcipher_givcrypt_request, creq); +} + +static inline struct skcipher_givcrypt_request *skcipher_givcrypt_alloc( + struct crypto_ablkcipher *tfm, gfp_t gfp) +{ + struct skcipher_givcrypt_request *req; + + req = kmalloc(sizeof(struct skcipher_givcrypt_request) + + crypto_ablkcipher_reqsize(tfm), gfp); + + if (likely(req)) + skcipher_givcrypt_set_tfm(req, tfm); + + return req; +} + +static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req) +{ + kfree(req); +} + +static inline void skcipher_givcrypt_set_callback( + struct skcipher_givcrypt_request *req, u32 flags, + crypto_completion_t complete, void *data) +{ + ablkcipher_request_set_callback(&req->creq, flags, complete, data); +} + +static inline void skcipher_givcrypt_set_crypt( + struct skcipher_givcrypt_request *req, + struct scatterlist *src, struct scatterlist *dst, + unsigned int nbytes, void *iv) +{ + ablkcipher_request_set_crypt(&req->creq, src, dst, nbytes, iv); +} + +static inline void skcipher_givcrypt_set_giv( + struct skcipher_givcrypt_request *req, u8 *giv, u64 seq) +{ + req->giv = giv; + req->seq = seq; +} + #endif /* _CRYPTO_SKCIPHER_H */ -- cgit v1.1 From 743edf57272fd420348e148bf94f9e48ed6abb70 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 10 Dec 2007 16:18:01 +0800 Subject: [CRYPTO] aead: Add givcrypt operations This patch adds the underlying givcrypt operations for aead and associated support elements. The rationale is identical to that of the skcipher givcrypt operations, i.e., sometimes only the algorithm knows how the IV should be generated. A new request type aead_givcrypt_request is added which contains an embedded aead_request structure with two new elements to support this operation. The new elements are seq and giv. The seq field should contain a strictly increasing 64-bit integer which may be used by certain IV generators as an input value. The giv field will be used to store the generated IV. It does not need to obey the alignment requirements of the algorithm because it's not used during the operation. The existing iv field must still be available as it will be used to store intermediate IVs and the output IV if chaining is desired. 
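For illustration, a caller holding an aead_givcrypt_request might fill it in along these lines (a hypothetical sketch; this patch adds only the structure and the tfm hooks, so there are no helper setters yet and the fields are assigned directly):

	static void example_fill(struct aead_givcrypt_request *req,
				 struct crypto_aead *tfm,
				 struct scatterlist *src,
				 struct scatterlist *dst,
				 struct scatterlist *assoc,
				 unsigned int assoclen,
				 unsigned int cryptlen,
				 u8 *iv, u8 *giv, u64 seq)
	{
		/* The embedded aead_request is set up exactly as before... */
		aead_request_set_tfm(&req->areq, tfm);
		aead_request_set_crypt(&req->areq, src, dst, cryptlen, iv);
		aead_request_set_assoc(&req->areq, assoc, assoclen);

		/* ...plus the two new elements: where the generated IV
		 * goes (giv, which need not be aligned) and the strictly
		 * increasing sequence number it may be derived from. */
		req->giv = giv;
		req->seq = seq;
	}

The generator itself is then reached through the new givencrypt hook in the aead operations; a public wrapper analogous to crypto_skcipher_givencrypt is not added here.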
Signed-off-by: Herbert Xu --- crypto/aead.c | 7 +++++++ include/crypto/aead.h | 38 ++++++++++++++++++++++++++++++++++++++ include/linux/crypto.h | 5 +++++ 3 files changed, 50 insertions(+) create mode 100644 include/crypto/aead.h diff --git a/crypto/aead.c b/crypto/aead.c index f23c2b0..0402b60 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -77,6 +77,11 @@ static unsigned int crypto_aead_ctxsize(struct crypto_alg *alg, u32 type, return alg->cra_ctxsize; } +static int no_givdecrypt(struct aead_givcrypt_request *req) +{ + return -ENOSYS; +} + static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask) { struct aead_alg *alg = &tfm->__crt_alg->cra_aead; @@ -88,6 +93,8 @@ static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask) crt->setkey = setkey; crt->encrypt = alg->encrypt; crt->decrypt = alg->decrypt; + crt->givencrypt = alg->givencrypt; + crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt; crt->ivsize = alg->ivsize; crt->authsize = alg->maxauthsize; diff --git a/include/crypto/aead.h b/include/crypto/aead.h new file mode 100644 index 0000000..0839203 --- /dev/null +++ b/include/crypto/aead.h @@ -0,0 +1,38 @@ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _CRYPTO_AEAD_H +#define _CRYPTO_AEAD_H + +#include +#include + +/** + * struct aead_givcrypt_request - AEAD request with IV generation + * @seq: Sequence number for IV generation + * @giv: Space for generated IV + * @areq: The AEAD request itself + */ +struct aead_givcrypt_request { + u64 seq; + u8 *giv; + + struct aead_request areq; +}; + +static inline struct crypto_aead *aead_givcrypt_reqtfm( + struct aead_givcrypt_request *req) +{ + return crypto_aead_reqtfm(&req->areq); +} + +#endif /* _CRYPTO_AEAD_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 835dcaf..7524928 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -106,6 +106,7 @@ struct crypto_blkcipher; struct crypto_hash; struct crypto_tfm; struct crypto_type; +struct aead_givcrypt_request; struct skcipher_givcrypt_request; typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); @@ -202,6 +203,8 @@ struct aead_alg { int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); int (*encrypt)(struct aead_request *req); int (*decrypt)(struct aead_request *req); + int (*givencrypt)(struct aead_givcrypt_request *req); + int (*givdecrypt)(struct aead_givcrypt_request *req); unsigned int ivsize; unsigned int maxauthsize; @@ -348,6 +351,8 @@ struct aead_tfm { unsigned int keylen); int (*encrypt)(struct aead_request *req); int (*decrypt)(struct aead_request *req); + int (*givencrypt)(struct aead_givcrypt_request *req); + int (*givdecrypt)(struct aead_givcrypt_request *req); unsigned int ivsize; unsigned int authsize; unsigned int reqsize; -- cgit v1.1 From e56dd56418fcc024683d1638564a494d9e9aab85 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 10 Dec 2007 16:20:24 +0800 Subject: [CRYPTO] authenc: Add givencrypt operation This patch implements the givencrypt function for authenc. It simply calls the givencrypt operation on the underlying cipher instead of encrypt. 
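In other words, once the cipher generates the IV the ICV must cover it too: the hash input becomes the associated data followed by the generated IV and the ciphertext (cryptlen + ivsize bytes of the latter), and the ICV is written directly after the ciphertext. The implementation below achieves this without copying any payload, by chaining a one-entry scatterlist holding the IV in front of the ciphertext (excerpt from the diff):

	sg_init_table(cipher, 2);
	sg_set_buf(cipher, iv, ivsize);
	authenc_chain(cipher, dst, vdst == iv + ivsize);

where the third argument merges the two entries into one when the IV already sits immediately in front of the ciphertext in memory, as it does for IPsec.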
Signed-off-by: Herbert Xu --- crypto/authenc.c | 136 ++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 114 insertions(+), 22 deletions(-) diff --git a/crypto/authenc.c b/crypto/authenc.c index 2d609b7..ed8ac5a 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -10,6 +10,7 @@ * */ +#include #include #include #include @@ -87,6 +88,20 @@ badkey: goto out; } +static void authenc_chain(struct scatterlist *head, struct scatterlist *sg, + int chain) +{ + if (chain) { + head->length += sg->length; + sg = scatterwalk_sg_next(sg); + } + + if (sg) + scatterwalk_sg_chain(head, 2, sg); + else + sg_mark_end(head); +} + static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags, struct scatterlist *cipher, unsigned int cryptlen) @@ -127,18 +142,31 @@ auth_unlock: return hash; } -static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags) +static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, + unsigned int flags) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct scatterlist *dst = req->dst; - unsigned int cryptlen = req->cryptlen; + struct scatterlist cipher[2]; + struct page *dstp; + unsigned int ivsize = crypto_aead_ivsize(authenc); + unsigned int cryptlen; + u8 *vdst; u8 *hash; - hash = crypto_authenc_hash(req, flags, dst, cryptlen); + dstp = sg_page(dst); + vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset; + + sg_init_table(cipher, 2); + sg_set_buf(cipher, iv, ivsize); + authenc_chain(cipher, dst, vdst == iv + ivsize); + + cryptlen = req->cryptlen + ivsize; + hash = crypto_authenc_hash(req, flags, cipher, cryptlen); if (IS_ERR(hash)) return PTR_ERR(hash); - scatterwalk_map_and_copy(hash, dst, cryptlen, + scatterwalk_map_and_copy(hash, cipher, cryptlen, crypto_aead_authsize(authenc), 1); return 0; } @@ -146,8 +174,16 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags) static void crypto_authenc_encrypt_done(struct crypto_async_request *req, int err) { - if (!err) - err = crypto_authenc_genicv(req->data, 0); + if (!err) { + struct aead_request *areq = req->data; + struct crypto_aead *authenc = crypto_aead_reqtfm(areq); + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct ablkcipher_request *abreq = aead_request_ctx(areq); + u8 *iv = (u8 *)(abreq + 1) + + crypto_ablkcipher_reqsize(ctx->enc); + + err = crypto_authenc_genicv(areq, iv, 0); + } aead_request_complete(req->data, err); } @@ -157,45 +193,99 @@ static int crypto_authenc_encrypt(struct aead_request *req) struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); struct ablkcipher_request *abreq = aead_request_ctx(req); + struct crypto_ablkcipher *enc = ctx->enc; + struct scatterlist *dst = req->dst; + unsigned int cryptlen = req->cryptlen; + u8 *iv = (u8 *)(abreq + 1) + crypto_ablkcipher_reqsize(enc); int err; - ablkcipher_request_set_tfm(abreq, ctx->enc); + ablkcipher_request_set_tfm(abreq, enc); ablkcipher_request_set_callback(abreq, aead_request_flags(req), crypto_authenc_encrypt_done, req); - ablkcipher_request_set_crypt(abreq, req->src, req->dst, req->cryptlen, - req->iv); + ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv); + + memcpy(iv, req->iv, crypto_aead_ivsize(authenc)); err = crypto_ablkcipher_encrypt(abreq); if (err) return err; - return crypto_authenc_genicv(req, CRYPTO_TFM_REQ_MAY_SLEEP); + return crypto_authenc_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP); +} + +static void 
crypto_authenc_givencrypt_done(struct crypto_async_request *req, + int err) +{ + if (!err) { + struct aead_givcrypt_request *greq = req->data; + + err = crypto_authenc_genicv(&greq->areq, greq->giv, 0); + } + + aead_request_complete(req->data, err); +} + +static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) +{ + struct crypto_aead *authenc = aead_givcrypt_reqtfm(req); + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct aead_request *areq = &req->areq; + struct skcipher_givcrypt_request *greq = aead_request_ctx(areq); + u8 *iv = req->giv; + int err; + + skcipher_givcrypt_set_tfm(greq, ctx->enc); + skcipher_givcrypt_set_callback(greq, aead_request_flags(areq), + crypto_authenc_givencrypt_done, areq); + skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen, + areq->iv); + skcipher_givcrypt_set_giv(greq, iv, req->seq); + + err = crypto_skcipher_givencrypt(greq); + if (err) + return err; + + return crypto_authenc_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP); } static int crypto_authenc_verify(struct aead_request *req, + struct scatterlist *cipher, unsigned int cryptlen) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); u8 *ohash; u8 *ihash; - struct scatterlist *src = req->src; unsigned int authsize; - ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, src, + ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher, cryptlen); if (IS_ERR(ohash)) return PTR_ERR(ohash); authsize = crypto_aead_authsize(authenc); ihash = ohash + authsize; - scatterwalk_map_and_copy(ihash, src, cryptlen, authsize, 0); + scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0); return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0; } -static void crypto_authenc_decrypt_done(struct crypto_async_request *req, - int err) +static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, + unsigned int cryptlen) { - aead_request_complete(req->data, err); + struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct scatterlist *src = req->src; + struct scatterlist cipher[2]; + struct page *srcp; + unsigned int ivsize = crypto_aead_ivsize(authenc); + u8 *vsrc; + + srcp = sg_page(src); + vsrc = PageHighMem(srcp) ? 
NULL : page_address(srcp) + src->offset; + + sg_init_table(cipher, 2); + sg_set_buf(cipher, iv, ivsize); + authenc_chain(cipher, src, vsrc == iv + ivsize); + + return crypto_authenc_verify(req, cipher, cryptlen + ivsize); } static int crypto_authenc_decrypt(struct aead_request *req) @@ -205,21 +295,21 @@ static int crypto_authenc_decrypt(struct aead_request *req) struct ablkcipher_request *abreq = aead_request_ctx(req); unsigned int cryptlen = req->cryptlen; unsigned int authsize = crypto_aead_authsize(authenc); + u8 *iv = req->iv; int err; if (cryptlen < authsize) return -EINVAL; cryptlen -= authsize; - err = crypto_authenc_verify(req, cryptlen); + err = crypto_authenc_iverify(req, iv, cryptlen); if (err) return err; ablkcipher_request_set_tfm(abreq, ctx->enc); ablkcipher_request_set_callback(abreq, aead_request_flags(req), - crypto_authenc_decrypt_done, req); - ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, - req->iv); + req->base.complete, req->base.data); + ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv); return crypto_ablkcipher_decrypt(abreq); } @@ -248,8 +338,9 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) (crypto_hash_alignmask(auth) & ~(crypto_tfm_ctx_alignment() - 1)) + crypto_hash_digestsize(auth) * 2, - sizeof(struct ablkcipher_request) + - crypto_ablkcipher_reqsize(enc)); + sizeof(struct skcipher_givcrypt_request) + + crypto_ablkcipher_reqsize(enc) + + crypto_ablkcipher_ivsize(enc)); spin_lock_init(&ctx->auth_lock); @@ -347,6 +438,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) inst->alg.cra_aead.setkey = crypto_authenc_setkey; inst->alg.cra_aead.encrypt = crypto_authenc_encrypt; inst->alg.cra_aead.decrypt = crypto_authenc_decrypt; + inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; out: crypto_mod_put(auth); -- cgit v1.1 From 3a282bd2e77966e7361fffbd5d1cea6eb0499b6c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 8 Dec 2007 20:13:15 +0800 Subject: [CRYPTO] aead: Add top-level givencrypt/givdecrypt calls This patch finally makes the givencrypt/givdecrypt operations available to users by adding crypto_aead_givencrypt and crypto_aead_givdecrypt. A suite of helpers to allocate and fill in the request is also available. 
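A minimal caller of the helper suite added in the diff below could look like this sketch. It is synchronous-only and hedged accordingly: example_givencrypt is an invented name, and a real caller must supply a completion callback and handle -EINPROGRESS/-EBUSY from asynchronous implementations:

#include <crypto/aead.h>
#include <linux/scatterlist.h>

static int example_givencrypt(struct crypto_aead *tfm,
			      struct scatterlist *src,
			      struct scatterlist *dst,
			      unsigned int nbytes,
			      struct scatterlist *assoc,
			      unsigned int assoclen,
			      u8 *iv, u8 *giv, u64 seq)
{
	struct aead_givcrypt_request *req;
	int err;

	req = aead_givcrypt_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	aead_givcrypt_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				   NULL, NULL);
	aead_givcrypt_set_crypt(req, src, dst, nbytes, iv);
	aead_givcrypt_set_assoc(req, assoc, assoclen);
	aead_givcrypt_set_giv(req, giv, seq);

	/* On success the generated IV has been stored in giv. */
	err = crypto_aead_givencrypt(req);

	aead_givcrypt_free(req);
	return err;
}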
Signed-off-by: Herbert Xu --- include/crypto/aead.h | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 0839203..0edf949 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -15,6 +15,7 @@ #include #include +#include /** * struct aead_givcrypt_request - AEAD request with IV generation @@ -35,4 +36,70 @@ static inline struct crypto_aead *aead_givcrypt_reqtfm( return crypto_aead_reqtfm(&req->areq); } +static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req) +{ + struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req)); + return crt->givencrypt(req); +}; + +static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req) +{ + struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req)); + return crt->givdecrypt(req); +}; + +static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req, + struct crypto_aead *tfm) +{ + req->areq.base.tfm = crypto_aead_tfm(tfm); +} + +static inline struct aead_givcrypt_request *aead_givcrypt_alloc( + struct crypto_aead *tfm, gfp_t gfp) +{ + struct aead_givcrypt_request *req; + + req = kmalloc(sizeof(struct aead_givcrypt_request) + + crypto_aead_reqsize(tfm), gfp); + + if (likely(req)) + aead_givcrypt_set_tfm(req, tfm); + + return req; +} + +static inline void aead_givcrypt_free(struct aead_givcrypt_request *req) +{ + kfree(req); +} + +static inline void aead_givcrypt_set_callback( + struct aead_givcrypt_request *req, u32 flags, + crypto_completion_t complete, void *data) +{ + aead_request_set_callback(&req->areq, flags, complete, data); +} + +static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int nbytes, void *iv) +{ + aead_request_set_crypt(&req->areq, src, dst, nbytes, iv); +} + +static inline void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req, + struct scatterlist *assoc, + unsigned int assoclen) +{ + aead_request_set_assoc(&req->areq, assoc, assoclen); +} + +static inline void aead_givcrypt_set_giv(struct aead_givcrypt_request *req, + u8 *giv, u64 seq) +{ + req->giv = giv; + req->seq = seq; +} + #endif /* _CRYPTO_AEAD_H */ -- cgit v1.1 From aedb30dc49eeecd48558b601c47e0b3f9e42c602 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 12 Dec 2007 19:27:25 +0800 Subject: [CRYPTO] aead: Allow algorithms with no givcrypt support Some algorithms always require manual IV construction. For instance, the generic CCM algorithm requires the first byte of the IV to be manually constructed. Such algorithms are always used by other algorithms equipped with their own IV generators and do not need IV generation per se. 
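With the stub in place, a caller can probe for generator support at run time rather than failing outright. A hedged sketch (try_givencrypt is an invented name; it assumes the caller already constructed a fallback IV in req->areq.iv):

#include <crypto/aead.h>

static int try_givencrypt(struct aead_givcrypt_request *req)
{
	int err = crypto_aead_givencrypt(req);

	/*
	 * -ENOSYS means no IV generator, e.g. generic CCM, whose
	 * first IV byte must be built by hand.  Fall back to the
	 * plain encrypt path with the caller-constructed IV.
	 */
	if (err == -ENOSYS)
		err = crypto_aead_encrypt(&req->areq);

	return err;
}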
Signed-off-by: Herbert Xu --- crypto/aead.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto/aead.c b/crypto/aead.c index 0402b60..15335ed 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -77,7 +77,7 @@ static unsigned int crypto_aead_ctxsize(struct crypto_alg *alg, u32 type, return alg->cra_ctxsize; } -static int no_givdecrypt(struct aead_givcrypt_request *req) +static int no_givcrypt(struct aead_givcrypt_request *req) { return -ENOSYS; } @@ -93,8 +93,8 @@ static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask) crt->setkey = setkey; crt->encrypt = alg->encrypt; crt->decrypt = alg->decrypt; - crt->givencrypt = alg->givencrypt; - crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt; + crt->givencrypt = alg->givencrypt ?: no_givcrypt; + crt->givdecrypt = alg->givdecrypt ?: no_givcrypt; crt->ivsize = alg->ivsize; crt->authsize = alg->maxauthsize; -- cgit v1.1 From 5b6d2d7fdf806f2b5a9352416f9e670911fc4748 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 12 Dec 2007 19:23:36 +0800 Subject: [CRYPTO] aead: Add aead_geniv_alloc/aead_geniv_free This patch creates the infrastructure to help the construction of IV generator templates that wrap around AEAD algorithms by adding an IV generator to them. This is useful for AEAD algorithms with no built-in IV generator or to replace their built-in generator. Signed-off-by: Herbert Xu --- crypto/aead.c | 216 ++++++++++++++++++++++++++++++++++++++++- include/crypto/internal/aead.h | 77 +++++++++++++++ include/linux/crypto.h | 11 ++- 3 files changed, 297 insertions(+), 7 deletions(-) create mode 100644 include/crypto/internal/aead.h diff --git a/crypto/aead.c b/crypto/aead.c index 15335ed..9f7aca8 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -12,14 +12,16 @@ * */ -#include -#include +#include +#include #include #include #include #include #include +#include "internal.h" + static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { @@ -55,18 +57,20 @@ static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { + struct aead_tfm *crt = crypto_aead_crt(tfm); int err; if (authsize > crypto_aead_alg(tfm)->maxauthsize) return -EINVAL; if (crypto_aead_alg(tfm)->setauthsize) { - err = crypto_aead_alg(tfm)->setauthsize(tfm, authsize); + err = crypto_aead_alg(tfm)->setauthsize(crt->base, authsize); if (err) return err; } - crypto_aead_crt(tfm)->authsize = authsize; + crypto_aead_crt(crt->base)->authsize = authsize; + crt->authsize = authsize; return 0; } EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); @@ -90,11 +94,13 @@ static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask) if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) return -EINVAL; - crt->setkey = setkey; + crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ? 
+ alg->setkey : setkey; crt->encrypt = alg->encrypt; crt->decrypt = alg->decrypt; crt->givencrypt = alg->givencrypt ?: no_givcrypt; crt->givdecrypt = alg->givdecrypt ?: no_givcrypt; + crt->base = __crypto_aead_cast(tfm); crt->ivsize = alg->ivsize; crt->authsize = alg->maxauthsize; @@ -111,6 +117,7 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "ivsize : %u\n", aead->ivsize); seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); + seq_printf(m, "geniv : %s\n", aead->geniv ?: ""); } const struct crypto_type crypto_aead_type = { @@ -122,5 +129,204 @@ const struct crypto_type crypto_aead_type = { }; EXPORT_SYMBOL_GPL(crypto_aead_type); +static int aead_null_givencrypt(struct aead_givcrypt_request *req) +{ + return crypto_aead_encrypt(&req->areq); +} + +static int aead_null_givdecrypt(struct aead_givcrypt_request *req) +{ + return crypto_aead_decrypt(&req->areq); +} + +static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +{ + struct aead_alg *alg = &tfm->__crt_alg->cra_aead; + struct aead_tfm *crt = &tfm->crt_aead; + + if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) + return -EINVAL; + + crt->setkey = setkey; + crt->encrypt = alg->encrypt; + crt->decrypt = alg->decrypt; + if (!alg->ivsize) { + crt->givencrypt = aead_null_givencrypt; + crt->givdecrypt = aead_null_givdecrypt; + } + crt->base = __crypto_aead_cast(tfm); + crt->ivsize = alg->ivsize; + crt->authsize = alg->maxauthsize; + + return 0; +} + +static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); +static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) +{ + struct aead_alg *aead = &alg->cra_aead; + + seq_printf(m, "type : nivaead\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "ivsize : %u\n", aead->ivsize); + seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); + seq_printf(m, "geniv : %s\n", aead->geniv); +} + +const struct crypto_type crypto_nivaead_type = { + .ctxsize = crypto_aead_ctxsize, + .init = crypto_init_nivaead_ops, +#ifdef CONFIG_PROC_FS + .show = crypto_nivaead_show, +#endif +}; +EXPORT_SYMBOL_GPL(crypto_nivaead_type); + +static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn, + const char *name, u32 type, u32 mask) +{ + struct crypto_alg *alg; + int err; + + type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + type |= CRYPTO_ALG_TYPE_AEAD; + mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV; + + alg = crypto_alg_mod_lookup(name, type, mask); + if (IS_ERR(alg)) + return PTR_ERR(alg); + + err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); + crypto_mod_put(alg); + return err; +} + +struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, + struct rtattr **tb, u32 type, + u32 mask) +{ + const char *name; + struct crypto_aead_spawn *spawn; + struct crypto_attr_type *algt; + struct crypto_instance *inst; + struct crypto_alg *alg; + int err; + + algt = crypto_get_attr_type(tb); + err = PTR_ERR(algt); + if (IS_ERR(algt)) + return ERR_PTR(err); + + if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) & + algt->mask) + return ERR_PTR(-EINVAL); + + name = crypto_attr_alg_name(tb[1]); + err = PTR_ERR(name); + if (IS_ERR(name)) + return ERR_PTR(err); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + + spawn = crypto_instance_ctx(inst); + + /* Ignore async algorithms if necessary. 
*/ + mask |= crypto_requires_sync(algt->type, algt->mask); + + crypto_set_aead_spawn(spawn, inst); + err = crypto_grab_nivaead(spawn, name, type, mask); + if (err) + goto err_free_inst; + + alg = crypto_aead_spawn_alg(spawn); + + err = -EINVAL; + if (!alg->cra_aead.ivsize) + goto err_drop_alg; + + /* + * This is only true if we're constructing an algorithm with its + * default IV generator. For the default generator we elide the + * template name and double-check the IV generator. + */ + if (algt->mask & CRYPTO_ALG_GENIV) { + if (strcmp(tmpl->name, alg->cra_aead.geniv)) + goto err_drop_alg; + + memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); + memcpy(inst->alg.cra_driver_name, alg->cra_driver_name, + CRYPTO_MAX_ALG_NAME); + } else { + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, alg->cra_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_drop_alg; + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, alg->cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_drop_alg; + } + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV; + inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.cra_priority = alg->cra_priority; + inst->alg.cra_blocksize = alg->cra_blocksize; + inst->alg.cra_alignmask = alg->cra_alignmask; + inst->alg.cra_type = &crypto_aead_type; + + inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize; + inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize; + inst->alg.cra_aead.geniv = alg->cra_aead.geniv; + + inst->alg.cra_aead.setkey = alg->cra_aead.setkey; + inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize; + inst->alg.cra_aead.encrypt = alg->cra_aead.encrypt; + inst->alg.cra_aead.decrypt = alg->cra_aead.decrypt; + +out: + return inst; + +err_drop_alg: + crypto_drop_aead(spawn); +err_free_inst: + kfree(inst); + inst = ERR_PTR(err); + goto out; +} +EXPORT_SYMBOL_GPL(aead_geniv_alloc); + +void aead_geniv_free(struct crypto_instance *inst) +{ + crypto_drop_aead(crypto_instance_ctx(inst)); + kfree(inst); +} +EXPORT_SYMBOL_GPL(aead_geniv_free); + +int aead_geniv_init(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_aead *aead; + + aead = crypto_spawn_aead(crypto_instance_ctx(inst)); + if (IS_ERR(aead)) + return PTR_ERR(aead); + + tfm->crt_aead.base = aead; + tfm->crt_aead.reqsize += crypto_aead_reqsize(aead); + + return 0; +} +EXPORT_SYMBOL_GPL(aead_geniv_init); + +void aead_geniv_exit(struct crypto_tfm *tfm) +{ + crypto_free_aead(tfm->crt_aead.base); +} +EXPORT_SYMBOL_GPL(aead_geniv_exit); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Authenticated Encryption with Associated Data (AEAD)"); diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h new file mode 100644 index 0000000..eb4eee7 --- /dev/null +++ b/include/crypto/internal/aead.h @@ -0,0 +1,77 @@ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#ifndef _CRYPTO_INTERNAL_AEAD_H +#define _CRYPTO_INTERNAL_AEAD_H + +#include +#include +#include + +struct rtattr; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +extern const struct crypto_type crypto_nivaead_type; + +static inline void crypto_set_aead_spawn( + struct crypto_aead_spawn *spawn, struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct crypto_alg *crypto_aead_spawn_alg( + struct crypto_aead_spawn *spawn) +{ + return spawn->base.alg; +} + +static inline struct crypto_aead *crypto_spawn_aead( + struct crypto_aead_spawn *spawn) +{ + return __crypto_aead_cast( + crypto_spawn_tfm(&spawn->base, CRYPTO_ALG_TYPE_AEAD, + CRYPTO_ALG_TYPE_MASK)); +} + +struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, + struct rtattr **tb, u32 type, + u32 mask); +void aead_geniv_free(struct crypto_instance *inst); +int aead_geniv_init(struct crypto_tfm *tfm); +void aead_geniv_exit(struct crypto_tfm *tfm); + +static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv) +{ + return crypto_aead_crt(geniv)->base; +} + +static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req) +{ + return aead_request_ctx(&req->areq); +} + +static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req, + int err) +{ + aead_request_complete(&req->areq, err); +} + +#endif /* _CRYPTO_INTERNAL_AEAD_H */ + diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 7524928..639385a 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -206,6 +206,8 @@ struct aead_alg { int (*givencrypt)(struct aead_givcrypt_request *req); int (*givdecrypt)(struct aead_givcrypt_request *req); + + const char *geniv; + unsigned int ivsize; unsigned int maxauthsize; }; @@ -353,6 +355,9 @@ struct aead_tfm { int (*decrypt)(struct aead_request *req); int (*givencrypt)(struct aead_givcrypt_request *req); int (*givdecrypt)(struct aead_givcrypt_request *req); + + struct crypto_aead *base; + unsigned int ivsize; unsigned int authsize; unsigned int reqsize; @@ -781,7 +786,9 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { - return crypto_aead_crt(tfm)->setkey(tfm, key, keylen); + struct aead_tfm *crt = crypto_aead_crt(tfm); + + return crt->setkey(crt->base, key, keylen); } int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); @@ -809,7 +816,7 @@ static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) static inline void aead_request_set_tfm(struct aead_request *req, struct crypto_aead *tfm) { - req->base.tfm = crypto_aead_tfm(tfm); + req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); } static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, -- cgit v1.1 From 14df4d80433b8413f901e80880c39e8759b8418f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 12 Dec 2007 12:27:26 +0800 Subject: [CRYPTO] seqiv: Add AEAD support This patch adds support for using seqiv with AEAD algorithms. This is useful for those AEAD algorithms that perform authentication before encryption because the IV generated by the underlying encryption algorithm won't be available for authentication.
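The IV construction that the factored-out seqiv_geniv() below performs is easiest to see in isolation: the 64-bit sequence number, in big-endian form and padded to the IV size, is XORed with a per-transform random salt. Restated as a standalone sketch (seqiv_style_iv is an invented name):

#include <crypto/algapi.h>	/* crypto_xor() */
#include <linux/string.h>

static void seqiv_style_iv(u8 *info, const u8 *salt, u64 seq,
			   unsigned int ivsize)
{
	__be64 beseq = cpu_to_be64(seq);
	unsigned int len = ivsize;

	if (ivsize > sizeof(u64)) {
		memset(info, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	memcpy(info + ivsize - len, &beseq, len);

	/* Whiten the counter with the per-transform random salt. */
	crypto_xor(info, salt, ivsize);
}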
Signed-off-by: Herbert Xu --- crypto/seqiv.c | 191 ++++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 175 insertions(+), 16 deletions(-) diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 9c2d80d..b903aab 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -13,6 +13,7 @@ * */ +#include #include #include #include @@ -53,6 +54,46 @@ static void seqiv_complete(struct crypto_async_request *base, int err) skcipher_givcrypt_complete(req, err); } +static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err) +{ + struct aead_request *subreq = aead_givcrypt_reqctx(req); + struct crypto_aead *geniv; + + if (err == -EINPROGRESS) + return; + + if (err) + goto out; + + geniv = aead_givcrypt_reqtfm(req); + memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv)); + +out: + kfree(subreq->iv); +} + +static void seqiv_aead_complete(struct crypto_async_request *base, int err) +{ + struct aead_givcrypt_request *req = base->data; + + seqiv_aead_complete2(req, err); + aead_givcrypt_complete(req, err); +} + +static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq, + unsigned int ivsize) +{ + unsigned int len = ivsize; + + if (ivsize > sizeof(u64)) { + memset(info, 0, ivsize - sizeof(u64)); + len = sizeof(u64); + } + seq = cpu_to_be64(seq); + memcpy(info + ivsize - len, &seq, len); + crypto_xor(info, ctx->salt, ivsize); +} + static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); @@ -61,9 +102,7 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) crypto_completion_t complete; void *data; u8 *info; - __be64 seq; unsigned int ivsize; - unsigned int len; int err; ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); @@ -91,15 +130,7 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, req->creq.nbytes, info); - len = ivsize; - if (ivsize > sizeof(u64)) { - memset(info, 0, ivsize - sizeof(u64)); - len = sizeof(u64); - } - seq = cpu_to_be64(req->seq); - memcpy(info + ivsize - len, &seq, len); - crypto_xor(info, ctx->salt, ivsize); - + seqiv_geniv(ctx, info, req->seq, ivsize); memcpy(req->giv, info, ivsize); err = crypto_ablkcipher_encrypt(subreq); @@ -108,6 +139,52 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) return err; } +static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req) +{ + struct crypto_aead *geniv = aead_givcrypt_reqtfm(req); + struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); + struct aead_request *areq = &req->areq; + struct aead_request *subreq = aead_givcrypt_reqctx(req); + crypto_completion_t complete; + void *data; + u8 *info; + unsigned int ivsize; + int err; + + aead_request_set_tfm(subreq, aead_geniv_base(geniv)); + + complete = areq->base.complete; + data = areq->base.data; + info = areq->iv; + + ivsize = crypto_aead_ivsize(geniv); + + if (unlikely(!IS_ALIGNED((unsigned long)info, + crypto_aead_alignmask(geniv) + 1))) { + info = kmalloc(ivsize, areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL: + GFP_ATOMIC); + if (!info) + return -ENOMEM; + + complete = seqiv_aead_complete; + data = req; + } + + aead_request_set_callback(subreq, areq->base.flags, complete, data); + aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen, + info); + aead_request_set_assoc(subreq, areq->assoc, areq->assoclen); + + seqiv_geniv(ctx, info, req->seq, ivsize); + memcpy(req->giv, info, ivsize); + + err = crypto_aead_encrypt(subreq); + if (unlikely(info != areq->iv)) + seqiv_aead_complete2(req, err); + return err; +} + static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); @@ -126,6 +203,24 @@ unlock: return seqiv_givencrypt(req); } +static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req) +{ + struct crypto_aead *geniv = aead_givcrypt_reqtfm(req); + struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); + + spin_lock_bh(&ctx->lock); + if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first) + goto unlock; + + crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt; + get_random_bytes(ctx->salt, crypto_aead_ivsize(geniv)); + +unlock: + spin_unlock_bh(&ctx->lock); + + return seqiv_aead_givencrypt(req); +} + static int seqiv_init(struct crypto_tfm *tfm) { struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); @@ -138,13 +233,26 @@ static int seqiv_init(struct crypto_tfm *tfm) return skcipher_geniv_init(tfm); } +static int seqiv_aead_init(struct crypto_tfm *tfm) +{ + struct crypto_aead *geniv = __crypto_aead_cast(tfm); + struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); + + spin_lock_init(&ctx->lock); + + tfm->crt_aead.reqsize = sizeof(struct aead_request); + + return aead_geniv_init(tfm); +} + static struct crypto_template seqiv_tmpl; -static struct crypto_instance *seqiv_alloc(struct rtattr **tb) +static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb) { struct crypto_instance *inst; inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0); + if (IS_ERR(inst)) goto out; @@ -153,19 +261,70 @@ static struct crypto_instance *seqiv_alloc(struct rtattr **tb) inst->alg.cra_init = seqiv_init; inst->alg.cra_exit = skcipher_geniv_exit; - inst->alg.cra_alignmask |= __alignof__(u32) - 1; - - inst->alg.cra_ctxsize = sizeof(struct seqiv_ctx); inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; out: return inst; } +static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb) +{ + struct crypto_instance *inst; + + inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0); + + if (IS_ERR(inst)) + goto out; + + inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first; + + inst->alg.cra_init = seqiv_aead_init; + inst->alg.cra_exit = aead_geniv_exit; + + inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize; + +out: + return inst; +} + +static struct crypto_instance *seqiv_alloc(struct rtattr **tb) +{ + struct crypto_attr_type *algt; + struct crypto_instance *inst; + int err; + + algt = crypto_get_attr_type(tb); + err = PTR_ERR(algt); + if (IS_ERR(algt)) + return ERR_PTR(err); + + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) + inst = seqiv_ablkcipher_alloc(tb); + else + inst = seqiv_aead_alloc(tb); + + if (IS_ERR(inst)) + goto out; + + inst->alg.cra_alignmask |= __alignof__(u32) - 1; + inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); + +out: + return inst; +} + +static void seqiv_free(struct crypto_instance *inst) +{ + if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) + skcipher_geniv_free(inst); + else + 
aead_geniv_free(inst); +} + static struct crypto_template seqiv_tmpl = { .name = "seqiv", .alloc = seqiv_alloc, - .free = skcipher_geniv_free, + .free = seqiv_free, .module = THIS_MODULE, }; -- cgit v1.1 From d29ce988aeb459203c74f14747f4f77e1829ef78 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 12 Dec 2007 19:24:27 +0800 Subject: [CRYPTO] aead: Create default givcipher instances This patch makes crypto_alloc_aead always return algorithms that are capable of generating their own IVs through givencrypt and givdecrypt. All existing AEAD algorithms already do. New ones must either supply their own or specify a generic IV generator with the geniv field. Signed-off-by: Herbert Xu --- crypto/aead.c | 153 +++++++++++++++++++++++++++++++++++++++++ include/crypto/internal/aead.h | 3 + include/linux/crypto.h | 10 +-- 3 files changed, 157 insertions(+), 9 deletions(-) diff --git a/crypto/aead.c b/crypto/aead.c index 9f7aca8..f5b1add 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -328,5 +329,157 @@ void aead_geniv_exit(struct crypto_tfm *tfm) } EXPORT_SYMBOL_GPL(aead_geniv_exit); +static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask) +{ + struct rtattr *tb[3]; + struct { + struct rtattr attr; + struct crypto_attr_type data; + } ptype; + struct { + struct rtattr attr; + struct crypto_attr_alg data; + } palg; + struct crypto_template *tmpl; + struct crypto_instance *inst; + struct crypto_alg *larval; + const char *geniv; + int err; + + larval = crypto_larval_lookup(alg->cra_driver_name, + CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV, + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + err = PTR_ERR(larval); + if (IS_ERR(larval)) + goto out; + + err = -EAGAIN; + if (!crypto_is_larval(larval)) + goto drop_larval; + + ptype.attr.rta_len = sizeof(ptype); + ptype.attr.rta_type = CRYPTOA_TYPE; + ptype.data.type = type | CRYPTO_ALG_GENIV; + /* GENIV tells the template that we're making a default geniv. */ + ptype.data.mask = mask | CRYPTO_ALG_GENIV; + tb[0] = &ptype.attr; + + palg.attr.rta_len = sizeof(palg); + palg.attr.rta_type = CRYPTOA_ALG; + /* Must use the exact name to locate ourselves. */ + memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME); + tb[1] = &palg.attr; + + tb[2] = NULL; + + geniv = alg->cra_aead.geniv; + + tmpl = crypto_lookup_template(geniv); + err = -ENOENT; + if (!tmpl) + goto kill_larval; + + inst = tmpl->alloc(tb); + err = PTR_ERR(inst); + if (IS_ERR(inst)) + goto put_tmpl; + + if ((err = crypto_register_instance(tmpl, inst))) { + tmpl->free(inst); + goto put_tmpl; + } + + /* Redo the lookup to use the instance we just registered.
*/ + err = -EAGAIN; + +put_tmpl: + crypto_tmpl_put(tmpl); +kill_larval: + crypto_larval_kill(larval); +drop_larval: + crypto_mod_put(larval); +out: + crypto_mod_put(alg); + return err; +} + +static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, + u32 mask) +{ + struct crypto_alg *alg; + + alg = crypto_alg_mod_lookup(name, type, mask); + if (IS_ERR(alg)) + return alg; + + if (alg->cra_type == &crypto_aead_type) + return alg; + + if (!alg->cra_aead.ivsize) + return alg; + + return ERR_PTR(crypto_nivaead_default(alg, type, mask)); +} + +int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, + u32 type, u32 mask) +{ + struct crypto_alg *alg; + int err; + + type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + type |= CRYPTO_ALG_TYPE_AEAD; + mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + mask |= CRYPTO_ALG_TYPE_MASK; + + alg = crypto_lookup_aead(name, type, mask); + if (IS_ERR(alg)) + return PTR_ERR(alg); + + err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); + crypto_mod_put(alg); + return err; +} +EXPORT_SYMBOL_GPL(crypto_grab_aead); + +struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask) +{ + struct crypto_tfm *tfm; + int err; + + type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + type |= CRYPTO_ALG_TYPE_AEAD; + mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + mask |= CRYPTO_ALG_TYPE_MASK; + + for (;;) { + struct crypto_alg *alg; + + alg = crypto_lookup_aead(alg_name, type, mask); + if (IS_ERR(alg)) { + err = PTR_ERR(alg); + goto err; + } + + tfm = __crypto_alloc_tfm(alg, type, mask); + if (!IS_ERR(tfm)) + return __crypto_aead_cast(tfm); + + crypto_mod_put(alg); + err = PTR_ERR(tfm); + +err: + if (err != -EAGAIN) + break; + if (signal_pending(current)) { + err = -EINTR; + break; + } + } + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(crypto_alloc_aead); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Authenticated Encryption with Associated Data (AEAD)"); diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h index eb4eee7..d838c94 100644 --- a/include/crypto/internal/aead.h +++ b/include/crypto/internal/aead.h @@ -31,6 +31,9 @@ static inline void crypto_set_aead_spawn( crypto_set_spawn(&spawn->base, inst); } +int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, + u32 type, u32 mask); + static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn) { crypto_drop_spawn(&spawn->base); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 639385a..0aba104 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -723,15 +723,7 @@ static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) return (struct crypto_aead *)tfm; } -static inline struct crypto_aead *crypto_alloc_aead(const char *alg_name, - u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_AEAD; - mask |= CRYPTO_ALG_TYPE_MASK; - - return __crypto_aead_cast(crypto_alloc_base(alg_name, type, mask)); -} +struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) { -- cgit v1.1 From 4a49b499dfa0c9e42be6d6fdd771f3434c776278 Mon Sep 17 00:00:00 2001 From: Joy Latten Date: Wed, 12 Dec 2007 20:25:13 +0800 Subject: [CRYPTO] ccm: Added CCM mode This patch adds Counter with CBC-MAC (CCM) support. RFC 3610 and NIST Special Publication 800-38C were referenced. 
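Once the templates below are registered, one-shot use through the existing AEAD interface looks like the following hedged sketch (ccm_aes_encrypt_once is an invented name; error handling is trimmed and asynchronous completion is ignored). Note that iv[0] carries L' (1..7) per RFC 3610 and that the scatterlist must leave room for the authentication tag, which is appended to the ciphertext:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int ccm_aes_encrypt_once(const u8 *key, unsigned int keylen, u8 *iv,
				struct scatterlist *assoc,
				unsigned int assoclen,
				struct scatterlist *sg, unsigned int len)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 8);	/* even, 4..16 */
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* In-place: sg must span len plus authsize bytes of space. */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  NULL, NULL);
	aead_request_set_crypt(req, sg, sg, len, iv);
	aead_request_set_assoc(req, assoc, assoclen);

	err = crypto_aead_encrypt(req);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}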
Signed-off-by: Joy Latten Signed-off-by: Herbert Xu --- crypto/Kconfig | 7 + crypto/Makefile | 1 + crypto/ccm.c | 889 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 897 insertions(+) create mode 100644 crypto/ccm.c diff --git a/crypto/Kconfig b/crypto/Kconfig index 7ad9711..0d89f77 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -220,6 +220,13 @@ config CRYPTO_GCM Support for Galois/Counter Mode (GCM) and Galois Message Authentication Code (GMAC). Required for IPSec. +config CRYPTO_CCM + tristate "CCM support" + select CRYPTO_CTR + select CRYPTO_AEAD + help + Support for Counter with CBC MAC. Required for IPsec. + config CRYPTO_CRYPTD tristate "Software async crypto daemon" select CRYPTO_BLKCIPHER diff --git a/crypto/Makefile b/crypto/Makefile index 1b99b3a..48c7583 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_CRYPTO_LRW) += lrw.o obj-$(CONFIG_CRYPTO_XTS) += xts.o obj-$(CONFIG_CRYPTO_CTR) += ctr.o obj-$(CONFIG_CRYPTO_GCM) += gcm.o +obj-$(CONFIG_CRYPTO_CCM) += ccm.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o diff --git a/crypto/ccm.c b/crypto/ccm.c new file mode 100644 index 0000000..7cf7e5a --- /dev/null +++ b/crypto/ccm.c @@ -0,0 +1,889 @@ +/* + * CCM: Counter with CBC-MAC + * + * (C) Copyright IBM Corp. 2007 - Joy Latten + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" + +struct ccm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_spawn cipher; +}; + +struct crypto_ccm_ctx { + struct crypto_cipher *cipher; + struct crypto_ablkcipher *ctr; +}; + +struct crypto_rfc4309_ctx { + struct crypto_aead *child; + u8 nonce[3]; +}; + +struct crypto_ccm_req_priv_ctx { + u8 odata[16]; + u8 idata[16]; + u8 auth_tag[16]; + u32 ilen; + u32 flags; + struct scatterlist src[2]; + struct scatterlist dst[2]; + struct ablkcipher_request abreq; +}; + +static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( + struct aead_request *req) +{ + unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); + + return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); +} + +static int set_msg_len(u8 *block, unsigned int msglen, int csize) +{ + __be32 data; + + memset(block, 0, csize); + block += csize; + + if (csize >= 4) + csize = 4; + else if (msglen > (1 << (8 * csize))) + return -EOVERFLOW; + + data = cpu_to_be32(msglen); + memcpy(block - csize, (u8 *)&data + 4 - csize, csize); + + return 0; +} + +static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_ablkcipher *ctr = ctx->ctr; + struct crypto_cipher *tfm = ctx->cipher; + int err = 0; + + crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); + crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) & + CRYPTO_TFM_REQ_MASK); + err = crypto_ablkcipher_setkey(ctr, key, keylen); + crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) & + CRYPTO_TFM_RES_MASK); + if (err) + goto out; + + crypto_cipher_clear_flags(tfm, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(tfm, crypto_aead_get_flags(aead) & + CRYPTO_TFM_REQ_MASK); + err = 
crypto_cipher_setkey(tfm, key, keylen); + crypto_aead_set_flags(aead, crypto_cipher_get_flags(tfm) & + CRYPTO_TFM_RES_MASK); + +out: + return err; +} + +static int crypto_ccm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + switch (authsize) { + case 4: + case 6: + case 8: + case 10: + case 12: + case 14: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +static int format_input(u8 *info, struct aead_request *req, + unsigned int cryptlen) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + unsigned int lp = req->iv[0]; + unsigned int l = lp + 1; + unsigned int m; + + m = crypto_aead_authsize(aead); + + memcpy(info, req->iv, 16); + + /* format control info per RFC 3610 and + * NIST Special Publication 800-38C + */ + *info |= (8 * ((m - 2) / 2)); + if (req->assoclen) + *info |= 64; + + return set_msg_len(info + 16 - l, cryptlen, l); +} + +static int format_adata(u8 *adata, unsigned int a) +{ + int len = 0; + + /* add control info for associated data + * RFC 3610 and NIST Special Publication 800-38C + */ + if (a < 65280) { + *(__be16 *)adata = cpu_to_be16(a); + len = 2; + } else { + *(__be16 *)adata = cpu_to_be16(0xfffe); + *(__be32 *)&adata[2] = cpu_to_be32(a); + len = 6; + } + + return len; +} + +static void compute_mac(struct crypto_cipher *tfm, u8 *data, int n, + struct crypto_ccm_req_priv_ctx *pctx) +{ + unsigned int bs = 16; + u8 *odata = pctx->odata; + u8 *idata = pctx->idata; + int datalen, getlen; + + datalen = n; + + /* first time in here, block may be partially filled. */ + getlen = bs - pctx->ilen; + if (datalen >= getlen) { + memcpy(idata + pctx->ilen, data, getlen); + crypto_xor(odata, idata, bs); + crypto_cipher_encrypt_one(tfm, odata, odata); + datalen -= getlen; + data += getlen; + pctx->ilen = 0; + } + + /* now encrypt rest of data */ + while (datalen >= bs) { + crypto_xor(odata, data, bs); + crypto_cipher_encrypt_one(tfm, odata, odata); + + datalen -= bs; + data += bs; + } + + /* check and see if there's leftover data that wasn't + * enough to fill a block. 
+ */ + if (datalen) { + memcpy(idata + pctx->ilen, data, datalen); + pctx->ilen += datalen; + } +} + +static void get_data_to_compute(struct crypto_cipher *tfm, + struct crypto_ccm_req_priv_ctx *pctx, + struct scatterlist *sg, unsigned int len) +{ + struct scatter_walk walk; + u8 *data_src; + int n; + + scatterwalk_start(&walk, sg); + + while (len) { + n = scatterwalk_clamp(&walk, len); + if (!n) { + scatterwalk_start(&walk, sg_next(walk.sg)); + n = scatterwalk_clamp(&walk, len); + } + data_src = scatterwalk_map(&walk, 0); + + compute_mac(tfm, data_src, n, pctx); + len -= n; + + scatterwalk_unmap(data_src, 0); + scatterwalk_advance(&walk, n); + scatterwalk_done(&walk, 0, len); + if (len) + crypto_yield(pctx->flags); + } + + /* any leftover needs padding and then encrypted */ + if (pctx->ilen) { + int padlen; + u8 *odata = pctx->odata; + u8 *idata = pctx->idata; + + padlen = 16 - pctx->ilen; + memset(idata + pctx->ilen, 0, padlen); + crypto_xor(odata, idata, 16); + crypto_cipher_encrypt_one(tfm, odata, odata); + pctx->ilen = 0; + } +} + +static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain, + unsigned int cryptlen) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); + struct crypto_cipher *cipher = ctx->cipher; + unsigned int assoclen = req->assoclen; + u8 *odata = pctx->odata; + u8 *idata = pctx->idata; + int err; + + /* format control data for input */ + err = format_input(odata, req, cryptlen); + if (err) + goto out; + + /* encrypt first block to use as start in computing mac */ + crypto_cipher_encrypt_one(cipher, odata, odata); + + /* format associated data and compute into mac */ + if (assoclen) { + pctx->ilen = format_adata(idata, assoclen); + get_data_to_compute(cipher, pctx, req->assoc, req->assoclen); + } + + /* compute plaintext into mac */ + get_data_to_compute(cipher, pctx, plain, cryptlen); + +out: + return err; +} + +static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err) +{ + struct aead_request *req = areq->data; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); + u8 *odata = pctx->odata; + + if (!err) + scatterwalk_map_and_copy(odata, req->dst, req->cryptlen, + crypto_aead_authsize(aead), 1); + aead_request_complete(req, err); +} + +static inline int crypto_ccm_check_iv(const u8 *iv) +{ + /* 2 <= L <= 8, so 1 <= L' <= 7. */ + if (1 > iv[0] || iv[0] > 7) + return -EINVAL; + + return 0; +} + +static int crypto_ccm_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); + struct ablkcipher_request *abreq = &pctx->abreq; + struct scatterlist *dst; + unsigned int cryptlen = req->cryptlen; + u8 *odata = pctx->odata; + u8 *iv = req->iv; + int err; + + err = crypto_ccm_check_iv(iv); + if (err) + return err; + + pctx->flags = aead_request_flags(req); + + err = crypto_ccm_auth(req, req->src, cryptlen); + if (err) + return err; + + /* Note: rfc 3610 and NIST 800-38C require counter of + * zero to encrypt auth tag. 
+ */ + memset(iv + 15 - iv[0], 0, iv[0] + 1); + + sg_init_table(pctx->src, 2); + sg_set_buf(pctx->src, odata, 16); + scatterwalk_sg_chain(pctx->src, 2, req->src); + + dst = pctx->src; + if (req->src != req->dst) { + sg_init_table(pctx->dst, 2); + sg_set_buf(pctx->dst, odata, 16); + scatterwalk_sg_chain(pctx->dst, 2, req->dst); + dst = pctx->dst; + } + + ablkcipher_request_set_tfm(abreq, ctx->ctr); + ablkcipher_request_set_callback(abreq, pctx->flags, + crypto_ccm_encrypt_done, req); + ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv); + err = crypto_ablkcipher_encrypt(abreq); + if (err) + return err; + + /* copy authtag to end of dst */ + scatterwalk_map_and_copy(odata, req->dst, cryptlen, + crypto_aead_authsize(aead), 1); + return err; +} + +static void crypto_ccm_decrypt_done(struct crypto_async_request *areq, + int err) +{ + struct aead_request *req = areq->data; + struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + unsigned int authsize = crypto_aead_authsize(aead); + unsigned int cryptlen = req->cryptlen - authsize; + + if (!err) { + err = crypto_ccm_auth(req, req->dst, cryptlen); + if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize)) + err = -EBADMSG; + } + aead_request_complete(req, err); +} + +static int crypto_ccm_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); + struct ablkcipher_request *abreq = &pctx->abreq; + struct scatterlist *dst; + unsigned int authsize = crypto_aead_authsize(aead); + unsigned int cryptlen = req->cryptlen; + u8 *authtag = pctx->auth_tag; + u8 *odata = pctx->odata; + u8 *iv = req->iv; + int err; + + if (cryptlen < authsize) + return -EINVAL; + cryptlen -= authsize; + + err = crypto_ccm_check_iv(iv); + if (err) + return err; + + pctx->flags = aead_request_flags(req); + + scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0); + + memset(iv + 15 - iv[0], 0, iv[0] + 1); + + sg_init_table(pctx->src, 2); + sg_set_buf(pctx->src, authtag, 16); + scatterwalk_sg_chain(pctx->src, 2, req->src); + + dst = pctx->src; + if (req->src != req->dst) { + sg_init_table(pctx->dst, 2); + sg_set_buf(pctx->dst, authtag, 16); + scatterwalk_sg_chain(pctx->dst, 2, req->dst); + dst = pctx->dst; + } + + ablkcipher_request_set_tfm(abreq, ctx->ctr); + ablkcipher_request_set_callback(abreq, pctx->flags, + crypto_ccm_decrypt_done, req); + ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv); + err = crypto_ablkcipher_decrypt(abreq); + if (err) + return err; + + err = crypto_ccm_auth(req, req->dst, cryptlen); + if (err) + return err; + + /* verify */ + if (memcmp(authtag, odata, authsize)) + return -EBADMSG; + + return err; +} + +static int crypto_ccm_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst); + struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_cipher *cipher; + struct crypto_ablkcipher *ctr; + unsigned long align; + int err; + + cipher = crypto_spawn_cipher(&ictx->cipher); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctr = crypto_spawn_skcipher(&ictx->ctr); + err = PTR_ERR(ctr); + if (IS_ERR(ctr)) + goto err_free_cipher; + + ctx->cipher = cipher; + ctx->ctr = ctr; + + align = crypto_tfm_alg_alignmask(tfm); + align &= ~(crypto_tfm_ctx_alignment() - 1); + tfm->crt_aead.reqsize = 
align + + sizeof(struct crypto_ccm_req_priv_ctx) + + crypto_ablkcipher_reqsize(ctr); + + return 0; + +err_free_cipher: + crypto_free_cipher(cipher); + return err; +} + +static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_cipher(ctx->cipher); + crypto_free_ablkcipher(ctx->ctr); +} + +static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb, + const char *full_name, + const char *ctr_name, + const char *cipher_name) +{ + struct crypto_attr_type *algt; + struct crypto_instance *inst; + struct crypto_alg *ctr; + struct crypto_alg *cipher; + struct ccm_instance_ctx *ictx; + int err; + + algt = crypto_get_attr_type(tb); + err = PTR_ERR(algt); + if (IS_ERR(algt)) + return ERR_PTR(err); + + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) + return ERR_PTR(-EINVAL); + + cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); + err = PTR_ERR(cipher); + if (IS_ERR(cipher)) + return ERR_PTR(err); + + err = -EINVAL; + if (cipher->cra_blocksize != 16) + goto out_put_cipher; + + inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); + err = -ENOMEM; + if (!inst) + goto out_put_cipher; + + ictx = crypto_instance_ctx(inst); + + err = crypto_init_spawn(&ictx->cipher, cipher, inst, + CRYPTO_ALG_TYPE_MASK); + if (err) + goto err_free_inst; + + crypto_set_skcipher_spawn(&ictx->ctr, inst); + err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); + if (err) + goto err_drop_cipher; + + ctr = crypto_skcipher_spawn_alg(&ictx->ctr); + + /* Not a stream cipher? */ + err = -EINVAL; + if (ctr->cra_blocksize != 1) + goto err_drop_ctr; + + /* We want the real thing! */ + if (ctr->cra_ablkcipher.ivsize != 16) + goto err_drop_ctr; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "ccm_base(%s,%s)", ctr->cra_driver_name, + cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_drop_ctr; + + memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME); + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; + inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority; + inst->alg.cra_blocksize = 1; + inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask | + (__alignof__(u32) - 1); + inst->alg.cra_type = &crypto_aead_type; + inst->alg.cra_aead.ivsize = 16; + inst->alg.cra_aead.maxauthsize = 16; + inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx); + inst->alg.cra_init = crypto_ccm_init_tfm; + inst->alg.cra_exit = crypto_ccm_exit_tfm; + inst->alg.cra_aead.setkey = crypto_ccm_setkey; + inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize; + inst->alg.cra_aead.encrypt = crypto_ccm_encrypt; + inst->alg.cra_aead.decrypt = crypto_ccm_decrypt; + +out: + crypto_mod_put(cipher); + return inst; + +err_drop_ctr: + crypto_drop_skcipher(&ictx->ctr); +err_drop_cipher: + crypto_drop_spawn(&ictx->cipher); +err_free_inst: + kfree(inst); +out_put_cipher: + inst = ERR_PTR(err); + goto out; +} + +static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb) +{ + int err; + const char *cipher_name; + char ctr_name[CRYPTO_MAX_ALG_NAME]; + char full_name[CRYPTO_MAX_ALG_NAME]; + + cipher_name = crypto_attr_alg_name(tb[1]); + err = PTR_ERR(cipher_name); + if (IS_ERR(cipher_name)) + return ERR_PTR(err); + + if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", + cipher_name) >= CRYPTO_MAX_ALG_NAME) + return ERR_PTR(-ENAMETOOLONG); + + if 
(snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >= + CRYPTO_MAX_ALG_NAME) + return ERR_PTR(-ENAMETOOLONG); + + return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name); +} + +static void crypto_ccm_free(struct crypto_instance *inst) +{ + struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst); + + crypto_drop_spawn(&ctx->cipher); + crypto_drop_skcipher(&ctx->ctr); + kfree(inst); +} + +static struct crypto_template crypto_ccm_tmpl = { + .name = "ccm", + .alloc = crypto_ccm_alloc, + .free = crypto_ccm_free, + .module = THIS_MODULE, +}; + +static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb) +{ + int err; + const char *ctr_name; + const char *cipher_name; + char full_name[CRYPTO_MAX_ALG_NAME]; + + ctr_name = crypto_attr_alg_name(tb[1]); + err = PTR_ERR(ctr_name); + if (IS_ERR(ctr_name)) + return ERR_PTR(err); + + cipher_name = crypto_attr_alg_name(tb[2]); + err = PTR_ERR(cipher_name); + if (IS_ERR(cipher_name)) + return ERR_PTR(err); + + if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", + ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME) + return ERR_PTR(-ENAMETOOLONG); + + return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name); +} + +static struct crypto_template crypto_ccm_base_tmpl = { + .name = "ccm_base", + .alloc = crypto_ccm_base_alloc, + .free = crypto_ccm_free, + .module = THIS_MODULE, +}; + +static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key, + unsigned int keylen) +{ + struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent); + struct crypto_aead *child = ctx->child; + int err; + + if (keylen < 3) + return -EINVAL; + + keylen -= 3; + memcpy(ctx->nonce, key + keylen, 3); + + crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_aead_setkey(child, key, keylen); + crypto_aead_set_flags(parent, crypto_aead_get_flags(child) & + CRYPTO_TFM_RES_MASK); + + return err; +} + +static int crypto_rfc4309_setauthsize(struct crypto_aead *parent, + unsigned int authsize) +{ + struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent); + + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + + return crypto_aead_setauthsize(ctx->child, authsize); +} + +static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req) +{ + struct aead_request *subreq = aead_request_ctx(req); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_aead *child = ctx->child; + u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child), + crypto_aead_alignmask(child) + 1); + + /* L' */ + iv[0] = 3; + + memcpy(iv + 1, ctx->nonce, 3); + memcpy(iv + 4, req->iv, 8); + + aead_request_set_tfm(subreq, child); + aead_request_set_callback(subreq, req->base.flags, req->base.complete, + req->base.data); + aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv); + aead_request_set_assoc(subreq, req->assoc, req->assoclen); + + return subreq; +} + +static int crypto_rfc4309_encrypt(struct aead_request *req) +{ + req = crypto_rfc4309_crypt(req); + + return crypto_aead_encrypt(req); +} + +static int crypto_rfc4309_decrypt(struct aead_request *req) +{ + req = crypto_rfc4309_crypt(req); + + return crypto_aead_decrypt(req); +} + +static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_aead_spawn *spawn = 
crypto_instance_ctx(inst); + struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_aead *aead; + unsigned long align; + + aead = crypto_spawn_aead(spawn); + if (IS_ERR(aead)) + return PTR_ERR(aead); + + ctx->child = aead; + + align = crypto_aead_alignmask(aead); + align &= ~(crypto_tfm_ctx_alignment() - 1); + tfm->crt_aead.reqsize = sizeof(struct aead_request) + + ALIGN(crypto_aead_reqsize(aead), + crypto_tfm_ctx_alignment()) + + align + 16; + + return 0; +} + +static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_aead(ctx->child); +} + +static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb) +{ + struct crypto_attr_type *algt; + struct crypto_instance *inst; + struct crypto_aead_spawn *spawn; + struct crypto_alg *alg; + const char *ccm_name; + int err; + + algt = crypto_get_attr_type(tb); + err = PTR_ERR(algt); + if (IS_ERR(algt)) + return ERR_PTR(err); + + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) + return ERR_PTR(-EINVAL); + + ccm_name = crypto_attr_alg_name(tb[1]); + err = PTR_ERR(ccm_name); + if (IS_ERR(ccm_name)) + return ERR_PTR(err); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + + spawn = crypto_instance_ctx(inst); + crypto_set_aead_spawn(spawn, inst); + err = crypto_grab_aead(spawn, ccm_name, 0, + crypto_requires_sync(algt->type, algt->mask)); + if (err) + goto out_free_inst; + + alg = crypto_aead_spawn_alg(spawn); + + err = -EINVAL; + + /* We only support 16-byte blocks. */ + if (alg->cra_aead.ivsize != 16) + goto out_drop_alg; + + /* Not a stream cipher? */ + if (alg->cra_blocksize != 1) + goto out_drop_alg; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, + "rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME || + snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "rfc4309(%s)", alg->cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto out_drop_alg; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; + inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.cra_priority = alg->cra_priority; + inst->alg.cra_blocksize = 1; + inst->alg.cra_alignmask = alg->cra_alignmask; + inst->alg.cra_type = &crypto_nivaead_type; + + inst->alg.cra_aead.ivsize = 8; + inst->alg.cra_aead.maxauthsize = 16; + + inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx); + + inst->alg.cra_init = crypto_rfc4309_init_tfm; + inst->alg.cra_exit = crypto_rfc4309_exit_tfm; + + inst->alg.cra_aead.setkey = crypto_rfc4309_setkey; + inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize; + inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt; + inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt; + + inst->alg.cra_aead.geniv = "seqiv"; + +out: + return inst; + +out_drop_alg: + crypto_drop_aead(spawn); +out_free_inst: + kfree(inst); + inst = ERR_PTR(err); + goto out; +} + +static void crypto_rfc4309_free(struct crypto_instance *inst) +{ + crypto_drop_spawn(crypto_instance_ctx(inst)); + kfree(inst); +} + +static struct crypto_template crypto_rfc4309_tmpl = { + .name = "rfc4309", + .alloc = crypto_rfc4309_alloc, + .free = crypto_rfc4309_free, + .module = THIS_MODULE, +}; + +static int __init crypto_ccm_module_init(void) +{ + int err; + + err = crypto_register_template(&crypto_ccm_base_tmpl); + if (err) + goto out; + + err = crypto_register_template(&crypto_ccm_tmpl); + if (err) + goto out_undo_base; + + err = crypto_register_template(&crypto_rfc4309_tmpl); + if (err) + goto 
out_undo_ccm; + +out: + return err; + +out_undo_ccm: + crypto_unregister_template(&crypto_ccm_tmpl); +out_undo_base: + crypto_unregister_template(&crypto_ccm_base_tmpl); + goto out; +} + +static void __exit crypto_ccm_module_exit(void) +{ + crypto_unregister_template(&crypto_rfc4309_tmpl); + crypto_unregister_template(&crypto_ccm_tmpl); + crypto_unregister_template(&crypto_ccm_base_tmpl); +} + +module_init(crypto_ccm_module_init); +module_exit(crypto_ccm_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Counter with CBC MAC"); +MODULE_ALIAS("ccm_base"); +MODULE_ALIAS("rfc4309"); -- cgit v1.1 From 93cc74e078eed8735585e5687903727bcfbcc8b4 Mon Sep 17 00:00:00 2001 From: Joy Latten Date: Wed, 12 Dec 2007 20:24:22 +0800 Subject: [CRYPTO] tcrypt: Add CCM vectors This patch adds 7 test vectors to tcrypt for CCM. The test vectors are from rfc 3610. There are about 10 more test vectors in RFC 3610 and 4 or 5 more in NIST. I can add these as time permits. I also needed to set authsize. CCM has a prerequisite of authsize. Signed-off-by: Joy Latten Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 31 ++++++- crypto/tcrypt.h | 260 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 289 insertions(+), 2 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 1142b49..2b52df7 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -266,8 +266,6 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, return; } - authsize = crypto_aead_authsize(tfm); - req = aead_request_alloc(tfm, GFP_KERNEL); if (!req) { printk(KERN_INFO "failed to allocate request for %s\n", algo); @@ -298,6 +296,15 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, goto out; } + authsize = abs(aead_tv[i].rlen - aead_tv[i].ilen); + ret = crypto_aead_setauthsize(tfm, authsize); + if (ret) { + printk(KERN_INFO + "failed to set authsize = %u\n", + authsize); + goto out; + } + sg_init_one(&sg[0], aead_tv[i].input, aead_tv[i].ilen + (enc ? 
authsize : 0)); @@ -374,6 +381,15 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, aead_tv[i].tap[k]); } + authsize = abs(aead_tv[i].rlen - aead_tv[i].ilen); + ret = crypto_aead_setauthsize(tfm, authsize); + if (ret) { + printk(KERN_INFO + "failed to set authsize = %u\n", + authsize); + goto out; + } + if (enc) sg[k - 1].length += authsize; @@ -1201,6 +1217,10 @@ static void do_test(void) AES_GCM_ENC_TEST_VECTORS); test_aead("gcm(aes)", DECRYPT, aes_gcm_dec_tv_template, AES_GCM_DEC_TEST_VECTORS); + test_aead("ccm(aes)", ENCRYPT, aes_ccm_enc_tv_template, + AES_CCM_ENC_TEST_VECTORS); + test_aead("ccm(aes)", DECRYPT, aes_ccm_dec_tv_template, + AES_CCM_DEC_TEST_VECTORS); //CAST5 test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template, @@ -1557,6 +1577,13 @@ static void do_test(void) LZO_COMP_TEST_VECTORS, LZO_DECOMP_TEST_VECTORS); break; + case 37: + test_aead("ccm(aes)", ENCRYPT, aes_ccm_enc_tv_template, + AES_CCM_ENC_TEST_VECTORS); + test_aead("ccm(aes)", DECRYPT, aes_ccm_dec_tv_template, + AES_CCM_DEC_TEST_VECTORS); + break; + case 100: test_hash("hmac(md5)", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 5b86509..f785e561 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -2313,6 +2313,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = { #define AES_CTR_DEC_TEST_VECTORS 6 #define AES_GCM_ENC_TEST_VECTORS 9 #define AES_GCM_DEC_TEST_VECTORS 8 +#define AES_CCM_ENC_TEST_VECTORS 7 +#define AES_CCM_DEC_TEST_VECTORS 7 static struct cipher_testvec aes_enc_tv_template[] = { { /* From FIPS-197 */ @@ -5028,6 +5030,264 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = { } }; +static struct aead_testvec aes_ccm_enc_tv_template[] = { + { /* From RFC 3610 */ + .key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf }, + .klen = 16, + .iv = { 0x01, 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 }, + .assoc = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 }, + .alen = 8, + .input = { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e }, + .ilen = 23, + .result = { 0x58, 0x8c, 0x97, 0x9a, 0x61, 0xc6, 0x63, 0xd2, + 0xf0, 0x66, 0xd0, 0xc2, 0xc0, 0xf9, 0x89, 0x80, + 0x6d, 0x5f, 0x6b, 0x61, 0xda, 0xc3, 0x84, 0x17, + 0xe8, 0xd1, 0x2c, 0xfd, 0xf9, 0x26, 0xe0 }, + .rlen = 31, + }, { + .key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf }, + .klen = 16, + .iv = { 0x01, 0x00, 0x00, 0x00, 0x07, 0x06, 0x05, 0x04, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 }, + .assoc = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b }, + .alen = 12, + .input = { 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e, 0x1f }, + .ilen = 20, + .result = { 0xdc, 0xf1, 0xfb, 0x7b, 0x5d, 0x9e, 0x23, 0xfb, + 0x9d, 0x4e, 0x13, 0x12, 0x53, 0x65, 0x8a, 0xd8, + 0x6e, 0xbd, 0xca, 0x3e, 0x51, 0xe8, 0x3f, 0x07, + 0x7d, 0x9c, 0x2d, 0x93 }, + .rlen = 28, + }, { + .key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf }, + .klen = 16, + .iv = { 0x01, 0x00, 0x00, 0x00, 0x0b, 0x0a, 0x09, 0x08, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 }, + .assoc = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 }, + .alen = 8, + .input = { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 
0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20 }, + .ilen = 25, + .result = { 0x82, 0x53, 0x1a, 0x60, 0xcc, 0x24, 0x94, 0x5a, + 0x4b, 0x82, 0x79, 0x18, 0x1a, 0xb5, 0xc8, 0x4d, + 0xf2, 0x1c, 0xe7, 0xf9, 0xb7, 0x3f, 0x42, 0xe1, + 0x97, 0xea, 0x9c, 0x07, 0xe5, 0x6b, 0x5e, 0xb1, + 0x7e, 0x5f, 0x4e }, + .rlen = 35, + }, { + .key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf }, + .klen = 16, + .iv = { 0x01, 0x00, 0x00, 0x00, 0x0c, 0x0b, 0x0a, 0x09, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 }, + .assoc = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b }, + .alen = 12, + .input = { 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e }, + .ilen = 19, + .result = { 0x07, 0x34, 0x25, 0x94, 0x15, 0x77, 0x85, 0x15, + 0x2b, 0x07, 0x40, 0x98, 0x33, 0x0a, 0xbb, 0x14, + 0x1b, 0x94, 0x7b, 0x56, 0x6a, 0xa9, 0x40, 0x6b, + 0x4d, 0x99, 0x99, 0x88, 0xdd }, + .rlen = 29, + }, { + .key = { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3, + 0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b }, + .klen = 16, + .iv = { 0x01, 0x00, 0x33, 0x56, 0x8e, 0xf7, 0xb2, 0x63, + 0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 }, + .assoc = { 0x63, 0x01, 0x8f, 0x76, 0xdc, 0x8a, 0x1b, 0xcb }, + .alen = 8, + .input = { 0x90, 0x20, 0xea, 0x6f, 0x91, 0xbd, 0xd8, 0x5a, + 0xfa, 0x00, 0x39, 0xba, 0x4b, 0xaf, 0xf9, 0xbf, + 0xb7, 0x9c, 0x70, 0x28, 0x94, 0x9c, 0xd0, 0xec }, + .ilen = 24, + .result = { 0x4c, 0xcb, 0x1e, 0x7c, 0xa9, 0x81, 0xbe, 0xfa, + 0xa0, 0x72, 0x6c, 0x55, 0xd3, 0x78, 0x06, 0x12, + 0x98, 0xc8, 0x5c, 0x92, 0x81, 0x4a, 0xbc, 0x33, + 0xc5, 0x2e, 0xe8, 0x1d, 0x7d, 0x77, 0xc0, 0x8a }, + .rlen = 32, + }, { + .key = { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3, + 0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b }, + .klen = 16, + .iv = { 0x01, 0x00, 0xd5, 0x60, 0x91, 0x2d, 0x3f, 0x70, + 0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 }, + .assoc = { 0xcd, 0x90, 0x44, 0xd2, 0xb7, 0x1f, 0xdb, 0x81, + 0x20, 0xea, 0x60, 0xc0 }, + .alen = 12, + .input = { 0x64, 0x35, 0xac, 0xba, 0xfb, 0x11, 0xa8, 0x2e, + 0x2f, 0x07, 0x1d, 0x7c, 0xa4, 0xa5, 0xeb, 0xd9, + 0x3a, 0x80, 0x3b, 0xa8, 0x7f }, + .ilen = 21, + .result = { 0x00, 0x97, 0x69, 0xec, 0xab, 0xdf, 0x48, 0x62, + 0x55, 0x94, 0xc5, 0x92, 0x51, 0xe6, 0x03, 0x57, + 0x22, 0x67, 0x5e, 0x04, 0xc8, 0x47, 0x09, 0x9e, + 0x5a, 0xe0, 0x70, 0x45, 0x51 }, + .rlen = 29, + }, { + .key = { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3, + 0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b }, + .klen = 16, + .iv = { 0x01, 0x00, 0x42, 0xff, 0xf8, 0xf1, 0x95, 0x1c, + 0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 }, + .assoc = { 0xd8, 0x5b, 0xc7, 0xe6, 0x9f, 0x94, 0x4f, 0xb8 }, + .alen = 8, + .input = { 0x8a, 0x19, 0xb9, 0x50, 0xbc, 0xf7, 0x1a, 0x01, + 0x8e, 0x5e, 0x67, 0x01, 0xc9, 0x17, 0x87, 0x65, + 0x98, 0x09, 0xd6, 0x7d, 0xbe, 0xdd, 0x18 }, + .ilen = 23, + .result = { 0xbc, 0x21, 0x8d, 0xaa, 0x94, 0x74, 0x27, 0xb6, + 0xdb, 0x38, 0x6a, 0x99, 0xac, 0x1a, 0xef, 0x23, + 0xad, 0xe0, 0xb5, 0x29, 0x39, 0xcb, 0x6a, 0x63, + 0x7c, 0xf9, 0xbe, 0xc2, 0x40, 0x88, 0x97, 0xc6, + 0xba }, + .rlen = 33, + }, +}; + +static struct aead_testvec aes_ccm_dec_tv_template[] = { + { /* From RFC 3610 */ + .key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf }, + .klen = 16, + .iv = { 0x01, 0x00, 0x00, 0x00, 0x03, 0x02, 0x01, 0x00, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 
0xa5, 0x00, 0x00 }, + .assoc = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 }, + .alen = 8, + .input = { 0x58, 0x8c, 0x97, 0x9a, 0x61, 0xc6, 0x63, 0xd2, + 0xf0, 0x66, 0xd0, 0xc2, 0xc0, 0xf9, 0x89, 0x80, + 0x6d, 0x5f, 0x6b, 0x61, 0xda, 0xc3, 0x84, 0x17, + 0xe8, 0xd1, 0x2c, 0xfd, 0xf9, 0x26, 0xe0 }, + .ilen = 31, + .result = { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e }, + .rlen = 23, + }, { + .key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf }, + .klen = 16, + .iv = { 0x01, 0x00, 0x00, 0x00, 0x07, 0x06, 0x05, 0x04, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 }, + .assoc = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b }, + .alen = 12, + .input = { 0xdc, 0xf1, 0xfb, 0x7b, 0x5d, 0x9e, 0x23, 0xfb, + 0x9d, 0x4e, 0x13, 0x12, 0x53, 0x65, 0x8a, 0xd8, + 0x6e, 0xbd, 0xca, 0x3e, 0x51, 0xe8, 0x3f, 0x07, + 0x7d, 0x9c, 0x2d, 0x93 }, + .ilen = 28, + .result = { 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e, 0x1f }, + .rlen = 20, + }, { + .key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf }, + .klen = 16, + .iv = { 0x01, 0x00, 0x00, 0x00, 0x0b, 0x0a, 0x09, 0x08, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 }, + .assoc = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 }, + .alen = 8, + .input = { 0x82, 0x53, 0x1a, 0x60, 0xcc, 0x24, 0x94, 0x5a, + 0x4b, 0x82, 0x79, 0x18, 0x1a, 0xb5, 0xc8, 0x4d, + 0xf2, 0x1c, 0xe7, 0xf9, 0xb7, 0x3f, 0x42, 0xe1, + 0x97, 0xea, 0x9c, 0x07, 0xe5, 0x6b, 0x5e, 0xb1, + 0x7e, 0x5f, 0x4e }, + .ilen = 35, + .result = { 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20 }, + .rlen = 25, + }, { + .key = { 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf }, + .klen = 16, + .iv = { 0x01, 0x00, 0x00, 0x00, 0x0c, 0x0b, 0x0a, 0x09, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x00, 0x00 }, + .assoc = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b }, + .alen = 12, + .input = { 0x07, 0x34, 0x25, 0x94, 0x15, 0x77, 0x85, 0x15, + 0x2b, 0x07, 0x40, 0x98, 0x33, 0x0a, 0xbb, 0x14, + 0x1b, 0x94, 0x7b, 0x56, 0x6a, 0xa9, 0x40, 0x6b, + 0x4d, 0x99, 0x99, 0x88, 0xdd }, + .ilen = 29, + .result = { 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e }, + .rlen = 19, + }, { + .key = { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3, + 0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b }, + .klen = 16, + .iv = { 0x01, 0x00, 0x33, 0x56, 0x8e, 0xf7, 0xb2, 0x63, + 0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 }, + .assoc = { 0x63, 0x01, 0x8f, 0x76, 0xdc, 0x8a, 0x1b, 0xcb }, + .alen = 8, + .input = { 0x4c, 0xcb, 0x1e, 0x7c, 0xa9, 0x81, 0xbe, 0xfa, + 0xa0, 0x72, 0x6c, 0x55, 0xd3, 0x78, 0x06, 0x12, + 0x98, 0xc8, 0x5c, 0x92, 0x81, 0x4a, 0xbc, 0x33, + 0xc5, 0x2e, 0xe8, 0x1d, 0x7d, 0x77, 0xc0, 0x8a }, + .ilen = 32, + .result = { 0x90, 0x20, 0xea, 0x6f, 0x91, 0xbd, 0xd8, 0x5a, + 0xfa, 0x00, 0x39, 0xba, 0x4b, 0xaf, 0xf9, 0xbf, + 0xb7, 0x9c, 0x70, 0x28, 0x94, 0x9c, 0xd0, 0xec }, + .rlen = 24, + }, { + .key = { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3, + 0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b }, + .klen = 16, + .iv = { 0x01, 0x00, 0xd5, 0x60, 0x91, 0x2d, 0x3f, 
0x70, + 0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 }, + .assoc = { 0xcd, 0x90, 0x44, 0xd2, 0xb7, 0x1f, 0xdb, 0x81, + 0x20, 0xea, 0x60, 0xc0 }, + .alen = 12, + .input = { 0x00, 0x97, 0x69, 0xec, 0xab, 0xdf, 0x48, 0x62, + 0x55, 0x94, 0xc5, 0x92, 0x51, 0xe6, 0x03, 0x57, + 0x22, 0x67, 0x5e, 0x04, 0xc8, 0x47, 0x09, 0x9e, + 0x5a, 0xe0, 0x70, 0x45, 0x51 }, + .ilen = 29, + .result = { 0x64, 0x35, 0xac, 0xba, 0xfb, 0x11, 0xa8, 0x2e, + 0x2f, 0x07, 0x1d, 0x7c, 0xa4, 0xa5, 0xeb, 0xd9, + 0x3a, 0x80, 0x3b, 0xa8, 0x7f }, + .rlen = 21, + }, { + .key = { 0xd7, 0x82, 0x8d, 0x13, 0xb2, 0xb0, 0xbd, 0xc3, + 0x25, 0xa7, 0x62, 0x36, 0xdf, 0x93, 0xcc, 0x6b }, + .klen = 16, + .iv = { 0x01, 0x00, 0x42, 0xff, 0xf8, 0xf1, 0x95, 0x1c, + 0x3c, 0x96, 0x96, 0x76, 0x6c, 0xfa, 0x00, 0x00 }, + .assoc = { 0xd8, 0x5b, 0xc7, 0xe6, 0x9f, 0x94, 0x4f, 0xb8 }, + .alen = 8, + .input = { 0xbc, 0x21, 0x8d, 0xaa, 0x94, 0x74, 0x27, 0xb6, + 0xdb, 0x38, 0x6a, 0x99, 0xac, 0x1a, 0xef, 0x23, + 0xad, 0xe0, 0xb5, 0x29, 0x39, 0xcb, 0x6a, 0x63, + 0x7c, 0xf9, 0xbe, 0xc2, 0x40, 0x88, 0x97, 0xc6, + 0xba }, + .ilen = 33, + .result = { 0x8a, 0x19, 0xb9, 0x50, 0xbc, 0xf7, 0x1a, 0x01, + 0x8e, 0x5e, 0x67, 0x01, 0xc9, 0x17, 0x87, 0x65, + 0x98, 0x09, 0xd6, 0x7d, 0xbe, 0xdd, 0x18 }, + .rlen = 23, + }, +}; + /* Cast5 test vectors from RFC 2144 */ #define CAST5_ENC_TEST_VECTORS 3 #define CAST5_DEC_TEST_VECTORS 3 -- cgit v1.1 From 3631c650c495d61b1dabf32eb26b46873636e918 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 13 Dec 2007 22:28:59 +0800 Subject: [CRYPTO] null: Add null blkcipher algorithm This patch adds a null blkcipher algorithm called ecb(cipher_null) for backwards compatibility. Previously the null algorithm when used by IPsec copied the data byte by byte. This new algorithm optimises that to a straight memcpy which lets us better measure inherent overheads in our IPsec code. Signed-off-by: Herbert Xu --- crypto/crypto_null.c | 69 ++++++++++++++++++++++++++++++++------ include/crypto/internal/skcipher.h | 5 +++ 2 files changed, 64 insertions(+), 10 deletions(-) diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index 29f7747..fc2d2a4 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -16,15 +16,17 @@ * (at your option) any later version. 
* */ + +#include #include #include #include -#include #include #define NULL_KEY_SIZE 0 #define NULL_BLOCK_SIZE 1 #define NULL_DIGEST_SIZE 0 +#define NULL_IV_SIZE 0 static int null_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) @@ -55,6 +57,26 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) memcpy(dst, src, NULL_BLOCK_SIZE); } +static int skcipher_null_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while (walk.nbytes) { + if (walk.src.virt.addr != walk.dst.virt.addr) + memcpy(walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes); + err = blkcipher_walk_done(desc, &walk, 0); + } + + return err; +} + static struct crypto_alg compress_null = { .cra_name = "compress_null", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, @@ -96,6 +118,25 @@ static struct crypto_alg cipher_null = { .cia_decrypt = null_crypt } } }; +static struct crypto_alg skcipher_null = { + .cra_name = "ecb(cipher_null)", + .cra_driver_name = "ecb-cipher_null", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = NULL_BLOCK_SIZE, + .cra_type = &crypto_blkcipher_type, + .cra_ctxsize = 0, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(skcipher_null.cra_list), + .cra_u = { .blkcipher = { + .min_keysize = NULL_KEY_SIZE, + .max_keysize = NULL_KEY_SIZE, + .ivsize = NULL_IV_SIZE, + .setkey = null_setkey, + .encrypt = skcipher_null_crypt, + .decrypt = skcipher_null_crypt } } +}; + MODULE_ALIAS("compress_null"); MODULE_ALIAS("digest_null"); MODULE_ALIAS("cipher_null"); @@ -108,27 +149,35 @@ static int __init init(void) if (ret < 0) goto out; + ret = crypto_register_alg(&skcipher_null); + if (ret < 0) + goto out_unregister_cipher; + ret = crypto_register_alg(&digest_null); - if (ret < 0) { - crypto_unregister_alg(&cipher_null); - goto out; - } + if (ret < 0) + goto out_unregister_skcipher; ret = crypto_register_alg(&compress_null); - if (ret < 0) { - crypto_unregister_alg(&digest_null); - crypto_unregister_alg(&cipher_null); - goto out; - } + if (ret < 0) + goto out_unregister_digest; out: return ret; + +out_unregister_digest: + crypto_unregister_alg(&digest_null); +out_unregister_skcipher: + crypto_unregister_alg(&skcipher_null); +out_unregister_cipher: + crypto_unregister_alg(&cipher_null); + goto out; } static void __exit fini(void) { crypto_unregister_alg(&compress_null); crypto_unregister_alg(&digest_null); + crypto_unregister_alg(&skcipher_null); crypto_unregister_alg(&cipher_null); } diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 2071999..0053f34 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -88,5 +88,10 @@ static inline void skcipher_givcrypt_complete( ablkcipher_request_complete(&req->creq, err); } +static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) +{ + return req->base.flags; +} + #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ -- cgit v1.1 From ce5bd4aca3c467936370846119b7f3daf9ccea78 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 14 Dec 2007 00:28:40 +0800 Subject: [CRYPTO] null: Allow setkey on digest_null We need to allow setkey on digest_null if it is to be used directly by authenc instead of through hmac. 
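For illustration, the effect of this change can be probed by instantiating authenc with digest_null directly as the authentication algorithm. A minimal sketch, assuming the algorithm string "authenc(digest_null,cbc(aes))" and a kernel-module context; the helper name is hypothetical and key handling is elided:

	#include <linux/crypto.h>
	#include <linux/err.h>

	static int probe_digest_null_authenc(void)
	{
		struct crypto_aead *tfm;

		/* Resolve the authenc template around digest_null. */
		tfm = crypto_alloc_aead("authenc(digest_null,cbc(aes))", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/*
		 * crypto_aead_setkey() on tfm would forward the (empty)
		 * authentication key to digest_null; before this patch
		 * that inner setkey failed because digest_null provided
		 * no dia_setkey operation.
		 */
		crypto_free_aead(tfm);
		return 0;
	}
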
Signed-off-by: Herbert Xu --- crypto/crypto_null.c | 1 + 1 file changed, 1 insertion(+) diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index fc2d2a4..ff7b3de 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -98,6 +98,7 @@ static struct crypto_alg digest_null = { .cra_list = LIST_HEAD_INIT(digest_null.cra_list), .cra_u = { .digest = { .dia_digestsize = NULL_DIGEST_SIZE, + .dia_setkey = null_setkey, .dia_init = null_init, .dia_update = null_update, .dia_final = null_final } } -- cgit v1.1 From 6e050778c5d08a97d5d98f7cb62bc503e872615c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 14 Dec 2007 00:44:11 +0800 Subject: [CRYPTO] scatterwalk: Handle zero nbytes in scatterwalk_map_and_copy It's better to return silently than crash and burn when someone feeds us a zero length. In particular the null digest algorithm when used as part of authenc will do that to us. Signed-off-by: Herbert Xu --- crypto/scatterwalk.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 297e19d..9aeeb52 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -105,6 +105,9 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, struct scatter_walk walk; unsigned int offset = 0; + if (!nbytes) + return; + for (;;) { scatterwalk_start(&walk, sg); -- cgit v1.1 From 4726204200327c04a77b819e2c653c063f1bc6ab Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 14 Dec 2007 10:19:27 +0800 Subject: [CRYPTO] seqiv: Add select AEAD in Kconfig Now that seqiv supports AEAD algorithms it needs to select the AEAD option. Thanks to Erez Zadok for pointing out the problem. Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/crypto/Kconfig b/crypto/Kconfig index 0d89f77..6c086ee 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -34,6 +34,7 @@ config CRYPTO_BLKCIPHER config CRYPTO_SEQIV tristate "Sequence Number IV Generator" + select CRYPTO_AEAD select CRYPTO_BLKCIPHER help This IV generator generates an IV based on a sequence number by -- cgit v1.1 From e7cd2514ea506f06bd4f7b13a9b62afd60f9c73b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 14 Dec 2007 22:28:14 +0800 Subject: [CRYPTO] chainiv: Avoid lock spinning where possible This patch makes chainiv avoid spinning by postponing requests on lock contention if the user allows the use of asynchronous algorithms. If a synchronous algorithm is requested then we behave as before. This should improve IPsec performance on SMP when two CPUs attempt to transmit over the same SA. Currently one of them will spin doing nothing waiting for the other CPU to finish its encryption. This patch makes it postpone the request and get on with other work. If only one CPU is transmitting for a given SA, then we will process the request synchronously as before. 
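The serialization at the heart of this change reduces to a claim-or-postpone discipline around a single state bit. A distilled sketch with hypothetical names and the request queueing elided; note that the real code in the diff below additionally rechecks the queue after clearing the bit, so a request postponed in that window is still picked up:

	#include <linux/bitops.h>
	#include <linux/workqueue.h>

	enum { MY_STATE_INUSE = 0 };

	struct my_ctx {
		unsigned long state;
		struct work_struct postponed;	/* drains postponed requests */
	};

	static void my_process_one(struct my_ctx *ctx)
	{
		/* ... generate the IV and encrypt one request ... */
	}

	static void my_submit(struct my_ctx *ctx)
	{
		/* Claim the context; on contention, defer to keventd
		 * instead of spinning on a lock. */
		if (test_and_set_bit(MY_STATE_INUSE, &ctx->state)) {
			schedule_work(&ctx->postponed);
			return;
		}

		my_process_one(ctx);
		clear_bit(MY_STATE_INUSE, &ctx->state);
	}
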
Signed-off-by: Herbert Xu --- crypto/chainiv.c | 208 +++++++++++++++++++++++++++++++++++-- include/crypto/internal/skcipher.h | 13 +++ 2 files changed, 213 insertions(+), 8 deletions(-) diff --git a/crypto/chainiv.c b/crypto/chainiv.c index 38868d1..d17fa04 100644 --- a/crypto/chainiv.c +++ b/crypto/chainiv.c @@ -16,16 +16,34 @@ #include #include #include +#include #include #include #include #include +#include + +enum { + CHAINIV_STATE_INUSE = 0, +}; struct chainiv_ctx { spinlock_t lock; char iv[]; }; +struct async_chainiv_ctx { + unsigned long state; + + spinlock_t lock; + int err; + + struct crypto_queue queue; + struct work_struct postponed; + + char iv[]; +}; + static int chainiv_givencrypt(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); @@ -80,26 +98,187 @@ unlock: return chainiv_givencrypt(req); } +static int chainiv_init_common(struct crypto_tfm *tfm) +{ + tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); + + return skcipher_geniv_init(tfm); +} + static int chainiv_init(struct crypto_tfm *tfm) { - struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); - struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm); spin_lock_init(&ctx->lock); - tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request); + return chainiv_init_common(tfm); +} - return skcipher_geniv_init(tfm); +static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx) +{ + int queued; + + if (!ctx->queue.qlen) { + smp_mb__before_clear_bit(); + clear_bit(CHAINIV_STATE_INUSE, &ctx->state); + + if (!ctx->queue.qlen || + test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) + goto out; + } + + queued = schedule_work(&ctx->postponed); + BUG_ON(!queued); + +out: + return ctx->err; +} + +static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req) +{ + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); + struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + int err; + + spin_lock_bh(&ctx->lock); + err = skcipher_enqueue_givcrypt(&ctx->queue, req); + spin_unlock_bh(&ctx->lock); + + if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) + return err; + + ctx->err = err; + return async_chainiv_schedule_work(ctx); +} + +static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req) +{ + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); + struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); + unsigned int ivsize = crypto_ablkcipher_ivsize(geniv); + + memcpy(req->giv, ctx->iv, ivsize); + memcpy(subreq->info, ctx->iv, ivsize); + + ctx->err = crypto_ablkcipher_encrypt(subreq); + if (ctx->err) + goto out; + + memcpy(ctx->iv, subreq->info, ivsize); + +out: + return async_chainiv_schedule_work(ctx); +} + +static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req) +{ + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); + struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); + + ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv)); + ablkcipher_request_set_callback(subreq, req->creq.base.flags, + req->creq.base.complete, + req->creq.base.data); + ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, + req->creq.nbytes, req->creq.info); + + if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) + goto postpone; + + if 
(ctx->queue.qlen) { + clear_bit(CHAINIV_STATE_INUSE, &ctx->state); + goto postpone; + } + + return async_chainiv_givencrypt_tail(req); + +postpone: + return async_chainiv_postpone_request(req); +} + +static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req) +{ + struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); + struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); + + if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) + goto out; + + if (crypto_ablkcipher_crt(geniv)->givencrypt != + async_chainiv_givencrypt_first) + goto unlock; + + crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt; + get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv)); + +unlock: + clear_bit(CHAINIV_STATE_INUSE, &ctx->state); + +out: + return async_chainiv_givencrypt(req); +} + +static void async_chainiv_do_postponed(struct work_struct *work) +{ + struct async_chainiv_ctx *ctx = container_of(work, + struct async_chainiv_ctx, + postponed); + struct skcipher_givcrypt_request *req; + struct ablkcipher_request *subreq; + + /* Only handle one request at a time to avoid hogging keventd. */ + spin_lock_bh(&ctx->lock); + req = skcipher_dequeue_givcrypt(&ctx->queue); + spin_unlock_bh(&ctx->lock); + + if (!req) { + async_chainiv_schedule_work(ctx); + return; + } + + subreq = skcipher_givcrypt_reqctx(req); + subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP; + + async_chainiv_givencrypt_tail(req); +} + +static int async_chainiv_init(struct crypto_tfm *tfm) +{ + struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); + + spin_lock_init(&ctx->lock); + + crypto_init_queue(&ctx->queue, 100); + INIT_WORK(&ctx->postponed, async_chainiv_do_postponed); + + return chainiv_init_common(tfm); +} + +static void async_chainiv_exit(struct crypto_tfm *tfm) +{ + struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); + + BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen); + + skcipher_geniv_exit(tfm); } static struct crypto_template chainiv_tmpl; static struct crypto_instance *chainiv_alloc(struct rtattr **tb) { + struct crypto_attr_type *algt; struct crypto_instance *inst; + int err; - inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, - CRYPTO_ALG_ASYNC); + algt = crypto_get_attr_type(tb); + err = PTR_ERR(algt); + if (IS_ERR(algt)) + return ERR_PTR(err); + + inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0); if (IS_ERR(inst)) goto out; @@ -108,8 +287,21 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb) inst->alg.cra_init = chainiv_init; inst->alg.cra_exit = skcipher_geniv_exit; - inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx) + - inst->alg.cra_ablkcipher.ivsize; + inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx); + + if (!crypto_requires_sync(algt->type, algt->mask)) { + inst->alg.cra_flags |= CRYPTO_ALG_ASYNC; + + inst->alg.cra_ablkcipher.givencrypt = + async_chainiv_givencrypt_first; + + inst->alg.cra_init = async_chainiv_init; + inst->alg.cra_exit = async_chainiv_exit; + + inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx); + } + + inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; out: return inst; diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 0053f34..2ba42cd 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -70,6 +70,19 @@ static inline struct crypto_ablkcipher *skcipher_geniv_cipher( return crypto_ablkcipher_crt(geniv)->base; } +static inline int skcipher_enqueue_givcrypt( + struct crypto_queue *queue, struct 
skcipher_givcrypt_request *request) +{ + return ablkcipher_enqueue_request(queue, &request->creq); +} + +static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt( + struct crypto_queue *queue) +{ + return container_of(ablkcipher_dequeue_request(queue), + struct skcipher_givcrypt_request, creq); +} + static inline void *skcipher_givcrypt_reqctx( struct skcipher_givcrypt_request *req) { -- cgit v1.1 From 189ed66e95fb23666a62963b718dcbe62adbadde Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 14 Dec 2007 22:29:37 +0800 Subject: [CRYPTO] api: Show async type This patch adds an async field to /proc/crypto for ablkcipher and aead algorithms. Signed-off-by: Herbert Xu --- crypto/ablkcipher.c | 4 ++++ crypto/aead.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index d1df528..3bcb099 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -109,6 +109,8 @@ static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher; seq_printf(m, "type : ablkcipher\n"); + seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? + "yes" : "no"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); @@ -158,6 +160,8 @@ static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg) struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher; seq_printf(m, "type : givcipher\n"); + seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? + "yes" : "no"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); diff --git a/crypto/aead.c b/crypto/aead.c index f5b1add..3a6f3f5 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -115,6 +115,8 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) struct aead_alg *aead = &alg->cra_aead; seq_printf(m, "type : aead\n"); + seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? + "yes" : "no"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "ivsize : %u\n", aead->ivsize); seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); @@ -169,6 +171,8 @@ static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) struct aead_alg *aead = &alg->cra_aead; seq_printf(m, "type : nivaead\n"); + seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? + "yes" : "no"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "ivsize : %u\n", aead->ivsize); seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); -- cgit v1.1 From dadbc53d0bbde0e84c40b9f6bc5c50eb9eb7352a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 17 Dec 2007 15:33:17 +0800 Subject: [CRYPTO] gcm: Introduce rfc4106 This patch introduces the rfc4106 wrapper for GCM just as we have an rfc4309 wrapper for CCM. The purpose of the wrapper is to include part of the IV in the key so that it can be negotiated by IPsec. 
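Concretely, setkey strips the last four bytes of the key and keeps them as an implicit salt; at request time the 96-bit GCM nonce is that salt followed by the 8-byte explicit IV carried in the packet. A minimal sketch of the layout (the helper name is hypothetical):

	#include <linux/string.h>
	#include <linux/types.h>

	static void rfc4106_nonce(u8 nonce[12], const u8 salt[4],
				  const u8 explicit_iv[8])
	{
		memcpy(nonce, salt, 4);		   /* implicit, from the key */
		memcpy(nonce + 4, explicit_iv, 8); /* explicit, on the wire */
	}
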
Signed-off-by: Herbert Xu --- crypto/gcm.c | 239 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 239 insertions(+) diff --git a/crypto/gcm.c b/crypto/gcm.c index d539f5e..e70afd0 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -27,6 +28,11 @@ struct crypto_gcm_ctx { struct gf128mul_4k *gf128; }; +struct crypto_rfc4106_ctx { + struct crypto_aead *child; + u8 nonce[4]; +}; + struct crypto_gcm_ghash_ctx { u32 bytes; u32 flags; @@ -240,6 +246,25 @@ out: return err; } +static int crypto_gcm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + switch (authsize) { + case 4: + case 8: + case 12: + case 13: + case 14: + case 15: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, struct aead_request *req, unsigned int cryptlen) @@ -472,6 +497,7 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, inst->alg.cra_init = crypto_gcm_init_tfm; inst->alg.cra_exit = crypto_gcm_exit_tfm; inst->alg.cra_aead.setkey = crypto_gcm_setkey; + inst->alg.cra_aead.setauthsize = crypto_gcm_setauthsize; inst->alg.cra_aead.encrypt = crypto_gcm_encrypt; inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; @@ -549,6 +575,211 @@ static struct crypto_template crypto_gcm_base_tmpl = { .module = THIS_MODULE, }; +static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key, + unsigned int keylen) +{ + struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent); + struct crypto_aead *child = ctx->child; + int err; + + if (keylen < 4) + return -EINVAL; + + keylen -= 4; + memcpy(ctx->nonce, key + keylen, 4); + + crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_aead_setkey(child, key, keylen); + crypto_aead_set_flags(parent, crypto_aead_get_flags(child) & + CRYPTO_TFM_RES_MASK); + + return err; +} + +static int crypto_rfc4106_setauthsize(struct crypto_aead *parent, + unsigned int authsize) +{ + struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent); + + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + + return crypto_aead_setauthsize(ctx->child, authsize); +} + +static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req) +{ + struct aead_request *subreq = aead_request_ctx(req); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_aead *child = ctx->child; + u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child), + crypto_aead_alignmask(child) + 1); + + memcpy(iv, ctx->nonce, 4); + memcpy(iv + 4, req->iv, 8); + + aead_request_set_tfm(subreq, child); + aead_request_set_callback(subreq, req->base.flags, req->base.complete, + req->base.data); + aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv); + aead_request_set_assoc(subreq, req->assoc, req->assoclen); + + return subreq; +} + +static int crypto_rfc4106_encrypt(struct aead_request *req) +{ + req = crypto_rfc4106_crypt(req); + + return crypto_aead_encrypt(req); +} + +static int crypto_rfc4106_decrypt(struct aead_request *req) +{ + req = crypto_rfc4106_crypt(req); + + return crypto_aead_decrypt(req); +} + +static int crypto_rfc4106_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst); + 
struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_aead *aead; + unsigned long align; + + aead = crypto_spawn_aead(spawn); + if (IS_ERR(aead)) + return PTR_ERR(aead); + + ctx->child = aead; + + align = crypto_aead_alignmask(aead); + align &= ~(crypto_tfm_ctx_alignment() - 1); + tfm->crt_aead.reqsize = sizeof(struct aead_request) + + ALIGN(crypto_aead_reqsize(aead), + crypto_tfm_ctx_alignment()) + + align + 16; + + return 0; +} + +static void crypto_rfc4106_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_aead(ctx->child); +} + +static struct crypto_instance *crypto_rfc4106_alloc(struct rtattr **tb) +{ + struct crypto_attr_type *algt; + struct crypto_instance *inst; + struct crypto_aead_spawn *spawn; + struct crypto_alg *alg; + const char *ccm_name; + int err; + + algt = crypto_get_attr_type(tb); + err = PTR_ERR(algt); + if (IS_ERR(algt)) + return ERR_PTR(err); + + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) + return ERR_PTR(-EINVAL); + + ccm_name = crypto_attr_alg_name(tb[1]); + err = PTR_ERR(ccm_name); + if (IS_ERR(ccm_name)) + return ERR_PTR(err); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + + spawn = crypto_instance_ctx(inst); + crypto_set_aead_spawn(spawn, inst); + err = crypto_grab_aead(spawn, ccm_name, 0, + crypto_requires_sync(algt->type, algt->mask)); + if (err) + goto out_free_inst; + + alg = crypto_aead_spawn_alg(spawn); + + err = -EINVAL; + + /* We only support 16-byte blocks. */ + if (alg->cra_aead.ivsize != 16) + goto out_drop_alg; + + /* Not a stream cipher? */ + if (alg->cra_blocksize != 1) + goto out_drop_alg; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, + "rfc4106(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME || + snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "rfc4106(%s)", alg->cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto out_drop_alg; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; + inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.cra_priority = alg->cra_priority; + inst->alg.cra_blocksize = 1; + inst->alg.cra_alignmask = alg->cra_alignmask; + inst->alg.cra_type = &crypto_nivaead_type; + + inst->alg.cra_aead.ivsize = 8; + inst->alg.cra_aead.maxauthsize = 16; + + inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx); + + inst->alg.cra_init = crypto_rfc4106_init_tfm; + inst->alg.cra_exit = crypto_rfc4106_exit_tfm; + + inst->alg.cra_aead.setkey = crypto_rfc4106_setkey; + inst->alg.cra_aead.setauthsize = crypto_rfc4106_setauthsize; + inst->alg.cra_aead.encrypt = crypto_rfc4106_encrypt; + inst->alg.cra_aead.decrypt = crypto_rfc4106_decrypt; + + inst->alg.cra_aead.geniv = "seqiv"; + +out: + return inst; + +out_drop_alg: + crypto_drop_aead(spawn); +out_free_inst: + kfree(inst); + inst = ERR_PTR(err); + goto out; +} + +static void crypto_rfc4106_free(struct crypto_instance *inst) +{ + crypto_drop_spawn(crypto_instance_ctx(inst)); + kfree(inst); +} + +static struct crypto_template crypto_rfc4106_tmpl = { + .name = "rfc4106", + .alloc = crypto_rfc4106_alloc, + .free = crypto_rfc4106_free, + .module = THIS_MODULE, +}; + static int __init crypto_gcm_module_init(void) { int err; @@ -561,9 +792,15 @@ static int __init crypto_gcm_module_init(void) if (err) goto out_undo_base; + err = crypto_register_template(&crypto_rfc4106_tmpl); + if (err) + goto out_undo_gcm; + out: return err; +out_undo_gcm: + crypto_unregister_template(&crypto_gcm_tmpl); out_undo_base: 
crypto_unregister_template(&crypto_gcm_base_tmpl); goto out; @@ -571,6 +808,7 @@ out_undo_base: static void __exit crypto_gcm_module_exit(void) { + crypto_unregister_template(&crypto_rfc4106_tmpl); crypto_unregister_template(&crypto_gcm_tmpl); crypto_unregister_template(&crypto_gcm_base_tmpl); } @@ -582,3 +820,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Galois/Counter Mode"); MODULE_AUTHOR("Mikko Herranen "); MODULE_ALIAS("gcm_base"); +MODULE_ALIAS("rfc4106"); -- cgit v1.1 From 974e4b752ee623854c5dc2bbfc7c7725029ce173 Mon Sep 17 00:00:00 2001 From: Tan Swee Heng Date: Mon, 10 Dec 2007 15:52:56 +0800 Subject: [CRYPTO] salsa20_i586: Salsa20 stream cipher algorithm (i586 version) This patch contains the salsa20-i586 implementation. The original assembly code came from . I have reformatted it (added indents) so that it matches the other algorithms in arch/x86/crypto. Signed-off-by: Tan Swee Heng Signed-off-by: Herbert Xu --- arch/x86/crypto/Makefile | 2 + arch/x86/crypto/salsa20-i586-asm_32.S | 1114 +++++++++++++++++++++++++++++++++ arch/x86/crypto/salsa20_glue.c | 127 ++++ crypto/Kconfig | 15 + 4 files changed, 1258 insertions(+) create mode 100644 arch/x86/crypto/salsa20-i586-asm_32.S create mode 100644 arch/x86/crypto/salsa20_glue.c diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index b8fbb43..25cc844 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -4,12 +4,14 @@ obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o +obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o aes-i586-y := aes-i586-asm_32.o aes_glue.o twofish-i586-y := twofish-i586-asm_32.o twofish_32.o +salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S b/arch/x86/crypto/salsa20-i586-asm_32.S new file mode 100644 index 0000000..72eb306 --- /dev/null +++ b/arch/x86/crypto/salsa20-i586-asm_32.S @@ -0,0 +1,1114 @@ +# salsa20_pm.s version 20051229 +# D. J. Bernstein +# Public domain. 
+ +# enter ECRYPT_encrypt_bytes +.text +.p2align 5 +.globl ECRYPT_encrypt_bytes +ECRYPT_encrypt_bytes: + mov %esp,%eax + and $31,%eax + add $256,%eax + sub %eax,%esp + # eax_stack = eax + movl %eax,80(%esp) + # ebx_stack = ebx + movl %ebx,84(%esp) + # esi_stack = esi + movl %esi,88(%esp) + # edi_stack = edi + movl %edi,92(%esp) + # ebp_stack = ebp + movl %ebp,96(%esp) + # x = arg1 + movl 4(%esp,%eax),%edx + # m = arg2 + movl 8(%esp,%eax),%esi + # out = arg3 + movl 12(%esp,%eax),%edi + # bytes = arg4 + movl 16(%esp,%eax),%ebx + # bytes -= 0 + sub $0,%ebx + # goto done if unsigned<= + jbe ._done +._start: + # in0 = *(uint32 *) (x + 0) + movl 0(%edx),%eax + # in1 = *(uint32 *) (x + 4) + movl 4(%edx),%ecx + # in2 = *(uint32 *) (x + 8) + movl 8(%edx),%ebp + # j0 = in0 + movl %eax,164(%esp) + # in3 = *(uint32 *) (x + 12) + movl 12(%edx),%eax + # j1 = in1 + movl %ecx,168(%esp) + # in4 = *(uint32 *) (x + 16) + movl 16(%edx),%ecx + # j2 = in2 + movl %ebp,172(%esp) + # in5 = *(uint32 *) (x + 20) + movl 20(%edx),%ebp + # j3 = in3 + movl %eax,176(%esp) + # in6 = *(uint32 *) (x + 24) + movl 24(%edx),%eax + # j4 = in4 + movl %ecx,180(%esp) + # in7 = *(uint32 *) (x + 28) + movl 28(%edx),%ecx + # j5 = in5 + movl %ebp,184(%esp) + # in8 = *(uint32 *) (x + 32) + movl 32(%edx),%ebp + # j6 = in6 + movl %eax,188(%esp) + # in9 = *(uint32 *) (x + 36) + movl 36(%edx),%eax + # j7 = in7 + movl %ecx,192(%esp) + # in10 = *(uint32 *) (x + 40) + movl 40(%edx),%ecx + # j8 = in8 + movl %ebp,196(%esp) + # in11 = *(uint32 *) (x + 44) + movl 44(%edx),%ebp + # j9 = in9 + movl %eax,200(%esp) + # in12 = *(uint32 *) (x + 48) + movl 48(%edx),%eax + # j10 = in10 + movl %ecx,204(%esp) + # in13 = *(uint32 *) (x + 52) + movl 52(%edx),%ecx + # j11 = in11 + movl %ebp,208(%esp) + # in14 = *(uint32 *) (x + 56) + movl 56(%edx),%ebp + # j12 = in12 + movl %eax,212(%esp) + # in15 = *(uint32 *) (x + 60) + movl 60(%edx),%eax + # j13 = in13 + movl %ecx,216(%esp) + # j14 = in14 + movl %ebp,220(%esp) + # j15 = in15 + movl %eax,224(%esp) + # x_backup = x + movl %edx,64(%esp) +._bytesatleast1: + # bytes - 64 + cmp $64,%ebx + # goto nocopy if unsigned>= + jae ._nocopy + # ctarget = out + movl %edi,228(%esp) + # out = &tmp + leal 0(%esp),%edi + # i = bytes + mov %ebx,%ecx + # while (i) { *out++ = *m++; --i } + rep movsb + # out = &tmp + leal 0(%esp),%edi + # m = &tmp + leal 0(%esp),%esi +._nocopy: + # out_backup = out + movl %edi,72(%esp) + # m_backup = m + movl %esi,68(%esp) + # bytes_backup = bytes + movl %ebx,76(%esp) + # in0 = j0 + movl 164(%esp),%eax + # in1 = j1 + movl 168(%esp),%ecx + # in2 = j2 + movl 172(%esp),%edx + # in3 = j3 + movl 176(%esp),%ebx + # x0 = in0 + movl %eax,100(%esp) + # x1 = in1 + movl %ecx,104(%esp) + # x2 = in2 + movl %edx,108(%esp) + # x3 = in3 + movl %ebx,112(%esp) + # in4 = j4 + movl 180(%esp),%eax + # in5 = j5 + movl 184(%esp),%ecx + # in6 = j6 + movl 188(%esp),%edx + # in7 = j7 + movl 192(%esp),%ebx + # x4 = in4 + movl %eax,116(%esp) + # x5 = in5 + movl %ecx,120(%esp) + # x6 = in6 + movl %edx,124(%esp) + # x7 = in7 + movl %ebx,128(%esp) + # in8 = j8 + movl 196(%esp),%eax + # in9 = j9 + movl 200(%esp),%ecx + # in10 = j10 + movl 204(%esp),%edx + # in11 = j11 + movl 208(%esp),%ebx + # x8 = in8 + movl %eax,132(%esp) + # x9 = in9 + movl %ecx,136(%esp) + # x10 = in10 + movl %edx,140(%esp) + # x11 = in11 + movl %ebx,144(%esp) + # in12 = j12 + movl 212(%esp),%eax + # in13 = j13 + movl 216(%esp),%ecx + # in14 = j14 + movl 220(%esp),%edx + # in15 = j15 + movl 224(%esp),%ebx + # x12 = in12 + movl %eax,148(%esp) + # x13 = in13 + 
movl %ecx,152(%esp) + # x14 = in14 + movl %edx,156(%esp) + # x15 = in15 + movl %ebx,160(%esp) + # i = 20 + mov $20,%ebp + # p = x0 + movl 100(%esp),%eax + # s = x5 + movl 120(%esp),%ecx + # t = x10 + movl 140(%esp),%edx + # w = x15 + movl 160(%esp),%ebx +._mainloop: + # x0 = p + movl %eax,100(%esp) + # x10 = t + movl %edx,140(%esp) + # p += x12 + addl 148(%esp),%eax + # x5 = s + movl %ecx,120(%esp) + # t += x6 + addl 124(%esp),%edx + # x15 = w + movl %ebx,160(%esp) + # r = x1 + movl 104(%esp),%esi + # r += s + add %ecx,%esi + # v = x11 + movl 144(%esp),%edi + # v += w + add %ebx,%edi + # p <<<= 7 + rol $7,%eax + # p ^= x4 + xorl 116(%esp),%eax + # t <<<= 7 + rol $7,%edx + # t ^= x14 + xorl 156(%esp),%edx + # r <<<= 7 + rol $7,%esi + # r ^= x9 + xorl 136(%esp),%esi + # v <<<= 7 + rol $7,%edi + # v ^= x3 + xorl 112(%esp),%edi + # x4 = p + movl %eax,116(%esp) + # x14 = t + movl %edx,156(%esp) + # p += x0 + addl 100(%esp),%eax + # x9 = r + movl %esi,136(%esp) + # t += x10 + addl 140(%esp),%edx + # x3 = v + movl %edi,112(%esp) + # p <<<= 9 + rol $9,%eax + # p ^= x8 + xorl 132(%esp),%eax + # t <<<= 9 + rol $9,%edx + # t ^= x2 + xorl 108(%esp),%edx + # s += r + add %esi,%ecx + # s <<<= 9 + rol $9,%ecx + # s ^= x13 + xorl 152(%esp),%ecx + # w += v + add %edi,%ebx + # w <<<= 9 + rol $9,%ebx + # w ^= x7 + xorl 128(%esp),%ebx + # x8 = p + movl %eax,132(%esp) + # x2 = t + movl %edx,108(%esp) + # p += x4 + addl 116(%esp),%eax + # x13 = s + movl %ecx,152(%esp) + # t += x14 + addl 156(%esp),%edx + # x7 = w + movl %ebx,128(%esp) + # p <<<= 13 + rol $13,%eax + # p ^= x12 + xorl 148(%esp),%eax + # t <<<= 13 + rol $13,%edx + # t ^= x6 + xorl 124(%esp),%edx + # r += s + add %ecx,%esi + # r <<<= 13 + rol $13,%esi + # r ^= x1 + xorl 104(%esp),%esi + # v += w + add %ebx,%edi + # v <<<= 13 + rol $13,%edi + # v ^= x11 + xorl 144(%esp),%edi + # x12 = p + movl %eax,148(%esp) + # x6 = t + movl %edx,124(%esp) + # p += x8 + addl 132(%esp),%eax + # x1 = r + movl %esi,104(%esp) + # t += x2 + addl 108(%esp),%edx + # x11 = v + movl %edi,144(%esp) + # p <<<= 18 + rol $18,%eax + # p ^= x0 + xorl 100(%esp),%eax + # t <<<= 18 + rol $18,%edx + # t ^= x10 + xorl 140(%esp),%edx + # s += r + add %esi,%ecx + # s <<<= 18 + rol $18,%ecx + # s ^= x5 + xorl 120(%esp),%ecx + # w += v + add %edi,%ebx + # w <<<= 18 + rol $18,%ebx + # w ^= x15 + xorl 160(%esp),%ebx + # x0 = p + movl %eax,100(%esp) + # x10 = t + movl %edx,140(%esp) + # p += x3 + addl 112(%esp),%eax + # p <<<= 7 + rol $7,%eax + # x5 = s + movl %ecx,120(%esp) + # t += x9 + addl 136(%esp),%edx + # x15 = w + movl %ebx,160(%esp) + # r = x4 + movl 116(%esp),%esi + # r += s + add %ecx,%esi + # v = x14 + movl 156(%esp),%edi + # v += w + add %ebx,%edi + # p ^= x1 + xorl 104(%esp),%eax + # t <<<= 7 + rol $7,%edx + # t ^= x11 + xorl 144(%esp),%edx + # r <<<= 7 + rol $7,%esi + # r ^= x6 + xorl 124(%esp),%esi + # v <<<= 7 + rol $7,%edi + # v ^= x12 + xorl 148(%esp),%edi + # x1 = p + movl %eax,104(%esp) + # x11 = t + movl %edx,144(%esp) + # p += x0 + addl 100(%esp),%eax + # x6 = r + movl %esi,124(%esp) + # t += x10 + addl 140(%esp),%edx + # x12 = v + movl %edi,148(%esp) + # p <<<= 9 + rol $9,%eax + # p ^= x2 + xorl 108(%esp),%eax + # t <<<= 9 + rol $9,%edx + # t ^= x8 + xorl 132(%esp),%edx + # s += r + add %esi,%ecx + # s <<<= 9 + rol $9,%ecx + # s ^= x7 + xorl 128(%esp),%ecx + # w += v + add %edi,%ebx + # w <<<= 9 + rol $9,%ebx + # w ^= x13 + xorl 152(%esp),%ebx + # x2 = p + movl %eax,108(%esp) + # x8 = t + movl %edx,132(%esp) + # p += x1 + addl 104(%esp),%eax + # x7 = s + movl 
%ecx,128(%esp) + # t += x11 + addl 144(%esp),%edx + # x13 = w + movl %ebx,152(%esp) + # p <<<= 13 + rol $13,%eax + # p ^= x3 + xorl 112(%esp),%eax + # t <<<= 13 + rol $13,%edx + # t ^= x9 + xorl 136(%esp),%edx + # r += s + add %ecx,%esi + # r <<<= 13 + rol $13,%esi + # r ^= x4 + xorl 116(%esp),%esi + # v += w + add %ebx,%edi + # v <<<= 13 + rol $13,%edi + # v ^= x14 + xorl 156(%esp),%edi + # x3 = p + movl %eax,112(%esp) + # x9 = t + movl %edx,136(%esp) + # p += x2 + addl 108(%esp),%eax + # x4 = r + movl %esi,116(%esp) + # t += x8 + addl 132(%esp),%edx + # x14 = v + movl %edi,156(%esp) + # p <<<= 18 + rol $18,%eax + # p ^= x0 + xorl 100(%esp),%eax + # t <<<= 18 + rol $18,%edx + # t ^= x10 + xorl 140(%esp),%edx + # s += r + add %esi,%ecx + # s <<<= 18 + rol $18,%ecx + # s ^= x5 + xorl 120(%esp),%ecx + # w += v + add %edi,%ebx + # w <<<= 18 + rol $18,%ebx + # w ^= x15 + xorl 160(%esp),%ebx + # x0 = p + movl %eax,100(%esp) + # x10 = t + movl %edx,140(%esp) + # p += x12 + addl 148(%esp),%eax + # x5 = s + movl %ecx,120(%esp) + # t += x6 + addl 124(%esp),%edx + # x15 = w + movl %ebx,160(%esp) + # r = x1 + movl 104(%esp),%esi + # r += s + add %ecx,%esi + # v = x11 + movl 144(%esp),%edi + # v += w + add %ebx,%edi + # p <<<= 7 + rol $7,%eax + # p ^= x4 + xorl 116(%esp),%eax + # t <<<= 7 + rol $7,%edx + # t ^= x14 + xorl 156(%esp),%edx + # r <<<= 7 + rol $7,%esi + # r ^= x9 + xorl 136(%esp),%esi + # v <<<= 7 + rol $7,%edi + # v ^= x3 + xorl 112(%esp),%edi + # x4 = p + movl %eax,116(%esp) + # x14 = t + movl %edx,156(%esp) + # p += x0 + addl 100(%esp),%eax + # x9 = r + movl %esi,136(%esp) + # t += x10 + addl 140(%esp),%edx + # x3 = v + movl %edi,112(%esp) + # p <<<= 9 + rol $9,%eax + # p ^= x8 + xorl 132(%esp),%eax + # t <<<= 9 + rol $9,%edx + # t ^= x2 + xorl 108(%esp),%edx + # s += r + add %esi,%ecx + # s <<<= 9 + rol $9,%ecx + # s ^= x13 + xorl 152(%esp),%ecx + # w += v + add %edi,%ebx + # w <<<= 9 + rol $9,%ebx + # w ^= x7 + xorl 128(%esp),%ebx + # x8 = p + movl %eax,132(%esp) + # x2 = t + movl %edx,108(%esp) + # p += x4 + addl 116(%esp),%eax + # x13 = s + movl %ecx,152(%esp) + # t += x14 + addl 156(%esp),%edx + # x7 = w + movl %ebx,128(%esp) + # p <<<= 13 + rol $13,%eax + # p ^= x12 + xorl 148(%esp),%eax + # t <<<= 13 + rol $13,%edx + # t ^= x6 + xorl 124(%esp),%edx + # r += s + add %ecx,%esi + # r <<<= 13 + rol $13,%esi + # r ^= x1 + xorl 104(%esp),%esi + # v += w + add %ebx,%edi + # v <<<= 13 + rol $13,%edi + # v ^= x11 + xorl 144(%esp),%edi + # x12 = p + movl %eax,148(%esp) + # x6 = t + movl %edx,124(%esp) + # p += x8 + addl 132(%esp),%eax + # x1 = r + movl %esi,104(%esp) + # t += x2 + addl 108(%esp),%edx + # x11 = v + movl %edi,144(%esp) + # p <<<= 18 + rol $18,%eax + # p ^= x0 + xorl 100(%esp),%eax + # t <<<= 18 + rol $18,%edx + # t ^= x10 + xorl 140(%esp),%edx + # s += r + add %esi,%ecx + # s <<<= 18 + rol $18,%ecx + # s ^= x5 + xorl 120(%esp),%ecx + # w += v + add %edi,%ebx + # w <<<= 18 + rol $18,%ebx + # w ^= x15 + xorl 160(%esp),%ebx + # x0 = p + movl %eax,100(%esp) + # x10 = t + movl %edx,140(%esp) + # p += x3 + addl 112(%esp),%eax + # p <<<= 7 + rol $7,%eax + # x5 = s + movl %ecx,120(%esp) + # t += x9 + addl 136(%esp),%edx + # x15 = w + movl %ebx,160(%esp) + # r = x4 + movl 116(%esp),%esi + # r += s + add %ecx,%esi + # v = x14 + movl 156(%esp),%edi + # v += w + add %ebx,%edi + # p ^= x1 + xorl 104(%esp),%eax + # t <<<= 7 + rol $7,%edx + # t ^= x11 + xorl 144(%esp),%edx + # r <<<= 7 + rol $7,%esi + # r ^= x6 + xorl 124(%esp),%esi + # v <<<= 7 + rol $7,%edi + # v ^= x12 + xorl 
148(%esp),%edi + # x1 = p + movl %eax,104(%esp) + # x11 = t + movl %edx,144(%esp) + # p += x0 + addl 100(%esp),%eax + # x6 = r + movl %esi,124(%esp) + # t += x10 + addl 140(%esp),%edx + # x12 = v + movl %edi,148(%esp) + # p <<<= 9 + rol $9,%eax + # p ^= x2 + xorl 108(%esp),%eax + # t <<<= 9 + rol $9,%edx + # t ^= x8 + xorl 132(%esp),%edx + # s += r + add %esi,%ecx + # s <<<= 9 + rol $9,%ecx + # s ^= x7 + xorl 128(%esp),%ecx + # w += v + add %edi,%ebx + # w <<<= 9 + rol $9,%ebx + # w ^= x13 + xorl 152(%esp),%ebx + # x2 = p + movl %eax,108(%esp) + # x8 = t + movl %edx,132(%esp) + # p += x1 + addl 104(%esp),%eax + # x7 = s + movl %ecx,128(%esp) + # t += x11 + addl 144(%esp),%edx + # x13 = w + movl %ebx,152(%esp) + # p <<<= 13 + rol $13,%eax + # p ^= x3 + xorl 112(%esp),%eax + # t <<<= 13 + rol $13,%edx + # t ^= x9 + xorl 136(%esp),%edx + # r += s + add %ecx,%esi + # r <<<= 13 + rol $13,%esi + # r ^= x4 + xorl 116(%esp),%esi + # v += w + add %ebx,%edi + # v <<<= 13 + rol $13,%edi + # v ^= x14 + xorl 156(%esp),%edi + # x3 = p + movl %eax,112(%esp) + # x9 = t + movl %edx,136(%esp) + # p += x2 + addl 108(%esp),%eax + # x4 = r + movl %esi,116(%esp) + # t += x8 + addl 132(%esp),%edx + # x14 = v + movl %edi,156(%esp) + # p <<<= 18 + rol $18,%eax + # p ^= x0 + xorl 100(%esp),%eax + # t <<<= 18 + rol $18,%edx + # t ^= x10 + xorl 140(%esp),%edx + # s += r + add %esi,%ecx + # s <<<= 18 + rol $18,%ecx + # s ^= x5 + xorl 120(%esp),%ecx + # w += v + add %edi,%ebx + # w <<<= 18 + rol $18,%ebx + # w ^= x15 + xorl 160(%esp),%ebx + # i -= 4 + sub $4,%ebp + # goto mainloop if unsigned > + ja ._mainloop + # x0 = p + movl %eax,100(%esp) + # x5 = s + movl %ecx,120(%esp) + # x10 = t + movl %edx,140(%esp) + # x15 = w + movl %ebx,160(%esp) + # out = out_backup + movl 72(%esp),%edi + # m = m_backup + movl 68(%esp),%esi + # in0 = x0 + movl 100(%esp),%eax + # in1 = x1 + movl 104(%esp),%ecx + # in0 += j0 + addl 164(%esp),%eax + # in1 += j1 + addl 168(%esp),%ecx + # in0 ^= *(uint32 *) (m + 0) + xorl 0(%esi),%eax + # in1 ^= *(uint32 *) (m + 4) + xorl 4(%esi),%ecx + # *(uint32 *) (out + 0) = in0 + movl %eax,0(%edi) + # *(uint32 *) (out + 4) = in1 + movl %ecx,4(%edi) + # in2 = x2 + movl 108(%esp),%eax + # in3 = x3 + movl 112(%esp),%ecx + # in2 += j2 + addl 172(%esp),%eax + # in3 += j3 + addl 176(%esp),%ecx + # in2 ^= *(uint32 *) (m + 8) + xorl 8(%esi),%eax + # in3 ^= *(uint32 *) (m + 12) + xorl 12(%esi),%ecx + # *(uint32 *) (out + 8) = in2 + movl %eax,8(%edi) + # *(uint32 *) (out + 12) = in3 + movl %ecx,12(%edi) + # in4 = x4 + movl 116(%esp),%eax + # in5 = x5 + movl 120(%esp),%ecx + # in4 += j4 + addl 180(%esp),%eax + # in5 += j5 + addl 184(%esp),%ecx + # in4 ^= *(uint32 *) (m + 16) + xorl 16(%esi),%eax + # in5 ^= *(uint32 *) (m + 20) + xorl 20(%esi),%ecx + # *(uint32 *) (out + 16) = in4 + movl %eax,16(%edi) + # *(uint32 *) (out + 20) = in5 + movl %ecx,20(%edi) + # in6 = x6 + movl 124(%esp),%eax + # in7 = x7 + movl 128(%esp),%ecx + # in6 += j6 + addl 188(%esp),%eax + # in7 += j7 + addl 192(%esp),%ecx + # in6 ^= *(uint32 *) (m + 24) + xorl 24(%esi),%eax + # in7 ^= *(uint32 *) (m + 28) + xorl 28(%esi),%ecx + # *(uint32 *) (out + 24) = in6 + movl %eax,24(%edi) + # *(uint32 *) (out + 28) = in7 + movl %ecx,28(%edi) + # in8 = x8 + movl 132(%esp),%eax + # in9 = x9 + movl 136(%esp),%ecx + # in8 += j8 + addl 196(%esp),%eax + # in9 += j9 + addl 200(%esp),%ecx + # in8 ^= *(uint32 *) (m + 32) + xorl 32(%esi),%eax + # in9 ^= *(uint32 *) (m + 36) + xorl 36(%esi),%ecx + # *(uint32 *) (out + 32) = in8 + movl %eax,32(%edi) + # *(uint32 *) 
(out + 36) = in9 + movl %ecx,36(%edi) + # in10 = x10 + movl 140(%esp),%eax + # in11 = x11 + movl 144(%esp),%ecx + # in10 += j10 + addl 204(%esp),%eax + # in11 += j11 + addl 208(%esp),%ecx + # in10 ^= *(uint32 *) (m + 40) + xorl 40(%esi),%eax + # in11 ^= *(uint32 *) (m + 44) + xorl 44(%esi),%ecx + # *(uint32 *) (out + 40) = in10 + movl %eax,40(%edi) + # *(uint32 *) (out + 44) = in11 + movl %ecx,44(%edi) + # in12 = x12 + movl 148(%esp),%eax + # in13 = x13 + movl 152(%esp),%ecx + # in12 += j12 + addl 212(%esp),%eax + # in13 += j13 + addl 216(%esp),%ecx + # in12 ^= *(uint32 *) (m + 48) + xorl 48(%esi),%eax + # in13 ^= *(uint32 *) (m + 52) + xorl 52(%esi),%ecx + # *(uint32 *) (out + 48) = in12 + movl %eax,48(%edi) + # *(uint32 *) (out + 52) = in13 + movl %ecx,52(%edi) + # in14 = x14 + movl 156(%esp),%eax + # in15 = x15 + movl 160(%esp),%ecx + # in14 += j14 + addl 220(%esp),%eax + # in15 += j15 + addl 224(%esp),%ecx + # in14 ^= *(uint32 *) (m + 56) + xorl 56(%esi),%eax + # in15 ^= *(uint32 *) (m + 60) + xorl 60(%esi),%ecx + # *(uint32 *) (out + 56) = in14 + movl %eax,56(%edi) + # *(uint32 *) (out + 60) = in15 + movl %ecx,60(%edi) + # bytes = bytes_backup + movl 76(%esp),%ebx + # in8 = j8 + movl 196(%esp),%eax + # in9 = j9 + movl 200(%esp),%ecx + # in8 += 1 + add $1,%eax + # in9 += 0 + carry + adc $0,%ecx + # j8 = in8 + movl %eax,196(%esp) + # j9 = in9 + movl %ecx,200(%esp) + # bytes - 64 + cmp $64,%ebx + # goto bytesatleast65 if unsigned> + ja ._bytesatleast65 + # goto bytesatleast64 if unsigned>= + jae ._bytesatleast64 + # m = out + mov %edi,%esi + # out = ctarget + movl 228(%esp),%edi + # i = bytes + mov %ebx,%ecx + # while (i) { *out++ = *m++; --i } + rep movsb +._bytesatleast64: + # x = x_backup + movl 64(%esp),%eax + # in8 = j8 + movl 196(%esp),%ecx + # in9 = j9 + movl 200(%esp),%edx + # *(uint32 *) (x + 32) = in8 + movl %ecx,32(%eax) + # *(uint32 *) (x + 36) = in9 + movl %edx,36(%eax) +._done: + # eax = eax_stack + movl 80(%esp),%eax + # ebx = ebx_stack + movl 84(%esp),%ebx + # esi = esi_stack + movl 88(%esp),%esi + # edi = edi_stack + movl 92(%esp),%edi + # ebp = ebp_stack + movl 96(%esp),%ebp + # leave + add %eax,%esp + ret +._bytesatleast65: + # bytes -= 64 + sub $64,%ebx + # out += 64 + add $64,%edi + # m += 64 + add $64,%esi + # goto bytesatleast1 + jmp ._bytesatleast1 +# enter ECRYPT_keysetup +.text +.p2align 5 +.globl ECRYPT_keysetup +ECRYPT_keysetup: + mov %esp,%eax + and $31,%eax + add $256,%eax + sub %eax,%esp + # eax_stack = eax + movl %eax,64(%esp) + # ebx_stack = ebx + movl %ebx,68(%esp) + # esi_stack = esi + movl %esi,72(%esp) + # edi_stack = edi + movl %edi,76(%esp) + # ebp_stack = ebp + movl %ebp,80(%esp) + # k = arg2 + movl 8(%esp,%eax),%ecx + # kbits = arg3 + movl 12(%esp,%eax),%edx + # x = arg1 + movl 4(%esp,%eax),%eax + # in1 = *(uint32 *) (k + 0) + movl 0(%ecx),%ebx + # in2 = *(uint32 *) (k + 4) + movl 4(%ecx),%esi + # in3 = *(uint32 *) (k + 8) + movl 8(%ecx),%edi + # in4 = *(uint32 *) (k + 12) + movl 12(%ecx),%ebp + # *(uint32 *) (x + 4) = in1 + movl %ebx,4(%eax) + # *(uint32 *) (x + 8) = in2 + movl %esi,8(%eax) + # *(uint32 *) (x + 12) = in3 + movl %edi,12(%eax) + # *(uint32 *) (x + 16) = in4 + movl %ebp,16(%eax) + # kbits - 256 + cmp $256,%edx + # goto kbits128 if unsigned< + jb ._kbits128 +._kbits256: + # in11 = *(uint32 *) (k + 16) + movl 16(%ecx),%edx + # in12 = *(uint32 *) (k + 20) + movl 20(%ecx),%ebx + # in13 = *(uint32 *) (k + 24) + movl 24(%ecx),%esi + # in14 = *(uint32 *) (k + 28) + movl 28(%ecx),%ecx + # *(uint32 *) (x + 44) = in11 + movl %edx,44(%eax) + # 
*(uint32 *) (x + 48) = in12 + movl %ebx,48(%eax) + # *(uint32 *) (x + 52) = in13 + movl %esi,52(%eax) + # *(uint32 *) (x + 56) = in14 + movl %ecx,56(%eax) + # in0 = 1634760805 + mov $1634760805,%ecx + # in5 = 857760878 + mov $857760878,%edx + # in10 = 2036477234 + mov $2036477234,%ebx + # in15 = 1797285236 + mov $1797285236,%esi + # *(uint32 *) (x + 0) = in0 + movl %ecx,0(%eax) + # *(uint32 *) (x + 20) = in5 + movl %edx,20(%eax) + # *(uint32 *) (x + 40) = in10 + movl %ebx,40(%eax) + # *(uint32 *) (x + 60) = in15 + movl %esi,60(%eax) + # goto keysetupdone + jmp ._keysetupdone +._kbits128: + # in11 = *(uint32 *) (k + 0) + movl 0(%ecx),%edx + # in12 = *(uint32 *) (k + 4) + movl 4(%ecx),%ebx + # in13 = *(uint32 *) (k + 8) + movl 8(%ecx),%esi + # in14 = *(uint32 *) (k + 12) + movl 12(%ecx),%ecx + # *(uint32 *) (x + 44) = in11 + movl %edx,44(%eax) + # *(uint32 *) (x + 48) = in12 + movl %ebx,48(%eax) + # *(uint32 *) (x + 52) = in13 + movl %esi,52(%eax) + # *(uint32 *) (x + 56) = in14 + movl %ecx,56(%eax) + # in0 = 1634760805 + mov $1634760805,%ecx + # in5 = 824206446 + mov $824206446,%edx + # in10 = 2036477238 + mov $2036477238,%ebx + # in15 = 1797285236 + mov $1797285236,%esi + # *(uint32 *) (x + 0) = in0 + movl %ecx,0(%eax) + # *(uint32 *) (x + 20) = in5 + movl %edx,20(%eax) + # *(uint32 *) (x + 40) = in10 + movl %ebx,40(%eax) + # *(uint32 *) (x + 60) = in15 + movl %esi,60(%eax) +._keysetupdone: + # eax = eax_stack + movl 64(%esp),%eax + # ebx = ebx_stack + movl 68(%esp),%ebx + # esi = esi_stack + movl 72(%esp),%esi + # edi = edi_stack + movl 76(%esp),%edi + # ebp = ebp_stack + movl 80(%esp),%ebp + # leave + add %eax,%esp + ret +# enter ECRYPT_ivsetup +.text +.p2align 5 +.globl ECRYPT_ivsetup +ECRYPT_ivsetup: + mov %esp,%eax + and $31,%eax + add $256,%eax + sub %eax,%esp + # eax_stack = eax + movl %eax,64(%esp) + # ebx_stack = ebx + movl %ebx,68(%esp) + # esi_stack = esi + movl %esi,72(%esp) + # edi_stack = edi + movl %edi,76(%esp) + # ebp_stack = ebp + movl %ebp,80(%esp) + # iv = arg2 + movl 8(%esp,%eax),%ecx + # x = arg1 + movl 4(%esp,%eax),%eax + # in6 = *(uint32 *) (iv + 0) + movl 0(%ecx),%edx + # in7 = *(uint32 *) (iv + 4) + movl 4(%ecx),%ecx + # in8 = 0 + mov $0,%ebx + # in9 = 0 + mov $0,%esi + # *(uint32 *) (x + 24) = in6 + movl %edx,24(%eax) + # *(uint32 *) (x + 28) = in7 + movl %ecx,28(%eax) + # *(uint32 *) (x + 32) = in8 + movl %ebx,32(%eax) + # *(uint32 *) (x + 36) = in9 + movl %esi,36(%eax) + # eax = eax_stack + movl 64(%esp),%eax + # ebx = ebx_stack + movl 68(%esp),%ebx + # esi = esi_stack + movl 72(%esp),%esi + # edi = edi_stack + movl 76(%esp),%edi + # ebp = ebp_stack + movl 80(%esp),%ebp + # leave + add %eax,%esp + ret diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c new file mode 100644 index 0000000..3be4439 --- /dev/null +++ b/arch/x86/crypto/salsa20_glue.c @@ -0,0 +1,127 @@ +/* + * Glue code for optimized assembly version of Salsa20. + * + * Copyright (c) 2007 Tan Swee Heng + * + * The assembly codes are public domain assembly codes written by Daniel. J. + * Bernstein . The codes are modified to include indentation + * and to remove extraneous comments and functions that are not needed. + * - i586 version, renamed as salsa20-i586-asm_32.S + * available from + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#include +#include +#include + +#define SALSA20_IV_SIZE 8U +#define SALSA20_MIN_KEY_SIZE 16U +#define SALSA20_MAX_KEY_SIZE 32U + +// use the ECRYPT_* function names +#define salsa20_keysetup ECRYPT_keysetup +#define salsa20_ivsetup ECRYPT_ivsetup +#define salsa20_encrypt_bytes ECRYPT_encrypt_bytes + +struct salsa20_ctx +{ + u32 input[16]; +}; + +asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, + u32 keysize, u32 ivsize); +asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv); +asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, + const u8 *src, u8 *dst, u32 bytes); + +static int setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keysize) +{ + struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); + salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8); + return 0; +} + +static int encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + struct crypto_blkcipher *tfm = desc->tfm; + struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm); + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt_block(desc, &walk, 64); + + salsa20_ivsetup(ctx, walk.iv); + + if (likely(walk.nbytes == nbytes)) + { + salsa20_encrypt_bytes(ctx, walk.src.virt.addr, + walk.dst.virt.addr, nbytes); + return blkcipher_walk_done(desc, &walk, 0); + } + + while (walk.nbytes >= 64) { + salsa20_encrypt_bytes(ctx, walk.src.virt.addr, + walk.dst.virt.addr, + walk.nbytes - (walk.nbytes % 64)); + err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64); + } + + if (walk.nbytes) { + salsa20_encrypt_bytes(ctx, walk.src.virt.addr, + walk.dst.virt.addr, walk.nbytes); + err = blkcipher_walk_done(desc, &walk, 0); + } + + return err; +} + +static struct crypto_alg alg = { + .cra_name = "salsa20", + .cra_driver_name = "salsa20-asm", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_type = &crypto_blkcipher_type, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct salsa20_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_u = { + .blkcipher = { + .setkey = setkey, + .encrypt = encrypt, + .decrypt = encrypt, + .min_keysize = SALSA20_MIN_KEY_SIZE, + .max_keysize = SALSA20_MAX_KEY_SIZE, + .ivsize = SALSA20_IV_SIZE, + } + } +}; + +static int __init init(void) +{ + return crypto_register_alg(&alg); +} + +static void __exit fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(init); +module_exit(fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)"); +MODULE_ALIAS("salsa20"); +MODULE_ALIAS("salsa20-asm"); diff --git a/crypto/Kconfig b/crypto/Kconfig index 6c086ee..221356b 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -489,6 +489,21 @@ config CRYPTO_SALSA20 The Salsa20 stream cipher algorithm is designed by Daniel J. Bernstein . See +config CRYPTO_SALSA20_586 + tristate "Salsa20 stream cipher algorithm (i586) (EXPERIMENTAL)" + depends on (X86 || UML_X86) && !64BIT + depends on EXPERIMENTAL + select CRYPTO_BLKCIPHER + select CRYPTO_SALSA20 + help + Salsa20 stream cipher algorithm. + + Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT + Stream Cipher Project. See + + The Salsa20 stream cipher algorithm is designed by Daniel J. + Bernstein . 
See + config CRYPTO_DEFLATE tristate "Deflate compression algorithm" select CRYPTO_ALGAPI -- cgit v1.1 From 9a7dafbba47384c330779c75a1546684efaa8c1a Mon Sep 17 00:00:00 2001 From: Tan Swee Heng Date: Tue, 18 Dec 2007 00:04:40 +0800 Subject: [CRYPTO] salsa20: Add x86-64 assembly version This is the x86-64 version of the Salsa20 stream cipher algorithm. The original assembly code came from . It has been reformatted for clarity. Signed-off-by: Tan Swee Heng Signed-off-by: Herbert Xu --- arch/x86/crypto/Makefile | 2 + arch/x86/crypto/salsa20-x86_64-asm_64.S | 920 ++++++++++++++++++++++++++++++++ arch/x86/crypto/salsa20_glue.c | 2 + crypto/Kconfig | 15 + 4 files changed, 939 insertions(+) create mode 100644 arch/x86/crypto/salsa20-x86_64-asm_64.S diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 25cc844..09200e1 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o +obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o aes-i586-y := aes-i586-asm_32.o aes_glue.o twofish-i586-y := twofish-i586-asm_32.o twofish_32.o @@ -15,3 +16,4 @@ salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o +salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S new file mode 100644 index 0000000..6214a9b --- /dev/null +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S @@ -0,0 +1,920 @@ +# enter ECRYPT_encrypt_bytes +.text +.p2align 5 +.globl ECRYPT_encrypt_bytes +ECRYPT_encrypt_bytes: + mov %rsp,%r11 + and $31,%r11 + add $256,%r11 + sub %r11,%rsp + # x = arg1 + mov %rdi,%r8 + # m = arg2 + mov %rsi,%rsi + # out = arg3 + mov %rdx,%rdi + # bytes = arg4 + mov %rcx,%rdx + # unsigned>? 
bytes - 0 + cmp $0,%rdx + # comment:fp stack unchanged by jump + # goto done if !unsigned> + jbe ._done + # comment:fp stack unchanged by fallthrough +# start: +._start: + # r11_stack = r11 + movq %r11,0(%rsp) + # r12_stack = r12 + movq %r12,8(%rsp) + # r13_stack = r13 + movq %r13,16(%rsp) + # r14_stack = r14 + movq %r14,24(%rsp) + # r15_stack = r15 + movq %r15,32(%rsp) + # rbx_stack = rbx + movq %rbx,40(%rsp) + # rbp_stack = rbp + movq %rbp,48(%rsp) + # in0 = *(uint64 *) (x + 0) + movq 0(%r8),%rcx + # in2 = *(uint64 *) (x + 8) + movq 8(%r8),%r9 + # in4 = *(uint64 *) (x + 16) + movq 16(%r8),%rax + # in6 = *(uint64 *) (x + 24) + movq 24(%r8),%r10 + # in8 = *(uint64 *) (x + 32) + movq 32(%r8),%r11 + # in10 = *(uint64 *) (x + 40) + movq 40(%r8),%r12 + # in12 = *(uint64 *) (x + 48) + movq 48(%r8),%r13 + # in14 = *(uint64 *) (x + 56) + movq 56(%r8),%r14 + # j0 = in0 + movq %rcx,56(%rsp) + # j2 = in2 + movq %r9,64(%rsp) + # j4 = in4 + movq %rax,72(%rsp) + # j6 = in6 + movq %r10,80(%rsp) + # j8 = in8 + movq %r11,88(%rsp) + # j10 = in10 + movq %r12,96(%rsp) + # j12 = in12 + movq %r13,104(%rsp) + # j14 = in14 + movq %r14,112(%rsp) + # x_backup = x + movq %r8,120(%rsp) +# bytesatleast1: +._bytesatleast1: + # unsigned>= 32 + shr $32,%rdi + # x3 = j2 + movq 64(%rsp),%rsi + # x2 = x3 + mov %rsi,%rcx + # (uint64) x3 >>= 32 + shr $32,%rsi + # x5 = j4 + movq 72(%rsp),%r8 + # x4 = x5 + mov %r8,%r9 + # (uint64) x5 >>= 32 + shr $32,%r8 + # x5_stack = x5 + movq %r8,160(%rsp) + # x7 = j6 + movq 80(%rsp),%r8 + # x6 = x7 + mov %r8,%rax + # (uint64) x7 >>= 32 + shr $32,%r8 + # x9 = j8 + movq 88(%rsp),%r10 + # x8 = x9 + mov %r10,%r11 + # (uint64) x9 >>= 32 + shr $32,%r10 + # x11 = j10 + movq 96(%rsp),%r12 + # x10 = x11 + mov %r12,%r13 + # x10_stack = x10 + movq %r13,168(%rsp) + # (uint64) x11 >>= 32 + shr $32,%r12 + # x13 = j12 + movq 104(%rsp),%r13 + # x12 = x13 + mov %r13,%r14 + # (uint64) x13 >>= 32 + shr $32,%r13 + # x15 = j14 + movq 112(%rsp),%r15 + # x14 = x15 + mov %r15,%rbx + # (uint64) x15 >>= 32 + shr $32,%r15 + # x15_stack = x15 + movq %r15,176(%rsp) + # i = 20 + mov $20,%r15 +# mainloop: +._mainloop: + # i_backup = i + movq %r15,184(%rsp) + # x5 = x5_stack + movq 160(%rsp),%r15 + # a = x12 + x0 + lea (%r14,%rdx),%rbp + # (uint32) a <<<= 7 + rol $7,%ebp + # x4 ^= a + xor %rbp,%r9 + # b = x1 + x5 + lea (%rdi,%r15),%rbp + # (uint32) b <<<= 7 + rol $7,%ebp + # x9 ^= b + xor %rbp,%r10 + # a = x0 + x4 + lea (%rdx,%r9),%rbp + # (uint32) a <<<= 9 + rol $9,%ebp + # x8 ^= a + xor %rbp,%r11 + # b = x5 + x9 + lea (%r15,%r10),%rbp + # (uint32) b <<<= 9 + rol $9,%ebp + # x13 ^= b + xor %rbp,%r13 + # a = x4 + x8 + lea (%r9,%r11),%rbp + # (uint32) a <<<= 13 + rol $13,%ebp + # x12 ^= a + xor %rbp,%r14 + # b = x9 + x13 + lea (%r10,%r13),%rbp + # (uint32) b <<<= 13 + rol $13,%ebp + # x1 ^= b + xor %rbp,%rdi + # a = x8 + x12 + lea (%r11,%r14),%rbp + # (uint32) a <<<= 18 + rol $18,%ebp + # x0 ^= a + xor %rbp,%rdx + # b = x13 + x1 + lea (%r13,%rdi),%rbp + # (uint32) b <<<= 18 + rol $18,%ebp + # x5 ^= b + xor %rbp,%r15 + # x10 = x10_stack + movq 168(%rsp),%rbp + # x5_stack = x5 + movq %r15,160(%rsp) + # c = x6 + x10 + lea (%rax,%rbp),%r15 + # (uint32) c <<<= 7 + rol $7,%r15d + # x14 ^= c + xor %r15,%rbx + # c = x10 + x14 + lea (%rbp,%rbx),%r15 + # (uint32) c <<<= 9 + rol $9,%r15d + # x2 ^= c + xor %r15,%rcx + # c = x14 + x2 + lea (%rbx,%rcx),%r15 + # (uint32) c <<<= 13 + rol $13,%r15d + # x6 ^= c + xor %r15,%rax + # c = x2 + x6 + lea (%rcx,%rax),%r15 + # (uint32) c <<<= 18 + rol $18,%r15d + # x10 ^= c + xor %r15,%rbp + # x15 
= x15_stack + movq 176(%rsp),%r15 + # x10_stack = x10 + movq %rbp,168(%rsp) + # d = x11 + x15 + lea (%r12,%r15),%rbp + # (uint32) d <<<= 7 + rol $7,%ebp + # x3 ^= d + xor %rbp,%rsi + # d = x15 + x3 + lea (%r15,%rsi),%rbp + # (uint32) d <<<= 9 + rol $9,%ebp + # x7 ^= d + xor %rbp,%r8 + # d = x3 + x7 + lea (%rsi,%r8),%rbp + # (uint32) d <<<= 13 + rol $13,%ebp + # x11 ^= d + xor %rbp,%r12 + # d = x7 + x11 + lea (%r8,%r12),%rbp + # (uint32) d <<<= 18 + rol $18,%ebp + # x15 ^= d + xor %rbp,%r15 + # x15_stack = x15 + movq %r15,176(%rsp) + # x5 = x5_stack + movq 160(%rsp),%r15 + # a = x3 + x0 + lea (%rsi,%rdx),%rbp + # (uint32) a <<<= 7 + rol $7,%ebp + # x1 ^= a + xor %rbp,%rdi + # b = x4 + x5 + lea (%r9,%r15),%rbp + # (uint32) b <<<= 7 + rol $7,%ebp + # x6 ^= b + xor %rbp,%rax + # a = x0 + x1 + lea (%rdx,%rdi),%rbp + # (uint32) a <<<= 9 + rol $9,%ebp + # x2 ^= a + xor %rbp,%rcx + # b = x5 + x6 + lea (%r15,%rax),%rbp + # (uint32) b <<<= 9 + rol $9,%ebp + # x7 ^= b + xor %rbp,%r8 + # a = x1 + x2 + lea (%rdi,%rcx),%rbp + # (uint32) a <<<= 13 + rol $13,%ebp + # x3 ^= a + xor %rbp,%rsi + # b = x6 + x7 + lea (%rax,%r8),%rbp + # (uint32) b <<<= 13 + rol $13,%ebp + # x4 ^= b + xor %rbp,%r9 + # a = x2 + x3 + lea (%rcx,%rsi),%rbp + # (uint32) a <<<= 18 + rol $18,%ebp + # x0 ^= a + xor %rbp,%rdx + # b = x7 + x4 + lea (%r8,%r9),%rbp + # (uint32) b <<<= 18 + rol $18,%ebp + # x5 ^= b + xor %rbp,%r15 + # x10 = x10_stack + movq 168(%rsp),%rbp + # x5_stack = x5 + movq %r15,160(%rsp) + # c = x9 + x10 + lea (%r10,%rbp),%r15 + # (uint32) c <<<= 7 + rol $7,%r15d + # x11 ^= c + xor %r15,%r12 + # c = x10 + x11 + lea (%rbp,%r12),%r15 + # (uint32) c <<<= 9 + rol $9,%r15d + # x8 ^= c + xor %r15,%r11 + # c = x11 + x8 + lea (%r12,%r11),%r15 + # (uint32) c <<<= 13 + rol $13,%r15d + # x9 ^= c + xor %r15,%r10 + # c = x8 + x9 + lea (%r11,%r10),%r15 + # (uint32) c <<<= 18 + rol $18,%r15d + # x10 ^= c + xor %r15,%rbp + # x15 = x15_stack + movq 176(%rsp),%r15 + # x10_stack = x10 + movq %rbp,168(%rsp) + # d = x14 + x15 + lea (%rbx,%r15),%rbp + # (uint32) d <<<= 7 + rol $7,%ebp + # x12 ^= d + xor %rbp,%r14 + # d = x15 + x12 + lea (%r15,%r14),%rbp + # (uint32) d <<<= 9 + rol $9,%ebp + # x13 ^= d + xor %rbp,%r13 + # d = x12 + x13 + lea (%r14,%r13),%rbp + # (uint32) d <<<= 13 + rol $13,%ebp + # x14 ^= d + xor %rbp,%rbx + # d = x13 + x14 + lea (%r13,%rbx),%rbp + # (uint32) d <<<= 18 + rol $18,%ebp + # x15 ^= d + xor %rbp,%r15 + # x15_stack = x15 + movq %r15,176(%rsp) + # x5 = x5_stack + movq 160(%rsp),%r15 + # a = x12 + x0 + lea (%r14,%rdx),%rbp + # (uint32) a <<<= 7 + rol $7,%ebp + # x4 ^= a + xor %rbp,%r9 + # b = x1 + x5 + lea (%rdi,%r15),%rbp + # (uint32) b <<<= 7 + rol $7,%ebp + # x9 ^= b + xor %rbp,%r10 + # a = x0 + x4 + lea (%rdx,%r9),%rbp + # (uint32) a <<<= 9 + rol $9,%ebp + # x8 ^= a + xor %rbp,%r11 + # b = x5 + x9 + lea (%r15,%r10),%rbp + # (uint32) b <<<= 9 + rol $9,%ebp + # x13 ^= b + xor %rbp,%r13 + # a = x4 + x8 + lea (%r9,%r11),%rbp + # (uint32) a <<<= 13 + rol $13,%ebp + # x12 ^= a + xor %rbp,%r14 + # b = x9 + x13 + lea (%r10,%r13),%rbp + # (uint32) b <<<= 13 + rol $13,%ebp + # x1 ^= b + xor %rbp,%rdi + # a = x8 + x12 + lea (%r11,%r14),%rbp + # (uint32) a <<<= 18 + rol $18,%ebp + # x0 ^= a + xor %rbp,%rdx + # b = x13 + x1 + lea (%r13,%rdi),%rbp + # (uint32) b <<<= 18 + rol $18,%ebp + # x5 ^= b + xor %rbp,%r15 + # x10 = x10_stack + movq 168(%rsp),%rbp + # x5_stack = x5 + movq %r15,160(%rsp) + # c = x6 + x10 + lea (%rax,%rbp),%r15 + # (uint32) c <<<= 7 + rol $7,%r15d + # x14 ^= c + xor %r15,%rbx + # c = x10 + x14 + lea 
(%rbp,%rbx),%r15 + # (uint32) c <<<= 9 + rol $9,%r15d + # x2 ^= c + xor %r15,%rcx + # c = x14 + x2 + lea (%rbx,%rcx),%r15 + # (uint32) c <<<= 13 + rol $13,%r15d + # x6 ^= c + xor %r15,%rax + # c = x2 + x6 + lea (%rcx,%rax),%r15 + # (uint32) c <<<= 18 + rol $18,%r15d + # x10 ^= c + xor %r15,%rbp + # x15 = x15_stack + movq 176(%rsp),%r15 + # x10_stack = x10 + movq %rbp,168(%rsp) + # d = x11 + x15 + lea (%r12,%r15),%rbp + # (uint32) d <<<= 7 + rol $7,%ebp + # x3 ^= d + xor %rbp,%rsi + # d = x15 + x3 + lea (%r15,%rsi),%rbp + # (uint32) d <<<= 9 + rol $9,%ebp + # x7 ^= d + xor %rbp,%r8 + # d = x3 + x7 + lea (%rsi,%r8),%rbp + # (uint32) d <<<= 13 + rol $13,%ebp + # x11 ^= d + xor %rbp,%r12 + # d = x7 + x11 + lea (%r8,%r12),%rbp + # (uint32) d <<<= 18 + rol $18,%ebp + # x15 ^= d + xor %rbp,%r15 + # x15_stack = x15 + movq %r15,176(%rsp) + # x5 = x5_stack + movq 160(%rsp),%r15 + # a = x3 + x0 + lea (%rsi,%rdx),%rbp + # (uint32) a <<<= 7 + rol $7,%ebp + # x1 ^= a + xor %rbp,%rdi + # b = x4 + x5 + lea (%r9,%r15),%rbp + # (uint32) b <<<= 7 + rol $7,%ebp + # x6 ^= b + xor %rbp,%rax + # a = x0 + x1 + lea (%rdx,%rdi),%rbp + # (uint32) a <<<= 9 + rol $9,%ebp + # x2 ^= a + xor %rbp,%rcx + # b = x5 + x6 + lea (%r15,%rax),%rbp + # (uint32) b <<<= 9 + rol $9,%ebp + # x7 ^= b + xor %rbp,%r8 + # a = x1 + x2 + lea (%rdi,%rcx),%rbp + # (uint32) a <<<= 13 + rol $13,%ebp + # x3 ^= a + xor %rbp,%rsi + # b = x6 + x7 + lea (%rax,%r8),%rbp + # (uint32) b <<<= 13 + rol $13,%ebp + # x4 ^= b + xor %rbp,%r9 + # a = x2 + x3 + lea (%rcx,%rsi),%rbp + # (uint32) a <<<= 18 + rol $18,%ebp + # x0 ^= a + xor %rbp,%rdx + # b = x7 + x4 + lea (%r8,%r9),%rbp + # (uint32) b <<<= 18 + rol $18,%ebp + # x5 ^= b + xor %rbp,%r15 + # x10 = x10_stack + movq 168(%rsp),%rbp + # x5_stack = x5 + movq %r15,160(%rsp) + # c = x9 + x10 + lea (%r10,%rbp),%r15 + # (uint32) c <<<= 7 + rol $7,%r15d + # x11 ^= c + xor %r15,%r12 + # c = x10 + x11 + lea (%rbp,%r12),%r15 + # (uint32) c <<<= 9 + rol $9,%r15d + # x8 ^= c + xor %r15,%r11 + # c = x11 + x8 + lea (%r12,%r11),%r15 + # (uint32) c <<<= 13 + rol $13,%r15d + # x9 ^= c + xor %r15,%r10 + # c = x8 + x9 + lea (%r11,%r10),%r15 + # (uint32) c <<<= 18 + rol $18,%r15d + # x10 ^= c + xor %r15,%rbp + # x15 = x15_stack + movq 176(%rsp),%r15 + # x10_stack = x10 + movq %rbp,168(%rsp) + # d = x14 + x15 + lea (%rbx,%r15),%rbp + # (uint32) d <<<= 7 + rol $7,%ebp + # x12 ^= d + xor %rbp,%r14 + # d = x15 + x12 + lea (%r15,%r14),%rbp + # (uint32) d <<<= 9 + rol $9,%ebp + # x13 ^= d + xor %rbp,%r13 + # d = x12 + x13 + lea (%r14,%r13),%rbp + # (uint32) d <<<= 13 + rol $13,%ebp + # x14 ^= d + xor %rbp,%rbx + # d = x13 + x14 + lea (%r13,%rbx),%rbp + # (uint32) d <<<= 18 + rol $18,%ebp + # x15 ^= d + xor %rbp,%r15 + # x15_stack = x15 + movq %r15,176(%rsp) + # i = i_backup + movq 184(%rsp),%r15 + # unsigned>? 
i -= 4 + sub $4,%r15 + # comment:fp stack unchanged by jump + # goto mainloop if unsigned> + ja ._mainloop + # (uint32) x2 += j2 + addl 64(%rsp),%ecx + # x3 <<= 32 + shl $32,%rsi + # x3 += j2 + addq 64(%rsp),%rsi + # (uint64) x3 >>= 32 + shr $32,%rsi + # x3 <<= 32 + shl $32,%rsi + # x2 += x3 + add %rsi,%rcx + # (uint32) x6 += j6 + addl 80(%rsp),%eax + # x7 <<= 32 + shl $32,%r8 + # x7 += j6 + addq 80(%rsp),%r8 + # (uint64) x7 >>= 32 + shr $32,%r8 + # x7 <<= 32 + shl $32,%r8 + # x6 += x7 + add %r8,%rax + # (uint32) x8 += j8 + addl 88(%rsp),%r11d + # x9 <<= 32 + shl $32,%r10 + # x9 += j8 + addq 88(%rsp),%r10 + # (uint64) x9 >>= 32 + shr $32,%r10 + # x9 <<= 32 + shl $32,%r10 + # x8 += x9 + add %r10,%r11 + # (uint32) x12 += j12 + addl 104(%rsp),%r14d + # x13 <<= 32 + shl $32,%r13 + # x13 += j12 + addq 104(%rsp),%r13 + # (uint64) x13 >>= 32 + shr $32,%r13 + # x13 <<= 32 + shl $32,%r13 + # x12 += x13 + add %r13,%r14 + # (uint32) x0 += j0 + addl 56(%rsp),%edx + # x1 <<= 32 + shl $32,%rdi + # x1 += j0 + addq 56(%rsp),%rdi + # (uint64) x1 >>= 32 + shr $32,%rdi + # x1 <<= 32 + shl $32,%rdi + # x0 += x1 + add %rdi,%rdx + # x5 = x5_stack + movq 160(%rsp),%rdi + # (uint32) x4 += j4 + addl 72(%rsp),%r9d + # x5 <<= 32 + shl $32,%rdi + # x5 += j4 + addq 72(%rsp),%rdi + # (uint64) x5 >>= 32 + shr $32,%rdi + # x5 <<= 32 + shl $32,%rdi + # x4 += x5 + add %rdi,%r9 + # x10 = x10_stack + movq 168(%rsp),%r8 + # (uint32) x10 += j10 + addl 96(%rsp),%r8d + # x11 <<= 32 + shl $32,%r12 + # x11 += j10 + addq 96(%rsp),%r12 + # (uint64) x11 >>= 32 + shr $32,%r12 + # x11 <<= 32 + shl $32,%r12 + # x10 += x11 + add %r12,%r8 + # x15 = x15_stack + movq 176(%rsp),%rdi + # (uint32) x14 += j14 + addl 112(%rsp),%ebx + # x15 <<= 32 + shl $32,%rdi + # x15 += j14 + addq 112(%rsp),%rdi + # (uint64) x15 >>= 32 + shr $32,%rdi + # x15 <<= 32 + shl $32,%rdi + # x14 += x15 + add %rdi,%rbx + # out = out_backup + movq 136(%rsp),%rdi + # m = m_backup + movq 144(%rsp),%rsi + # x0 ^= *(uint64 *) (m + 0) + xorq 0(%rsi),%rdx + # *(uint64 *) (out + 0) = x0 + movq %rdx,0(%rdi) + # x2 ^= *(uint64 *) (m + 8) + xorq 8(%rsi),%rcx + # *(uint64 *) (out + 8) = x2 + movq %rcx,8(%rdi) + # x4 ^= *(uint64 *) (m + 16) + xorq 16(%rsi),%r9 + # *(uint64 *) (out + 16) = x4 + movq %r9,16(%rdi) + # x6 ^= *(uint64 *) (m + 24) + xorq 24(%rsi),%rax + # *(uint64 *) (out + 24) = x6 + movq %rax,24(%rdi) + # x8 ^= *(uint64 *) (m + 32) + xorq 32(%rsi),%r11 + # *(uint64 *) (out + 32) = x8 + movq %r11,32(%rdi) + # x10 ^= *(uint64 *) (m + 40) + xorq 40(%rsi),%r8 + # *(uint64 *) (out + 40) = x10 + movq %r8,40(%rdi) + # x12 ^= *(uint64 *) (m + 48) + xorq 48(%rsi),%r14 + # *(uint64 *) (out + 48) = x12 + movq %r14,48(%rdi) + # x14 ^= *(uint64 *) (m + 56) + xorq 56(%rsi),%rbx + # *(uint64 *) (out + 56) = x14 + movq %rbx,56(%rdi) + # bytes = bytes_backup + movq 152(%rsp),%rdx + # in8 = j8 + movq 88(%rsp),%rcx + # in8 += 1 + add $1,%rcx + # j8 = in8 + movq %rcx,88(%rsp) + # unsigned>? 
unsigned + ja ._bytesatleast65 + # comment:fp stack unchanged by jump + # goto bytesatleast64 if !unsigned< + jae ._bytesatleast64 + # m = out + mov %rdi,%rsi + # out = ctarget + movq 128(%rsp),%rdi + # i = bytes + mov %rdx,%rcx + # while (i) { *out++ = *m++; --i } + rep movsb + # comment:fp stack unchanged by fallthrough +# bytesatleast64: +._bytesatleast64: + # x = x_backup + movq 120(%rsp),%rdi + # in8 = j8 + movq 88(%rsp),%rsi + # *(uint64 *) (x + 32) = in8 + movq %rsi,32(%rdi) + # r11 = r11_stack + movq 0(%rsp),%r11 + # r12 = r12_stack + movq 8(%rsp),%r12 + # r13 = r13_stack + movq 16(%rsp),%r13 + # r14 = r14_stack + movq 24(%rsp),%r14 + # r15 = r15_stack + movq 32(%rsp),%r15 + # rbx = rbx_stack + movq 40(%rsp),%rbx + # rbp = rbp_stack + movq 48(%rsp),%rbp + # comment:fp stack unchanged by fallthrough +# done: +._done: + # leave + add %r11,%rsp + mov %rdi,%rax + mov %rsi,%rdx + ret +# bytesatleast65: +._bytesatleast65: + # bytes -= 64 + sub $64,%rdx + # out += 64 + add $64,%rdi + # m += 64 + add $64,%rsi + # comment:fp stack unchanged by jump + # goto bytesatleast1 + jmp ._bytesatleast1 +# enter ECRYPT_keysetup +.text +.p2align 5 +.globl ECRYPT_keysetup +ECRYPT_keysetup: + mov %rsp,%r11 + and $31,%r11 + add $256,%r11 + sub %r11,%rsp + # k = arg2 + mov %rsi,%rsi + # kbits = arg3 + mov %rdx,%rdx + # x = arg1 + mov %rdi,%rdi + # in0 = *(uint64 *) (k + 0) + movq 0(%rsi),%r8 + # in2 = *(uint64 *) (k + 8) + movq 8(%rsi),%r9 + # *(uint64 *) (x + 4) = in0 + movq %r8,4(%rdi) + # *(uint64 *) (x + 12) = in2 + movq %r9,12(%rdi) + # unsigned + * - x86-64 version, renamed as salsa20-x86_64-asm_64.S + * available from * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free diff --git a/crypto/Kconfig b/crypto/Kconfig index 221356b..b0481f7 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -504,6 +504,21 @@ config CRYPTO_SALSA20_586 The Salsa20 stream cipher algorithm is designed by Daniel J. Bernstein . See +config CRYPTO_SALSA20_X86_64 + tristate "Salsa20 stream cipher algorithm (x86_64) (EXPERIMENTAL)" + depends on (X86 || UML_X86) && 64BIT + depends on EXPERIMENTAL + select CRYPTO_BLKCIPHER + select CRYPTO_SALSA20 + help + Salsa20 stream cipher algorithm. + + Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT + Stream Cipher Project. See + + The Salsa20 stream cipher algorithm is designed by Daniel J. + Bernstein . See + config CRYPTO_DEFLATE tristate "Deflate compression algorithm" select CRYPTO_ALGAPI -- cgit v1.1 From d1cda4e39638e906f022c8ce56f566b617e1f77e Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Tue, 18 Dec 2007 00:08:27 +0800 Subject: [CRYPTO] tcrypt: Add select of AEAD ERROR: "crypto_aead_setauthsize" [crypto/tcrypt.ko] undefined! ERROR: "crypto_alloc_aead" [crypto/tcrypt.ko] undefined! Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/crypto/Kconfig b/crypto/Kconfig index b0481f7..93ad53a 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -568,6 +568,7 @@ config CRYPTO_TEST tristate "Testing module" depends on m select CRYPTO_ALGAPI + select CRYPTO_AEAD help Quick & dirty crypto test module. 
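The two undefined symbols are AEAD-core entry points that tcrypt's test_aead() path links against. A minimal sketch of that kind of usage — the "gcm(aes)" name and the wrapper function are illustrative, not lifted from tcrypt itself:

#include <linux/crypto.h>
#include <linux/err.h>

static int aead_link_demo(void)
{
	struct crypto_aead *tfm;
	int err;

	/* undefined at module link time unless CRYPTO_AEAD is built */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* the second missing symbol */
	err = crypto_aead_setauthsize(tfm, 16);

	crypto_free_aead(tfm);
	return err;
}

With CRYPTO_TEST=m and CRYPTO_AEAD unset, the module link fails with exactly the two errors quoted above; the added select makes the dependency explicit.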
-- cgit v1.1 From 214dc54f6f1b71bcd895ef272c108c67ef046200 Mon Sep 17 00:00:00 2001 From: Tan Swee Heng Date: Tue, 18 Dec 2007 22:45:35 +0800 Subject: [CRYPTO] salsa20-asm: Remove unnecessary dependency on CRYPTO_SALSA20 Signed-off-by: Tan Swee Heng Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 -- 1 file changed, 2 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 93ad53a..c3166a1 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -494,7 +494,6 @@ config CRYPTO_SALSA20_586 depends on (X86 || UML_X86) && !64BIT depends on EXPERIMENTAL select CRYPTO_BLKCIPHER - select CRYPTO_SALSA20 help Salsa20 stream cipher algorithm. @@ -509,7 +508,6 @@ config CRYPTO_SALSA20_X86_64 depends on (X86 || UML_X86) && 64BIT depends on EXPERIMENTAL select CRYPTO_BLKCIPHER - select CRYPTO_SALSA20 help Salsa20 stream cipher algorithm. -- cgit v1.1 From 1c5dfe6a959b79215c0f73d793169a7d5755900e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 24 Dec 2007 08:54:24 +0800 Subject: [CRYPTO] api: Include sched.h for cond_resched in scatterwalk.h As Andrew Morton correctly points out, we need to explicitly include sched.h as we use the function cond_resched in crypto/scatterwalk.h. Signed-off-by: Herbert Xu --- include/crypto/scatterwalk.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index bd62431..224658b 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -23,6 +23,7 @@ #include #include #include +#include static inline enum km_type crypto_kmap_type(int out) { -- cgit v1.1 From 866cd902e864e9d0e31299efa9d61fc9a9bec315 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 27 Dec 2007 00:04:44 +1100 Subject: [CRYPTO] padlock: Only reset the key once for each CBC and ECB operation Currently we reset the key for each segment fed to the xcrypt instructions. This patch optimises this for CBC and ECB so that we only do this once for each encrypt/decrypt operation. Signed-off-by: Herbert Xu --- drivers/crypto/padlock-aes.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index c33334a..2f3ad3f 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -417,6 +417,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, /* ====== Encryption/decryption routines ====== */ /* These are the real call to PadLock. */ +static inline void padlock_reset_key(void) +{ + asm volatile ("pushfl; popfl"); +} + static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, void *control_word) { @@ -437,8 +442,6 @@ static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword) static inline void aes_crypt(const u8 *in, u8 *out, u32 *key, struct cword *cword) { - asm volatile ("pushfl; popfl"); - /* padlock_xcrypt requires at least two blocks of data. */ if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) & (PAGE_SIZE - 1)))) { @@ -457,7 +460,6 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, return; } - asm volatile ("pushfl; popfl"); /* enforce key reload. */ asm volatile ("test $1, %%cl;" "je 1f;" "lea -1(%%ecx), %%eax;" @@ -474,8 +476,6 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, u8 *iv, void *control_word, u32 count) { - /* Enforce key reload. 
*/ - asm volatile ("pushfl; popfl"); /* rep xcryptcbc */ asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" : "+S" (input), "+D" (output), "+a" (iv) @@ -486,12 +486,14 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct aes_ctx *ctx = aes_ctx(tfm); + padlock_reset_key(); aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); } static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct aes_ctx *ctx = aes_ctx(tfm); + padlock_reset_key(); aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); } @@ -524,6 +526,8 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; int err; + padlock_reset_key(); + blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); @@ -546,6 +550,8 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; int err; + padlock_reset_key(); + blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); @@ -590,6 +596,8 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; int err; + padlock_reset_key(); + blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); @@ -614,6 +622,8 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, struct blkcipher_walk walk; int err; + padlock_reset_key(); + blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); -- cgit v1.1 From 2a999a3abb2d3885741b09b9d05280db7e757544 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 30 Dec 2007 20:24:11 +1100 Subject: [CRYPTO] tcrypt: Zero axbuf in the right function The axbuf buffer is used by test_aead and therefore should be zeroed there instead of in test_hash. Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 2b52df7..72073ef 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -172,7 +172,6 @@ static void test_hash(char *algo, struct hash_testvec *template, /* setup the dummy buffer first */ memset(xbuf, 0, XBUFSIZE); - memset(axbuf, 0, XBUFSIZE); j = 0; for (i = 0; i < tcount; i++) { @@ -350,6 +349,7 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e); memset(xbuf, 0, XBUFSIZE); + memset(axbuf, 0, XBUFSIZE); for (i = 0, j = 0; i < tcount; i++) { if (aead_tv[i].np) { -- cgit v1.1 From 1b87887d6c232becba77835b29a424cf78442b7d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 1 Jan 2008 15:44:50 +1100 Subject: [CRYPTO] xcbc: Fix algorithm leak when block size check fails When the underlying algorithm has a block size other than 16 we abort without freeing it. In fact, we try to return the algorithm itself as an error! This patch plugs the leak and makes it return -EINVAL instead. 
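To spell the bug out: at that point alg is a valid pointer — crypto_get_attr_alg() has already succeeded — so ERR_PTR(PTR_ERR(alg)) merely reinterprets the pointer value as an "error" and the module reference taken on the algorithm is never dropped. A condensed sketch of the corrected control flow (attribute checking and instance setup elided):

static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_PTR(PTR_ERR(alg));	/* here alg really is an error */

	switch (alg->cra_blocksize) {
	case 16:
		break;
	default:
		inst = ERR_PTR(-EINVAL);	/* was: return ERR_PTR(PTR_ERR(alg)) */
		goto out_put_alg;
	}

	inst = crypto_alloc_instance("xcbc", alg);
	/* ... instance setup elided ... */

out_put_alg:
	crypto_mod_put(alg);		/* release the reference in both paths */
	return inst;
}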
Signed-off-by: Herbert Xu --- crypto/xcbc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 789cdee..e3d9503 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -307,7 +307,8 @@ static struct crypto_instance *xcbc_alloc(struct rtattr **tb) case 16: break; default: - return ERR_PTR(PTR_ERR(alg)); + inst = ERR_PTR(-EINVAL); + goto out_put_alg; } inst = crypto_alloc_instance("xcbc", alg); -- cgit v1.1 From 94765b9e4cba8e3c51c292338db16aa174894d30 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 1 Jan 2008 15:49:17 +1100 Subject: [CRYPTO] xcbc: Remove bogus hash/cipher test When setting the digest size, xcbc tests whether the underlying algorithm is a hash. This is silly because we don't allow it to be a hash and we've specifically requested a cipher. This patch removes the bogus test. Signed-off-by: Herbert Xu --- crypto/xcbc.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crypto/xcbc.c b/crypto/xcbc.c index e3d9503..a82959d 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -321,10 +321,7 @@ static struct crypto_instance *xcbc_alloc(struct rtattr **tb) inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_type = &crypto_hash_type; - inst->alg.cra_hash.digestsize = - (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize : - alg->cra_blocksize; + inst->alg.cra_hash.digestsize = alg->cra_blocksize; inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) + ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *)); inst->alg.cra_init = xcbc_init_tfm; -- cgit v1.1 From 38ed9ab23b8614c9c1553b2961ef2627f3088fd9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 1 Jan 2008 15:59:28 +1100 Subject: [CRYPTO] tcrypt: Make xcbc available as a standalone test Currently the xcbc(aes) tests have to be taken together with all other algorithms. This patch makes it available by itself at number 106. Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 72073ef..1ab8c01 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1608,11 +1608,17 @@ static void do_test(void) test_hash("hmac(sha512)", hmac_sha512_tv_template, HMAC_SHA512_TEST_VECTORS); break; + case 105: test_hash("hmac(sha224)", hmac_sha224_tv_template, HMAC_SHA224_TEST_VECTORS); break; + case 106: + test_hash("xcbc(aes)", aes_xcbc128_tv_template, + XCBC_AES_TEST_VECTORS); + break; + case 200: test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, aes_speed_template); -- cgit v1.1 From 6eb7228421c01ba48a6a88a7a5b3e71cfb70d4a9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 8 Jan 2008 17:16:44 +1100 Subject: [CRYPTO] api: Set default CRYPTO_MINALIGN to unsigned long long Thanks to David Miller for pointing out that the SLAB (or SLOB/SLUB) cache uses the alignment of unsigned long long if the architecture kmalloc/slab alignment macros are not defined. This patch changes CRYPTO_MINALIGN so that it uses the same default value.
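The practical consumer of this macro is the context area at the tail of struct crypto_tfm, which carries CRYPTO_MINALIGN_ATTR; a reduced sketch with the remaining fields elided:

#define CRYPTO_MINALIGN __alignof__(unsigned long long)
#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

struct crypto_tfm {
	u32 crt_flags;
	/* ... ops union and algorithm pointer elided ... */
	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

With the new default, an algorithm may keep u64 or long long fields in its tfm context even on 32-bit architectures that define neither ARCH_KMALLOC_MINALIGN nor ARCH_SLAB_MINALIGN, matching what kmalloc() itself guarantees there.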
Signed-off-by: Herbert Xu --- include/linux/crypto.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 0aba104..5e02d1b 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -90,13 +90,11 @@ #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN #elif defined(ARCH_SLAB_MINALIGN) #define CRYPTO_MINALIGN ARCH_SLAB_MINALIGN +#else +#define CRYPTO_MINALIGN __alignof__(unsigned long long) #endif -#ifdef CRYPTO_MINALIGN #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN))) -#else -#define CRYPTO_MINALIGN_ATTR -#endif struct scatterlist; struct crypto_ablkcipher; -- cgit v1.1 From e6ccc727f30a02670f6a00df6d548942bc988f43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= Date: Tue, 8 Jan 2008 17:20:31 +1100 Subject: [CRYPTO] cast6: inline bloat-- MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bloat-o-meter shows rather high readings for cast6... crypto/cast6.c: cast6_setkey | -1310 cast6_encrypt | -4567 cast6_decrypt | -4561 3 functions changed, 10438 bytes removed, diff: -10438 crypto/cast6.c: W | +659 Q | +308 QBAR | +316 3 functions changed, 1283 bytes added, diff: +1283 crypto/cast6.o: 6 functions changed, 1283 bytes added, 10438 bytes removed, diff: -9155 Signed-off-by: Ilpo Järvinen Signed-off-by: Herbert Xu --- crypto/cast6.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto/cast6.c b/crypto/cast6.c index 136ab6d..5fd9420 100644 --- a/crypto/cast6.c +++ b/crypto/cast6.c @@ -369,7 +369,7 @@ static const u8 Tr[4][8] = { }; /* forward octave */ -static inline void W(u32 *key, unsigned int i) { +static void W(u32 *key, unsigned int i) { u32 I; key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]); key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]); @@ -428,7 +428,7 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key, } /*forward quad round*/ -static inline void Q (u32 * block, u8 * Kr, u32 * Km) { +static void Q (u32 * block, u8 * Kr, u32 * Km) { u32 I; block[2] ^= F1(block[3], Kr[0], Km[0]); block[1] ^= F2(block[2], Kr[1], Km[1]); @@ -437,7 +437,7 @@ static inline void Q (u32 * block, u8 * Kr, u32 * Km) { } /*reverse quad round*/ -static inline void QBAR (u32 * block, u8 * Kr, u32 * Km) { +static void QBAR (u32 * block, u8 * Kr, u32 * Km) { u32 I; block[3] ^= F1(block[0], Kr[3], Km[3]); block[0] ^= F3(block[1], Kr[2], Km[2]); -- cgit v1.1 From b966b54654598aebdac9c57f102d769b36d2f68f Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 8 Jan 2008 21:36:34 +1100 Subject: [CRYPTO] hifn_795x: Fixup container_of() usage Signed-off-by: Alexey Dobriyan Signed-off-by: Herbert Xu --- drivers/crypto/hifn_795x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 1a19700..16413e5 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -664,7 +664,7 @@ struct hifn_context atomic_t sg_num; }; -#define crypto_alg_to_hifn(alg) container_of(alg, struct hifn_crypto_alg, alg) +#define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg) static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg) { -- cgit v1.1 From 15e7b4452b72ae890f2fcb027b4c4fa63a1c9a7a Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Mon, 14 Jan 2008 17:07:57 +1100 Subject: [CRYPTO] twofish: Merge common glue code There is almost no difference between 32 & 64 bit glue code. 
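(As the diffs below show, the two deleted files were identical apart from the header comments, the cra_driver_name strings — "twofish-i586" versus "twofish-x86_64" — the MODULE_DESCRIPTION text, and one extra #include in the 64-bit copy; the merged twofish_glue.c registers under the single driver name "twofish-asm" and adds a matching MODULE_ALIAS.)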
Signed-off-by: Sebastian Siewior Signed-off-by: Herbert Xu --- arch/x86/crypto/Makefile | 4 +- arch/x86/crypto/twofish_32.c | 97 ------------------------------------------ arch/x86/crypto/twofish_64.c | 97 ------------------------------------------ arch/x86/crypto/twofish_glue.c | 97 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 99 insertions(+), 196 deletions(-) delete mode 100644 arch/x86/crypto/twofish_32.c delete mode 100644 arch/x86/crypto/twofish_64.c create mode 100644 arch/x86/crypto/twofish_glue.c diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 09200e1..3874c2d 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -11,9 +11,9 @@ obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o aes-i586-y := aes-i586-asm_32.o aes_glue.o -twofish-i586-y := twofish-i586-asm_32.o twofish_32.o +twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o -twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o +twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o diff --git a/arch/x86/crypto/twofish_32.c b/arch/x86/crypto/twofish_32.c deleted file mode 100644 index e3004df..0000000 --- a/arch/x86/crypto/twofish_32.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Glue Code for optimized 586 assembler version of TWOFISH - * - * Originally Twofish for GPG - * By Matthew Skala , July 26, 1998 - * 256-bit key length added March 20, 1999 - * Some modifications to reduce the text size by Werner Koch, April, 1998 - * Ported to the kerneli patch by Marc Mutz - * Ported to CryptoAPI by Colin Slater - * - * The original author has disclaimed all copyright interest in this - * code and thus put it in the public domain. The subsequent authors - * have put this under the GNU General Public License. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA - * - * This code is a "clean room" implementation, written from the paper - * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, - * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available - * through http://www.counterpane.com/twofish.html - * - * For background information on multiplication in finite fields, used for - * the matrix operations in the key schedule, see the book _Contemporary - * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the - * Third Edition. 
- */ - -#include -#include -#include -#include -#include - - -asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); -asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); - -static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - twofish_enc_blk(tfm, dst, src); -} - -static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - twofish_dec_blk(tfm, dst, src); -} - -static struct crypto_alg alg = { - .cra_name = "twofish", - .cra_driver_name = "twofish-i586", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = TF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct twofish_ctx), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_u = { - .cipher = { - .cia_min_keysize = TF_MIN_KEY_SIZE, - .cia_max_keysize = TF_MAX_KEY_SIZE, - .cia_setkey = twofish_setkey, - .cia_encrypt = twofish_encrypt, - .cia_decrypt = twofish_decrypt - } - } -}; - -static int __init init(void) -{ - return crypto_register_alg(&alg); -} - -static void __exit fini(void) -{ - crypto_unregister_alg(&alg); -} - -module_init(init); -module_exit(fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION ("Twofish Cipher Algorithm, i586 asm optimized"); -MODULE_ALIAS("twofish"); diff --git a/arch/x86/crypto/twofish_64.c b/arch/x86/crypto/twofish_64.c deleted file mode 100644 index 182d91d..0000000 --- a/arch/x86/crypto/twofish_64.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Glue Code for optimized x86_64 assembler version of TWOFISH - * - * Originally Twofish for GPG - * By Matthew Skala , July 26, 1998 - * 256-bit key length added March 20, 1999 - * Some modifications to reduce the text size by Werner Koch, April, 1998 - * Ported to the kerneli patch by Marc Mutz - * Ported to CryptoAPI by Colin Slater - * - * The original author has disclaimed all copyright interest in this - * code and thus put it in the public domain. The subsequent authors - * have put this under the GNU General Public License. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA - * - * This code is a "clean room" implementation, written from the paper - * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, - * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available - * through http://www.counterpane.com/twofish.html - * - * For background information on multiplication in finite fields, used for - * the matrix operations in the key schedule, see the book _Contemporary - * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the - * Third Edition. 
- */ - -#include -#include -#include -#include -#include -#include - -asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); -asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); - -static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - twofish_enc_blk(tfm, dst, src); -} - -static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) -{ - twofish_dec_blk(tfm, dst, src); -} - -static struct crypto_alg alg = { - .cra_name = "twofish", - .cra_driver_name = "twofish-x86_64", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = TF_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct twofish_ctx), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_u = { - .cipher = { - .cia_min_keysize = TF_MIN_KEY_SIZE, - .cia_max_keysize = TF_MAX_KEY_SIZE, - .cia_setkey = twofish_setkey, - .cia_encrypt = twofish_encrypt, - .cia_decrypt = twofish_decrypt - } - } -}; - -static int __init init(void) -{ - return crypto_register_alg(&alg); -} - -static void __exit fini(void) -{ - crypto_unregister_alg(&alg); -} - -module_init(init); -module_exit(fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION ("Twofish Cipher Algorithm, x86_64 asm optimized"); -MODULE_ALIAS("twofish"); diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c new file mode 100644 index 0000000..cefaf8b --- /dev/null +++ b/arch/x86/crypto/twofish_glue.c @@ -0,0 +1,97 @@ +/* + * Glue Code for assembler optimized version of TWOFISH + * + * Originally Twofish for GPG + * By Matthew Skala , July 26, 1998 + * 256-bit key length added March 20, 1999 + * Some modifications to reduce the text size by Werner Koch, April, 1998 + * Ported to the kerneli patch by Marc Mutz + * Ported to CryptoAPI by Colin Slater + * + * The original author has disclaimed all copyright interest in this + * code and thus put it in the public domain. The subsequent authors + * have put this under the GNU General Public License. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + * This code is a "clean room" implementation, written from the paper + * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, + * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available + * through http://www.counterpane.com/twofish.html + * + * For background information on multiplication in finite fields, used for + * the matrix operations in the key schedule, see the book _Contemporary + * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the + * Third Edition. 
+ */ + +#include +#include +#include +#include +#include + +asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + +static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + twofish_enc_blk(tfm, dst, src); +} + +static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + twofish_dec_blk(tfm, dst, src); +} + +static struct crypto_alg alg = { + .cra_name = "twofish", + .cra_driver_name = "twofish-asm", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = TF_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct twofish_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = TF_MIN_KEY_SIZE, + .cia_max_keysize = TF_MAX_KEY_SIZE, + .cia_setkey = twofish_setkey, + .cia_encrypt = twofish_encrypt, + .cia_decrypt = twofish_decrypt + } + } +}; + +static int __init init(void) +{ + return crypto_register_alg(&alg); +} + +static void __exit fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(init); +module_exit(fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized"); +MODULE_ALIAS("twofish"); +MODULE_ALIAS("twofish-asm"); -- cgit v1.1
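For completeness, a minimal sketch of how a caller of the single-block cipher API of this era would pick the merged module up; the wrapper function is illustrative and error handling is trimmed:

#include <linux/crypto.h>
#include <linux/err.h>

static int twofish_one_block(const u8 *key, unsigned int keylen,
			     u8 *dst, const u8 *src)
{
	/* "twofish" resolves to twofish-asm where the module is available:
	 * both implementations register the same cra_name, and the asm
	 * glue's cra_priority of 200 outranks the generic C version. */
	struct crypto_cipher *tfm = crypto_alloc_cipher("twofish", 0, 0);
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err)
		crypto_cipher_encrypt_one(tfm, dst, src);	/* one TF_BLOCK_SIZE block */

	crypto_free_cipher(tfm);
	return err;
}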