author		Herbert Xu <herbert@gondor.apana.org.au>	2006-08-21 21:38:42 +1000
committer	Herbert Xu <herbert@gondor.apana.org.au>	2006-09-21 11:44:35 +1000
commit		28ce728a90cce3a0c6c0ed00354299de52db94b1 (patch)
tree		d5f8c799cb949a5afdfb9db4fb9c4c749820c35e /drivers/crypto
parent		db131ef9084110d9e82549c0a627e157e8bb99d7 (diff)
[CRYPTO] padlock: Added block cipher versions of CBC/ECB
This patch adds block cipher algorithms for cbc(aes) and ecb(aes) for
the PadLock device. Once all users of the old cipher type have been
converted, the old cbc/ecb PadLock operations will be removed.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
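
For context, a kernel-side consumer would reach these algorithms through the
synchronous blkcipher interface of this era. The sketch below is illustrative
only and is not part of the patch: the function name example_cbc_aes and the
key/IV/buffer values are hypothetical placeholders. Because the driver
registers "cbc(aes)" at PADLOCK_COMPOSITE_PRIORITY (400), a plain lookup by
algorithm name should resolve to cbc-aes-padlock whenever the module is
loaded, with no caller changes needed.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Illustrative sketch only; not part of this patch. */
static int example_cbc_aes(void)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	u8 key[16] = { 0 };	/* hypothetical 128-bit key */
	u8 iv[16] = { 0 };	/* hypothetical IV */
	u8 buf[64] = { 0 };	/* must be a multiple of AES_BLOCK_SIZE */
	int ret;

	/* Lookup by name; the PadLock implementation wins on priority
	 * when available, otherwise the generic cbc(aes) is used. */
	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_blkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out;

	crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));

	desc.tfm = tfm;
	desc.flags = 0;

	/* In-place encryption over a single scatterlist entry. */
	sg_init_one(&sg, buf, sizeof(buf));
	ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));

out:
	crypto_free_blkcipher(tfm);
	return ret;
}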
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/Kconfig		1
-rw-r--r--	drivers/crypto/padlock-aes.c	174
-rw-r--r--	drivers/crypto/padlock.h	1
3 files changed, 169 insertions, 7 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 86c99cd..adb5541 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -27,6 +27,7 @@ config CRYPTO_DEV_PADLOCK
 config CRYPTO_DEV_PADLOCK_AES
 	tristate "PadLock driver for AES algorithm"
 	depends on CRYPTO_DEV_PADLOCK
+	select CRYPTO_BLKCIPHER
 	default m
 	help
 	  Use VIA PadLock for AES algorithm.
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 3e68370..f53301e 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -43,11 +43,11 @@
  * ---------------------------------------------------------------------------
  */
 
+#include <crypto/algapi.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <asm/byteorder.h>
@@ -297,9 +297,9 @@ aes_hw_extkey_available(uint8_t key_len)
 	return 0;
 }
 
-static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+static inline struct aes_ctx *aes_ctx_common(void *ctx)
 {
-	unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
+	unsigned long addr = (unsigned long)ctx;
 	unsigned long align = PADLOCK_ALIGNMENT;
 
 	if (align <= crypto_tfm_ctx_alignment())
@@ -307,6 +307,16 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
 	return (struct aes_ctx *)ALIGN(addr, align);
 }
 
+static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+{
+	return aes_ctx_common(crypto_tfm_ctx(tfm));
+}
+
+static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+{
+	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+}
+
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 		       unsigned int key_len)
 {
@@ -507,6 +517,141 @@ static struct crypto_alg aes_alg = {
 	}
 };
 
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->E, &ctx->cword.encrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->D, &ctx->cword.decrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static struct crypto_alg ecb_aes_alg = {
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "ecb-aes-padlock",
+	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct aes_ctx),
+	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(ecb_aes_alg.cra_list),
+	.cra_u			= {
+		.blkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= aes_set_key,
+			.encrypt	= ecb_aes_encrypt,
+			.decrypt	= ecb_aes_decrypt,
+		}
+	}
+};
+
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
+					    walk.dst.virt.addr, ctx->E,
+					    walk.iv, &ctx->cword.encrypt,
+					    nbytes / AES_BLOCK_SIZE);
+		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->D, walk.iv, &ctx->cword.decrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static struct crypto_alg cbc_aes_alg = {
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "cbc-aes-padlock",
+	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct aes_ctx),
+	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(cbc_aes_alg.cra_list),
+	.cra_u			= {
+		.blkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.setkey		= aes_set_key,
+			.encrypt	= cbc_aes_encrypt,
+			.decrypt	= cbc_aes_decrypt,
+		}
+	}
+};
+
 static int __init padlock_init(void)
 {
 	int ret;
@@ -522,18 +667,33 @@ static int __init padlock_init(void)
 	}
 
 	gen_tabs();
-	if ((ret = crypto_register_alg(&aes_alg))) {
-		printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
-		return ret;
-	}
+	if ((ret = crypto_register_alg(&aes_alg)))
+		goto aes_err;
+
+	if ((ret = crypto_register_alg(&ecb_aes_alg)))
+		goto ecb_aes_err;
+
+	if ((ret = crypto_register_alg(&cbc_aes_alg)))
+		goto cbc_aes_err;
 
 	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
+out:
 	return ret;
+
+cbc_aes_err:
+	crypto_unregister_alg(&ecb_aes_alg);
+ecb_aes_err:
+	crypto_unregister_alg(&aes_alg);
+aes_err:
+	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
+	goto out;
 }
 
 static void __exit padlock_fini(void)
 {
+	crypto_unregister_alg(&cbc_aes_alg);
+	crypto_unregister_alg(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);
 }
diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h
index 7e3385b..b728e45 100644
--- a/drivers/crypto/padlock.h
+++ b/drivers/crypto/padlock.h
@@ -18,5 +18,6 @@
 #define PFX	"padlock: "
 
 #define PADLOCK_CRA_PRIORITY	300
+#define PADLOCK_COMPOSITE_PRIORITY	400
 
 #endif	/* _CRYPTO_PADLOCK_H */