From d4a7dd8e637b322faaa934ffcd6dd07711af831f Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Fri, 28 Dec 2007 11:05:46 +1100
Subject: [CRYPTO] padlock: Fix spurious ECB page fault

The xcryptecb instruction always processes an even number of blocks, so
we need to ensure the existence of an extra block if we have to process
an odd number of blocks.

Signed-off-by: Herbert Xu
---
 drivers/crypto/padlock-aes.c | 53 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 49 insertions(+), 4 deletions(-)

(limited to 'drivers/crypto/padlock-aes.c')

diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index abbcff0..a337b69 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -419,13 +419,58 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 /* ====== Encryption/decryption routines ====== */
 
 /* These are the real call to PadLock. */
+static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
+                                  void *control_word)
+{
+        asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
+                      : "+S"(input), "+D"(output)
+                      : "d"(control_word), "b"(key), "c"(1));
+}
+
+static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+{
+        u8 tmp[AES_BLOCK_SIZE * 2]
+                __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
+
+        memcpy(tmp, in, AES_BLOCK_SIZE);
+        padlock_xcrypt(tmp, out, key, cword);
+}
+
+static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
+                             struct cword *cword)
+{
+        asm volatile ("pushfl; popfl");
+
+        /* padlock_xcrypt requires at least two blocks of data. */
+        if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
+                       (PAGE_SIZE - 1)))) {
+                aes_crypt_copy(in, out, key, cword);
+                return;
+        }
+
+        padlock_xcrypt(in, out, key, cword);
+}
+
 static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                                       void *control_word, u32 count)
 {
+        if (count == 1) {
+                aes_crypt(input, output, key, control_word);
+                return;
+        }
+
         asm volatile ("pushfl; popfl");         /* enforce key reload. */
-        asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
+        asm volatile ("test $1, %%cl;"
+                      "je 1f;"
+                      "lea -1(%%ecx), %%eax;"
+                      "mov $1, %%ecx;"
+                      ".byte 0xf3,0x0f,0xa7,0xc8;"      /* rep xcryptecb */
+                      "mov %%eax, %%ecx;"
+                      "1:"
+                      ".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                       : "+S"(input), "+D"(output)
-                      : "d"(control_word), "b"(key), "c"(count));
+                      : "d"(control_word), "b"(key), "c"(count)
+                      : "ax");
 }
 
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -443,13 +488,13 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
         struct aes_ctx *ctx = aes_ctx(tfm);
-        padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1);
+        aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
         struct aes_ctx *ctx = aes_ctx(tfm);
-        padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
+        aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
 }
 
 static struct crypto_alg aes_alg = {
--
cgit v1.1
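The unlikely() test added to aes_crypt() fires exactly when the input block
occupies the last 16 bytes of its page, which is the only case where the
two-block read performed by xcryptecb could touch the following, possibly
unmapped, page. Below is a standalone sketch of that arithmetic, not part of
the patch, assuming 4096-byte pages and 16-byte AES blocks; it builds as an
ordinary user-space C program.

#include <stdio.h>

#define PAGE_SIZE      4096UL
#define AES_BLOCK_SIZE 16UL

/* Returns 1 when the 16-byte block at address `in' is the last block of its
 * page, i.e. reading a second block would cross the page boundary.  This
 * mirrors the check inside aes_crypt() above. */
static int is_last_block_on_page(unsigned long in)
{
        return !((in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) & (PAGE_SIZE - 1));
}

int main(void)
{
        printf("0x0ff0 -> %d\n", is_last_block_on_page(0x0ff0)); /* 1: last block of page 0 */
        printf("0x0fe0 -> %d\n", is_last_block_on_page(0x0fe0)); /* 0: one block earlier */
        printf("0x1ff0 -> %d\n", is_last_block_on_page(0x1ff0)); /* 1: last block of page 1 */
        return 0;
}

When the test fires, aes_crypt_copy() moves the block into a two-block scratch
buffer, so the extra block consumed by the hardware stays inside memory the
kernel owns.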
From 490fe3f05be3f7c87d7932bcb6e6e53e3db2cd9c Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Fri, 11 Jan 2008 08:09:35 +1100
Subject: [CRYPTO] padlock: Fix alignment fault in aes_crypt_copy

The previous patch fixed spurious read faults from occurring by copying
the data if we happen to have a single block at the end of a page.  It
appears that gcc cannot guarantee 16-byte alignment in the kernel with
__attribute__.  The following report from Torben Viets shows a buffer
that's only 8-byte aligned:

> eneral protection fault: 0000 [#1]
> Modules linked in: xt_TCPMSS xt_tcpmss iptable_mangle ipt_MASQUERADE
> xt_tcpudp xt_mark xt_state iptable_nat nf_nat nf_conntrack_ipv4
> iptable_filter ip_tables x_tables pppoe pppox af_packet ppp_generic slhc
> aes_i586
> CPU: 0
> EIP: 0060:[] Not tainted VLI
> EFLAGS: 00010292 (2.6.23.12 #7)
> EIP is at aes_crypt_copy+0x28/0x40
> eax: f7639ff0 ebx: f6c24050 ecx: 00000001 edx: f6c24030
> esi: f7e89dc8 edi: f7639ff0 ebp: 00010000 esp: f7e89dc8

Since the hardware must have 16-byte alignment, the following patch fixes
this by open coding the alignment adjustment.

Signed-off-by: Herbert Xu
---
 drivers/crypto/padlock-aes.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/crypto/padlock-aes.c')

diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index a337b69..5f7e718 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -429,8 +429,8 @@ static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
 
 static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
 {
-        u8 tmp[AES_BLOCK_SIZE * 2]
-                __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
+        u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
+        u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 
         memcpy(tmp, in, AES_BLOCK_SIZE);
         padlock_xcrypt(tmp, out, key, cword);
--
cgit v1.1
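The fix above relies on the kernel's PTR_ALIGN() helper, which rounds a
pointer up to the next multiple of the requested alignment, so the two-block
scratch buffer is 16-byte aligned no matter how gcc places the surrounding
array. The following user-space sketch, not part of the patch, spells out the
same rounding with plain casts instead of PTR_ALIGN():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AES_BLOCK_SIZE    16
#define PADLOCK_ALIGNMENT 16

int main(void)
{
        /* Over-allocate by (alignment - 1) bytes... */
        uint8_t buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
        /* ...then round the start address up to a 16-byte boundary. */
        uint8_t *tmp = (uint8_t *)(((uintptr_t)buf + PADLOCK_ALIGNMENT - 1) &
                                   ~(uintptr_t)(PADLOCK_ALIGNMENT - 1));

        memset(tmp, 0, AES_BLOCK_SIZE * 2);  /* tmp still holds two full blocks */
        printf("buf = %p, tmp = %p, tmp mod 16 = %lu\n",
               (void *)buf, (void *)tmp,
               (unsigned long)((uintptr_t)tmp % PADLOCK_ALIGNMENT));
        return 0;
}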
From 89e12654312dddbbdbf17b5adc95b22cb672f947 Mon Sep 17 00:00:00 2001
From: Sebastian Siewior
Date: Wed, 17 Oct 2007 23:18:57 +0800
Subject: [CRYPTO] aes: Move common defines into a header file

These three defines are used in all AES-related hardware.

Signed-off-by: Sebastian Siewior
Signed-off-by: Herbert Xu
---
 drivers/crypto/padlock-aes.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'drivers/crypto/padlock-aes.c')

diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 5f7e718..c33334a 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -44,6 +44,7 @@
  */
 
 #include
+#include
 #include
 #include
 #include
@@ -53,9 +54,6 @@
 #include
 #include "padlock.h"
 
-#define AES_MIN_KEY_SIZE        16      /* in uint8_t units */
-#define AES_MAX_KEY_SIZE        32      /* ditto */
-#define AES_BLOCK_SIZE          16      /* ditto */
 #define AES_EXTENDED_KEY_SIZE   64      /* in uint32_t units */
 #define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
--
cgit v1.1

From 866cd902e864e9d0e31299efa9d61fc9a9bec315 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Thu, 27 Dec 2007 00:04:44 +1100
Subject: [CRYPTO] padlock: Only reset the key once for each CBC and ECB operation

Currently we reset the key for each segment fed to the xcrypt
instructions.  This patch optimises this for CBC and ECB so that we
only do this once for each encrypt/decrypt operation.

Signed-off-by: Herbert Xu
---
 drivers/crypto/padlock-aes.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

(limited to 'drivers/crypto/padlock-aes.c')

diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index c33334a..2f3ad3f 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -417,6 +417,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 /* ====== Encryption/decryption routines ====== */
 
 /* These are the real call to PadLock. */
+static inline void padlock_reset_key(void)
+{
+        asm volatile ("pushfl; popfl");
+}
+
 static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
                                   void *control_word)
 {
@@ -437,8 +442,6 @@ static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
 static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
                              struct cword *cword)
 {
-        asm volatile ("pushfl; popfl");
-
         /* padlock_xcrypt requires at least two blocks of data. */
         if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
                        (PAGE_SIZE - 1)))) {
@@ -457,7 +460,6 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                 return;
         }
 
-        asm volatile ("pushfl; popfl");         /* enforce key reload. */
         asm volatile ("test $1, %%cl;"
                       "je 1f;"
                       "lea -1(%%ecx), %%eax;"
@@ -474,8 +476,6 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
                                      u8 *iv, void *control_word, u32 count)
 {
-        /* Enforce key reload. */
-        asm volatile ("pushfl; popfl");
         /* rep xcryptcbc */
         asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
                       : "+S" (input), "+D" (output), "+a" (iv)
@@ -486,12 +486,14 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
         struct aes_ctx *ctx = aes_ctx(tfm);
+        padlock_reset_key();
         aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
         struct aes_ctx *ctx = aes_ctx(tfm);
+        padlock_reset_key();
         aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
 }
 
@@ -524,6 +526,8 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
         struct blkcipher_walk walk;
         int err;
 
+        padlock_reset_key();
+
         blkcipher_walk_init(&walk, dst, src, nbytes);
         err = blkcipher_walk_virt(desc, &walk);
 
@@ -546,6 +550,8 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
         struct blkcipher_walk walk;
         int err;
 
+        padlock_reset_key();
+
         blkcipher_walk_init(&walk, dst, src, nbytes);
         err = blkcipher_walk_virt(desc, &walk);
 
@@ -590,6 +596,8 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
         struct blkcipher_walk walk;
         int err;
 
+        padlock_reset_key();
+
         blkcipher_walk_init(&walk, dst, src, nbytes);
         err = blkcipher_walk_virt(desc, &walk);
 
@@ -614,6 +622,8 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
         struct blkcipher_walk walk;
         int err;
 
+        padlock_reset_key();
+
         blkcipher_walk_init(&walk, dst, src, nbytes);
         err = blkcipher_walk_virt(desc, &walk);
--
cgit v1.1
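The structure of this last change is a plain hoisting optimisation: the
"pushfl; popfl" sequence, which the driver uses to force the PadLock unit to
reload the key before its next xcrypt, moves out of the per-segment helpers
and runs once at the top of each encrypt/decrypt entry point. The sketch below
shows only that shape; reset_key(), xcrypt_segment(), struct segment and
ecb_encrypt() are hypothetical stand-ins for illustration, not the driver's
real interfaces.

#include <stddef.h>
#include <stdio.h>

struct segment { const char *name; size_t nblocks; };

static void reset_key(void)
{
        /* Stands in for padlock_reset_key(): asm volatile ("pushfl; popfl"). */
        puts("key reload");
}

static void xcrypt_segment(const struct segment *s)
{
        /* Before the patch the reload happened here, once per segment;
         * afterwards the caller performs it once per operation. */
        printf("xcrypt %s (%zu blocks)\n", s->name, s->nblocks);
}

static void ecb_encrypt(const struct segment *segs, size_t n)
{
        size_t i;

        reset_key();                    /* once per operation */
        for (i = 0; i < n; i++)
                xcrypt_segment(&segs[i]);
}

int main(void)
{
        struct segment segs[] = { { "seg0", 4 }, { "seg1", 1 }, { "seg2", 7 } };

        ecb_encrypt(segs, sizeof(segs) / sizeof(segs[0]));
        return 0;
}

The four blkcipher entry points in the patch follow the same pattern:
padlock_reset_key() first, then the blkcipher walk loop.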