author     Linus Torvalds <torvalds@linux-foundation.org>   2016-01-12 18:51:14 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-01-12 18:51:14 -0800
commit     c597b6bcd5c624534afc3df65cdc42bb05173bca (patch)
tree       8fedd26c5dc0357a10db08a6bef31085e2508280 /crypto
parent     60b7eca1dc2ec066916b3b7ac6ad89bea13cb9af (diff)
parent     48d627648141479c8be8acd110191072e24eba25 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"Algorithms:
- Add RSA padding algorithm
Drivers:
- Add GCM mode support to atmel
- Add atmel support for SAMA5D2 devices
- Add cipher modes to talitos
- Add rockchip driver for rk3288
- Add qat support for C3XXX and C62X"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (103 commits)
crypto: hifn_795x, picoxcell - use ablkcipher_request_cast
crypto: qat - fix SKU definition for c3xxx dev
crypto: qat - Fix random config build issue
crypto: ccp - use to_pci_dev and to_platform_device
crypto: qat - Rename dh895xcc mmp firmware
crypto: 842 - remove WARN inside printk
crypto: atmel-aes - add debug facilities to monitor register accesses.
crypto: atmel-aes - add support to GCM mode
crypto: atmel-aes - change the DMA threshold
crypto: atmel-aes - fix the counter overflow in CTR mode
crypto: atmel-aes - fix atmel-ctr-aes driver for RFC 3686
crypto: atmel-aes - create sections to regroup functions by usage
crypto: atmel-aes - fix typo and indentation
crypto: atmel-aes - use SIZE_IN_WORDS() helper macro
crypto: atmel-aes - improve performances of data transfer
crypto: atmel-aes - fix atmel_aes_remove()
crypto: atmel-aes - remove useless AES_FLAGS_DMA flag
crypto: atmel-aes - reduce latency of DMA completion
crypto: atmel-aes - remove unused 'err' member of struct atmel_aes_dev
crypto: atmel-aes - rework crypto request completion
...
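
The headline change in this pull is the new "pkcs1pad" akcipher template (crypto/rsa-pkcs1pad.c, added below). For orientation, here is a minimal sketch of how a kernel caller might wrap the generic RSA implementation with it. This is illustrative, not code from this merge: the function name and the pub_key/src/dst parameters are assumptions, error handling is abbreviated, and a real caller would also have to handle -EINPROGRESS/-EBUSY from an asynchronous implementation.

#include <crypto/akcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int pkcs1pad_encrypt_demo(const void *pub_key, unsigned int pub_key_len,
				 struct scatterlist *src, unsigned int src_len,
				 struct scatterlist *dst, unsigned int dst_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	int err;

	/* Instantiate the template around the generic RSA akcipher. */
	tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* BER-encoded public key, as expected by the rsa implementation. */
	err = crypto_akcipher_set_pub_key(tfm, pub_key, pub_key_len);
	if (err)
		goto out_free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* dst must hold crypto_akcipher_maxsize(tfm) bytes. */
	akcipher_request_set_crypt(req, src, dst, src_len, dst_len);
	err = crypto_akcipher_encrypt(req);

	akcipher_request_free(req);
out_free_tfm:
	crypto_free_akcipher(tfm);
	return err;
}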
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Makefile                     |    1
-rw-r--r--  crypto/akcipher.c                   |   34
-rw-r--r--  crypto/algapi.c                     |    9
-rw-r--r--  crypto/algif_aead.c                 |    6
-rw-r--r--  crypto/algif_skcipher.c             |   10
-rw-r--r--  crypto/asymmetric_keys/signature.c  |    2
-rw-r--r--  crypto/chacha20poly1305.c           |    8
-rw-r--r--  crypto/cryptd.c                     |    4
-rw-r--r--  crypto/drbg.c                       |    6
-rw-r--r--  crypto/mcryptd.c                    |    8
-rw-r--r--  crypto/md5.c                        |    6
-rw-r--r--  crypto/rsa-pkcs1pad.c               |  628
-rw-r--r--  crypto/rsa.c                        |   40
-rw-r--r--  crypto/sha1_generic.c               |    7
-rw-r--r--  crypto/sha256_generic.c             |   16
-rw-r--r--  crypto/tcrypt.c                     |    2
16 files changed, 735 insertions, 52 deletions
diff --git a/crypto/Makefile b/crypto/Makefile
index f7aba92..2acdbbd 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o
 rsa_generic-y += rsaprivkey-asn1.o
 rsa_generic-y += rsa.o
 rsa_generic-y += rsa_helper.o
+rsa_generic-y += rsa-pkcs1pad.o
 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
 
 cryptomgr-y := algboss.o testmgr.o
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 120ec04..def301e 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -21,6 +21,7 @@
 #include <linux/cryptouser.h>
 #include <net/netlink.h>
 #include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
 #include "internal.h"
 
 #ifdef CONFIG_NET
@@ -75,9 +76,17 @@ static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm)
 	return 0;
 }
 
+static void crypto_akcipher_free_instance(struct crypto_instance *inst)
+{
+	struct akcipher_instance *akcipher = akcipher_instance(inst);
+
+	akcipher->free(akcipher);
+}
+
 static const struct crypto_type crypto_akcipher_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_akcipher_init_tfm,
+	.free = crypto_akcipher_free_instance,
 #ifdef CONFIG_PROC_FS
 	.show = crypto_akcipher_show,
 #endif
@@ -88,6 +97,14 @@ static const struct crypto_type crypto_akcipher_type = {
 	.tfmsize = offsetof(struct crypto_akcipher, base),
 };
 
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
+			 u32 type, u32 mask)
+{
+	spawn->base.frontend = &crypto_akcipher_type;
+	return crypto_grab_spawn(&spawn->base, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_akcipher);
+
 struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
 					      u32 mask)
 {
@@ -95,13 +112,20 @@ struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
 
-int crypto_register_akcipher(struct akcipher_alg *alg)
+static void akcipher_prepare_alg(struct akcipher_alg *alg)
 {
 	struct crypto_alg *base = &alg->base;
 
 	base->cra_type = &crypto_akcipher_type;
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+}
+
+int crypto_register_akcipher(struct akcipher_alg *alg)
+{
+	struct crypto_alg *base = &alg->base;
+
+	akcipher_prepare_alg(alg);
 	return crypto_register_alg(base);
 }
 EXPORT_SYMBOL_GPL(crypto_register_akcipher);
@@ -112,5 +136,13 @@ void crypto_unregister_akcipher(struct akcipher_alg *alg)
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);
 
+int akcipher_register_instance(struct crypto_template *tmpl,
+			       struct akcipher_instance *inst)
+{
+	akcipher_prepare_alg(&inst->alg);
+	return crypto_register_instance(tmpl, akcipher_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(akcipher_register_instance);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Generic public key cipher type");
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 59bf491..7be76aa 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -93,16 +93,15 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
 {
 	struct crypto_spawn *spawn, *n;
 
-	if (list_empty(stack))
+	spawn = list_first_entry_or_null(stack, struct crypto_spawn, list);
+	if (!spawn)
 		return NULL;
 
-	spawn = list_first_entry(stack, struct crypto_spawn, list);
-	n = list_entry(spawn->list.next, struct crypto_spawn, list);
+	n = list_next_entry(spawn, list);
 
 	if (spawn->alg && &n->list != stack && !n->alg)
 		n->alg = (n->list.next == stack) ? alg :
-			 &list_entry(n->list.next, struct crypto_spawn,
-				     list)->inst->alg;
+			 &list_next_entry(n, list)->inst->alg;
 
 	list_move(&spawn->list, secondary_spawns);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 6d4d456..4c93b8a 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -213,7 +213,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 	}
 
 	while (size) {
-		unsigned long len = size;
+		size_t len = size;
 		struct scatterlist *sg = NULL;
 
 		/* use the existing memory in an allocated page */
@@ -247,7 +247,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 		/* allocate a new page */
 		len = min_t(unsigned long, size, aead_sndbuf(sk));
 		while (len) {
-			int plen = 0;
+			size_t plen = 0;
 
 			if (sgl->cur >= ALG_MAX_PAGES) {
 				aead_put_sgl(sk);
@@ -256,7 +256,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 			}
 
 			sg = sgl->sg + sgl->cur;
-			plen = min_t(int, len, PAGE_SIZE);
+			plen = min_t(size_t, len, PAGE_SIZE);
 
 			sg_assign_page(sg, alloc_page(GFP_KERNEL));
 			err = -ENOMEM;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 634b4d1..5c756b3 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -40,7 +40,7 @@ struct skcipher_ctx {
 	struct af_alg_completion completion;
 
 	atomic_t inflight;
-	unsigned used;
+	size_t used;
 
 	unsigned int len;
 	bool more;
@@ -153,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
 	return 0;
 }
 
-static void skcipher_pull_sgl(struct sock *sk, int used, int put)
+static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
@@ -167,7 +167,7 @@ static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
 		sg = sgl->sg;
 
 		for (i = 0; i < sgl->cur; i++) {
-			int plen = min_t(int, used, sg[i].length);
+			size_t plen = min_t(size_t, used, sg[i].length);
 
 			if (!sg_page(sg + i))
 				continue;
@@ -348,7 +348,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 	while (size) {
 		struct scatterlist *sg;
 		unsigned long len = size;
-		int plen;
+		size_t plen;
 
 		if (ctx->merge) {
 			sgl = list_entry(ctx->tsgl.prev,
@@ -390,7 +390,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 		sg_unmark_end(sg + sgl->cur);
 		do {
 			i = sgl->cur;
-			plen = min_t(int, len, PAGE_SIZE);
+			plen = min_t(size_t, len, PAGE_SIZE);
 
 			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
 			err = -ENOMEM;
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 9441240..004d5fc 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -13,7 +13,7 @@
 #define pr_fmt(fmt) "SIG: "fmt
 #include <keys/asymmetric-subtype.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/err.h>
 #include <crypto/public_key.h>
 #include "asymmetric_keys.h"
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 99c3cce..7b6b935 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -130,6 +130,9 @@ static int chacha_decrypt(struct aead_request *req)
 	struct scatterlist *src, *dst;
 	int err;
 
+	if (rctx->cryptlen == 0)
+		goto skip;
+
 	chacha_iv(creq->iv, req, 1);
 
 	sg_init_table(rctx->src, 2);
@@ -150,6 +153,7 @@ static int chacha_decrypt(struct aead_request *req)
 	if (err)
 		return err;
 
+skip:
 	return poly_verify_tag(req);
 }
 
@@ -415,6 +419,9 @@ static int chacha_encrypt(struct aead_request *req)
 	struct scatterlist *src, *dst;
 	int err;
 
+	if (req->cryptlen == 0)
+		goto skip;
+
 	chacha_iv(creq->iv, req, 1);
 
 	sg_init_table(rctx->src, 2);
@@ -435,6 +442,7 @@ static int chacha_encrypt(struct aead_request *req)
 	if (err)
 		return err;
 
+skip:
 	return poly_genkey(req);
 }
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index c81861b..7921251 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -637,6 +637,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 	inst->alg.halg.base.cra_flags = type;
 
 	inst->alg.halg.digestsize = salg->digestsize;
+	inst->alg.halg.statesize = salg->statesize;
 	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
 
 	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
@@ -887,8 +888,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
 		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
 		return ERR_PTR(-EINVAL);
-	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+	type = crypto_skcipher_type(type);
 	mask &= ~CRYPTO_ALG_TYPE_MASK;
 	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
 	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
diff --git a/crypto/drbg.c b/crypto/drbg.c
index a7c2314..ab6ef1d 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -626,7 +626,7 @@ out:
 	return len;
 }
 
-static struct drbg_state_ops drbg_ctr_ops = {
+static const struct drbg_state_ops drbg_ctr_ops = {
 	.update = drbg_ctr_update,
 	.generate = drbg_ctr_generate,
 	.crypto_init = drbg_init_sym_kernel,
@@ -752,7 +752,7 @@ static int drbg_hmac_generate(struct drbg_state *drbg,
 	return len;
 }
 
-static struct drbg_state_ops drbg_hmac_ops = {
+static const struct drbg_state_ops drbg_hmac_ops = {
 	.update = drbg_hmac_update,
 	.generate = drbg_hmac_generate,
 	.crypto_init = drbg_init_hash_kernel,
@@ -1032,7 +1032,7 @@ out:
  * scratchpad usage: as update and generate are used isolated, both
  * can use the scratchpad
  */
-static struct drbg_state_ops drbg_hash_ops = {
+static const struct drbg_state_ops drbg_hash_ops = {
 	.update = drbg_hash_update,
 	.generate = drbg_hash_generate,
 	.crypto_init = drbg_init_hash_kernel,
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index fe5b495a..f78d4fc 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -128,13 +128,9 @@ static void mcryptd_opportunistic_flush(void)
 	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
 	while (single_task_running()) {
 		mutex_lock(&flist->lock);
-		if (list_empty(&flist->list)) {
-			mutex_unlock(&flist->lock);
-			return;
-		}
-		cstate = list_entry(flist->list.next,
+		cstate = list_first_entry_or_null(&flist->list,
 				struct mcryptd_alg_cstate, flush_list);
-		if (!cstate->flusher_engaged) {
+		if (!cstate || !cstate->flusher_engaged) {
 			mutex_unlock(&flist->lock);
 			return;
 		}
diff --git a/crypto/md5.c b/crypto/md5.c
index 33d17e9..2355a7c 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -24,6 +24,12 @@
 #include <linux/cryptohash.h>
 #include <asm/byteorder.h>
 
+const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
+	0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+	0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+};
+EXPORT_SYMBOL_GPL(md5_zero_message_hash);
+
 /* XXX: this stuff can be optimized */
 static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
 {
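The md5_zero_message_hash export above (and the matching sha1/sha224/sha256 constants further down) gives drivers a canned digest of the empty message, since many hardware crypto engines cannot be programmed for a zero-length operation. A hedged sketch of the intended caller pattern; the function name is illustrative, not from this merge:

#include <crypto/hash.h>
#include <crypto/md5.h>
#include <linux/string.h>

static int example_md5_digest(struct ahash_request *req)
{
	/* Zero-length input: return the precomputed empty-message digest. */
	if (!req->nbytes) {
		memcpy(req->result, md5_zero_message_hash, MD5_DIGEST_SIZE);
		return 0;
	}

	/* ... otherwise queue the request to the hardware as usual ... */
	return -EINPROGRESS;
}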
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
new file mode 100644
index 0000000..50f5c97
--- /dev/null
+++ b/crypto/rsa-pkcs1pad.c
@@ -0,0 +1,628 @@
+/*
+ * RSA padding templates.
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+struct pkcs1pad_ctx {
+	struct crypto_akcipher *child;
+
+	unsigned int key_size;
+};
+
+struct pkcs1pad_request {
+	struct akcipher_request child_req;
+
+	struct scatterlist in_sg[3], out_sg[2];
+	uint8_t *in_buf, *out_buf;
+};
+
+static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+		unsigned int keylen)
+{
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	int err, size;
+
+	err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
+
+	if (!err) {
+		/* Find out new modulus size from rsa implementation */
+		size = crypto_akcipher_maxsize(ctx->child);
+
+		ctx->key_size = size > 0 ? size : 0;
+		if (size <= 0)
+			err = size;
+	}
+
+	return err;
+}
+
+static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+		unsigned int keylen)
+{
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	int err, size;
+
+	err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
+
+	if (!err) {
+		/* Find out new modulus size from rsa implementation */
+		size = crypto_akcipher_maxsize(ctx->child);
+
+		ctx->key_size = size > 0 ? size : 0;
+		if (size <= 0)
+			err = size;
+	}
+
+	return err;
+}
+
+static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
+{
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	/*
+	 * The maximum destination buffer size for the encrypt/sign operations
+	 * will be the same as for RSA, even though it's smaller for
+	 * decrypt/verify.
+	 */
+
+	return ctx->key_size ?: -EINVAL;
+}
+
+static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
+		struct scatterlist *next)
+{
+	int nsegs = next ? 1 : 0;
+
+	if (offset_in_page(buf) + len <= PAGE_SIZE) {
+		nsegs += 1;
+		sg_init_table(sg, nsegs);
+		sg_set_buf(sg, buf, len);
+	} else {
+		nsegs += 2;
+		sg_init_table(sg, nsegs);
+		sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
+		sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
+				offset_in_page(buf) + len - PAGE_SIZE);
+	}
+
+	if (next)
+		sg_chain(sg, nsegs, next);
+}
+
+static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
+	size_t chunk_len, pad_left;
+	struct sg_mapping_iter miter;
+
+	if (!err) {
+		if (pad_len) {
+			sg_miter_start(&miter, req->dst,
+					sg_nents_for_len(req->dst, pad_len),
+					SG_MITER_ATOMIC | SG_MITER_TO_SG);
+
+			pad_left = pad_len;
+			while (pad_left) {
+				sg_miter_next(&miter);
+
+				chunk_len = min(miter.length, pad_left);
+				memset(miter.addr, 0, chunk_len);
+				pad_left -= chunk_len;
+			}
+
+			sg_miter_stop(&miter);
+		}
+
+		sg_pcopy_from_buffer(req->dst,
+				sg_nents_for_len(req->dst, ctx->key_size),
+				req_ctx->out_buf, req_ctx->child_req.dst_len,
+				pad_len);
+	}
+	req->dst_len = ctx->key_size;
+
+	kfree(req_ctx->in_buf);
+	kzfree(req_ctx->out_buf);
+
+	return err;
+}
+
+static void pkcs1pad_encrypt_sign_complete_cb(
+		struct crypto_async_request *child_async_req, int err)
+{
+	struct akcipher_request *req = child_async_req->data;
+	struct crypto_async_request async_req;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	async_req.data = req->base.data;
+	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+	async_req.flags = child_async_req->flags;
+	req->base.complete(&async_req,
+			pkcs1pad_encrypt_sign_complete(req, err));
+}
+
+static int pkcs1pad_encrypt(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	int err;
+	unsigned int i, ps_end;
+
+	if (!ctx->key_size)
+		return -EINVAL;
+
+	if (req->src_len > ctx->key_size - 11)
+		return -EOVERFLOW;
+
+	if (req->dst_len < ctx->key_size) {
+		req->dst_len = ctx->key_size;
+		return -EOVERFLOW;
+	}
+
+	if (ctx->key_size > PAGE_SIZE)
+		return -ENOTSUPP;
+
+	/*
+	 * Replace both input and output to add the padding in the input and
+	 * the potential missing leading zeros in the output.
+	 */
+	req_ctx->child_req.src = req_ctx->in_sg;
+	req_ctx->child_req.src_len = ctx->key_size - 1;
+	req_ctx->child_req.dst = req_ctx->out_sg;
+	req_ctx->child_req.dst_len = ctx->key_size;
+
+	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->in_buf)
+		return -ENOMEM;
+
+	ps_end = ctx->key_size - req->src_len - 2;
+	req_ctx->in_buf[0] = 0x02;
+	for (i = 1; i < ps_end; i++)
+		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
+	req_ctx->in_buf[ps_end] = 0x00;
+
+	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+			ctx->key_size - 1 - req->src_len, req->src);
+
+	req_ctx->out_buf = kmalloc(ctx->key_size,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->out_buf) {
+		kfree(req_ctx->in_buf);
+		return -ENOMEM;
+	}
+
+	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+			ctx->key_size, NULL);
+
+	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+			pkcs1pad_encrypt_sign_complete_cb, req);
+
+	err = crypto_akcipher_encrypt(&req_ctx->child_req);
+	if (err != -EINPROGRESS &&
+			(err != -EBUSY ||
+			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return pkcs1pad_encrypt_sign_complete(req, err);
+
+	return err;
+}
+
+static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	unsigned int pos;
+
+	if (err == -EOVERFLOW)
+		/* Decrypted value had no leading 0 byte */
+		err = -EINVAL;
+
+	if (err)
+		goto done;
+
+	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	if (req_ctx->out_buf[0] != 0x02) {
+		err = -EINVAL;
+		goto done;
+	}
+	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
+		if (req_ctx->out_buf[pos] == 0x00)
+			break;
+	if (pos < 9 || pos == req_ctx->child_req.dst_len) {
+		err = -EINVAL;
+		goto done;
+	}
+	pos++;
+
+	if (req->dst_len < req_ctx->child_req.dst_len - pos)
+		err = -EOVERFLOW;
+	req->dst_len = req_ctx->child_req.dst_len - pos;
+
+	if (!err)
+		sg_copy_from_buffer(req->dst,
+				sg_nents_for_len(req->dst, req->dst_len),
+				req_ctx->out_buf + pos, req->dst_len);
+
+done:
+	kzfree(req_ctx->out_buf);
+
+	return err;
+}
+
+static void pkcs1pad_decrypt_complete_cb(
+		struct crypto_async_request *child_async_req, int err)
+{
+	struct akcipher_request *req = child_async_req->data;
+	struct crypto_async_request async_req;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	async_req.data = req->base.data;
+	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+	async_req.flags = child_async_req->flags;
+	req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
+}
+
+static int pkcs1pad_decrypt(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	int err;
+
+	if (!ctx->key_size || req->src_len != ctx->key_size)
+		return -EINVAL;
+
+	if (ctx->key_size > PAGE_SIZE)
+		return -ENOTSUPP;
+
+	/* Reuse input buffer, output to a new buffer */
+	req_ctx->child_req.src = req->src;
+	req_ctx->child_req.src_len = req->src_len;
+	req_ctx->child_req.dst = req_ctx->out_sg;
+	req_ctx->child_req.dst_len = ctx->key_size - 1;
+
+	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->out_buf)
+		return -ENOMEM;
+
+	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+			ctx->key_size - 1, NULL);
+
+	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+			pkcs1pad_decrypt_complete_cb, req);
+
+	err = crypto_akcipher_decrypt(&req_ctx->child_req);
+	if (err != -EINPROGRESS &&
+			(err != -EBUSY ||
+			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return pkcs1pad_decrypt_complete(req, err);
+
+	return err;
+}
+
+static int pkcs1pad_sign(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	int err;
+	unsigned int ps_end;
+
+	if (!ctx->key_size)
+		return -EINVAL;
+
+	if (req->src_len > ctx->key_size - 11)
+		return -EOVERFLOW;
+
+	if (req->dst_len < ctx->key_size) {
+		req->dst_len = ctx->key_size;
+		return -EOVERFLOW;
+	}
+
+	if (ctx->key_size > PAGE_SIZE)
+		return -ENOTSUPP;
+
+	/*
+	 * Replace both input and output to add the padding in the input and
+	 * the potential missing leading zeros in the output.
+	 */
+	req_ctx->child_req.src = req_ctx->in_sg;
+	req_ctx->child_req.src_len = ctx->key_size - 1;
+	req_ctx->child_req.dst = req_ctx->out_sg;
+	req_ctx->child_req.dst_len = ctx->key_size;
+
+	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->in_buf)
+		return -ENOMEM;
+
+	ps_end = ctx->key_size - req->src_len - 2;
+	req_ctx->in_buf[0] = 0x01;
+	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
+	req_ctx->in_buf[ps_end] = 0x00;
+
+	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+			ctx->key_size - 1 - req->src_len, req->src);
+
+	req_ctx->out_buf = kmalloc(ctx->key_size,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->out_buf) {
+		kfree(req_ctx->in_buf);
+		return -ENOMEM;
+	}
+
+	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+			ctx->key_size, NULL);
+
+	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+			pkcs1pad_encrypt_sign_complete_cb, req);
+
+	err = crypto_akcipher_sign(&req_ctx->child_req);
+	if (err != -EINPROGRESS &&
+			(err != -EBUSY ||
+			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return pkcs1pad_encrypt_sign_complete(req, err);
+
+	return err;
+}
+
+static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	unsigned int pos;
+
+	if (err == -EOVERFLOW)
+		/* Decrypted value had no leading 0 byte */
+		err = -EINVAL;
+
+	if (err)
+		goto done;
+
+	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	if (req_ctx->out_buf[0] != 0x01) {
+		err = -EINVAL;
+		goto done;
+	}
+	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
+		if (req_ctx->out_buf[pos] != 0xff)
+			break;
+	if (pos < 9 || pos == req_ctx->child_req.dst_len ||
+			req_ctx->out_buf[pos] != 0x00) {
+		err = -EINVAL;
+		goto done;
+	}
+	pos++;
+
+	if (req->dst_len < req_ctx->child_req.dst_len - pos)
+		err = -EOVERFLOW;
+	req->dst_len = req_ctx->child_req.dst_len - pos;
+
+	if (!err)
+		sg_copy_from_buffer(req->dst,
+				sg_nents_for_len(req->dst, req->dst_len),
+				req_ctx->out_buf + pos, req->dst_len);
+
+done:
+	kzfree(req_ctx->out_buf);
+
+	return err;
+}
+
+static void pkcs1pad_verify_complete_cb(
+		struct crypto_async_request *child_async_req, int err)
+{
+	struct akcipher_request *req = child_async_req->data;
+	struct crypto_async_request async_req;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	async_req.data = req->base.data;
+	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+	async_req.flags = child_async_req->flags;
+	req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
+}
+
+/*
+ * The verify operation is here for completeness similar to the verification
+ * defined in RFC2313 section 10.2 except that block type 0 is not accepted,
+ * as in RFC2437.  RFC2437 section 9.2 doesn't define any operation to
+ * retrieve the DigestInfo from a signature, instead the user is expected
+ * to call the sign operation to generate the expected signature and compare
+ * signatures instead of the message-digests.
+ */
+static int pkcs1pad_verify(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	int err;
+
+	if (!ctx->key_size || req->src_len != ctx->key_size)
+		return -EINVAL;
+
+	if (ctx->key_size > PAGE_SIZE)
+		return -ENOTSUPP;
+
+	/* Reuse input buffer, output to a new buffer */
+	req_ctx->child_req.src = req->src;
+	req_ctx->child_req.src_len = req->src_len;
+	req_ctx->child_req.dst = req_ctx->out_sg;
+	req_ctx->child_req.dst_len = ctx->key_size - 1;
+
+	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->out_buf)
+		return -ENOMEM;
+
+	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+			ctx->key_size - 1, NULL);
+
+	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+			pkcs1pad_verify_complete_cb, req);
+
+	err = crypto_akcipher_verify(&req_ctx->child_req);
+	if (err != -EINPROGRESS &&
+			(err != -EBUSY ||
+			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return pkcs1pad_verify_complete(req, err);
+
+	return err;
+}
+
+static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct crypto_akcipher *child_tfm;
+
+	child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst));
+	if (IS_ERR(child_tfm))
+		return PTR_ERR(child_tfm);
+
+	ctx->child = child_tfm;
+
+	return 0;
+}
+
+static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	crypto_free_akcipher(ctx->child);
+}
+
+static void pkcs1pad_free(struct akcipher_instance *inst)
+{
+	struct crypto_akcipher_spawn *spawn = akcipher_instance_ctx(inst);
+
+	crypto_drop_akcipher(spawn);
+
+	kfree(inst);
+}
+
+static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+	struct crypto_attr_type *algt;
+	struct akcipher_instance *inst;
+	struct crypto_akcipher_spawn *spawn;
+	struct akcipher_alg *rsa_alg;
+	const char *rsa_alg_name;
+	int err;
+
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
+
+	if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
+		return -EINVAL;
+
+	rsa_alg_name = crypto_attr_alg_name(tb[1]);
+	if (IS_ERR(rsa_alg_name))
+		return PTR_ERR(rsa_alg_name);
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	spawn = akcipher_instance_ctx(inst);
+	crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
+	err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
+			crypto_requires_sync(algt->type, algt->mask));
+	if (err)
+		goto out_free_inst;
+
+	rsa_alg = crypto_spawn_akcipher_alg(spawn);
+
+	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name,
+				CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+				rsa_alg->base.cra_name) >=
+			CRYPTO_MAX_ALG_NAME ||
+			snprintf(inst->alg.base.cra_driver_name,
+				CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+				rsa_alg->base.cra_driver_name) >=
+			CRYPTO_MAX_ALG_NAME)
+		goto out_drop_alg;
+
+	inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
+	inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
+
+	inst->alg.init = pkcs1pad_init_tfm;
+	inst->alg.exit = pkcs1pad_exit_tfm;
+
+	inst->alg.encrypt = pkcs1pad_encrypt;
+	inst->alg.decrypt = pkcs1pad_decrypt;
+	inst->alg.sign = pkcs1pad_sign;
+	inst->alg.verify = pkcs1pad_verify;
+	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
+	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
+	inst->alg.max_size = pkcs1pad_get_max_size;
+	inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;
+
+	inst->free = pkcs1pad_free;
+
+	err = akcipher_register_instance(tmpl, inst);
+	if (err)
+		goto out_drop_alg;
+
+	return 0;
+
+out_drop_alg:
+	crypto_drop_akcipher(spawn);
+out_free_inst:
+	kfree(inst);
+	return err;
+}
+
+struct crypto_template rsa_pkcs1pad_tmpl = {
+	.name = "pkcs1pad",
+	.create = pkcs1pad_create,
+	.module = THIS_MODULE,
+};
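To make the buffer arithmetic in pkcs1pad_encrypt() easier to follow (the key_size - 11 limit, ps_end, and the leading zero octet the template leaves implicit because the child RSA transform is only handed key_size - 1 bytes), this is the RFC 2313 block the code builds, written out as a standalone sketch. get_nonzero_random_byte() is a hypothetical placeholder for a CSPRNG that never returns zero; the kernel code uses 1 + prandom_u32_max(255) instead.

/* EM = 0x00 || 0x02 || PS (>= 8 nonzero random octets) || 0x00 || M */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

extern uint8_t get_nonzero_random_byte(void);	/* hypothetical CSPRNG */

static int pkcs1_v15_pad_type2(uint8_t *em, size_t k,	/* k = modulus size */
			       const uint8_t *msg, size_t mlen)
{
	size_t ps_len = k - mlen - 3;
	size_t i;

	if (mlen > k - 11)		/* guarantees PS >= 8 octets */
		return -1;

	em[0] = 0x00;			/* left implicit by pkcs1pad_encrypt() */
	em[1] = 0x02;			/* block type 2: encryption */
	for (i = 0; i < ps_len; i++)
		em[2 + i] = get_nonzero_random_byte();
	em[2 + ps_len] = 0x00;		/* separator before the message */
	memcpy(em + 3 + ps_len, msg, mlen);
	return 0;
}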
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 1093e04..77d737f 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -13,6 +13,7 @@
 #include <crypto/internal/rsa.h>
 #include <crypto/internal/akcipher.h>
 #include <crypto/akcipher.h>
+#include <crypto/algapi.h>
 
 /*
  * RSAEP function [RFC3447 sec 5.1.1]
@@ -91,12 +92,6 @@ static int rsa_enc(struct akcipher_request *req)
 		goto err_free_c;
 	}
 
-	if (req->dst_len < mpi_get_size(pkey->n)) {
-		req->dst_len = mpi_get_size(pkey->n);
-		ret = -EOVERFLOW;
-		goto err_free_c;
-	}
-
 	ret = -ENOMEM;
 	m = mpi_read_raw_from_sgl(req->src, req->src_len);
 	if (!m)
@@ -136,12 +131,6 @@ static int rsa_dec(struct akcipher_request *req)
 		goto err_free_m;
 	}
 
-	if (req->dst_len < mpi_get_size(pkey->n)) {
-		req->dst_len = mpi_get_size(pkey->n);
-		ret = -EOVERFLOW;
-		goto err_free_m;
-	}
-
 	ret = -ENOMEM;
 	c = mpi_read_raw_from_sgl(req->src, req->src_len);
 	if (!c)
@@ -180,12 +169,6 @@ static int rsa_sign(struct akcipher_request *req)
 		goto err_free_s;
 	}
 
-	if (req->dst_len < mpi_get_size(pkey->n)) {
-		req->dst_len = mpi_get_size(pkey->n);
-		ret = -EOVERFLOW;
-		goto err_free_s;
-	}
-
 	ret = -ENOMEM;
 	m = mpi_read_raw_from_sgl(req->src, req->src_len);
 	if (!m)
@@ -225,12 +208,6 @@ static int rsa_verify(struct akcipher_request *req)
 		goto err_free_m;
 	}
 
-	if (req->dst_len < mpi_get_size(pkey->n)) {
-		req->dst_len = mpi_get_size(pkey->n);
-		ret = -EOVERFLOW;
-		goto err_free_m;
-	}
-
 	ret = -ENOMEM;
 	s = mpi_read_raw_from_sgl(req->src, req->src_len);
 	if (!s) {
@@ -339,11 +316,24 @@ static struct akcipher_alg rsa = {
 
 static int rsa_init(void)
 {
-	return crypto_register_akcipher(&rsa);
+	int err;
+
+	err = crypto_register_akcipher(&rsa);
+	if (err)
+		return err;
+
+	err = crypto_register_template(&rsa_pkcs1pad_tmpl);
+	if (err) {
+		crypto_unregister_akcipher(&rsa);
+		return err;
+	}
+
+	return 0;
 }
 
 static void rsa_exit(void)
 {
+	crypto_unregister_template(&rsa_pkcs1pad_tmpl);
 	crypto_unregister_akcipher(&rsa);
 }
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 39e3acc..6877cbb 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -26,6 +26,13 @@
 #include <crypto/sha1_base.h>
 #include <asm/byteorder.h>
 
+const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
+	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+	0xaf, 0xd8, 0x07, 0x09
+};
+EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
+
 static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
 				  int blocks)
 {
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 7843116..8f9c47e 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -27,6 +27,22 @@
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 
+const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
+	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+	0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+	0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+	0x2f
+};
+EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
+
+const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
+	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
+
 static inline u32 Ch(u32 x, u32 y, u32 z)
 {
 	return z ^ (x & (y ^ z));
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 46a4a75..270bc4b 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1789,7 +1789,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 		test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
 				NULL, 0, 16, 16, aead_speed_template_20);
 		test_aead_speed("gcm(aes)", ENCRYPT, sec,
-				NULL, 0, 16, 8, aead_speed_template_20);
+				NULL, 0, 16, 8, speed_template_16_24_32);
 		break;
 
 	case 212:
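One consequence of the rsa.c hunks above: the raw RSA primitive no longer polices req->dst_len itself, so sizing the destination correctly is now the job of the caller or of the pkcs1pad wrapper, which reports undersized buffers through the -EOVERFLOW convention in its own entry points. A minimal sketch of the expected caller pattern, assuming a tfm that already holds a key:

	/* Size the destination from the transform, not from a guess. */
	int dst_len = crypto_akcipher_maxsize(tfm);

	if (dst_len <= 0)	/* no key loaded, or an error code */
		return dst_len ?: -EINVAL;
	/* allocate dst_len bytes and map them into the dst scatterlist */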