author    Linus Torvalds <torvalds@linux-foundation.org>  2010-08-04 15:23:14 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-08-04 15:23:14 -0700
commit    b7c8e55db7141dcbb9d5305a3260fa0ed62a1bcc (patch)
tree      59fbd52d8e80e5a83d9747961d28aaf4d400613a /drivers/crypto
parent    ffd386a9a8273dcfa61705d0b349eebc7525ef87 (diff)
parent    4015d9a865e3bcc42d88bedc8ce1551000bab664 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (39 commits)
  random: Reorder struct entropy_store to remove padding on 64bits
  padata: update API documentation
  padata: Remove padata_get_cpumask
  crypto: pcrypt - Update pcrypt cpumask according to the padata cpumask notifier
  crypto: pcrypt - Rename pcrypt_instance
  padata: Pass the padata cpumasks to the cpumask_change_notifier chain
  padata: Rearrange set_cpumask functions
  padata: Rename padata_alloc functions
  crypto: pcrypt - Dont calulate a callback cpu on empty callback cpumask
  padata: Check for valid cpumasks
  padata: Allocate cpumask dependend recources in any case
  padata: Fix cpu index counting
  crypto: geode_aes - Convert pci_table entries to PCI_VDEVICE (if PCI_ANY_ID is used)
  pcrypt: Added sysfs interface to pcrypt
  padata: Added sysfs primitives to padata subsystem
  padata: Make two separate cpumasks
  padata: update documentation
  padata: simplify serialization mechanism
  padata: make padata_do_parallel to return zero on success
  padata: Handle empty padata cpumasks
  ...
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/geode-aes.c  |    2
-rw-r--r--  drivers/crypto/hifn_795x.c  |    4
-rw-r--r--  drivers/crypto/mv_cesa.c    |   10
-rw-r--r--  drivers/crypto/n2_core.c    |  415
-rw-r--r--  drivers/crypto/omap-sham.c  |    1
-rw-r--r--  drivers/crypto/talitos.c    |   77
6 files changed, 342 insertions, 167 deletions
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 09389dd..219d09c 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -573,7 +573,7 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
static struct pci_device_id geode_aes_tbl[] = {
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, PCI_ANY_ID, PCI_ANY_ID} ,
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } ,
{ 0, }
};
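Note: the geode-aes change above replaces an open-coded pci_device_id entry with the PCI_VDEVICE() helper, which fills in PCI_ANY_ID for the subvendor/subdevice fields. As a rough sketch of the equivalence (the exact expansion lives in include/linux/pci.h and has changed form across kernel versions, so the field syntax below is illustrative only):

        /* Illustrative expansion of the new table entry; not driver code. */
        static struct pci_device_id geode_aes_tbl[] = {
                /* { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } is roughly: */
                { .vendor    = PCI_VENDOR_ID_AMD,
                  .device    = PCI_DEVICE_ID_AMD_LX_AES,
                  .subvendor = PCI_ANY_ID,
                  .subdevice = PCI_ANY_ID },
                { 0, }
        };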
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 16fce3a..e449ac5 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2018,7 +2018,6 @@ static void hifn_flush(struct hifn_device *dev)
{
unsigned long flags;
struct crypto_async_request *async_req;
- struct hifn_context *ctx;
struct ablkcipher_request *req;
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int i;
@@ -2035,7 +2034,6 @@ static void hifn_flush(struct hifn_device *dev)
spin_lock_irqsave(&dev->lock, flags);
while ((async_req = crypto_dequeue_request(&dev->queue))) {
- ctx = crypto_tfm_ctx(async_req->tfm);
req = container_of(async_req, struct ablkcipher_request, base);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -2139,7 +2137,6 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
static int hifn_process_queue(struct hifn_device *dev)
{
struct crypto_async_request *async_req, *backlog;
- struct hifn_context *ctx;
struct ablkcipher_request *req;
unsigned long flags;
int err = 0;
@@ -2156,7 +2153,6 @@ static int hifn_process_queue(struct hifn_device *dev)
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
- ctx = crypto_tfm_ctx(async_req->tfm);
req = container_of(async_req, struct ablkcipher_request, base);
err = hifn_handle_req(req);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index e095422..7d279e5 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1055,20 +1055,20 @@ static int mv_probe(struct platform_device *pdev)
cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
if (IS_ERR(cp->queue_th)) {
ret = PTR_ERR(cp->queue_th);
- goto err_thread;
+ goto err_unmap_sram;
}
ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
cp);
if (ret)
- goto err_unmap_sram;
+ goto err_thread;
writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
ret = crypto_register_alg(&mv_aes_alg_ecb);
if (ret)
- goto err_reg;
+ goto err_irq;
ret = crypto_register_alg(&mv_aes_alg_cbc);
if (ret)
@@ -1091,9 +1091,9 @@ static int mv_probe(struct platform_device *pdev)
return 0;
err_unreg_ecb:
crypto_unregister_alg(&mv_aes_alg_ecb);
-err_thread:
+err_irq:
free_irq(irq, cp);
-err_reg:
+err_thread:
kthread_stop(cp->queue_th);
err_unmap_sram:
iounmap(cp->sram);
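Note: the mv_cesa hunk above fixes the probe error path: the goto targets and labels are renamed and reordered so that each failure point unwinds only the resources acquired before it, in reverse (LIFO) order. A minimal sketch of the resulting pattern, with hypothetical acquire/release names standing in for the driver's SRAM mapping, kthread, and IRQ setup:

        /* Sketch only; map_sram()/start_thread()/request_example_irq() are
         * hypothetical stand-ins for the driver's actual setup calls. */
        static int example_probe(void)
        {
                int ret;

                ret = map_sram();               /* first resource */
                if (ret)
                        return ret;

                ret = start_thread();           /* second resource */
                if (ret)
                        goto err_unmap_sram;

                ret = request_example_irq();    /* third resource */
                if (ret)
                        goto err_thread;

                return 0;

        err_thread:
                stop_thread();
        err_unmap_sram:
                unmap_sram();
                return ret;
        }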
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 23163fd..b99c38f 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -239,21 +239,57 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
}
#endif
-struct n2_base_ctx {
- struct list_head list;
+struct n2_ahash_alg {
+ struct list_head entry;
+ const char *hash_zero;
+ const u32 *hash_init;
+ u8 hw_op_hashsz;
+ u8 digest_size;
+ u8 auth_type;
+ u8 hmac_type;
+ struct ahash_alg alg;
};
-static void n2_base_ctx_init(struct n2_base_ctx *ctx)
+static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
{
- INIT_LIST_HEAD(&ctx->list);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct ahash_alg *ahash_alg;
+
+ ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+ return container_of(ahash_alg, struct n2_ahash_alg, alg);
}
-struct n2_hash_ctx {
- struct n2_base_ctx base;
+struct n2_hmac_alg {
+ const char *child_alg;
+ struct n2_ahash_alg derived;
+};
+
+static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct ahash_alg *ahash_alg;
+
+ ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+ return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
+}
+struct n2_hash_ctx {
struct crypto_ahash *fallback_tfm;
};
+#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
+
+struct n2_hmac_ctx {
+ struct n2_hash_ctx base;
+
+ struct crypto_shash *child_shash;
+
+ int hash_key_len;
+ unsigned char hash_key[N2_HASH_KEY_MAX];
+};
+
struct n2_hash_req_ctx {
union {
struct md5_state md5;
@@ -261,9 +297,6 @@ struct n2_hash_req_ctx {
struct sha256_state sha256;
} u;
- unsigned char hash_key[64];
- unsigned char keyed_zero_hash[32];
-
struct ahash_request fallback_req;
};
@@ -356,6 +389,94 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
crypto_free_ahash(ctx->fallback_tfm);
}
+static int n2_hmac_cra_init(struct crypto_tfm *tfm)
+{
+ const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
+ struct crypto_ahash *fallback_tfm;
+ struct crypto_shash *child_shash;
+ int err;
+
+ fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm)) {
+ pr_warning("Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
+ err = PTR_ERR(fallback_tfm);
+ goto out;
+ }
+
+ child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
+ if (IS_ERR(child_shash)) {
+ pr_warning("Child shash '%s' could not be loaded!\n",
+ n2alg->child_alg);
+ err = PTR_ERR(child_shash);
+ goto out_free_fallback;
+ }
+
+ crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+ crypto_ahash_reqsize(fallback_tfm)));
+
+ ctx->child_shash = child_shash;
+ ctx->base.fallback_tfm = fallback_tfm;
+ return 0;
+
+out_free_fallback:
+ crypto_free_ahash(fallback_tfm);
+
+out:
+ return err;
+}
+
+static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+
+ crypto_free_ahash(ctx->base.fallback_tfm);
+ crypto_free_shash(ctx->child_shash);
+}
+
+static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct crypto_shash *child_shash = ctx->child_shash;
+ struct crypto_ahash *fallback_tfm;
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(child_shash)];
+ } desc;
+ int err, bs, ds;
+
+ fallback_tfm = ctx->base.fallback_tfm;
+ err = crypto_ahash_setkey(fallback_tfm, key, keylen);
+ if (err)
+ return err;
+
+ desc.shash.tfm = child_shash;
+ desc.shash.flags = crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ bs = crypto_shash_blocksize(child_shash);
+ ds = crypto_shash_digestsize(child_shash);
+ BUG_ON(ds > N2_HASH_KEY_MAX);
+ if (keylen > bs) {
+ err = crypto_shash_digest(&desc.shash, key, keylen,
+ ctx->hash_key);
+ if (err)
+ return err;
+ keylen = ds;
+ } else if (keylen <= N2_HASH_KEY_MAX)
+ memcpy(ctx->hash_key, key, keylen);
+
+ ctx->hash_key_len = keylen;
+
+ return err;
+}
+
static unsigned long wait_for_tail(struct spu_queue *qp)
{
unsigned long head, hv_ret;
@@ -385,12 +506,12 @@ static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
return hv_ret;
}
-static int n2_hash_async_digest(struct ahash_request *req,
- unsigned int auth_type, unsigned int digest_size,
- unsigned int result_size, void *hash_loc)
+static int n2_do_async_digest(struct ahash_request *req,
+ unsigned int auth_type, unsigned int digest_size,
+ unsigned int result_size, void *hash_loc,
+ unsigned long auth_key, unsigned int auth_key_len)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct cwq_initial_entry *ent;
struct crypto_hash_walk walk;
struct spu_queue *qp;
@@ -403,6 +524,7 @@ static int n2_hash_async_digest(struct ahash_request *req,
*/
if (unlikely(req->nbytes > (1 << 16))) {
struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
rctx->fallback_req.base.flags =
@@ -414,8 +536,6 @@ static int n2_hash_async_digest(struct ahash_request *req,
return crypto_ahash_digest(&rctx->fallback_req);
}
- n2_base_ctx_init(&ctx->base);
-
nbytes = crypto_hash_walk_first(req, &walk);
cpu = get_cpu();
@@ -430,13 +550,13 @@ static int n2_hash_async_digest(struct ahash_request *req,
*/
ent = qp->q + qp->tail;
- ent->control = control_word_base(nbytes, 0, 0,
+ ent->control = control_word_base(nbytes, auth_key_len, 0,
auth_type, digest_size,
false, true, false, false,
OPCODE_INPLACE_BIT |
OPCODE_AUTH_MAC);
ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = 0UL;
+ ent->auth_key_addr = auth_key;
ent->auth_iv_addr = __pa(hash_loc);
ent->final_auth_state_addr = 0UL;
ent->enc_key_addr = 0UL;
@@ -475,114 +595,55 @@ out:
return err;
}
-static int n2_md5_async_digest(struct ahash_request *req)
+static int n2_hash_async_digest(struct ahash_request *req)
{
+ struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct md5_state *m = &rctx->u.md5;
+ int ds;
+ ds = n2alg->digest_size;
if (unlikely(req->nbytes == 0)) {
- static const char md5_zero[MD5_DIGEST_SIZE] = {
- 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
- 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
- };
-
- memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
+ memcpy(req->result, n2alg->hash_zero, ds);
return 0;
}
- m->hash[0] = cpu_to_le32(0x67452301);
- m->hash[1] = cpu_to_le32(0xefcdab89);
- m->hash[2] = cpu_to_le32(0x98badcfe);
- m->hash[3] = cpu_to_le32(0x10325476);
+ memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
- return n2_hash_async_digest(req, AUTH_TYPE_MD5,
- MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
- m->hash);
+ return n2_do_async_digest(req, n2alg->auth_type,
+ n2alg->hw_op_hashsz, ds,
+ &rctx->u, 0UL, 0);
}
-static int n2_sha1_async_digest(struct ahash_request *req)
+static int n2_hmac_async_digest(struct ahash_request *req)
{
+ struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct sha1_state *s = &rctx->u.sha1;
-
- if (unlikely(req->nbytes == 0)) {
- static const char sha1_zero[SHA1_DIGEST_SIZE] = {
- 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
- 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
- 0x07, 0x09
- };
-
- memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
- return 0;
- }
- s->state[0] = SHA1_H0;
- s->state[1] = SHA1_H1;
- s->state[2] = SHA1_H2;
- s->state[3] = SHA1_H3;
- s->state[4] = SHA1_H4;
-
- return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
- SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
- s->state);
-}
-
-static int n2_sha256_async_digest(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct sha256_state *s = &rctx->u.sha256;
-
- if (req->nbytes == 0) {
- static const char sha256_zero[SHA256_DIGEST_SIZE] = {
- 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
- 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
- 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
- 0x1b, 0x78, 0x52, 0xb8, 0x55
- };
-
- memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
- return 0;
- }
- s->state[0] = SHA256_H0;
- s->state[1] = SHA256_H1;
- s->state[2] = SHA256_H2;
- s->state[3] = SHA256_H3;
- s->state[4] = SHA256_H4;
- s->state[5] = SHA256_H5;
- s->state[6] = SHA256_H6;
- s->state[7] = SHA256_H7;
-
- return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
- SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
- s->state);
-}
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ds;
-static int n2_sha224_async_digest(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct sha256_state *s = &rctx->u.sha256;
+ ds = n2alg->derived.digest_size;
+ if (unlikely(req->nbytes == 0) ||
+ unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
+ struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- if (req->nbytes == 0) {
- static const char sha224_zero[SHA224_DIGEST_SIZE] = {
- 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
- 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
- 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
- 0x2f
- };
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
- memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
- return 0;
+ return crypto_ahash_digest(&rctx->fallback_req);
}
- s->state[0] = SHA224_H0;
- s->state[1] = SHA224_H1;
- s->state[2] = SHA224_H2;
- s->state[3] = SHA224_H3;
- s->state[4] = SHA224_H4;
- s->state[5] = SHA224_H5;
- s->state[6] = SHA224_H6;
- s->state[7] = SHA224_H7;
+ memcpy(&rctx->u, n2alg->derived.hash_init,
+ n2alg->derived.hw_op_hashsz);
- return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
- SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
- s->state);
+ return n2_do_async_digest(req, n2alg->derived.hmac_type,
+ n2alg->derived.hw_op_hashsz, ds,
+ &rctx->u,
+ __pa(&ctx->hash_key),
+ ctx->hash_key_len);
}
struct n2_cipher_context {
@@ -1209,35 +1270,92 @@ static LIST_HEAD(cipher_algs);
struct n2_hash_tmpl {
const char *name;
- int (*digest)(struct ahash_request *req);
+ const char *hash_zero;
+ const u32 *hash_init;
+ u8 hw_op_hashsz;
u8 digest_size;
u8 block_size;
+ u8 auth_type;
+ u8 hmac_type;
+};
+
+static const char md5_zero[MD5_DIGEST_SIZE] = {
+ 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+ 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+};
+static const u32 md5_init[MD5_HASH_WORDS] = {
+ cpu_to_le32(0x67452301),
+ cpu_to_le32(0xefcdab89),
+ cpu_to_le32(0x98badcfe),
+ cpu_to_le32(0x10325476),
+};
+static const char sha1_zero[SHA1_DIGEST_SIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
+ 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
+ 0x07, 0x09
};
+static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
+ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
+};
+static const char sha256_zero[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
+ 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
+ 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
+ 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
+};
+static const char sha224_zero[SHA224_DIGEST_SIZE] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+ 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+ 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+ 0x2f
+};
+static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
+ SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+};
+
static const struct n2_hash_tmpl hash_tmpls[] = {
{ .name = "md5",
- .digest = n2_md5_async_digest,
+ .hash_zero = md5_zero,
+ .hash_init = md5_init,
+ .auth_type = AUTH_TYPE_MD5,
+ .hmac_type = AUTH_TYPE_HMAC_MD5,
+ .hw_op_hashsz = MD5_DIGEST_SIZE,
.digest_size = MD5_DIGEST_SIZE,
.block_size = MD5_HMAC_BLOCK_SIZE },
{ .name = "sha1",
- .digest = n2_sha1_async_digest,
+ .hash_zero = sha1_zero,
+ .hash_init = sha1_init,
+ .auth_type = AUTH_TYPE_SHA1,
+ .hmac_type = AUTH_TYPE_HMAC_SHA1,
+ .hw_op_hashsz = SHA1_DIGEST_SIZE,
.digest_size = SHA1_DIGEST_SIZE,
.block_size = SHA1_BLOCK_SIZE },
{ .name = "sha256",
- .digest = n2_sha256_async_digest,
+ .hash_zero = sha256_zero,
+ .hash_init = sha256_init,
+ .auth_type = AUTH_TYPE_SHA256,
+ .hmac_type = AUTH_TYPE_HMAC_SHA256,
+ .hw_op_hashsz = SHA256_DIGEST_SIZE,
.digest_size = SHA256_DIGEST_SIZE,
.block_size = SHA256_BLOCK_SIZE },
{ .name = "sha224",
- .digest = n2_sha224_async_digest,
+ .hash_zero = sha224_zero,
+ .hash_init = sha224_init,
+ .auth_type = AUTH_TYPE_SHA256,
+ .hmac_type = AUTH_TYPE_RESERVED,
+ .hw_op_hashsz = SHA256_DIGEST_SIZE,
.digest_size = SHA224_DIGEST_SIZE,
.block_size = SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
-struct n2_ahash_alg {
- struct list_head entry;
- struct ahash_alg alg;
-};
static LIST_HEAD(ahash_algs);
+static LIST_HEAD(hmac_algs);
static int algs_registered;
@@ -1245,12 +1363,18 @@ static void __n2_unregister_algs(void)
{
struct n2_cipher_alg *cipher, *cipher_tmp;
struct n2_ahash_alg *alg, *alg_tmp;
+ struct n2_hmac_alg *hmac, *hmac_tmp;
list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
crypto_unregister_alg(&cipher->alg);
list_del(&cipher->entry);
kfree(cipher);
}
+ list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
+ crypto_unregister_ahash(&hmac->derived.alg);
+ list_del(&hmac->derived.entry);
+ kfree(hmac);
+ }
list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
crypto_unregister_ahash(&alg->alg);
list_del(&alg->entry);
@@ -1290,8 +1414,49 @@ static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
list_add(&p->entry, &cipher_algs);
err = crypto_register_alg(alg);
if (err) {
+ pr_err("%s alg registration failed\n", alg->cra_name);
list_del(&p->entry);
kfree(p);
+ } else {
+ pr_info("%s alg registered\n", alg->cra_name);
+ }
+ return err;
+}
+
+static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
+{
+ struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ struct ahash_alg *ahash;
+ struct crypto_alg *base;
+ int err;
+
+ if (!p)
+ return -ENOMEM;
+
+ p->child_alg = n2ahash->alg.halg.base.cra_name;
+ memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
+ INIT_LIST_HEAD(&p->derived.entry);
+
+ ahash = &p->derived.alg;
+ ahash->digest = n2_hmac_async_digest;
+ ahash->setkey = n2_hmac_async_setkey;
+
+ base = &ahash->halg.base;
+ snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
+ snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
+
+ base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
+ base->cra_init = n2_hmac_cra_init;
+ base->cra_exit = n2_hmac_cra_exit;
+
+ list_add(&p->derived.entry, &hmac_algs);
+ err = crypto_register_ahash(ahash);
+ if (err) {
+ pr_err("%s alg registration failed\n", base->cra_name);
+ list_del(&p->derived.entry);
+ kfree(p);
+ } else {
+ pr_info("%s alg registered\n", base->cra_name);
}
return err;
}
@@ -1307,12 +1472,19 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
if (!p)
return -ENOMEM;
+ p->hash_zero = tmpl->hash_zero;
+ p->hash_init = tmpl->hash_init;
+ p->auth_type = tmpl->auth_type;
+ p->hmac_type = tmpl->hmac_type;
+ p->hw_op_hashsz = tmpl->hw_op_hashsz;
+ p->digest_size = tmpl->digest_size;
+
ahash = &p->alg;
ahash->init = n2_hash_async_init;
ahash->update = n2_hash_async_update;
ahash->final = n2_hash_async_final;
ahash->finup = n2_hash_async_finup;
- ahash->digest = tmpl->digest;
+ ahash->digest = n2_hash_async_digest;
halg = &ahash->halg;
halg->digestsize = tmpl->digest_size;
@@ -1331,9 +1503,14 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
list_add(&p->entry, &ahash_algs);
err = crypto_register_ahash(ahash);
if (err) {
+ pr_err("%s alg registration failed\n", base->cra_name);
list_del(&p->entry);
kfree(p);
+ } else {
+ pr_info("%s alg registered\n", base->cra_name);
}
+ if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
+ err = __n2_register_one_hmac(p);
return err;
}
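Note: in the n2_hmac_async_setkey() added above, a key longer than the child hash's block size is first digested down to digest size before being stored in ctx->hash_key (the hardware limit is N2_HASH_KEY_MAX, 32 bytes); a shorter key is copied verbatim, and any resulting hash_key_len above the limit later makes n2_hmac_async_digest() take the software fallback path. A standalone sketch of that policy, assuming SHA-1 sizes (64-byte block, 20-byte digest) and a hypothetical sha1_digest() helper standing in for crypto_shash_digest() on the child shash:

        #include <string.h>

        #define N2_HASH_KEY_MAX 32

        /* Hypothetical stand-in for crypto_shash_digest() on the child shash. */
        void sha1_digest(unsigned char *out, const unsigned char *in, int len);

        static int store_hmac_key(unsigned char hash_key[N2_HASH_KEY_MAX],
                                  const unsigned char *key, int keylen)
        {
                const int bs = 64, ds = 20;     /* SHA-1 block and digest sizes */

                if (keylen > bs) {
                        sha1_digest(hash_key, key, keylen); /* long key -> ds bytes */
                        keylen = ds;
                } else if (keylen <= N2_HASH_KEY_MAX) {
                        memcpy(hash_key, key, keylen);      /* short key copied as-is */
                }
                /* A length still above N2_HASH_KEY_MAX (e.g. a 40-byte key with
                 * SHA-1) is recorded unchanged and later forces the software
                 * fallback in n2_hmac_async_digest(). */
                return keylen;                              /* becomes hash_key_len */
        }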
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 8b03433..7d14856 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -15,7 +15,6 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
-#include <linux/version.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index bd78acf..97f4af1 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -720,7 +720,6 @@ struct talitos_ctx {
#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
struct talitos_ahash_req_ctx {
- u64 count;
u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
unsigned int hw_context_size;
u8 buf[HASH_MAX_BLOCK_SIZE];
@@ -729,6 +728,7 @@ struct talitos_ahash_req_ctx {
unsigned int first;
unsigned int last;
unsigned int to_hash_later;
+ u64 nbuf;
struct scatterlist bufsl[2];
struct scatterlist *psrc;
};
@@ -1613,6 +1613,7 @@ static void ahash_done(struct device *dev,
if (!req_ctx->last && req_ctx->to_hash_later) {
/* Position any partial block for next update/final/finup */
memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+ req_ctx->nbuf = req_ctx->to_hash_later;
}
common_nonsnoop_hash_unmap(dev, edesc, areq);
@@ -1728,7 +1729,7 @@ static int ahash_init(struct ahash_request *areq)
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
/* Initialize the context */
- req_ctx->count = 0;
+ req_ctx->nbuf = 0;
req_ctx->first = 1; /* first indicates h/w must init its context */
req_ctx->swinit = 0; /* assume h/w init of context */
req_ctx->hw_context_size =
@@ -1776,52 +1777,54 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int nbytes_to_hash;
unsigned int to_hash_later;
- unsigned int index;
+ unsigned int nsg;
int chained;
- index = req_ctx->count & (blocksize - 1);
- req_ctx->count += nbytes;
-
- if (!req_ctx->last && (index + nbytes) < blocksize) {
- /* Buffer the partial block */
+ if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
+ /* Buffer up to one whole block */
sg_copy_to_buffer(areq->src,
sg_count(areq->src, nbytes, &chained),
- req_ctx->buf + index, nbytes);
+ req_ctx->buf + req_ctx->nbuf, nbytes);
+ req_ctx->nbuf += nbytes;
return 0;
}
- if (index) {
- /* partial block from previous update; chain it in. */
- sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1);
- sg_set_buf(req_ctx->bufsl, req_ctx->buf, index);
- if (nbytes)
- scatterwalk_sg_chain(req_ctx->bufsl, 2,
- areq->src);
+ /* At least (blocksize + 1) bytes are available to hash */
+ nbytes_to_hash = nbytes + req_ctx->nbuf;
+ to_hash_later = nbytes_to_hash & (blocksize - 1);
+
+ if (req_ctx->last)
+ to_hash_later = 0;
+ else if (to_hash_later)
+ /* There is a partial block. Hash the full block(s) now */
+ nbytes_to_hash -= to_hash_later;
+ else {
+ /* Keep one block buffered */
+ nbytes_to_hash -= blocksize;
+ to_hash_later = blocksize;
+ }
+
+ /* Chain in any previously buffered data */
+ if (req_ctx->nbuf) {
+ nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
+ sg_init_table(req_ctx->bufsl, nsg);
+ sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
+ if (nsg > 1)
+ scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
req_ctx->psrc = req_ctx->bufsl;
- } else {
+ } else
req_ctx->psrc = areq->src;
+
+ if (to_hash_later) {
+ int nents = sg_count(areq->src, nbytes, &chained);
+ sg_copy_end_to_buffer(areq->src, nents,
+ req_ctx->bufnext,
+ to_hash_later,
+ nbytes - to_hash_later);
}
- nbytes_to_hash = index + nbytes;
- if (!req_ctx->last) {
- to_hash_later = (nbytes_to_hash & (blocksize - 1));
- if (to_hash_later) {
- int nents;
- /* Must copy to_hash_later bytes from the end
- * to bufnext (a partial block) for later.
- */
- nents = sg_count(areq->src, nbytes, &chained);
- sg_copy_end_to_buffer(areq->src, nents,
- req_ctx->bufnext,
- to_hash_later,
- nbytes - to_hash_later);
-
- /* Adjust count for what will be hashed now */
- nbytes_to_hash -= to_hash_later;
- }
- req_ctx->to_hash_later = to_hash_later;
- }
+ req_ctx->to_hash_later = to_hash_later;
- /* allocate extended descriptor */
+ /* Allocate extended descriptor */
edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
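Note: the rewritten ahash_process_req() above keeps at most one partial block in req_ctx->buf between calls and submits only whole blocks to the hardware until the final request. A worked example of the bookkeeping, assuming a 64-byte block size (the numbers are illustrative only):

        nbuf = 10, nbytes = 100 (update):  nbytes_to_hash = 110; to_hash_later = 110 & 63 = 46;
                                           hash 64 bytes now (10 buffered + first 54 of src) and
                                           copy the trailing 46 bytes of src into bufnext.
        nbuf = 0,  nbytes = 128 (update):  nbytes_to_hash = 128; 128 & 63 = 0, so one whole block
                                           is held back: hash 64 now, buffer 64, which guarantees
                                           final/finup always has data left to hash.
        final request (req_ctx->last = 1): to_hash_later is forced to 0 and everything buffered
                                           plus everything in src is hashed in one go.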