Diffstat (limited to 'crypto/evp/e_aes_cbc_hmac_sha256.c')
 crypto/evp/e_aes_cbc_hmac_sha256.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/crypto/evp/e_aes_cbc_hmac_sha256.c b/crypto/evp/e_aes_cbc_hmac_sha256.c
index b1c586e..3780021 100644
--- a/crypto/evp/e_aes_cbc_hmac_sha256.c
+++ b/crypto/evp/e_aes_cbc_hmac_sha256.c
@@ -498,7 +498,18 @@ static int aesni_cbc_hmac_sha256_cipher(EVP_CIPHER_CTX *ctx,
iv = AES_BLOCK_SIZE;
# if defined(STITCHED_CALL)
+ /*
+ * The assembly stitch handles AVX-capable processors, but for
+ * unknown reasons its performance on AMD Jaguar is ~40% worse.
+ * That processor supports AVX but not the AMD-specific XOP
+ * extension, so the absence of XOP can be used to identify it
+ * and avoid the stitch invocation. Hence, after establishing
+ * that the current CPU supports AVX, we also require it to be
+ * either an XOP-capable (Bulldozer-based) part or GenuineIntel.
+ */
if (OPENSSL_ia32cap_P[1] & (1 << (60 - 32)) && /* AVX? */
+ ((OPENSSL_ia32cap_P[1] & (1 << (43 - 32))) /* XOP? */
+ | (OPENSSL_ia32cap_P[0] & (1 << 30))) && /* "Intel CPU"? */
plen > (sha_off + iv) &&
(blocks = (plen - (sha_off + iv)) / SHA256_CBLOCK)) {
SHA256_Update(&key->md, in + iv, sha_off);
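For readability, the new guard can be viewed as a standalone predicate. The sketch below is not part of the patch; the helper name is hypothetical, and the bit positions are taken from the condition above, assuming OpenSSL's capability-vector layout (word 1 holds the upper half of the vector):

extern unsigned int OPENSSL_ia32cap_P[];

/* Hypothetical helper illustrating the guard added above. */
static int stitch_usable(void)
{
    /* Word 1, bit 28 (vector bit 60): AVX support. */
    if (!(OPENSSL_ia32cap_P[1] & (1 << (60 - 32))))
        return 0;
    /*
     * Word 1, bit 11 (vector bit 43): AMD XOP, present on
     * Bulldozer-class parts but not on Jaguar. Word 0, bit 30:
     * OpenSSL's synthesized "Intel CPU" flag. Either one admits
     * the stitched code path; Jaguar matches neither.
     */
    return (OPENSSL_ia32cap_P[1] & (1 << (43 - 32))) ||
           (OPENSSL_ia32cap_P[0] & (1 << 30));
}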
@@ -816,8 +827,6 @@ static int aesni_cbc_hmac_sha256_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
if (arg != EVP_AEAD_TLS1_AAD_LEN)
return -1;
- len = p[arg - 2] << 8 | p[arg - 1];
-
if (ctx->encrypt) {
key->payload_length = len;
if ((key->aux.tls_ver =
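The removed assignment decoded the record length that TLS stores in the last two bytes of the 13-byte AAD block (EVP_AEAD_TLS1_AAD_LEN). As a reference for what that line computed, here is a minimal sketch with a hypothetical helper name, assuming the standard TLS 1.x MAC input layout of seq_num(8) || type(1) || version(2) || length(2), with the length in big-endian order:

/* Hypothetical helper mirroring the removed line. */
static unsigned int tls1_aad_payload_len(const unsigned char *p, int arg)
{
    /* p[arg - 2] is the high byte of the record length, p[arg - 1] the low. */
    return (unsigned int)(p[arg - 2] << 8 | p[arg - 1]);
}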