author    jkim <jkim@FreeBSD.org>  2015-10-30 20:51:33 +0000
committer jkim <jkim@FreeBSD.org>  2015-10-30 20:51:33 +0000
commit    6b741bee156148072e0e9588e7c9f4a9d66d1ab9 (patch)
tree      e8d8b5ada49f5cdbf70d1e455c13f2625fdcdd45 /secure/lib
parent    979d5cd34dadfb0b78c606ecca3ec8d3a6ca245f (diff)
parent    64cb0c902e312216cdc4c826fc0be9ba9e1bf4da (diff)
download  FreeBSD-src-6b741bee156148072e0e9588e7c9f4a9d66d1ab9.zip
          FreeBSD-src-6b741bee156148072e0e9588e7c9f4a9d66d1ab9.tar.gz
Merge OpenSSL 1.0.2d.
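
The merge moves the manual pages to the 1.0.2d sources (OPENSSL_VER in Makefile.inc, below) and bumps libcrypto's SHLIB_MAJOR from 7 to 8, so existing binaries keep using the old shared object while new builds link against 1.0.2d. As a minimal sketch — not part of this commit — of how a consumer can tell the two versions apart, the 1.0.x API exposes both the compile-time and run-time version:

/*
 * Illustrative only: print the OpenSSL version the program was built
 * against and the one actually linked at run time, using 1.0.x-era
 * interfaces (SSLeay/SSLeay_version).
 */
#include <stdio.h>
#include <openssl/crypto.h>
#include <openssl/opensslv.h>

int
main(void)
{
	/* Compile-time version; 0x1000204fL for the 1.0.2d release. */
	printf("built against: %s (0x%08lx)\n",
	    OPENSSL_VERSION_TEXT, (unsigned long)OPENSSL_VERSION_NUMBER);
	/* Run-time version of the libcrypto actually loaded. */
	printf("running with:  %s (0x%08lx)\n",
	    SSLeay_version(SSLEAY_VERSION), (unsigned long)SSLeay());
	return (0);
}
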
Diffstat (limited to 'secure/lib')
-rw-r--r-- secure/lib/libcrypto/Makefile | 53
-rw-r--r-- secure/lib/libcrypto/Makefile.asm | 19
-rw-r--r-- secure/lib/libcrypto/Makefile.inc | 3
-rw-r--r-- secure/lib/libcrypto/Makefile.man | 255
-rw-r--r-- secure/lib/libcrypto/amd64/aes-x86_64.S | 341
-rw-r--r-- secure/lib/libcrypto/amd64/aesni-gcm-x86_64.S | 16
-rw-r--r-- secure/lib/libcrypto/amd64/aesni-mb-x86_64.S | 507
-rw-r--r-- secure/lib/libcrypto/amd64/aesni-sha1-x86_64.S | 2003
-rw-r--r-- secure/lib/libcrypto/amd64/aesni-sha256-x86_64.S | 58
-rw-r--r-- secure/lib/libcrypto/amd64/aesni-x86_64.S | 2594
-rw-r--r-- secure/lib/libcrypto/amd64/bsaes-x86_64.S | 82
-rw-r--r-- secure/lib/libcrypto/amd64/cmll-x86_64.S | 12
-rw-r--r-- secure/lib/libcrypto/amd64/ecp_nistz256-x86_64.S | 2005
-rw-r--r-- secure/lib/libcrypto/amd64/ghash-x86_64.S | 570
-rw-r--r-- secure/lib/libcrypto/amd64/md5-x86_64.S | 4
-rw-r--r-- secure/lib/libcrypto/amd64/modexp512-x86_64.S | 1774
-rw-r--r-- secure/lib/libcrypto/amd64/rc4-x86_64.S | 28
-rw-r--r-- secure/lib/libcrypto/amd64/rsaz-avx2.S | 26
-rw-r--r-- secure/lib/libcrypto/amd64/rsaz-x86_64.S | 1118
-rw-r--r-- secure/lib/libcrypto/amd64/sha1-mb-x86_64.S | 2935
-rw-r--r-- secure/lib/libcrypto/amd64/sha1-x86_64.S | 2725
-rw-r--r-- secure/lib/libcrypto/amd64/sha256-mb-x86_64.S | 3259
-rw-r--r-- secure/lib/libcrypto/amd64/sha256-x86_64.S | 2817
-rw-r--r-- secure/lib/libcrypto/amd64/sha512-x86_64.S | 1527
-rw-r--r-- secure/lib/libcrypto/amd64/vpaes-x86_64.S | 105
-rw-r--r-- secure/lib/libcrypto/amd64/wp-x86_64.S | 323
-rw-r--r-- secure/lib/libcrypto/amd64/x86_64-gf2m.S | 6
-rw-r--r-- secure/lib/libcrypto/amd64/x86_64-mont.S | 848
-rw-r--r-- secure/lib/libcrypto/amd64/x86_64-mont5.S | 1464
-rw-r--r-- secure/lib/libcrypto/amd64/x86_64cpuid.S | 33
-rw-r--r-- secure/lib/libcrypto/engines/Makefile | 4
-rw-r--r-- secure/lib/libcrypto/engines/libcapi/Makefile | 6
-rw-r--r-- secure/lib/libcrypto/engines/libgost/Makefile | 8
-rw-r--r-- secure/lib/libcrypto/i386/aes-586.s | 480
-rw-r--r-- secure/lib/libcrypto/i386/aesni-x86.s | 1159
-rw-r--r-- secure/lib/libcrypto/i386/bn-586.s | 2
-rw-r--r-- secure/lib/libcrypto/i386/cast-586.s | 934
-rw-r--r-- secure/lib/libcrypto/i386/des-586.s | 5
-rw-r--r-- secure/lib/libcrypto/i386/ghash-x86.s | 226
-rw-r--r-- secure/lib/libcrypto/i386/rc4-586.s | 4
-rw-r--r-- secure/lib/libcrypto/i386/sha1-586.s | 1335
-rw-r--r-- secure/lib/libcrypto/i386/sha256-586.s | 4551
-rw-r--r-- secure/lib/libcrypto/i386/sha512-586.s | 2246
-rw-r--r-- secure/lib/libcrypto/i386/vpaes-x86.s | 88
-rw-r--r-- secure/lib/libcrypto/i386/wp-mmx.s | 322
-rw-r--r-- secure/lib/libcrypto/i386/x86-gf2m.s | 2
-rw-r--r-- secure/lib/libcrypto/i386/x86-mont.s | 2
-rw-r--r-- secure/lib/libcrypto/i386/x86cpuid.s | 117
-rw-r--r-- secure/lib/libcrypto/man/ASN1_OBJECT_new.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ASN1_STRING_length.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/ASN1_STRING_new.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ASN1_STRING_print_ex.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/ASN1_TIME_set.3 | 264
-rw-r--r-- secure/lib/libcrypto/man/ASN1_generate_nconf.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_ctrl.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_f_base64.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_f_buffer.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_f_cipher.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_f_md.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_f_null.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_f_ssl.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/BIO_find_type.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/BIO_new.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_new_CMS.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_push.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_read.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_s_accept.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/BIO_s_bio.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_s_connect.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/BIO_s_fd.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_s_file.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_s_mem.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_s_null.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_s_socket.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_set_callback.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BIO_should_retry.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_BLINDING_new.3 | 6
-rw-r--r-- secure/lib/libcrypto/man/BN_CTX_new.3 | 16
-rw-r--r-- secure/lib/libcrypto/man/BN_CTX_start.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_add.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_add_word.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_bn2bin.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_cmp.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_copy.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_generate_prime.3 | 93
-rw-r--r-- secure/lib/libcrypto/man/BN_mod_inverse.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_mod_mul_montgomery.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_mod_mul_reciprocal.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_new.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_num_bytes.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_rand.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/BN_set_bit.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_swap.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/BN_zero.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CMS_add0_cert.3 | 6
-rw-r--r-- secure/lib/libcrypto/man/CMS_add1_recipient_cert.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CMS_add1_signer.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CMS_compress.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CMS_decrypt.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CMS_encrypt.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CMS_final.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CMS_get0_RecipientInfos.3 | 20
-rw-r--r-- secure/lib/libcrypto/man/CMS_get0_SignerInfos.3 | 12
-rw-r--r-- secure/lib/libcrypto/man/CMS_get0_type.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CMS_get1_ReceiptRequest.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CMS_sign.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CMS_sign_receipt.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CMS_uncompress.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CMS_verify.3 | 6
-rw-r--r-- secure/lib/libcrypto/man/CMS_verify_receipt.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CONF_modules_free.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CONF_modules_load_file.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/CRYPTO_set_ex_data.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DH_generate_key.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DH_generate_parameters.3 | 46
-rw-r--r-- secure/lib/libcrypto/man/DH_get_ex_new_index.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DH_new.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DH_set_method.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DH_size.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DSA_SIG_new.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DSA_do_sign.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DSA_dup_DH.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DSA_generate_key.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DSA_generate_parameters.3 | 58
-rw-r--r-- secure/lib/libcrypto/man/DSA_get_ex_new_index.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DSA_new.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DSA_set_method.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DSA_sign.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/DSA_size.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EC_GFp_simple_method.3 | 193
-rw-r--r-- secure/lib/libcrypto/man/EC_GROUP_copy.3 | 308
-rw-r--r-- secure/lib/libcrypto/man/EC_GROUP_new.3 | 230
-rw-r--r-- secure/lib/libcrypto/man/EC_KEY_new.3 | 239
-rw-r--r-- secure/lib/libcrypto/man/EC_POINT_add.3 | 203
-rw-r--r-- secure/lib/libcrypto/man/EC_POINT_new.3 | 260
-rw-r--r-- secure/lib/libcrypto/man/ERR_GET_LIB.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ERR_clear_error.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ERR_error_string.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ERR_get_error.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ERR_load_crypto_strings.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ERR_load_strings.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ERR_print_errors.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ERR_put_error.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ERR_remove_state.3 | 25
-rw-r--r-- secure/lib/libcrypto/man/ERR_set_mark.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_BytesToKey.3 | 11
-rw-r--r-- secure/lib/libcrypto/man/EVP_DigestInit.3 | 12
-rw-r--r-- secure/lib/libcrypto/man/EVP_DigestSignInit.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_DigestVerifyInit.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/EVP_EncryptInit.3 | 103
-rw-r--r-- secure/lib/libcrypto/man/EVP_OpenInit.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_CTX_ctrl.3 | 12
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_CTX_new.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_cmp.3 | 6
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_decrypt.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_derive.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_encrypt.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_get_default_digest.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_keygen.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_new.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_print_private.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_set1_RSA.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_sign.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_verify.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_PKEY_verify_recover.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_SealInit.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_SignInit.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/EVP_VerifyInit.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/OBJ_nid2obj.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/OPENSSL_Applink.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/OPENSSL_VERSION_NUMBER.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/OPENSSL_config.3 | 12
-rw-r--r-- secure/lib/libcrypto/man/OPENSSL_ia32cap.3 | 106
-rw-r--r-- secure/lib/libcrypto/man/OPENSSL_instrument_bus.3 | 178
-rw-r--r-- secure/lib/libcrypto/man/OPENSSL_load_builtin_modules.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/OpenSSL_add_all_algorithms.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/PEM_write_bio_CMS_stream.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/PEM_write_bio_PKCS7_stream.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/PKCS12_create.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/PKCS12_parse.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/PKCS7_decrypt.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/PKCS7_encrypt.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/PKCS7_sign.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/PKCS7_sign_add_signer.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/PKCS7_verify.3 | 8
-rw-r--r-- secure/lib/libcrypto/man/RAND_add.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RAND_bytes.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RAND_cleanup.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RAND_egd.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/RAND_load_file.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RAND_set_rand_method.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RSA_blinding_on.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RSA_check_key.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RSA_generate_key.3 | 43
-rw-r--r-- secure/lib/libcrypto/man/RSA_get_ex_new_index.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RSA_new.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RSA_padding_add_PKCS1_type_1.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RSA_print.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RSA_private_encrypt.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RSA_public_encrypt.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RSA_set_method.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RSA_sign.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RSA_sign_ASN1_OCTET_STRING.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/RSA_size.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/SMIME_read_CMS.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/SMIME_read_PKCS7.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/SMIME_write_CMS.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/SMIME_write_PKCS7.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/SSLeay_version.3 | 192
-rw-r--r-- secure/lib/libcrypto/man/X509_NAME_ENTRY_get_object.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/X509_NAME_add_entry_by_txt.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/X509_NAME_get_index_by_NID.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/X509_NAME_print_ex.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/X509_STORE_CTX_get_error.3 | 10
-rw-r--r-- secure/lib/libcrypto/man/X509_STORE_CTX_get_ex_new_index.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/X509_STORE_CTX_new.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/X509_STORE_CTX_set_verify_cb.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/X509_STORE_set_verify_cb_func.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/X509_VERIFY_PARAM_set_flags.3 | 83
-rw-r--r-- secure/lib/libcrypto/man/X509_check_host.3 | 269
-rw-r--r-- secure/lib/libcrypto/man/X509_new.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/X509_verify_cert.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/bio.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/blowfish.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/bn.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/bn_internal.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/buffer.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/crypto.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/d2i_ASN1_OBJECT.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/d2i_CMS_ContentInfo.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/d2i_DHparams.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/d2i_DSAPublicKey.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/d2i_ECPKParameters.3 | 216
-rw-r--r-- secure/lib/libcrypto/man/d2i_ECPrivateKey.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/d2i_PKCS8PrivateKey.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/d2i_RSAPublicKey.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/d2i_X509.3 | 29
-rw-r--r-- secure/lib/libcrypto/man/d2i_X509_ALGOR.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/d2i_X509_CRL.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/d2i_X509_NAME.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/d2i_X509_REQ.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/d2i_X509_SIG.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/des.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/dh.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/dsa.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ec.3 | 333
-rw-r--r-- secure/lib/libcrypto/man/ecdsa.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/engine.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/err.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/evp.3 | 49
-rw-r--r-- secure/lib/libcrypto/man/hmac.3 | 6
-rw-r--r-- secure/lib/libcrypto/man/i2d_CMS_bio_stream.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/i2d_PKCS7_bio_stream.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/lh_stats.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/lhash.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/md5.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/mdc2.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/pem.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/rand.3 | 4
-rw-r--r-- secure/lib/libcrypto/man/rc4.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ripemd.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/rsa.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/sha.3 | 66
-rw-r--r-- secure/lib/libcrypto/man/threads.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ui.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/ui_compat.3 | 2
-rw-r--r-- secure/lib/libcrypto/man/x509.3 | 2
-rw-r--r-- secure/lib/libcrypto/opensslconf-aarch64.h | 14
-rw-r--r-- secure/lib/libcrypto/opensslconf-arm.h | 14
-rw-r--r-- secure/lib/libcrypto/opensslconf-mips.h | 14
-rw-r--r-- secure/lib/libcrypto/opensslconf-powerpc.h | 14
-rw-r--r-- secure/lib/libcrypto/opensslconf-sparc64.h | 14
-rw-r--r-- secure/lib/libcrypto/opensslconf-x86.h | 14
-rw-r--r-- secure/lib/libssl/Makefile | 14
-rw-r--r-- secure/lib/libssl/Makefile.man | 55
-rw-r--r-- secure/lib/libssl/man/SSL_CIPHER_get_name.3 | 15
-rw-r--r-- secure/lib/libssl/man/SSL_COMP_add_compression_method.3 | 14
-rw-r--r-- secure/lib/libssl/man/SSL_CONF_CTX_new.3 | 172
-rw-r--r-- secure/lib/libssl/man/SSL_CONF_CTX_set1_prefix.3 | 180
-rw-r--r-- secure/lib/libssl/man/SSL_CONF_CTX_set_flags.3 | 191
-rw-r--r-- secure/lib/libssl/man/SSL_CONF_CTX_set_ssl_ctx.3 | 178
-rw-r--r-- secure/lib/libssl/man/SSL_CONF_cmd.3 | 536
-rw-r--r-- secure/lib/libssl/man/SSL_CONF_cmd_argv.3 | 174
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_add1_chain_cert.3 | 280
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_add_extra_chain_cert.3 | 15
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_add_session.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_ctrl.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_flush_sessions.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_free.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_get0_param.3 | 187
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_get_ex_new_index.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_get_verify_mode.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_load_verify_locations.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_new.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_sess_number.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_sess_set_cache_size.3 | 8
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_sess_set_get_cb.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_sessions.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set1_curves.3 | 236
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set1_verify_cert_store.3 | 222
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_cert_cb.3 | 201
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_cert_store.3 | 8
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_cert_verify_callback.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_cipher_list.3 | 4
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_client_CA_list.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_client_cert_cb.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_custom_cli_ext.3 | 264
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_default_passwd_cb.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_generate_session_id.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_info_callback.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_max_cert_list.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_mode.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_msg_callback.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_options.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_psk_client_callback.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_quiet_shutdown.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_read_ahead.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_session_cache_mode.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_session_id_context.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_ssl_version.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_timeout.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_tlsext_ticket_key_cb.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_tmp_dh_callback.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_tmp_rsa_callback.3 | 4
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_set_verify.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_use_certificate.3 | 26
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_use_psk_identity_hint.3 | 12
-rw-r--r-- secure/lib/libssl/man/SSL_CTX_use_serverinfo.3 | 179
-rw-r--r-- secure/lib/libssl/man/SSL_SESSION_free.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_SESSION_get_ex_new_index.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_SESSION_get_time.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_accept.3 | 7
-rw-r--r-- secure/lib/libssl/man/SSL_alert_type_string.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_clear.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_connect.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_do_handshake.3 | 7
-rw-r--r-- secure/lib/libssl/man/SSL_free.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_SSL_CTX.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_ciphers.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_client_CA_list.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_current_cipher.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_default_timeout.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_error.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_ex_data_X509_STORE_CTX_idx.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_ex_new_index.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_fd.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_peer_cert_chain.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_peer_certificate.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_psk_identity.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_rbio.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_session.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_verify_result.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_get_version.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_library_init.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_load_client_CA_file.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_new.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_pending.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_read.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_rstate_string.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_session_reused.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_set_bio.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_set_connect_state.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_set_fd.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_set_session.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_set_shutdown.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_set_verify_result.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_shutdown.3 | 6
-rw-r--r-- secure/lib/libssl/man/SSL_state_string.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_want.3 | 2
-rw-r--r-- secure/lib/libssl/man/SSL_write.3 | 2
-rw-r--r-- secure/lib/libssl/man/d2i_SSL_SESSION.3 | 2
-rw-r--r-- secure/lib/libssl/man/ssl.3 | 10
372 files changed, 35343 insertions, 11220 deletions
diff --git a/secure/lib/libcrypto/Makefile b/secure/lib/libcrypto/Makefile
index 4c396a0..43122d7 100644
--- a/secure/lib/libcrypto/Makefile
+++ b/secure/lib/libcrypto/Makefile
@@ -6,7 +6,7 @@ SUBDIR= engines
.include <bsd.own.mk>
LIB= crypto
-SHLIB_MAJOR= 7
+SHLIB_MAJOR= 8
ALLOW_SHARED_TEXTREL=
NO_LINT=
@@ -35,8 +35,8 @@ INCS+= crypto.h ebcdic.h opensslv.h ossl_typ.h symhacks.h ../e_os2.h
# aes
SRCS+= aes_cfb.c aes_ctr.c aes_ecb.c aes_ige.c aes_misc.c aes_ofb.c aes_wrap.c
.if ${MACHINE_CPUARCH} == "amd64"
-SRCS+= aes-x86_64.S aesni-sha1-x86_64.S aesni-x86_64.S bsaes-x86_64.S \
- vpaes-x86_64.S
+SRCS+= aes-x86_64.S aesni-mb-x86_64.S aesni-sha1-x86_64.S \
+ aesni-sha256-x86_64.S aesni-x86_64.S bsaes-x86_64.S vpaes-x86_64.S
.elif ${MACHINE_CPUARCH} == "i386"
SRCS+= aes-586.s aesni-x86.s vpaes-x86.s
.else
@@ -84,8 +84,8 @@ SRCS+= bn_add.c bn_blind.c bn_const.c bn_ctx.c bn_depr.c bn_div.c bn_err.c \
bn_mont.c bn_mpi.c bn_mul.c bn_nist.c bn_prime.c bn_print.c bn_rand.c \
bn_recp.c bn_shift.c bn_sqr.c bn_sqrt.c bn_word.c bn_x931p.c
.if ${MACHINE_CPUARCH} == "amd64"
-SRCS+= modexp512-x86_64.S x86_64-gcc.c x86_64-gf2m.S x86_64-mont.S \
- x86_64-mont5.S
+SRCS+= rsaz-avx2.S rsaz-x86_64.S rsaz_exp.c x86_64-gcc.c x86_64-gf2m.S \
+ x86_64-mont.S x86_64-mont5.S
.elif ${MACHINE_CPUARCH} == "i386"
SRCS+= bn-586.s co-586.s x86-gf2m.s x86-mont.s
.else
@@ -118,7 +118,8 @@ INCS+= cmac.h
# cms
SRCS+= cms_asn1.c cms_att.c cms_dd.c cms_enc.c cms_env.c cms_err.c \
- cms_ess.c cms_io.c cms_lib.c cms_pwri.c cms_sd.c cms_smime.c
+ cms_ess.c cms_io.c cms_kari.c cms_lib.c cms_pwri.c cms_sd.c \
+ cms_smime.c
INCS+= cms.h
# comp
@@ -143,8 +144,8 @@ SRCS+= des_enc.c fcrypt_b.c
INCS+= des.h des_old.h
# dh
-SRCS+= dh_ameth.c dh_asn1.c dh_check.c dh_depr.c dh_err.c dh_gen.c dh_key.c \
- dh_lib.c dh_pmeth.c dh_prn.c
+SRCS+= dh_ameth.c dh_asn1.c dh_check.c dh_depr.c dh_err.c dh_gen.c dh_kdf.c \
+ dh_key.c dh_lib.c dh_pmeth.c dh_prn.c dh_rfc5114.c
INCS+= dh.h
# dsa
@@ -161,10 +162,13 @@ SRCS+= ec2_mult.c ec2_oct.c ec2_smpl.c ec_ameth.c ec_asn1.c ec_check.c \
ec_curve.c ec_cvt.c ec_err.c ec_key.c ec_lib.c ec_mult.c ec_oct.c \
ec_pmeth.c ec_print.c eck_prn.c ecp_mont.c ecp_nist.c ecp_oct.c \
ecp_smpl.c
+.if ${MACHINE_CPUARCH} == "amd64"
+SRCS+= ecp_nistz256.c ecp_nistz256-x86_64.S
+.endif
INCS+= ec.h
# ecdh
-SRCS+= ech_err.c ech_key.c ech_lib.c ech_ossl.c
+SRCS+= ech_err.c ech_kdf.c ech_key.c ech_lib.c ech_ossl.c
INCS+= ecdh.h
# ecdsa
@@ -174,9 +178,8 @@ INCS+= ecdsa.h
# engine
SRCS+= eng_all.c eng_cnf.c eng_cryptodev.c eng_ctrl.c eng_dyn.c eng_err.c \
eng_fat.c eng_init.c eng_lib.c eng_list.c eng_openssl.c eng_pkey.c \
- eng_rdrand.c eng_rsax.c eng_table.c tb_asnmth.c tb_cipher.c tb_dh.c \
- tb_digest.c tb_dsa.c tb_ecdh.c tb_ecdsa.c tb_pkmeth.c tb_rand.c \
- tb_rsa.c tb_store.c
+ eng_rdrand.c eng_table.c tb_asnmth.c tb_cipher.c tb_dh.c tb_digest.c \
+ tb_dsa.c tb_ecdh.c tb_ecdsa.c tb_pkmeth.c tb_rand.c tb_rsa.c tb_store.c
INCS+= engine.h
# err
@@ -185,14 +188,14 @@ INCS+= err.h
# evp
SRCS+= bio_b64.c bio_enc.c bio_md.c bio_ok.c c_all.c c_allc.c c_alld.c \
- digest.c e_aes.c e_aes_cbc_hmac_sha1.c e_bf.c e_camellia.c e_cast.c \
- e_des.c e_des3.c e_idea.c e_null.c e_old.c e_rc2.c e_rc4.c \
- e_rc4_hmac_md5.c e_rc5.c e_seed.c e_xcbc_d.c encode.c evp_acnf.c \
- evp_cnf.c evp_enc.c evp_err.c evp_key.c evp_lib.c evp_pbe.c \
- evp_pkey.c m_dss.c m_dss1.c m_ecdsa.c m_md4.c m_md5.c m_mdc2.c \
- m_null.c m_ripemd.c m_sha.c m_sha1.c m_sigver.c m_wp.c names.c \
- p5_crpt.c p5_crpt2.c p_dec.c p_enc.c p_lib.c p_open.c p_seal.c \
- p_sign.c p_verify.c pmeth_fn.c pmeth_gn.c pmeth_lib.c
+ digest.c e_aes.c e_aes_cbc_hmac_sha1.c e_aes_cbc_hmac_sha256.c e_bf.c \
+ e_camellia.c e_cast.c e_des.c e_des3.c e_idea.c e_null.c e_old.c \
+ e_rc2.c e_rc4.c e_rc4_hmac_md5.c e_rc5.c e_seed.c e_xcbc_d.c encode.c \
+ evp_acnf.c evp_cnf.c evp_enc.c evp_err.c evp_key.c evp_lib.c \
+ evp_pbe.c evp_pkey.c m_dss.c m_dss1.c m_ecdsa.c m_md4.c m_md5.c \
+ m_mdc2.c m_null.c m_ripemd.c m_sha.c m_sha1.c m_sigver.c m_wp.c \
+ names.c p5_crpt.c p5_crpt2.c p_dec.c p_enc.c p_lib.c p_open.c \
+ p_seal.c p_sign.c p_verify.c pmeth_fn.c pmeth_gn.c pmeth_lib.c
INCS+= evp.h
# hmac
@@ -228,9 +231,10 @@ SRCS+= mdc2_one.c mdc2dgst.c
INCS+= mdc2.h
# modes
-SRCS+= cbc128.c ccm128.c cfb128.c ctr128.c cts128.c gcm128.c ofb128.c xts128.c
+SRCS+= cbc128.c ccm128.c cfb128.c ctr128.c cts128.c gcm128.c ofb128.c \
+ wrap128.c xts128.c
.if ${MACHINE_CPUARCH} == "amd64"
-SRCS+= ghash-x86_64.S
+SRCS+= aesni-gcm-x86_64.S ghash-x86_64.S
.elif ${MACHINE_CPUARCH} == "i386"
SRCS+= ghash-x86.s
.endif
@@ -314,7 +318,8 @@ INCS+= seed.h
# sha
SRCS+= sha1_one.c sha1dgst.c sha256.c sha512.c sha_dgst.c sha_one.c
.if ${MACHINE_CPUARCH} == "amd64"
-SRCS+= sha1-x86_64.S sha256-x86_64.S sha512-x86_64.S
+SRCS+= sha1-mb-x86_64.S sha1-x86_64.S sha256-mb-x86_64.S sha256-x86_64.S \
+ sha512-x86_64.S
.elif ${MACHINE_CPUARCH} == "i386"
SRCS+= sha1-586.s sha256-586.s sha512-586.s
.endif
@@ -366,7 +371,7 @@ SRCS+= pcy_cache.c pcy_data.c pcy_lib.c pcy_map.c pcy_node.c pcy_tree.c \
v3_bitst.c v3_conf.c v3_cpols.c v3_crld.c v3_enum.c v3_extku.c \
v3_genn.c v3_ia5.c v3_info.c v3_int.c v3_lib.c v3_ncons.c v3_ocsp.c \
v3_pci.c v3_pcia.c v3_pcons.c v3_pku.c v3_pmaps.c v3_prn.c v3_purp.c \
- v3_skey.c v3_sxnet.c v3_utl.c v3err.c
+ v3_scts.c v3_skey.c v3_sxnet.c v3_utl.c v3err.c
INCS+= x509v3.h
SRCS+= buildinf.h
diff --git a/secure/lib/libcrypto/Makefile.asm b/secure/lib/libcrypto/Makefile.asm
index eb62c7c..6f06348 100644
--- a/secure/lib/libcrypto/Makefile.asm
+++ b/secure/lib/libcrypto/Makefile.asm
@@ -12,34 +12,39 @@
${LCRYPTO_SRC}/crypto/aes/asm \
${LCRYPTO_SRC}/crypto/bn/asm \
${LCRYPTO_SRC}/crypto/camellia/asm \
+ ${LCRYPTO_SRC}/crypto/ec/asm \
${LCRYPTO_SRC}/crypto/md5/asm \
${LCRYPTO_SRC}/crypto/modes/asm \
${LCRYPTO_SRC}/crypto/rc4/asm \
- ${LCRYPTO_SRC}/crypto/rc5/asm \
${LCRYPTO_SRC}/crypto/sha/asm \
${LCRYPTO_SRC}/crypto/whrlpool/asm
# aes
-SRCS= aes-x86_64.pl aesni-sha1-x86_64.pl aesni-x86_64.pl bsaes-x86_64.pl \
+SRCS= aes-x86_64.pl aesni-mb-x86_64.pl aesni-sha1-x86_64.pl \
+ aesni-sha256-x86_64.pl aesni-x86_64.pl bsaes-x86_64.pl \
vpaes-x86_64.pl
# bn
-SRCS+= modexp512-x86_64.pl x86_64-gf2m.pl x86_64-mont.pl x86_64-mont5.pl
+SRCS+= rsaz-avx2.pl rsaz-x86_64.pl x86_64-gf2m.pl x86_64-mont.pl \
+ x86_64-mont5.pl
# camellia
SRCS+= cmll-x86_64.pl
+# ec
+SRCS+= ecp_nistz256-x86_64.pl
+
# md5
SRCS+= md5-x86_64.pl
# modes
-SRCS+= ghash-x86_64.pl
+SRCS+= aesni-gcm-x86_64.pl ghash-x86_64.pl
# rc4
SRCS+= rc4-md5-x86_64.pl rc4-x86_64.pl
# sha
-SRCS+= sha1-x86_64.pl sha512-x86_64.pl
+SRCS+= sha1-mb-x86_64.pl sha1-x86_64.pl sha256-mb-x86_64.pl sha512-x86_64.pl
# whrlpool
SRCS+= wp-x86_64.pl
@@ -72,7 +77,6 @@ x86_64cpuid.cmt: x86_64cpuid.pl
${LCRYPTO_SRC}/crypto/bf/asm \
${LCRYPTO_SRC}/crypto/bn/asm \
${LCRYPTO_SRC}/crypto/camellia/asm \
- ${LCRYPTO_SRC}/crypto/cast/asm \
${LCRYPTO_SRC}/crypto/des/asm \
${LCRYPTO_SRC}/crypto/md5/asm \
${LCRYPTO_SRC}/crypto/modes/asm \
@@ -96,9 +100,6 @@ SRCS+= bn-586.pl co-586.pl x86-gf2m.pl x86-mont.pl
# camellia
SRCS+= cmll-x86.pl
-# cast
-SRCS+= cast-586.pl
-
# des
SRCS+= crypt586.pl des-586.pl
diff --git a/secure/lib/libcrypto/Makefile.inc b/secure/lib/libcrypto/Makefile.inc
index 29d563f..a9387c0 100644
--- a/secure/lib/libcrypto/Makefile.inc
+++ b/secure/lib/libcrypto/Makefile.inc
@@ -3,7 +3,7 @@
.include <bsd.own.mk>
# OpenSSL version used for manual page generation
-OPENSSL_VER= 1.0.1p
+OPENSSL_VER= 1.0.2d
OPENSSL_DATE= 2015-07-09
LCRYPTO_SRC= ${.CURDIR}/../../../crypto/openssl
@@ -24,6 +24,7 @@ CFLAGS+=-DB_ENDIAN
.if ${MACHINE_CPUARCH} == "amd64"
CFLAGS+=-DOPENSSL_IA32_SSE2
CFLAGS+=-DAES_ASM -DBSAES_ASM -DVPAES_ASM
+CFLAGS+=-DECP_NISTZ256_ASM
CFLAGS+=-DOPENSSL_BN_ASM_MONT -DOPENSSL_BN_ASM_MONT5 -DOPENSSL_BN_ASM_GF2m
CFLAGS+=-DMD5_ASM
CFLAGS+=-DGHASH_ASM
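
The new amd64 sources wired in above (aesni-gcm-x86_64.S alongside the GHASH_ASM define) accelerate the EVP AES-GCM ciphers whose manual links are added to Makefile.man below. A hedged sketch of one-shot AES-128-GCM encryption through the public EVP interface, using only 1.0.x-era calls; the helper name and the fixed key/IV sizes are illustrative, not part of this commit:

#include <openssl/evp.h>

/*
 * Illustrative only: encrypt `in` with AES-128-GCM and emit the 16-byte
 * authentication tag.  Error handling is trimmed to keep the sketch
 * short; real code must check every return value.
 */
static int
gcm_encrypt(const unsigned char key[16], const unsigned char iv[12],
    const unsigned char *in, int inlen, unsigned char *out,
    unsigned char tag[16])
{
	EVP_CIPHER_CTX ctx;
	int len, outlen;

	EVP_CIPHER_CTX_init(&ctx);
	/* GCM's default IV length is 12 bytes, so the IV passes directly. */
	EVP_EncryptInit_ex(&ctx, EVP_aes_128_gcm(), NULL, key, iv);
	EVP_EncryptUpdate(&ctx, out, &len, in, inlen);
	outlen = len;
	EVP_EncryptFinal_ex(&ctx, out + len, &len);
	outlen += len;
	/* Retrieve the tag computed over the ciphertext. */
	EVP_CIPHER_CTX_ctrl(&ctx, EVP_CTRL_GCM_GET_TAG, 16, tag);
	EVP_CIPHER_CTX_cleanup(&ctx);
	return (outlen);
}
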
diff --git a/secure/lib/libcrypto/Makefile.man b/secure/lib/libcrypto/Makefile.man
index 11c2cc9..239d1e1 100644
--- a/secure/lib/libcrypto/Makefile.man
+++ b/secure/lib/libcrypto/Makefile.man
@@ -4,6 +4,7 @@ MAN+= ASN1_OBJECT_new.3
MAN+= ASN1_STRING_length.3
MAN+= ASN1_STRING_new.3
MAN+= ASN1_STRING_print_ex.3
+MAN+= ASN1_TIME_set.3
MAN+= ASN1_generate_nconf.3
MAN+= BIO_ctrl.3
MAN+= BIO_f_base64.3
@@ -80,6 +81,12 @@ MAN+= DSA_new.3
MAN+= DSA_set_method.3
MAN+= DSA_sign.3
MAN+= DSA_size.3
+MAN+= EC_GFp_simple_method.3
+MAN+= EC_GROUP_copy.3
+MAN+= EC_GROUP_new.3
+MAN+= EC_KEY_new.3
+MAN+= EC_POINT_add.3
+MAN+= EC_POINT_new.3
MAN+= ERR_GET_LIB.3
MAN+= ERR_clear_error.3
MAN+= ERR_error_string.3
@@ -118,6 +125,7 @@ MAN+= OPENSSL_Applink.3
MAN+= OPENSSL_VERSION_NUMBER.3
MAN+= OPENSSL_config.3
MAN+= OPENSSL_ia32cap.3
+MAN+= OPENSSL_instrument_bus.3
MAN+= OPENSSL_load_builtin_modules.3
MAN+= OpenSSL_add_all_algorithms.3
MAN+= PEM_write_bio_CMS_stream.3
@@ -152,6 +160,7 @@ MAN+= SMIME_read_CMS.3
MAN+= SMIME_read_PKCS7.3
MAN+= SMIME_write_CMS.3
MAN+= SMIME_write_PKCS7.3
+MAN+= SSLeay_version.3
MAN+= X509_NAME_ENTRY_get_object.3
MAN+= X509_NAME_add_entry_by_txt.3
MAN+= X509_NAME_get_index_by_NID.3
@@ -162,6 +171,7 @@ MAN+= X509_STORE_CTX_new.3
MAN+= X509_STORE_CTX_set_verify_cb.3
MAN+= X509_STORE_set_verify_cb_func.3
MAN+= X509_VERIFY_PARAM_set_flags.3
+MAN+= X509_check_host.3
MAN+= X509_new.3
MAN+= X509_verify_cert.3
MAN+= bio.3
@@ -174,6 +184,7 @@ MAN+= d2i_ASN1_OBJECT.3
MAN+= d2i_CMS_ContentInfo.3
MAN+= d2i_DHparams.3
MAN+= d2i_DSAPublicKey.3
+MAN+= d2i_ECPKParameters.3
MAN+= d2i_ECPrivateKey.3
MAN+= d2i_PKCS8PrivateKey.3
MAN+= d2i_RSAPublicKey.3
@@ -186,6 +197,7 @@ MAN+= d2i_X509_SIG.3
MAN+= des.3
MAN+= dh.3
MAN+= dsa.3
+MAN+= ec.3
MAN+= ecdsa.3
MAN+= engine.3
MAN+= err.3
@@ -214,9 +226,16 @@ MLINKS+= ASN1_STRING_length.3 ASN1_STRING_set.3
MLINKS+= ASN1_STRING_length.3 ASN1_STRING_length_set.3
MLINKS+= ASN1_STRING_length.3 ASN1_STRING_type.3
MLINKS+= ASN1_STRING_length.3 ASN1_STRING_data.3
+MLINKS+= ASN1_STRING_length.3 ASN1_STRING_to_UTF8.3
MLINKS+= ASN1_STRING_new.3 ASN1_STRING_type_new.3
MLINKS+= ASN1_STRING_new.3 ASN1_STRING_free.3
MLINKS+= ASN1_STRING_print_ex.3 ASN1_STRING_print_ex_fp.3
+MLINKS+= ASN1_STRING_print_ex.3 ASN1_STRING_print.3
+MLINKS+= ASN1_TIME_set.3 ASN1_TIME_adj.3
+MLINKS+= ASN1_TIME_set.3 ASN1_TIME_check.3
+MLINKS+= ASN1_TIME_set.3 ASN1_TIME_set_string.3
+MLINKS+= ASN1_TIME_set.3 ASN1_TIME_print.3
+MLINKS+= ASN1_TIME_set.3 ASN1_TIME_diff.3
MLINKS+= ASN1_generate_nconf.3 ASN1_generate_v3.3
MLINKS+= BIO_ctrl.3 BIO_callback_ctrl.3
MLINKS+= BIO_ctrl.3 BIO_ptr_ctrl.3
@@ -252,6 +271,7 @@ MLINKS+= BIO_f_ssl.3 BIO_new_buffer_ssl_connect.3
MLINKS+= BIO_f_ssl.3 BIO_ssl_copy_session_id.3
MLINKS+= BIO_f_ssl.3 BIO_ssl_shutdown.3
MLINKS+= BIO_find_type.3 BIO_next.3
+MLINKS+= BIO_find_type.3 BIO_method_type.3
MLINKS+= BIO_new.3 BIO_set.3
MLINKS+= BIO_new.3 BIO_free.3
MLINKS+= BIO_new.3 BIO_vfree.3
@@ -262,6 +282,7 @@ MLINKS+= BIO_read.3 BIO_gets.3
MLINKS+= BIO_read.3 BIO_puts.3
MLINKS+= BIO_s_accept.3 BIO_set_accept_port.3
MLINKS+= BIO_s_accept.3 BIO_get_accept_port.3
+MLINKS+= BIO_s_accept.3 BIO_new_accept.3
MLINKS+= BIO_s_accept.3 BIO_set_nbio_accept.3
MLINKS+= BIO_s_accept.3 BIO_set_accept_bios.3
MLINKS+= BIO_s_accept.3 BIO_set_bind_mode.3
@@ -278,6 +299,7 @@ MLINKS+= BIO_s_bio.3 BIO_ctrl_get_write_guarantee.3
MLINKS+= BIO_s_bio.3 BIO_get_read_request.3
MLINKS+= BIO_s_bio.3 BIO_ctrl_get_read_request.3
MLINKS+= BIO_s_bio.3 BIO_ctrl_reset_read_request.3
+MLINKS+= BIO_s_connect.3 BIO_new_connect.3
MLINKS+= BIO_s_connect.3 BIO_set_conn_hostname.3
MLINKS+= BIO_s_connect.3 BIO_set_conn_port.3
MLINKS+= BIO_s_connect.3 BIO_set_conn_ip.3
@@ -323,6 +345,7 @@ MLINKS+= BN_BLINDING_new.3 BN_BLINDING_convert_ex.3
MLINKS+= BN_BLINDING_new.3 BN_BLINDING_invert_ex.3
MLINKS+= BN_BLINDING_new.3 BN_BLINDING_get_thread_id.3
MLINKS+= BN_BLINDING_new.3 BN_BLINDING_set_thread_id.3
+MLINKS+= BN_BLINDING_new.3 BN_BLINDING_thread_id.3
MLINKS+= BN_BLINDING_new.3 BN_BLINDING_get_flags.3
MLINKS+= BN_BLINDING_new.3 BN_BLINDING_set_flags.3
MLINKS+= BN_BLINDING_new.3 BN_BLINDING_create_param.3
@@ -362,6 +385,12 @@ MLINKS+= BN_cmp.3 BN_is_one.3
MLINKS+= BN_cmp.3 BN_is_word.3
MLINKS+= BN_cmp.3 BN_is_odd.3
MLINKS+= BN_copy.3 BN_dup.3
+MLINKS+= BN_generate_prime.3 BN_generate_prime_ex.3
+MLINKS+= BN_generate_prime.3 BN_is_prime_ex.3
+MLINKS+= BN_generate_prime.3 BN_is_prime_fasttest_ex.3
+MLINKS+= BN_generate_prime.3 BN_GENCB_call.3
+MLINKS+= BN_generate_prime.3 BN_GENCB_set_old.3
+MLINKS+= BN_generate_prime.3 BN_GENCB_set.3
MLINKS+= BN_generate_prime.3 BN_is_prime.3
MLINKS+= BN_generate_prime.3 BN_is_prime_fasttest.3
MLINKS+= BN_mod_mul_montgomery.3 BN_MONT_CTX_new.3
@@ -383,6 +412,8 @@ MLINKS+= BN_new.3 BN_clear_free.3
MLINKS+= BN_num_bytes.3 BN_num_bits.3
MLINKS+= BN_num_bytes.3 BN_num_bits_word.3
MLINKS+= BN_rand.3 BN_pseudo_rand.3
+MLINKS+= BN_rand.3 BN_rand_range.3
+MLINKS+= BN_rand.3 BN_pseudo_rand_range.3
MLINKS+= BN_set_bit.3 BN_clear_bit.3
MLINKS+= BN_set_bit.3 BN_is_bit_set.3
MLINKS+= BN_set_bit.3 BN_mask_bits.3
@@ -397,6 +428,7 @@ MLINKS+= BN_zero.3 BN_get_word.3
MLINKS+= CMS_add0_cert.3 CMS_add1_cert.3
MLINKS+= CMS_add0_cert.3 CMS_get1_certs.3
MLINKS+= CMS_add0_cert.3 CMS_add0_crl.3
+MLINKS+= CMS_add0_cert.3 CMS_add1_crl.3
MLINKS+= CMS_add0_cert.3 CMS_get1_crls.3
MLINKS+= CMS_add1_recipient_cert.3 CMS_add0_recipient_key.3
MLINKS+= CMS_add1_signer.3 CMS_SignerInfo_sign.3
@@ -408,20 +440,24 @@ MLINKS+= CMS_get0_RecipientInfos.3 CMS_RecipientInfo_kekri_get0_id.3
MLINKS+= CMS_get0_RecipientInfos.3 CMS_RecipientInfo_kekri_id_cmp.3
MLINKS+= CMS_get0_RecipientInfos.3 CMS_RecipientInfo_set0_key.3
MLINKS+= CMS_get0_RecipientInfos.3 CMS_RecipientInfo_decrypt.3
+MLINKS+= CMS_get0_RecipientInfos.3 CMS_RecipientInfo_encrypt.3
MLINKS+= CMS_get0_SignerInfos.3 CMS_SignerInfo_get0_signer_id.3
+MLINKS+= CMS_get0_SignerInfos.3 CMS_SignerInfo_get0_signature.3
MLINKS+= CMS_get0_SignerInfos.3 CMS_SignerInfo_cert_cmp.3
-MLINKS+= CMS_get0_SignerInfos.3 CMS_set1_signer_certs.3
+MLINKS+= CMS_get0_SignerInfos.3 CMS_set1_signer_cert.3
MLINKS+= CMS_get0_type.3 CMS_set1_eContentType.3
MLINKS+= CMS_get0_type.3 CMS_get0_eContentType.3
MLINKS+= CMS_get0_type.3 CMS_get0_content.3
MLINKS+= CMS_get1_ReceiptRequest.3 CMS_ReceiptRequest_create0.3
MLINKS+= CMS_get1_ReceiptRequest.3 CMS_add1_ReceiptRequest.3
MLINKS+= CMS_get1_ReceiptRequest.3 CMS_ReceiptRequest_get0_values.3
+MLINKS+= CMS_verify.3 CMS_get0_signers.3
MLINKS+= CONF_modules_free.3 CONF_modules_finish.3
MLINKS+= CONF_modules_free.3 CONF_modules_unload.3
MLINKS+= CONF_modules_load_file.3 CONF_modules_load.3
MLINKS+= CRYPTO_set_ex_data.3 CRYPTO_get_ex_data.3
MLINKS+= DH_generate_key.3 DH_compute_key.3
+MLINKS+= DH_generate_parameters.3 DH_generate_parameters_ex.3
MLINKS+= DH_generate_parameters.3 DH_check.3
MLINKS+= DH_get_ex_new_index.3 DH_set_ex_data.3
MLINKS+= DH_get_ex_new_index.3 DH_get_ex_data.3
@@ -432,6 +468,7 @@ MLINKS+= DH_set_method.3 DH_new_method.3
MLINKS+= DH_set_method.3 DH_OpenSSL.3
MLINKS+= DSA_SIG_new.3 DSA_SIG_free.3
MLINKS+= DSA_do_sign.3 DSA_do_verify.3
+MLINKS+= DSA_generate_parameters.3 DSA_generate_parameters_ex.3
MLINKS+= DSA_get_ex_new_index.3 DSA_set_ex_data.3
MLINKS+= DSA_get_ex_new_index.3 DSA_get_ex_data.3
MLINKS+= DSA_new.3 DSA_free.3
@@ -441,6 +478,101 @@ MLINKS+= DSA_set_method.3 DSA_new_method.3
MLINKS+= DSA_set_method.3 DSA_OpenSSL.3
MLINKS+= DSA_sign.3 DSA_sign_setup.3
MLINKS+= DSA_sign.3 DSA_verify.3
+MLINKS+= EC_GFp_simple_method.3 EC_GFp_mont_method.3
+MLINKS+= EC_GFp_simple_method.3 EC_GFp_nist_method.3
+MLINKS+= EC_GFp_simple_method.3 EC_GFp_nistp224_method.3
+MLINKS+= EC_GFp_simple_method.3 EC_GFp_nistp256_method.3
+MLINKS+= EC_GFp_simple_method.3 EC_GFp_nistp521_method.3
+MLINKS+= EC_GFp_simple_method.3 EC_GF2m_simple_method.3
+MLINKS+= EC_GFp_simple_method.3 EC_METHOD_get_field_type.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_dup.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_method_of.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_set_generator.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_get0_generator.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_get_order.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_get_cofactor.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_set_curve_name.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_get_curve_name.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_set_asn1_flag.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_get_asn1_flag.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_set_point_conversion_form.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_get_point_conversion_form.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_get0_seed.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_get_seed_len.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_set_seed.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_get_degree.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_check.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_check_discriminant.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_cmp.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_get_basis_type.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_get_trinomial_basis.3
+MLINKS+= EC_GROUP_copy.3 EC_GROUP_get_pentanomial_basis.3
+MLINKS+= EC_GROUP_new.3 EC_GROUP_free.3
+MLINKS+= EC_GROUP_new.3 EC_GROUP_clear_free.3
+MLINKS+= EC_GROUP_new.3 EC_GROUP_new_curve_GFp.3
+MLINKS+= EC_GROUP_new.3 EC_GROUP_new_curve_GF2m.3
+MLINKS+= EC_GROUP_new.3 EC_GROUP_new_by_curve_name.3
+MLINKS+= EC_GROUP_new.3 EC_GROUP_set_curve_GFp.3
+MLINKS+= EC_GROUP_new.3 EC_GROUP_get_curve_GFp.3
+MLINKS+= EC_GROUP_new.3 EC_GROUP_set_curve_GF2m.3
+MLINKS+= EC_GROUP_new.3 EC_GROUP_get_curve_GF2m.3
+MLINKS+= EC_GROUP_new.3 EC_get_builtin_curves.3
+MLINKS+= EC_KEY_new.3 EC_KEY_get_flags.3
+MLINKS+= EC_KEY_new.3 EC_KEY_set_flags.3
+MLINKS+= EC_KEY_new.3 EC_KEY_clear_flags.3
+MLINKS+= EC_KEY_new.3 EC_KEY_new_by_curve_name.3
+MLINKS+= EC_KEY_new.3 EC_KEY_free.3
+MLINKS+= EC_KEY_new.3 EC_KEY_copy.3
+MLINKS+= EC_KEY_new.3 EC_KEY_dup.3
+MLINKS+= EC_KEY_new.3 EC_KEY_up_ref.3
+MLINKS+= EC_KEY_new.3 EC_KEY_get0_group.3
+MLINKS+= EC_KEY_new.3 EC_KEY_set_group.3
+MLINKS+= EC_KEY_new.3 EC_KEY_get0_private_key.3
+MLINKS+= EC_KEY_new.3 EC_KEY_set_private_key.3
+MLINKS+= EC_KEY_new.3 EC_KEY_get0_public_key.3
+MLINKS+= EC_KEY_new.3 EC_KEY_set_public_key.3
+MLINKS+= EC_KEY_new.3 EC_KEY_get_enc_flags.3
+MLINKS+= EC_KEY_new.3 EC_KEY_set_enc_flags.3
+MLINKS+= EC_KEY_new.3 EC_KEY_get_conv_form.3
+MLINKS+= EC_KEY_new.3 EC_KEY_set_conv_form.3
+MLINKS+= EC_KEY_new.3 EC_KEY_get_key_method_data.3
+MLINKS+= EC_KEY_new.3 EC_KEY_insert_key_method_data.3
+MLINKS+= EC_KEY_new.3 EC_KEY_set_asn1_flag.3
+MLINKS+= EC_KEY_new.3 EC_KEY_precompute_mult.3
+MLINKS+= EC_KEY_new.3 EC_KEY_generate_key.3
+MLINKS+= EC_KEY_new.3 EC_KEY_check_key.3
+MLINKS+= EC_KEY_new.3 EC_KEY_set_public_key_affine_coordinates.3
+MLINKS+= EC_POINT_add.3 EC_POINT_dbl.3
+MLINKS+= EC_POINT_add.3 EC_POINT_invert.3
+MLINKS+= EC_POINT_add.3 EC_POINT_is_at_infinity.3
+MLINKS+= EC_POINT_add.3 EC_POINT_is_on_curve.3
+MLINKS+= EC_POINT_add.3 EC_POINT_cmp.3
+MLINKS+= EC_POINT_add.3 EC_POINT_make_affine.3
+MLINKS+= EC_POINT_add.3 EC_POINTs_make_affine.3
+MLINKS+= EC_POINT_add.3 EC_POINTs_mul.3
+MLINKS+= EC_POINT_add.3 EC_POINT_mul.3
+MLINKS+= EC_POINT_add.3 EC_GROUP_precompute_mult.3
+MLINKS+= EC_POINT_add.3 EC_GROUP_have_precompute_mult.3
+MLINKS+= EC_POINT_new.3 EC_POINT_free.3
+MLINKS+= EC_POINT_new.3 EC_POINT_clear_free.3
+MLINKS+= EC_POINT_new.3 EC_POINT_copy.3
+MLINKS+= EC_POINT_new.3 EC_POINT_dup.3
+MLINKS+= EC_POINT_new.3 EC_POINT_method_of.3
+MLINKS+= EC_POINT_new.3 EC_POINT_set_to_infinity.3
+MLINKS+= EC_POINT_new.3 EC_POINT_set_Jprojective_coordinates.3
+MLINKS+= EC_POINT_new.3 EC_POINT_get_Jprojective_coordinates_GFp.3
+MLINKS+= EC_POINT_new.3 EC_POINT_set_affine_coordinates_GFp.3
+MLINKS+= EC_POINT_new.3 EC_POINT_get_affine_coordinates_GFp.3
+MLINKS+= EC_POINT_new.3 EC_POINT_set_compressed_coordinates_GFp.3
+MLINKS+= EC_POINT_new.3 EC_POINT_set_affine_coordinates_GF2m.3
+MLINKS+= EC_POINT_new.3 EC_POINT_get_affine_coordinates_GF2m.3
+MLINKS+= EC_POINT_new.3 EC_POINT_set_compressed_coordinates_GF2m.3
+MLINKS+= EC_POINT_new.3 EC_POINT_point2oct.3
+MLINKS+= EC_POINT_new.3 EC_POINT_oct2point.3
+MLINKS+= EC_POINT_new.3 EC_POINT_point2bn.3
+MLINKS+= EC_POINT_new.3 EC_POINT_bn2point.3
+MLINKS+= EC_POINT_new.3 EC_POINT_point2hex.3
+MLINKS+= EC_POINT_new.3 EC_POINT_hex2point.3
MLINKS+= ERR_GET_LIB.3 ERR_GET_FUNC.3
MLINKS+= ERR_GET_LIB.3 ERR_GET_REASON.3
MLINKS+= ERR_error_string.3 ERR_error_string_n.3
@@ -461,6 +593,7 @@ MLINKS+= ERR_load_strings.3 ERR_PACK.3
MLINKS+= ERR_load_strings.3 ERR_get_next_error_library.3
MLINKS+= ERR_print_errors.3 ERR_print_errors_fp.3
MLINKS+= ERR_put_error.3 ERR_add_error_data.3
+MLINKS+= ERR_remove_state.3 ERR_remove_thread_state.3
MLINKS+= ERR_set_mark.3 ERR_pop_to_mark.3
MLINKS+= EVP_DigestInit.3 EVP_MD_CTX_init.3
MLINKS+= EVP_DigestInit.3 EVP_MD_CTX_create.3
@@ -471,6 +604,7 @@ MLINKS+= EVP_DigestInit.3 EVP_MD_CTX_cleanup.3
MLINKS+= EVP_DigestInit.3 EVP_MD_CTX_destroy.3
MLINKS+= EVP_DigestInit.3 EVP_MAX_MD_SIZE.3
MLINKS+= EVP_DigestInit.3 EVP_MD_CTX_copy_ex.3
+MLINKS+= EVP_DigestInit.3 EVP_DigestFinal.3
MLINKS+= EVP_DigestInit.3 EVP_MD_CTX_copy.3
MLINKS+= EVP_DigestInit.3 EVP_MD_type.3
MLINKS+= EVP_DigestInit.3 EVP_MD_pkey_type.3
@@ -541,10 +675,64 @@ MLINKS+= EVP_EncryptInit.3 EVP_CIPHER_CTX_mode.3
MLINKS+= EVP_EncryptInit.3 EVP_CIPHER_param_to_asn1.3
MLINKS+= EVP_EncryptInit.3 EVP_CIPHER_asn1_to_param.3
MLINKS+= EVP_EncryptInit.3 EVP_CIPHER_CTX_set_padding.3
+MLINKS+= EVP_EncryptInit.3 EVP_enc_null.3
+MLINKS+= EVP_EncryptInit.3 EVP_des_cbc.3
+MLINKS+= EVP_EncryptInit.3 EVP_des_ecb.3
+MLINKS+= EVP_EncryptInit.3 EVP_des_cfb.3
+MLINKS+= EVP_EncryptInit.3 EVP_des_ofb.3
+MLINKS+= EVP_EncryptInit.3 EVP_des_ede_cbc.3
+MLINKS+= EVP_EncryptInit.3 EVP_des_ede.3
+MLINKS+= EVP_EncryptInit.3 EVP_des_ede_ofb.3
+MLINKS+= EVP_EncryptInit.3 EVP_des_ede_cfb.3
+MLINKS+= EVP_EncryptInit.3 EVP_des_ede3_cbc.3
+MLINKS+= EVP_EncryptInit.3 EVP_des_ede3.3
+MLINKS+= EVP_EncryptInit.3 EVP_des_ede3_ofb.3
+MLINKS+= EVP_EncryptInit.3 EVP_des_ede3_cfb.3
+MLINKS+= EVP_EncryptInit.3 EVP_desx_cbc.3
+MLINKS+= EVP_EncryptInit.3 EVP_rc4.3
+MLINKS+= EVP_EncryptInit.3 EVP_rc4_40.3
+MLINKS+= EVP_EncryptInit.3 EVP_idea_cbc.3
+MLINKS+= EVP_EncryptInit.3 EVP_idea_ecb.3
+MLINKS+= EVP_EncryptInit.3 EVP_idea_cfb.3
+MLINKS+= EVP_EncryptInit.3 EVP_idea_ofb.3
+MLINKS+= EVP_EncryptInit.3 EVP_idea_cbc.3
+MLINKS+= EVP_EncryptInit.3 EVP_rc2_cbc.3
+MLINKS+= EVP_EncryptInit.3 EVP_rc2_ecb.3
+MLINKS+= EVP_EncryptInit.3 EVP_rc2_cfb.3
+MLINKS+= EVP_EncryptInit.3 EVP_rc2_ofb.3
+MLINKS+= EVP_EncryptInit.3 EVP_rc2_40_cbc.3
+MLINKS+= EVP_EncryptInit.3 EVP_rc2_64_cbc.3
+MLINKS+= EVP_EncryptInit.3 EVP_bf_cbc.3
+MLINKS+= EVP_EncryptInit.3 EVP_bf_ecb.3
+MLINKS+= EVP_EncryptInit.3 EVP_bf_cfb.3
+MLINKS+= EVP_EncryptInit.3 EVP_bf_ofb.3
+MLINKS+= EVP_EncryptInit.3 EVP_cast5_cbc.3
+MLINKS+= EVP_EncryptInit.3 EVP_cast5_ecb.3
+MLINKS+= EVP_EncryptInit.3 EVP_cast5_cfb.3
+MLINKS+= EVP_EncryptInit.3 EVP_cast5_ofb.3
+MLINKS+= EVP_EncryptInit.3 EVP_rc5_32_12_16_cbc.3
+MLINKS+= EVP_EncryptInit.3 EVP_rc5_32_12_16_ecb.3
+MLINKS+= EVP_EncryptInit.3 EVP_rc5_32_12_16_cfb.3
+MLINKS+= EVP_EncryptInit.3 EVP_rc5_32_12_16_ofb.3
+MLINKS+= EVP_EncryptInit.3 EVP_aes_128_gcm.3
+MLINKS+= EVP_EncryptInit.3 EVP_aes_192_gcm.3
+MLINKS+= EVP_EncryptInit.3 EVP_aes_256_gcm.3
+MLINKS+= EVP_EncryptInit.3 EVP_aes_128_ccm.3
+MLINKS+= EVP_EncryptInit.3 EVP_aes_192_ccm.3
+MLINKS+= EVP_EncryptInit.3 EVP_aes_256_ccm.3
MLINKS+= EVP_OpenInit.3 EVP_OpenUpdate.3
MLINKS+= EVP_OpenInit.3 EVP_OpenFinal.3
-MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_ctrl.3
-MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_ctrl_str.3
+MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_CTX_ctrl_str.3
+MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_get_default_digest_nid.3
+MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_CTX_set_signature_md.3
+MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_CTX_set_rsa_padding.3
+MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_CTX_set_rsa_pss_saltlen.3
+MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_CTX_set_rsa_rsa_keygen_bits.3
+MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_CTX_set_rsa_keygen_pubexp.3
+MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_CTX_set_dsa_paramgen_bits.3
+MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_CTX_set_dh_paramgen_prime_len.3
+MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_CTX_set_dh_paramgen_generator.3
+MLINKS+= EVP_PKEY_CTX_ctrl.3 EVP_PKEY_CTX_set_ec_paramgen_curve_nid.3
MLINKS+= EVP_PKEY_CTX_new.3 EVP_PKEY_CTX_new_id.3
MLINKS+= EVP_PKEY_CTX_new.3 EVP_PKEY_CTX_dup.3
MLINKS+= EVP_PKEY_CTX_new.3 EVP_PKEY_CTX_free.3
@@ -603,18 +791,27 @@ MLINKS+= OBJ_nid2obj.3 OBJ_cleanup.3
MLINKS+= OPENSSL_VERSION_NUMBER.3 SSLeay.3
MLINKS+= OPENSSL_VERSION_NUMBER.3 SSLeay_version.3
MLINKS+= OPENSSL_config.3 OPENSSL_no_config.3
+MLINKS+= OPENSSL_ia32cap.3 OPENSSL_ia32cap_loc.3
+MLINKS+= OPENSSL_instrument_bus.3 OPENSSL_instrument_bus2.3
+MLINKS+= OPENSSL_load_builtin_modules.3 ASN1_add_oid_module.3
+MLINKS+= OPENSSL_load_builtin_modules.3 ENGINE_add_conf_module.3
MLINKS+= OpenSSL_add_all_algorithms.3 OpenSSL_add_all_ciphers.3
MLINKS+= OpenSSL_add_all_algorithms.3 OpenSSL_add_all_digests.3
+MLINKS+= OpenSSL_add_all_algorithms.3 EVP_cleanup.3
+MLINKS+= PKCS7_verify.3 PKCS7_get0_signers.3
MLINKS+= RAND_add.3 RAND_seed.3
MLINKS+= RAND_add.3 RAND_status.3
MLINKS+= RAND_add.3 RAND_event.3
MLINKS+= RAND_add.3 RAND_screen.3
MLINKS+= RAND_bytes.3 RAND_pseudo_bytes.3
+MLINKS+= RAND_egd.3 RAND_egd_bytes.3
+MLINKS+= RAND_egd.3 RAND_query_egd_bytes.3
MLINKS+= RAND_load_file.3 RAND_write_file.3
MLINKS+= RAND_load_file.3 RAND_file_name.3
MLINKS+= RAND_set_rand_method.3 RAND_get_rand_method.3
MLINKS+= RAND_set_rand_method.3 RAND_SSLeay.3
MLINKS+= RSA_blinding_on.3 RSA_blinding_off.3
+MLINKS+= RSA_generate_key.3 RSA_generate_key_ex.3
MLINKS+= RSA_get_ex_new_index.3 RSA_set_ex_data.3
MLINKS+= RSA_get_ex_new_index.3 RSA_get_ex_data.3
MLINKS+= RSA_new.3 RSA_free.3
@@ -690,6 +887,16 @@ MLINKS+= X509_VERIFY_PARAM_set_flags.3 X509_VERIFY_PARAM_get_depth.3
MLINKS+= X509_VERIFY_PARAM_set_flags.3 X509_VERIFY_PARAM_set_time.3
MLINKS+= X509_VERIFY_PARAM_set_flags.3 X509_VERIFY_PARAM_add0_policy.3
MLINKS+= X509_VERIFY_PARAM_set_flags.3 X509_VERIFY_PARAM_set1_policies.3
+MLINKS+= X509_VERIFY_PARAM_set_flags.3 X509_VERIFY_PARAM_set1_host.3
+MLINKS+= X509_VERIFY_PARAM_set_flags.3 X509_VERIFY_PARAM_add1_host.3
+MLINKS+= X509_VERIFY_PARAM_set_flags.3 X509_VERIFY_PARAM_set_hostflags.3
+MLINKS+= X509_VERIFY_PARAM_set_flags.3 X509_VERIFY_PARAM_get0_peername.3
+MLINKS+= X509_VERIFY_PARAM_set_flags.3 X509_VERIFY_PARAM_set1_email.3
+MLINKS+= X509_VERIFY_PARAM_set_flags.3 X509_VERIFY_PARAM_set1_ip.3
+MLINKS+= X509_VERIFY_PARAM_set_flags.3 X509_VERIFY_PARAM_set1_ip_asc.3
+MLINKS+= X509_check_host.3 X509_check_email.3
+MLINKS+= X509_check_host.3 X509_check_ip.3
+MLINKS+= X509_check_host.3 X509_check_ip_asc.3
MLINKS+= X509_new.3 X509_free.3
MLINKS+= blowfish.3 BF_set_key.3
MLINKS+= blowfish.3 BF_encrypt.3
@@ -740,8 +947,17 @@ MLINKS+= d2i_DSAPublicKey.3 d2i_DSAPrivateKey.3
MLINKS+= d2i_DSAPublicKey.3 i2d_DSAPrivateKey.3
MLINKS+= d2i_DSAPublicKey.3 d2i_DSA_PUBKEY.3
MLINKS+= d2i_DSAPublicKey.3 i2d_DSA_PUBKEY.3
+MLINKS+= d2i_DSAPublicKey.3 d2i_DSAparams.3
+MLINKS+= d2i_DSAPublicKey.3 i2d_DSAparams.3
MLINKS+= d2i_DSAPublicKey.3 d2i_DSA_SIG.3
MLINKS+= d2i_DSAPublicKey.3 i2d_DSA_SIG.3
+MLINKS+= d2i_ECPKParameters.3 i2d_ECPKParameters.3
+MLINKS+= d2i_ECPKParameters.3 d2i_ECPKParameters_bio.3
+MLINKS+= d2i_ECPKParameters.3 i2d_ECPKParameters_bio.3
+MLINKS+= d2i_ECPKParameters.3 d2i_ECPKParameters_fp.3
+MLINKS+= d2i_ECPKParameters.3 i2d_ECPKParameters_fp.3
+MLINKS+= d2i_ECPKParameters.3 ECPKParameters_print.3
+MLINKS+= d2i_ECPKParameters.3 ECPKParameters_print_fp.3
MLINKS+= d2i_ECPrivateKey.3 i2d_ECPrivateKey.3
MLINKS+= d2i_ECPrivateKey.3 d2i_ECPrivate_key.3
MLINKS+= d2i_PKCS8PrivateKey.3 d2i_PKCS8PrivateKey_bio.3
@@ -765,7 +981,7 @@ MLINKS+= d2i_X509.3 i2d_X509_fp.3
MLINKS+= d2i_X509_ALGOR.3 i2d_X509_ALGOR.3
MLINKS+= d2i_X509_CRL.3 i2d_X509_CRL.3
MLINKS+= d2i_X509_CRL.3 d2i_X509_CRL_bio.3
-MLINKS+= d2i_X509_CRL.3 d2i_509_CRL_fp.3
+MLINKS+= d2i_X509_CRL.3 d2i_X509_CRL_fp.3
MLINKS+= d2i_X509_CRL.3 i2d_X509_CRL_bio.3
MLINKS+= d2i_X509_CRL.3 i2d_X509_CRL_fp.3
MLINKS+= d2i_X509_NAME.3 i2d_X509_NAME.3
@@ -807,10 +1023,25 @@ MLINKS+= des.3 DES_fcrypt.3
MLINKS+= des.3 DES_crypt.3
MLINKS+= des.3 DES_enc_read.3
MLINKS+= des.3 DES_enc_write.3
+MLINKS+= ecdsa.3 ECDSA_SIG_new.3
+MLINKS+= ecdsa.3 ECDSA_SIG_free.3
+MLINKS+= ecdsa.3 i2d_ECDSA_SIG.3
+MLINKS+= ecdsa.3 d2i_ECDSA_SIG.3
+MLINKS+= ecdsa.3 ECDSA_size.3
+MLINKS+= ecdsa.3 ECDSA_sign_setup.3
+MLINKS+= ecdsa.3 ECDSA_sign.3
+MLINKS+= ecdsa.3 ECDSA_sign_ex.3
+MLINKS+= ecdsa.3 ECDSA_verify.3
+MLINKS+= ecdsa.3 ECDSA_do_sign.3
+MLINKS+= ecdsa.3 ECDSA_do_sign_ex.3
+MLINKS+= ecdsa.3 ECDSA_do_verify.3
MLINKS+= hmac.3 HMAC.3
+MLINKS+= hmac.3 HMAC_CTX_init.3
MLINKS+= hmac.3 HMAC_Init.3
+MLINKS+= hmac.3 HMAC_Init_ex.3
MLINKS+= hmac.3 HMAC_Update.3
MLINKS+= hmac.3 HMAC_Final.3
+MLINKS+= hmac.3 HMAC_CTX_cleanup.3
MLINKS+= hmac.3 HMAC_cleanup.3
MLINKS+= lh_stats.3 lh_node_stats.3
MLINKS+= lh_stats.3 lh_node_usage_stats.3
@@ -918,6 +1149,22 @@ MLINKS+= sha.3 SHA1.3
MLINKS+= sha.3 SHA1_Init.3
MLINKS+= sha.3 SHA1_Update.3
MLINKS+= sha.3 SHA1_Final.3
+MLINKS+= sha.3 SHA224.3
+MLINKS+= sha.3 SHA224_Init.3
+MLINKS+= sha.3 SHA224_Update.3
+MLINKS+= sha.3 SHA224_Final.3
+MLINKS+= sha.3 SHA256.3
+MLINKS+= sha.3 SHA256_Init.3
+MLINKS+= sha.3 SHA256_Update.3
+MLINKS+= sha.3 SHA256_Final.3
+MLINKS+= sha.3 SHA384.3
+MLINKS+= sha.3 SHA384_Init.3
+MLINKS+= sha.3 SHA384_Update.3
+MLINKS+= sha.3 SHA384_Final.3
+MLINKS+= sha.3 SHA512.3
+MLINKS+= sha.3 SHA512_Init.3
+MLINKS+= sha.3 SHA512_Update.3
+MLINKS+= sha.3 SHA512_Final.3
MLINKS+= threads.3 CRYPTO_THREADID_set_callback.3
MLINKS+= threads.3 CRYPTO_THREADID_get_callback.3
MLINKS+= threads.3 CRYPTO_THREADID_current.3
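
The block of EC_* manual pages and MLINKS added above documents the EC API that this update pairs with the new accelerated P-256 implementation (ecp_nistz256). A minimal sketch — illustrative, not from this commit — of generating a P-256 key pair through that documented interface:

#include <openssl/ec.h>
#include <openssl/obj_mac.h>

/*
 * Illustrative only: create and generate a NIST P-256 key pair using
 * the EC_KEY interface covered by the man pages added above.
 */
static EC_KEY *
p256_keygen(void)
{
	EC_KEY *key;

	key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
	if (key == NULL)
		return (NULL);
	if (!EC_KEY_generate_key(key)) {
		EC_KEY_free(key);
		return (NULL);
	}
	return (key);
}
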
diff --git a/secure/lib/libcrypto/amd64/aes-x86_64.S b/secure/lib/libcrypto/amd64/aes-x86_64.S
index c800d5e..3243d6d 100644
--- a/secure/lib/libcrypto/amd64/aes-x86_64.S
+++ b/secure/lib/libcrypto/amd64/aes-x86_64.S
@@ -151,7 +151,7 @@ _x86_64_AES_encrypt:
xorl %r11d,%ebx
xorl %r12d,%ecx
xorl %r8d,%edx
-.byte 0xf3,0xc3
+.byte 0xf3,0xc3
.size _x86_64_AES_encrypt,.-_x86_64_AES_encrypt
.type _x86_64_AES_encrypt_compact,@function
.align 16
@@ -176,80 +176,78 @@ _x86_64_AES_encrypt_compact:
movzbl %al,%r10d
movzbl %bl,%r11d
movzbl %cl,%r12d
- movzbl (%r14,%r10,1),%r10d
- movzbl (%r14,%r11,1),%r11d
- movzbl (%r14,%r12,1),%r12d
-
movzbl %dl,%r8d
movzbl %bh,%esi
movzbl %ch,%edi
+ shrl $16,%ecx
+ movzbl %dh,%ebp
+ movzbl (%r14,%r10,1),%r10d
+ movzbl (%r14,%r11,1),%r11d
+ movzbl (%r14,%r12,1),%r12d
movzbl (%r14,%r8,1),%r8d
- movzbl (%r14,%rsi,1),%r9d
- movzbl (%r14,%rdi,1),%r13d
- movzbl %dh,%ebp
+ movzbl (%r14,%rsi,1),%r9d
movzbl %ah,%esi
- shrl $16,%ecx
+ movzbl (%r14,%rdi,1),%r13d
+ movzbl %cl,%edi
movzbl (%r14,%rbp,1),%ebp
movzbl (%r14,%rsi,1),%esi
- shrl $16,%edx
- movzbl %cl,%edi
shll $8,%r9d
+ shrl $16,%edx
shll $8,%r13d
- movzbl (%r14,%rdi,1),%edi
xorl %r9d,%r10d
- xorl %r13d,%r11d
-
- movzbl %dl,%r9d
shrl $16,%eax
+ movzbl %dl,%r9d
shrl $16,%ebx
- movzbl %al,%r13d
+ xorl %r13d,%r11d
shll $8,%ebp
- shll $8,%esi
- movzbl (%r14,%r9,1),%r9d
- movzbl (%r14,%r13,1),%r13d
+ movzbl %al,%r13d
+ movzbl (%r14,%rdi,1),%edi
xorl %ebp,%r12d
- xorl %esi,%r8d
+ shll $8,%esi
movzbl %bl,%ebp
- movzbl %dh,%esi
shll $16,%edi
- movzbl (%r14,%rbp,1),%ebp
- movzbl (%r14,%rsi,1),%esi
+ xorl %esi,%r8d
+ movzbl (%r14,%r9,1),%r9d
+ movzbl %dh,%esi
+ movzbl (%r14,%r13,1),%r13d
xorl %edi,%r10d
- movzbl %ah,%edi
shrl $8,%ecx
+ movzbl %ah,%edi
+ shll $16,%r9d
shrl $8,%ebx
+ shll $16,%r13d
+ xorl %r9d,%r11d
+ movzbl (%r14,%rbp,1),%ebp
+ movzbl (%r14,%rsi,1),%esi
movzbl (%r14,%rdi,1),%edi
movzbl (%r14,%rcx,1),%edx
movzbl (%r14,%rbx,1),%ecx
- shll $16,%r9d
- shll $16,%r13d
+
shll $16,%ebp
- xorl %r9d,%r11d
xorl %r13d,%r12d
- xorl %ebp,%r8d
-
shll $24,%esi
+ xorl %ebp,%r8d
shll $24,%edi
- shll $24,%edx
xorl %esi,%r10d
- shll $24,%ecx
+ shll $24,%edx
xorl %edi,%r11d
+ shll $24,%ecx
movl %r10d,%eax
movl %r11d,%ebx
xorl %r12d,%ecx
xorl %r8d,%edx
cmpq 16(%rsp),%r15
je .Lenc_compact_done
- movl %eax,%esi
- movl %ebx,%edi
- andl $2155905152,%esi
- andl $2155905152,%edi
- movl %esi,%r10d
- movl %edi,%r11d
+ movl $2155905152,%r10d
+ movl $2155905152,%r11d
+ andl %eax,%r10d
+ andl %ebx,%r11d
+ movl %r10d,%esi
+ movl %r11d,%edi
shrl $7,%r10d
leal (%rax,%rax,1),%r8d
shrl $7,%r11d
@@ -267,25 +265,25 @@ _x86_64_AES_encrypt_compact:
xorl %r8d,%eax
xorl %r9d,%ebx
- movl %ecx,%esi
- movl %edx,%edi
+ movl $2155905152,%r12d
roll $24,%eax
+ movl $2155905152,%ebp
roll $24,%ebx
- andl $2155905152,%esi
- andl $2155905152,%edi
+ andl %ecx,%r12d
+ andl %edx,%ebp
xorl %r8d,%eax
xorl %r9d,%ebx
- movl %esi,%r12d
- movl %edi,%ebp
+ movl %r12d,%esi
rorl $16,%r10d
+ movl %ebp,%edi
rorl $16,%r11d
- shrl $7,%r12d
leal (%rcx,%rcx,1),%r8d
+ shrl $7,%r12d
xorl %r10d,%eax
- xorl %r11d,%ebx
shrl $7,%ebp
- leal (%rdx,%rdx,1),%r9d
+ xorl %r11d,%ebx
rorl $8,%r10d
+ leal (%rdx,%rdx,1),%r9d
rorl $8,%r11d
subl %r12d,%esi
subl %ebp,%edi
@@ -301,23 +299,23 @@ _x86_64_AES_encrypt_compact:
xorl %esi,%r8d
xorl %edi,%r9d
+ rorl $16,%r12d
xorl %r8d,%ecx
+ rorl $16,%ebp
xorl %r9d,%edx
roll $24,%ecx
+ movl 0(%r14),%esi
roll $24,%edx
xorl %r8d,%ecx
- xorl %r9d,%edx
- movl 0(%r14),%esi
- rorl $16,%r12d
- rorl $16,%ebp
movl 64(%r14),%edi
- xorl %r12d,%ecx
- xorl %ebp,%edx
+ xorl %r9d,%edx
movl 128(%r14),%r8d
+ xorl %r12d,%ecx
rorl $8,%r12d
+ xorl %ebp,%edx
rorl $8,%ebp
- movl 192(%r14),%r9d
xorl %r12d,%ecx
+ movl 192(%r14),%r9d
xorl %ebp,%edx
jmp .Lenc_loop_compact
.align 16
@@ -326,7 +324,7 @@ _x86_64_AES_encrypt_compact:
xorl 4(%r15),%ebx
xorl 8(%r15),%ecx
xorl 12(%r15),%edx
-.byte 0xf3,0xc3
+.byte 0xf3,0xc3
.size _x86_64_AES_encrypt_compact,.-_x86_64_AES_encrypt_compact
.globl AES_encrypt
.type AES_encrypt,@function
@@ -548,7 +546,7 @@ _x86_64_AES_decrypt:
xorl %r11d,%ebx
xorl %r12d,%ecx
xorl %r8d,%edx
-.byte 0xf3,0xc3
+.byte 0xf3,0xc3
.size _x86_64_AES_decrypt,.-_x86_64_AES_decrypt
.type _x86_64_AES_decrypt_compact,@function
.align 16
@@ -574,70 +572,69 @@ _x86_64_AES_decrypt_compact:
movzbl %al,%r10d
movzbl %bl,%r11d
movzbl %cl,%r12d
- movzbl (%r14,%r10,1),%r10d
- movzbl (%r14,%r11,1),%r11d
- movzbl (%r14,%r12,1),%r12d
-
movzbl %dl,%r8d
movzbl %dh,%esi
movzbl %ah,%edi
+ shrl $16,%edx
+ movzbl %bh,%ebp
+ movzbl (%r14,%r10,1),%r10d
+ movzbl (%r14,%r11,1),%r11d
+ movzbl (%r14,%r12,1),%r12d
movzbl (%r14,%r8,1),%r8d
- movzbl (%r14,%rsi,1),%r9d
- movzbl (%r14,%rdi,1),%r13d
- movzbl %bh,%ebp
+ movzbl (%r14,%rsi,1),%r9d
movzbl %ch,%esi
- shrl $16,%ecx
+ movzbl (%r14,%rdi,1),%r13d
movzbl (%r14,%rbp,1),%ebp
movzbl (%r14,%rsi,1),%esi
- shrl $16,%edx
- movzbl %cl,%edi
- shll $8,%r9d
+ shrl $16,%ecx
shll $8,%r13d
- movzbl (%r14,%rdi,1),%edi
- xorl %r9d,%r10d
- xorl %r13d,%r11d
-
- movzbl %dl,%r9d
+ shll $8,%r9d
+ movzbl %cl,%edi
shrl $16,%eax
+ xorl %r9d,%r10d
shrl $16,%ebx
- movzbl %al,%r13d
+ movzbl %dl,%r9d
+
shll $8,%ebp
+ xorl %r13d,%r11d
shll $8,%esi
- movzbl (%r14,%r9,1),%r9d
- movzbl (%r14,%r13,1),%r13d
+ movzbl %al,%r13d
+ movzbl (%r14,%rdi,1),%edi
xorl %ebp,%r12d
- xorl %esi,%r8d
-
movzbl %bl,%ebp
- movzbl %bh,%esi
+
shll $16,%edi
+ xorl %esi,%r8d
+ movzbl (%r14,%r9,1),%r9d
+ movzbl %bh,%esi
movzbl (%r14,%rbp,1),%ebp
- movzbl (%r14,%rsi,1),%esi
xorl %edi,%r10d
-
+ movzbl (%r14,%r13,1),%r13d
movzbl %ch,%edi
+
+ shll $16,%ebp
shll $16,%r9d
shll $16,%r13d
- movzbl (%r14,%rdi,1),%ebx
+ xorl %ebp,%r8d
+ movzbl %dh,%ebp
xorl %r9d,%r11d
+ shrl $8,%eax
xorl %r13d,%r12d
- movzbl %dh,%edi
- shrl $8,%eax
- shll $16,%ebp
- movzbl (%r14,%rdi,1),%ecx
+ movzbl (%r14,%rsi,1),%esi
+ movzbl (%r14,%rdi,1),%ebx
+ movzbl (%r14,%rbp,1),%ecx
movzbl (%r14,%rax,1),%edx
- xorl %ebp,%r8d
+ movl %r10d,%eax
shll $24,%esi
shll $24,%ebx
shll $24,%ecx
- xorl %esi,%r10d
+ xorl %esi,%eax
shll $24,%edx
xorl %r11d,%ebx
- movl %r10d,%eax
xorl %r12d,%ecx
xorl %r8d,%edx
cmpq 16(%rsp),%r15
@@ -650,12 +647,12 @@ _x86_64_AES_decrypt_compact:
orq %rbx,%rax
orq %rdx,%rcx
movq 256+16(%r14),%rbp
- movq %rax,%rbx
- movq %rcx,%rdx
- andq %rsi,%rbx
- andq %rsi,%rdx
- movq %rbx,%r9
- movq %rdx,%r12
+ movq %rsi,%r9
+ movq %rsi,%r12
+ andq %rax,%r9
+ andq %rcx,%r12
+ movq %r9,%rbx
+ movq %r12,%rdx
shrq $7,%r9
leaq (%rax,%rax,1),%r8
shrq $7,%r12
@@ -666,15 +663,15 @@ _x86_64_AES_decrypt_compact:
andq %rdi,%r11
andq %rbp,%rbx
andq %rbp,%rdx
- xorq %r8,%rbx
- xorq %r11,%rdx
- movq %rbx,%r8
- movq %rdx,%r11
-
- andq %rsi,%rbx
- andq %rsi,%rdx
- movq %rbx,%r10
- movq %rdx,%r13
+ xorq %rbx,%r8
+ xorq %rdx,%r11
+ movq %rsi,%r10
+ movq %rsi,%r13
+
+ andq %r8,%r10
+ andq %r11,%r13
+ movq %r10,%rbx
+ movq %r13,%rdx
shrq $7,%r10
leaq (%r8,%r8,1),%r9
shrq $7,%r13
@@ -685,15 +682,15 @@ _x86_64_AES_decrypt_compact:
andq %rdi,%r12
andq %rbp,%rbx
andq %rbp,%rdx
- xorq %r9,%rbx
- xorq %r12,%rdx
- movq %rbx,%r9
- movq %rdx,%r12
-
- andq %rsi,%rbx
- andq %rsi,%rdx
- movq %rbx,%r10
- movq %rdx,%r13
+ xorq %rbx,%r9
+ xorq %rdx,%r12
+ movq %rsi,%r10
+ movq %rsi,%r13
+
+ andq %r9,%r10
+ andq %r12,%r13
+ movq %r10,%rbx
+ movq %r13,%rdx
shrq $7,%r10
xorq %rax,%r8
shrq $7,%r13
@@ -718,51 +715,51 @@ _x86_64_AES_decrypt_compact:
movq %rax,%rbx
movq %rcx,%rdx
xorq %r10,%r9
- xorq %r13,%r12
shrq $32,%rbx
+ xorq %r13,%r12
shrq $32,%rdx
xorq %r8,%r10
- xorq %r11,%r13
roll $8,%eax
+ xorq %r11,%r13
roll $8,%ecx
xorq %r9,%r10
+ roll $8,%ebx
xorq %r12,%r13
- roll $8,%ebx
roll $8,%edx
xorl %r10d,%eax
- xorl %r13d,%ecx
shrq $32,%r10
+ xorl %r13d,%ecx
shrq $32,%r13
xorl %r10d,%ebx
xorl %r13d,%edx
movq %r8,%r10
- movq %r11,%r13
- shrq $32,%r10
- shrq $32,%r13
roll $24,%r8d
+ movq %r11,%r13
roll $24,%r11d
- roll $24,%r10d
- roll $24,%r13d
+ shrq $32,%r10
xorl %r8d,%eax
+ shrq $32,%r13
xorl %r11d,%ecx
+ roll $24,%r10d
movq %r9,%r8
+ roll $24,%r13d
movq %r12,%r11
+ shrq $32,%r8
xorl %r10d,%ebx
+ shrq $32,%r11
xorl %r13d,%edx
movq 0(%r14),%rsi
- shrq $32,%r8
- shrq $32,%r11
- movq 64(%r14),%rdi
roll $16,%r9d
+ movq 64(%r14),%rdi
roll $16,%r12d
movq 128(%r14),%rbp
roll $16,%r8d
- roll $16,%r11d
movq 192(%r14),%r10
xorl %r9d,%eax
+ roll $16,%r11d
xorl %r12d,%ecx
movq 256(%r14),%r13
xorl %r8d,%ebx
@@ -774,7 +771,7 @@ _x86_64_AES_decrypt_compact:
xorl 4(%r15),%ebx
xorl 8(%r15),%ecx
xorl 12(%r15),%edx
-.byte 0xf3,0xc3
+.byte 0xf3,0xc3
.size _x86_64_AES_decrypt_compact,.-_x86_64_AES_decrypt_compact
.globl AES_decrypt
.type AES_decrypt,@function
@@ -860,10 +857,6 @@ private_AES_set_encrypt_key:
call _x86_64_AES_set_encrypt_key
- movq 8(%rsp),%r15
- movq 16(%rsp),%r14
- movq 24(%rsp),%r13
- movq 32(%rsp),%r12
movq 40(%rsp),%rbp
movq 48(%rsp),%rbx
addq $56,%rsp
@@ -1108,7 +1101,7 @@ _x86_64_AES_set_encrypt_key:
.Lbadpointer:
movq $-1,%rax
.Lexit:
-.byte 0xf3,0xc3
+.byte 0xf3,0xc3
.size _x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
.globl private_AES_set_decrypt_key
.type private_AES_set_decrypt_key,@function
@@ -1161,12 +1154,12 @@ private_AES_set_decrypt_key:
leaq 16(%r15),%r15
movq 0(%r15),%rax
movq 8(%r15),%rcx
- movq %rax,%rbx
- movq %rcx,%rdx
- andq %rsi,%rbx
- andq %rsi,%rdx
- movq %rbx,%r9
- movq %rdx,%r12
+ movq %rsi,%r9
+ movq %rsi,%r12
+ andq %rax,%r9
+ andq %rcx,%r12
+ movq %r9,%rbx
+ movq %r12,%rdx
shrq $7,%r9
leaq (%rax,%rax,1),%r8
shrq $7,%r12
@@ -1177,15 +1170,15 @@ private_AES_set_decrypt_key:
andq %rdi,%r11
andq %rbp,%rbx
andq %rbp,%rdx
- xorq %r8,%rbx
- xorq %r11,%rdx
- movq %rbx,%r8
- movq %rdx,%r11
-
- andq %rsi,%rbx
- andq %rsi,%rdx
- movq %rbx,%r10
- movq %rdx,%r13
+ xorq %rbx,%r8
+ xorq %rdx,%r11
+ movq %rsi,%r10
+ movq %rsi,%r13
+
+ andq %r8,%r10
+ andq %r11,%r13
+ movq %r10,%rbx
+ movq %r13,%rdx
shrq $7,%r10
leaq (%r8,%r8,1),%r9
shrq $7,%r13
@@ -1196,15 +1189,15 @@ private_AES_set_decrypt_key:
andq %rdi,%r12
andq %rbp,%rbx
andq %rbp,%rdx
- xorq %r9,%rbx
- xorq %r12,%rdx
- movq %rbx,%r9
- movq %rdx,%r12
-
- andq %rsi,%rbx
- andq %rsi,%rdx
- movq %rbx,%r10
- movq %rdx,%r13
+ xorq %rbx,%r9
+ xorq %rdx,%r12
+ movq %rsi,%r10
+ movq %rsi,%r13
+
+ andq %r9,%r10
+ andq %r12,%r13
+ movq %r10,%rbx
+ movq %r13,%rdx
shrq $7,%r10
xorq %rax,%r8
shrq $7,%r13
@@ -1229,51 +1222,51 @@ private_AES_set_decrypt_key:
movq %rax,%rbx
movq %rcx,%rdx
xorq %r10,%r9
- xorq %r13,%r12
shrq $32,%rbx
+ xorq %r13,%r12
shrq $32,%rdx
xorq %r8,%r10
- xorq %r11,%r13
roll $8,%eax
+ xorq %r11,%r13
roll $8,%ecx
xorq %r9,%r10
+ roll $8,%ebx
xorq %r12,%r13
- roll $8,%ebx
roll $8,%edx
xorl %r10d,%eax
- xorl %r13d,%ecx
shrq $32,%r10
+ xorl %r13d,%ecx
shrq $32,%r13
xorl %r10d,%ebx
xorl %r13d,%edx
movq %r8,%r10
- movq %r11,%r13
- shrq $32,%r10
- shrq $32,%r13
roll $24,%r8d
+ movq %r11,%r13
roll $24,%r11d
- roll $24,%r10d
- roll $24,%r13d
+ shrq $32,%r10
xorl %r8d,%eax
+ shrq $32,%r13
xorl %r11d,%ecx
+ roll $24,%r10d
movq %r9,%r8
+ roll $24,%r13d
movq %r12,%r11
+ shrq $32,%r8
xorl %r10d,%ebx
+ shrq $32,%r11
xorl %r13d,%edx
- shrq $32,%r8
- shrq $32,%r11
-
roll $16,%r9d
+
roll $16,%r12d
roll $16,%r8d
- roll $16,%r11d
xorl %r9d,%eax
+ roll $16,%r11d
xorl %r12d,%ecx
xorl %r8d,%ebx
@@ -1389,7 +1382,7 @@ AES_cbc_encrypt:
leaq 80(%rsp),%rdi
leaq 80(%rsp),%r15
movl $30,%ecx
-.long 0x90A548F3
+.long 0x90A548F3
movl %eax,(%rdi)
.Lcbc_skip_ecopy:
movq %r15,0(%rsp)
@@ -1551,7 +1544,7 @@ AES_cbc_encrypt:
je .Lcbc_exit
movl $30,%ecx
xorq %rax,%rax
-.long 0x90AB48F3
+.long 0x90AB48F3
jmp .Lcbc_exit
@@ -1606,7 +1599,7 @@ AES_cbc_encrypt:
movl 4(%rbp),%ebx
movl 8(%rbp),%ecx
movl 12(%rbp),%edx
- jz .Lcbc_slow_enc_tail
+ jz .Lcbc_slow_enc_tail
.align 4
.Lcbc_slow_enc_loop:
@@ -1651,16 +1644,16 @@ AES_cbc_encrypt:
movq %r10,%rcx
movq %r8,%rsi
movq %r9,%rdi
-.long 0x9066A4F3
+.long 0x9066A4F3
movq $16,%rcx
subq %r10,%rcx
xorq %rax,%rax
-.long 0x9066AAF3
+.long 0x9066AAF3
movq %r9,%r8
movq $16,%r10
movq %r11,%rax
movq %r12,%rcx
- jmp .Lcbc_slow_enc_loop
+ jmp .Lcbc_slow_enc_loop
.align 16
.LSLOW_DECRYPT:
@@ -1736,7 +1729,7 @@ AES_cbc_encrypt:
movq %r9,%rdi
leaq 64(%rsp),%rsi
leaq 16(%r10),%rcx
-.long 0x9066A4F3
+.long 0x9066A4F3
jmp .Lcbc_exit
.align 16
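
The opcode literals that recur in the hunks above are hand-encoded instructions (emitted as raw bytes so that older assemblers accept them); the mnemonics below are decoded from the byte values and are not part of the commit. The paired "-"/"+" lines that look identical in this view evidently differ only in whitespace, which the rendering collapses.

	.byte	0xf3,0xc3	# f3 c3       = rep ret; two-byte return that
				# avoids a branch-predictor stall on AMD K8
	.long	0x90A548F3	# f3 48 a5 90 = rep movsq; nop (copies the
				# 240-byte key schedule, %ecx = 30 qwords)
	.long	0x90AB48F3	# f3 48 ab 90 = rep stosq; nop (wipes that
				# on-stack key copy with %rax = 0)
	.long	0x9066A4F3	# f3 a4 66 90 = rep movsb; data16 nop
				# (byte-wise copy of a partial block)
	.long	0x9066AAF3	# f3 aa 66 90 = rep stosb; data16 nop
				# (zero-pads the tail of that block)
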
diff --git a/secure/lib/libcrypto/amd64/aesni-gcm-x86_64.S b/secure/lib/libcrypto/amd64/aesni-gcm-x86_64.S
new file mode 100644
index 0000000..9e99e71
--- /dev/null
+++ b/secure/lib/libcrypto/amd64/aesni-gcm-x86_64.S
@@ -0,0 +1,16 @@
+ # $FreeBSD$
+.text
+
+.globl aesni_gcm_encrypt
+.type aesni_gcm_encrypt,@function
+aesni_gcm_encrypt:
+ xorl %eax,%eax
+ .byte 0xf3,0xc3
+.size aesni_gcm_encrypt,.-aesni_gcm_encrypt
+
+.globl aesni_gcm_decrypt
+.type aesni_gcm_decrypt,@function
+aesni_gcm_decrypt:
+ xorl %eax,%eax
+ .byte 0xf3,0xc3
+.size aesni_gcm_decrypt,.-aesni_gcm_decrypt
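
Both routines in this new file are intentional stubs: when the stitched AVX AES-GCM implementation is not enabled at generation time (as here), aesni-gcm-x86_64.pl emits entry points that report zero bytes processed, and the caller then runs the whole buffer through the generic AES-NI CTR + GHASH path. A minimal standalone sketch of the same pattern, with a hypothetical symbol name (GNU as / ELF assumed):

.text
.globl	bulk_path_stub			# illustrative name, not in the commit
.type	bulk_path_stub,@function
bulk_path_stub:
	xorl	%eax,%eax		# %rax = 0: "no bytes handled here"
	.byte	0xf3,0xc3		# rep ret
.size	bulk_path_stub,.-bulk_path_stub

Keeping the symbols defined either way lets the C side call aesni_gcm_encrypt/aesni_gcm_decrypt unconditionally and treat the return value as the number of bytes the fast path consumed.
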
diff --git a/secure/lib/libcrypto/amd64/aesni-mb-x86_64.S b/secure/lib/libcrypto/amd64/aesni-mb-x86_64.S
new file mode 100644
index 0000000..7043ec3
--- /dev/null
+++ b/secure/lib/libcrypto/amd64/aesni-mb-x86_64.S
@@ -0,0 +1,507 @@
+ # $FreeBSD$
+.text
+
+
+
+.globl aesni_multi_cbc_encrypt
+.type aesni_multi_cbc_encrypt,@function
+.align 32
+aesni_multi_cbc_encrypt:
+ movq %rsp,%rax
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+
+
+
+
+
+ subq $48,%rsp
+ andq $-64,%rsp
+ movq %rax,16(%rsp)
+
+.Lenc4x_body:
+ movdqu (%rsi),%xmm12
+ leaq 120(%rsi),%rsi
+ leaq 80(%rdi),%rdi
+
+.Lenc4x_loop_grande:
+ movl %edx,24(%rsp)
+ xorl %edx,%edx
+ movl -64(%rdi),%ecx
+ movq -80(%rdi),%r8
+ cmpl %edx,%ecx
+ movq -72(%rdi),%r12
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movdqu -56(%rdi),%xmm2
+ movl %ecx,32(%rsp)
+ cmovleq %rsp,%r8
+ movl -24(%rdi),%ecx
+ movq -40(%rdi),%r9
+ cmpl %edx,%ecx
+ movq -32(%rdi),%r13
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movdqu -16(%rdi),%xmm3
+ movl %ecx,36(%rsp)
+ cmovleq %rsp,%r9
+ movl 16(%rdi),%ecx
+ movq 0(%rdi),%r10
+ cmpl %edx,%ecx
+ movq 8(%rdi),%r14
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movdqu 24(%rdi),%xmm4
+ movl %ecx,40(%rsp)
+ cmovleq %rsp,%r10
+ movl 56(%rdi),%ecx
+ movq 40(%rdi),%r11
+ cmpl %edx,%ecx
+ movq 48(%rdi),%r15
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movdqu 64(%rdi),%xmm5
+ movl %ecx,44(%rsp)
+ cmovleq %rsp,%r11
+ testl %edx,%edx
+ jz .Lenc4x_done
+
+ movups 16-120(%rsi),%xmm1
+ pxor %xmm12,%xmm2
+ movups 32-120(%rsi),%xmm0
+ pxor %xmm12,%xmm3
+ movl 240-120(%rsi),%eax
+ pxor %xmm12,%xmm4
+ movdqu (%r8),%xmm6
+ pxor %xmm12,%xmm5
+ movdqu (%r9),%xmm7
+ pxor %xmm6,%xmm2
+ movdqu (%r10),%xmm8
+ pxor %xmm7,%xmm3
+ movdqu (%r11),%xmm9
+ pxor %xmm8,%xmm4
+ pxor %xmm9,%xmm5
+ movdqa 32(%rsp),%xmm10
+ xorq %rbx,%rbx
+ jmp .Loop_enc4x
+
+.align 32
+.Loop_enc4x:
+ addq $16,%rbx
+ leaq 16(%rsp),%rbp
+ movl $1,%ecx
+ subq %rbx,%rbp
+
+.byte 102,15,56,220,209
+ prefetcht0 31(%r8,%rbx,1)
+ prefetcht0 31(%r9,%rbx,1)
+.byte 102,15,56,220,217
+ prefetcht0 31(%r10,%rbx,1)
+ prefetcht0 31(%r10,%rbx,1)
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
+ movups 48-120(%rsi),%xmm1
+ cmpl 32(%rsp),%ecx
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+.byte 102,15,56,220,224
+ cmovgeq %rbp,%r8
+ cmovgq %rbp,%r12
+.byte 102,15,56,220,232
+ movups -56(%rsi),%xmm0
+ cmpl 36(%rsp),%ecx
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+.byte 102,15,56,220,225
+ cmovgeq %rbp,%r9
+ cmovgq %rbp,%r13
+.byte 102,15,56,220,233
+ movups -40(%rsi),%xmm1
+ cmpl 40(%rsp),%ecx
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+.byte 102,15,56,220,224
+ cmovgeq %rbp,%r10
+ cmovgq %rbp,%r14
+.byte 102,15,56,220,232
+ movups -24(%rsi),%xmm0
+ cmpl 44(%rsp),%ecx
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+.byte 102,15,56,220,225
+ cmovgeq %rbp,%r11
+ cmovgq %rbp,%r15
+.byte 102,15,56,220,233
+ movups -8(%rsi),%xmm1
+ movdqa %xmm10,%xmm11
+.byte 102,15,56,220,208
+ prefetcht0 15(%r12,%rbx,1)
+ prefetcht0 15(%r13,%rbx,1)
+.byte 102,15,56,220,216
+ prefetcht0 15(%r14,%rbx,1)
+ prefetcht0 15(%r15,%rbx,1)
+.byte 102,15,56,220,224
+.byte 102,15,56,220,232
+ movups 128-120(%rsi),%xmm0
+ pxor %xmm12,%xmm12
+
+.byte 102,15,56,220,209
+ pcmpgtd %xmm12,%xmm11
+ movdqu -120(%rsi),%xmm12
+.byte 102,15,56,220,217
+ paddd %xmm11,%xmm10
+ movdqa %xmm10,32(%rsp)
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
+ movups 144-120(%rsi),%xmm1
+
+ cmpl $11,%eax
+
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+.byte 102,15,56,220,224
+.byte 102,15,56,220,232
+ movups 160-120(%rsi),%xmm0
+
+ jb .Lenc4x_tail
+
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
+ movups 176-120(%rsi),%xmm1
+
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+.byte 102,15,56,220,224
+.byte 102,15,56,220,232
+ movups 192-120(%rsi),%xmm0
+
+ je .Lenc4x_tail
+
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
+ movups 208-120(%rsi),%xmm1
+
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+.byte 102,15,56,220,224
+.byte 102,15,56,220,232
+ movups 224-120(%rsi),%xmm0
+ jmp .Lenc4x_tail
+
+.align 32
+.Lenc4x_tail:
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
+ movdqu (%r8,%rbx,1),%xmm6
+ movdqu 16-120(%rsi),%xmm1
+
+.byte 102,15,56,221,208
+ movdqu (%r9,%rbx,1),%xmm7
+ pxor %xmm12,%xmm6
+.byte 102,15,56,221,216
+ movdqu (%r10,%rbx,1),%xmm8
+ pxor %xmm12,%xmm7
+.byte 102,15,56,221,224
+ movdqu (%r11,%rbx,1),%xmm9
+ pxor %xmm12,%xmm8
+.byte 102,15,56,221,232
+ movdqu 32-120(%rsi),%xmm0
+ pxor %xmm12,%xmm9
+
+ movups %xmm2,-16(%r12,%rbx,1)
+ pxor %xmm6,%xmm2
+ movups %xmm3,-16(%r13,%rbx,1)
+ pxor %xmm7,%xmm3
+ movups %xmm4,-16(%r14,%rbx,1)
+ pxor %xmm8,%xmm4
+ movups %xmm5,-16(%r15,%rbx,1)
+ pxor %xmm9,%xmm5
+
+ decl %edx
+ jnz .Loop_enc4x
+
+ movq 16(%rsp),%rax
+ movl 24(%rsp),%edx
+
+
+
+
+
+
+
+
+
+
+ leaq 160(%rdi),%rdi
+ decl %edx
+ jnz .Lenc4x_loop_grande
+
+.Lenc4x_done:
+ movq -48(%rax),%r15
+ movq -40(%rax),%r14
+ movq -32(%rax),%r13
+ movq -24(%rax),%r12
+ movq -16(%rax),%rbp
+ movq -8(%rax),%rbx
+ leaq (%rax),%rsp
+.Lenc4x_epilogue:
+ .byte 0xf3,0xc3
+.size aesni_multi_cbc_encrypt,.-aesni_multi_cbc_encrypt
+
+.globl aesni_multi_cbc_decrypt
+.type aesni_multi_cbc_decrypt,@function
+.align 32
+aesni_multi_cbc_decrypt:
+ movq %rsp,%rax
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+
+
+
+
+
+ subq $48,%rsp
+ andq $-64,%rsp
+ movq %rax,16(%rsp)
+
+.Ldec4x_body:
+ movdqu (%rsi),%xmm12
+ leaq 120(%rsi),%rsi
+ leaq 80(%rdi),%rdi
+
+.Ldec4x_loop_grande:
+ movl %edx,24(%rsp)
+ xorl %edx,%edx
+ movl -64(%rdi),%ecx
+ movq -80(%rdi),%r8
+ cmpl %edx,%ecx
+ movq -72(%rdi),%r12
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movdqu -56(%rdi),%xmm6
+ movl %ecx,32(%rsp)
+ cmovleq %rsp,%r8
+ movl -24(%rdi),%ecx
+ movq -40(%rdi),%r9
+ cmpl %edx,%ecx
+ movq -32(%rdi),%r13
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movdqu -16(%rdi),%xmm7
+ movl %ecx,36(%rsp)
+ cmovleq %rsp,%r9
+ movl 16(%rdi),%ecx
+ movq 0(%rdi),%r10
+ cmpl %edx,%ecx
+ movq 8(%rdi),%r14
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movdqu 24(%rdi),%xmm8
+ movl %ecx,40(%rsp)
+ cmovleq %rsp,%r10
+ movl 56(%rdi),%ecx
+ movq 40(%rdi),%r11
+ cmpl %edx,%ecx
+ movq 48(%rdi),%r15
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movdqu 64(%rdi),%xmm9
+ movl %ecx,44(%rsp)
+ cmovleq %rsp,%r11
+ testl %edx,%edx
+ jz .Ldec4x_done
+
+ movups 16-120(%rsi),%xmm1
+ movups 32-120(%rsi),%xmm0
+ movl 240-120(%rsi),%eax
+ movdqu (%r8),%xmm2
+ movdqu (%r9),%xmm3
+ pxor %xmm12,%xmm2
+ movdqu (%r10),%xmm4
+ pxor %xmm12,%xmm3
+ movdqu (%r11),%xmm5
+ pxor %xmm12,%xmm4
+ pxor %xmm12,%xmm5
+ movdqa 32(%rsp),%xmm10
+ xorq %rbx,%rbx
+ jmp .Loop_dec4x
+
+.align 32
+.Loop_dec4x:
+ addq $16,%rbx
+ leaq 16(%rsp),%rbp
+ movl $1,%ecx
+ subq %rbx,%rbp
+
+.byte 102,15,56,222,209
+ prefetcht0 31(%r8,%rbx,1)
+ prefetcht0 31(%r9,%rbx,1)
+.byte 102,15,56,222,217
+ prefetcht0 31(%r10,%rbx,1)
+ prefetcht0 31(%r11,%rbx,1)
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
+ movups 48-120(%rsi),%xmm1
+ cmpl 32(%rsp),%ecx
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+.byte 102,15,56,222,224
+ cmovgeq %rbp,%r8
+ cmovgq %rbp,%r12
+.byte 102,15,56,222,232
+ movups -56(%rsi),%xmm0
+ cmpl 36(%rsp),%ecx
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,222,225
+ cmovgeq %rbp,%r9
+ cmovgq %rbp,%r13
+.byte 102,15,56,222,233
+ movups -40(%rsi),%xmm1
+ cmpl 40(%rsp),%ecx
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+.byte 102,15,56,222,224
+ cmovgeq %rbp,%r10
+ cmovgq %rbp,%r14
+.byte 102,15,56,222,232
+ movups -24(%rsi),%xmm0
+ cmpl 44(%rsp),%ecx
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,222,225
+ cmovgeq %rbp,%r11
+ cmovgq %rbp,%r15
+.byte 102,15,56,222,233
+ movups -8(%rsi),%xmm1
+ movdqa %xmm10,%xmm11
+.byte 102,15,56,222,208
+ prefetcht0 15(%r12,%rbx,1)
+ prefetcht0 15(%r13,%rbx,1)
+.byte 102,15,56,222,216
+ prefetcht0 15(%r14,%rbx,1)
+ prefetcht0 15(%r15,%rbx,1)
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+ movups 128-120(%rsi),%xmm0
+ pxor %xmm12,%xmm12
+
+.byte 102,15,56,222,209
+ pcmpgtd %xmm12,%xmm11
+ movdqu -120(%rsi),%xmm12
+.byte 102,15,56,222,217
+ paddd %xmm11,%xmm10
+ movdqa %xmm10,32(%rsp)
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
+ movups 144-120(%rsi),%xmm1
+
+ cmpl $11,%eax
+
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+ movups 160-120(%rsi),%xmm0
+
+ jb .Ldec4x_tail
+
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
+ movups 176-120(%rsi),%xmm1
+
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+ movups 192-120(%rsi),%xmm0
+
+ je .Ldec4x_tail
+
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
+ movups 208-120(%rsi),%xmm1
+
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+ movups 224-120(%rsi),%xmm0
+ jmp .Ldec4x_tail
+
+.align 32
+.Ldec4x_tail:
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,222,225
+ pxor %xmm0,%xmm6
+ pxor %xmm0,%xmm7
+.byte 102,15,56,222,233
+ movdqu 16-120(%rsi),%xmm1
+ pxor %xmm0,%xmm8
+ pxor %xmm0,%xmm9
+ movdqu 32-120(%rsi),%xmm0
+
+.byte 102,15,56,223,214
+.byte 102,15,56,223,223
+ movdqu -16(%r8,%rbx,1),%xmm6
+ movdqu -16(%r9,%rbx,1),%xmm7
+.byte 102,65,15,56,223,224
+.byte 102,65,15,56,223,233
+ movdqu -16(%r10,%rbx,1),%xmm8
+ movdqu -16(%r11,%rbx,1),%xmm9
+
+ movups %xmm2,-16(%r12,%rbx,1)
+ movdqu (%r8,%rbx,1),%xmm2
+ movups %xmm3,-16(%r13,%rbx,1)
+ movdqu (%r9,%rbx,1),%xmm3
+ pxor %xmm12,%xmm2
+ movups %xmm4,-16(%r14,%rbx,1)
+ movdqu (%r10,%rbx,1),%xmm4
+ pxor %xmm12,%xmm3
+ movups %xmm5,-16(%r15,%rbx,1)
+ movdqu (%r11,%rbx,1),%xmm5
+ pxor %xmm12,%xmm4
+ pxor %xmm12,%xmm5
+
+ decl %edx
+ jnz .Loop_dec4x
+
+ movq 16(%rsp),%rax
+ movl 24(%rsp),%edx
+
+ leaq 160(%rdi),%rdi
+ decl %edx
+ jnz .Ldec4x_loop_grande
+
+.Ldec4x_done:
+ movq -48(%rax),%r15
+ movq -40(%rax),%r14
+ movq -32(%rax),%r13
+ movq -24(%rax),%r12
+ movq -16(%rax),%rbp
+ movq -8(%rax),%rbx
+ leaq (%rax),%rsp
+.Ldec4x_epilogue:
+ .byte 0xf3,0xc3
+.size aesni_multi_cbc_decrypt,.-aesni_multi_cbc_decrypt
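
aesni_multi_cbc_encrypt/aesni_multi_cbc_decrypt interleave up to four independent CBC streams per call: %rdi points at an array of per-lane descriptors (biased by +80 at entry), %rsi at the expanded key (biased by +120), and %edx gives the number of 4-lane groups. The layout below is inferred from the offsets dereferenced in .Lenc4x_loop_grande; the field names and the block-count interpretation are illustrative, not from the commit:

# Per-lane descriptor, 40 bytes (lane 0 at -80(%rdi) after the bias):
#	+0	input pointer			movq   -80(%rdi),%r8
#	+8	output pointer			movq   -72(%rdi),%r12
#	+16	length, in 16-byte blocks	movl   -64(%rdi),%ecx
#	+24	16-byte IV			movdqu -56(%rdi),%xmm2

Lanes with a non-positive count are parked on %rsp (the cmovleq above), and the live counters are carried in %xmm10 and decremented as a 4-wide vector once per block (the pcmpgtd/paddd pair), so streams of different lengths can retire independently inside a single loop.
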
diff --git a/secure/lib/libcrypto/amd64/aesni-sha1-x86_64.S b/secure/lib/libcrypto/amd64/aesni-sha1-x86_64.S
index e9a2053..fa16434 100644
--- a/secure/lib/libcrypto/amd64/aesni-sha1-x86_64.S
+++ b/secure/lib/libcrypto/amd64/aesni-sha1-x86_64.S
@@ -4,16 +4,18 @@
.globl aesni_cbc_sha1_enc
.type aesni_cbc_sha1_enc,@function
-.align 16
+.align 32
aesni_cbc_sha1_enc:
movl OPENSSL_ia32cap_P+0(%rip),%r10d
- movl OPENSSL_ia32cap_P+4(%rip),%r11d
+ movq OPENSSL_ia32cap_P+4(%rip),%r11
+ btq $61,%r11
+ jc aesni_cbc_sha1_enc_shaext
jmp aesni_cbc_sha1_enc_ssse3
.byte 0xf3,0xc3
.size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc
.type aesni_cbc_sha1_enc_ssse3,@function
-.align 16
+.align 32
aesni_cbc_sha1_enc_ssse3:
movq 8(%rsp),%r10
@@ -30,12 +32,12 @@ aesni_cbc_sha1_enc_ssse3:
movq %rdi,%r12
movq %rsi,%r13
movq %rdx,%r14
- movq %rcx,%r15
- movdqu (%r8),%xmm11
+ leaq 112(%rcx),%r15
+ movdqu (%r8),%xmm2
movq %r8,88(%rsp)
shlq $6,%r14
subq %r12,%r13
- movl 240(%r15),%r8d
+ movl 240-112(%r15),%r8d
addq %r10,%r14
leaq K_XX_XX(%rip),%r11
@@ -45,1188 +47,1168 @@ aesni_cbc_sha1_enc_ssse3:
movl 12(%r9),%edx
movl %ebx,%esi
movl 16(%r9),%ebp
+ movl %ecx,%edi
+ xorl %edx,%edi
+ andl %edi,%esi
- movdqa 64(%r11),%xmm6
- movdqa 0(%r11),%xmm9
- movdqu 0(%r10),%xmm0
- movdqu 16(%r10),%xmm1
- movdqu 32(%r10),%xmm2
- movdqu 48(%r10),%xmm3
-.byte 102,15,56,0,198
+ movdqa 64(%r11),%xmm3
+ movdqa 0(%r11),%xmm13
+ movdqu 0(%r10),%xmm4
+ movdqu 16(%r10),%xmm5
+ movdqu 32(%r10),%xmm6
+ movdqu 48(%r10),%xmm7
+.byte 102,15,56,0,227
+.byte 102,15,56,0,235
+.byte 102,15,56,0,243
addq $64,%r10
-.byte 102,15,56,0,206
-.byte 102,15,56,0,214
-.byte 102,15,56,0,222
- paddd %xmm9,%xmm0
- paddd %xmm9,%xmm1
- paddd %xmm9,%xmm2
- movdqa %xmm0,0(%rsp)
- psubd %xmm9,%xmm0
- movdqa %xmm1,16(%rsp)
- psubd %xmm9,%xmm1
- movdqa %xmm2,32(%rsp)
- psubd %xmm9,%xmm2
- movups (%r15),%xmm13
- movups 16(%r15),%xmm14
+ paddd %xmm13,%xmm4
+.byte 102,15,56,0,251
+ paddd %xmm13,%xmm5
+ paddd %xmm13,%xmm6
+ movdqa %xmm4,0(%rsp)
+ psubd %xmm13,%xmm4
+ movdqa %xmm5,16(%rsp)
+ psubd %xmm13,%xmm5
+ movdqa %xmm6,32(%rsp)
+ psubd %xmm13,%xmm6
+ movups -112(%r15),%xmm15
+ movups 16-112(%r15),%xmm0
jmp .Loop_ssse3
-.align 16
+.align 32
.Loop_ssse3:
- movdqa %xmm1,%xmm4
- addl 0(%rsp),%ebp
- movups 0(%r12),%xmm12
- xorps %xmm13,%xmm12
- xorps %xmm12,%xmm11
-.byte 102,69,15,56,220,222
- movups 32(%r15),%xmm15
- xorl %edx,%ecx
- movdqa %xmm3,%xmm8
-.byte 102,15,58,15,224,8
+ rorl $2,%ebx
+ movups 0(%r12),%xmm14
+ xorps %xmm15,%xmm14
+ xorps %xmm14,%xmm2
+ movups -80(%r15),%xmm1
+.byte 102,15,56,220,208
+ pshufd $238,%xmm4,%xmm8
+ xorl %edx,%esi
+ movdqa %xmm7,%xmm12
+ paddd %xmm7,%xmm13
movl %eax,%edi
+ addl 0(%rsp),%ebp
+ punpcklqdq %xmm5,%xmm8
+ xorl %ecx,%ebx
roll $5,%eax
- paddd %xmm3,%xmm9
- andl %ecx,%esi
- xorl %edx,%ecx
- psrldq $4,%xmm8
- xorl %edx,%esi
- addl %eax,%ebp
- pxor %xmm0,%xmm4
- rorl $2,%ebx
addl %esi,%ebp
- pxor %xmm2,%xmm8
- addl 4(%rsp),%edx
- xorl %ecx,%ebx
- movl %ebp,%esi
- roll $5,%ebp
- pxor %xmm8,%xmm4
+ psrldq $4,%xmm12
andl %ebx,%edi
xorl %ecx,%ebx
- movdqa %xmm9,48(%rsp)
- xorl %ecx,%edi
-.byte 102,69,15,56,220,223
- movups 48(%r15),%xmm14
- addl %ebp,%edx
- movdqa %xmm4,%xmm10
- movdqa %xmm4,%xmm8
+ pxor %xmm4,%xmm8
+ addl %eax,%ebp
rorl $7,%eax
- addl %edi,%edx
- addl 8(%rsp),%ecx
+ pxor %xmm6,%xmm12
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ addl 4(%rsp),%edx
+ pxor %xmm12,%xmm8
xorl %ebx,%eax
- pslldq $12,%xmm10
- paddd %xmm4,%xmm4
- movl %edx,%edi
- roll $5,%edx
+ roll $5,%ebp
+ movdqa %xmm13,48(%rsp)
+ addl %edi,%edx
+ movups -64(%r15),%xmm0
+.byte 102,15,56,220,209
andl %eax,%esi
+ movdqa %xmm8,%xmm3
xorl %ebx,%eax
- psrld $31,%xmm8
- xorl %ebx,%esi
- addl %edx,%ecx
- movdqa %xmm10,%xmm9
+ addl %ebp,%edx
rorl $7,%ebp
- addl %esi,%ecx
- psrld $30,%xmm10
- por %xmm8,%xmm4
- addl 12(%rsp),%ebx
+ movdqa %xmm8,%xmm12
+ xorl %ebx,%esi
+ pslldq $12,%xmm3
+ paddd %xmm8,%xmm8
+ movl %edx,%edi
+ addl 8(%rsp),%ecx
+ psrld $31,%xmm12
xorl %eax,%ebp
- movl %ecx,%esi
- roll $5,%ecx
-.byte 102,69,15,56,220,222
- movups 64(%r15),%xmm15
- pslld $2,%xmm9
- pxor %xmm10,%xmm4
+ roll $5,%edx
+ addl %esi,%ecx
+ movdqa %xmm3,%xmm13
andl %ebp,%edi
xorl %eax,%ebp
- movdqa 0(%r11),%xmm10
- xorl %eax,%edi
- addl %ecx,%ebx
- pxor %xmm9,%xmm4
+ psrld $30,%xmm3
+ addl %edx,%ecx
rorl $7,%edx
- addl %edi,%ebx
- movdqa %xmm2,%xmm5
- addl 16(%rsp),%eax
+ por %xmm12,%xmm8
+ xorl %eax,%edi
+ movl %ecx,%esi
+ addl 12(%rsp),%ebx
+ movups -48(%r15),%xmm1
+.byte 102,15,56,220,208
+ pslld $2,%xmm13
+ pxor %xmm3,%xmm8
xorl %ebp,%edx
- movdqa %xmm4,%xmm9
-.byte 102,15,58,15,233,8
- movl %ebx,%edi
- roll $5,%ebx
- paddd %xmm4,%xmm10
+ movdqa 0(%r11),%xmm3
+ roll $5,%ecx
+ addl %edi,%ebx
andl %edx,%esi
+ pxor %xmm13,%xmm8
xorl %ebp,%edx
- psrldq $4,%xmm9
- xorl %ebp,%esi
- addl %ebx,%eax
- pxor %xmm1,%xmm5
+ addl %ecx,%ebx
rorl $7,%ecx
- addl %esi,%eax
- pxor %xmm3,%xmm9
- addl 20(%rsp),%ebp
-.byte 102,69,15,56,220,223
- movups 80(%r15),%xmm14
+ pshufd $238,%xmm5,%xmm9
+ xorl %ebp,%esi
+ movdqa %xmm8,%xmm13
+ paddd %xmm8,%xmm3
+ movl %ebx,%edi
+ addl 16(%rsp),%eax
+ punpcklqdq %xmm6,%xmm9
xorl %edx,%ecx
- movl %eax,%esi
- roll $5,%eax
- pxor %xmm9,%xmm5
+ roll $5,%ebx
+ addl %esi,%eax
+ psrldq $4,%xmm13
andl %ecx,%edi
xorl %edx,%ecx
- movdqa %xmm10,0(%rsp)
- xorl %edx,%edi
- addl %eax,%ebp
- movdqa %xmm5,%xmm8
- movdqa %xmm5,%xmm9
+ pxor %xmm5,%xmm9
+ addl %ebx,%eax
rorl $7,%ebx
- addl %edi,%ebp
- addl 24(%rsp),%edx
+ movups -32(%r15),%xmm0
+.byte 102,15,56,220,209
+ pxor %xmm7,%xmm13
+ xorl %edx,%edi
+ movl %eax,%esi
+ addl 20(%rsp),%ebp
+ pxor %xmm13,%xmm9
xorl %ecx,%ebx
- pslldq $12,%xmm8
- paddd %xmm5,%xmm5
- movl %ebp,%edi
- roll $5,%ebp
+ roll $5,%eax
+ movdqa %xmm3,0(%rsp)
+ addl %edi,%ebp
andl %ebx,%esi
+ movdqa %xmm9,%xmm12
xorl %ecx,%ebx
- psrld $31,%xmm9
- xorl %ecx,%esi
-.byte 102,69,15,56,220,222
- movups 96(%r15),%xmm15
- addl %ebp,%edx
- movdqa %xmm8,%xmm10
+ addl %eax,%ebp
rorl $7,%eax
- addl %esi,%edx
- psrld $30,%xmm8
- por %xmm9,%xmm5
- addl 28(%rsp),%ecx
+ movdqa %xmm9,%xmm13
+ xorl %ecx,%esi
+ pslldq $12,%xmm12
+ paddd %xmm9,%xmm9
+ movl %ebp,%edi
+ addl 24(%rsp),%edx
+ psrld $31,%xmm13
xorl %ebx,%eax
- movl %edx,%esi
- roll $5,%edx
- pslld $2,%xmm10
- pxor %xmm8,%xmm5
+ roll $5,%ebp
+ addl %esi,%edx
+ movups -16(%r15),%xmm1
+.byte 102,15,56,220,208
+ movdqa %xmm12,%xmm3
andl %eax,%edi
xorl %ebx,%eax
- movdqa 16(%r11),%xmm8
- xorl %ebx,%edi
- addl %edx,%ecx
- pxor %xmm10,%xmm5
+ psrld $30,%xmm12
+ addl %ebp,%edx
rorl $7,%ebp
- addl %edi,%ecx
- movdqa %xmm3,%xmm6
- addl 32(%rsp),%ebx
+ por %xmm13,%xmm9
+ xorl %ebx,%edi
+ movl %edx,%esi
+ addl 28(%rsp),%ecx
+ pslld $2,%xmm3
+ pxor %xmm12,%xmm9
xorl %eax,%ebp
- movdqa %xmm5,%xmm10
-.byte 102,15,58,15,242,8
- movl %ecx,%edi
- roll $5,%ecx
-.byte 102,69,15,56,220,223
- movups 112(%r15),%xmm14
- paddd %xmm5,%xmm8
+ movdqa 16(%r11),%xmm12
+ roll $5,%edx
+ addl %edi,%ecx
andl %ebp,%esi
+ pxor %xmm3,%xmm9
xorl %eax,%ebp
- psrldq $4,%xmm10
- xorl %eax,%esi
- addl %ecx,%ebx
- pxor %xmm2,%xmm6
+ addl %edx,%ecx
rorl $7,%edx
- addl %esi,%ebx
- pxor %xmm4,%xmm10
- addl 36(%rsp),%eax
+ pshufd $238,%xmm6,%xmm10
+ xorl %eax,%esi
+ movdqa %xmm9,%xmm3
+ paddd %xmm9,%xmm12
+ movl %ecx,%edi
+ addl 32(%rsp),%ebx
+ movups 0(%r15),%xmm0
+.byte 102,15,56,220,209
+ punpcklqdq %xmm7,%xmm10
xorl %ebp,%edx
- movl %ebx,%esi
- roll $5,%ebx
- pxor %xmm10,%xmm6
+ roll $5,%ecx
+ addl %esi,%ebx
+ psrldq $4,%xmm3
andl %edx,%edi
xorl %ebp,%edx
- movdqa %xmm8,16(%rsp)
- xorl %ebp,%edi
- addl %ebx,%eax
- movdqa %xmm6,%xmm9
- movdqa %xmm6,%xmm10
+ pxor %xmm6,%xmm10
+ addl %ecx,%ebx
rorl $7,%ecx
- addl %edi,%eax
- addl 40(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 128(%r15),%xmm15
+ pxor %xmm8,%xmm3
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ addl 36(%rsp),%eax
+ pxor %xmm3,%xmm10
xorl %edx,%ecx
- pslldq $12,%xmm9
- paddd %xmm6,%xmm6
- movl %eax,%edi
- roll $5,%eax
+ roll $5,%ebx
+ movdqa %xmm12,16(%rsp)
+ addl %edi,%eax
andl %ecx,%esi
+ movdqa %xmm10,%xmm13
xorl %edx,%ecx
- psrld $31,%xmm10
- xorl %edx,%esi
- addl %eax,%ebp
- movdqa %xmm9,%xmm8
+ addl %ebx,%eax
rorl $7,%ebx
- addl %esi,%ebp
- psrld $30,%xmm9
- por %xmm10,%xmm6
- addl 44(%rsp),%edx
+ movups 16(%r15),%xmm1
+.byte 102,15,56,220,208
+ movdqa %xmm10,%xmm3
+ xorl %edx,%esi
+ pslldq $12,%xmm13
+ paddd %xmm10,%xmm10
+ movl %eax,%edi
+ addl 40(%rsp),%ebp
+ psrld $31,%xmm3
xorl %ecx,%ebx
- movl %ebp,%esi
- roll $5,%ebp
- pslld $2,%xmm8
- pxor %xmm9,%xmm6
+ roll $5,%eax
+ addl %esi,%ebp
+ movdqa %xmm13,%xmm12
andl %ebx,%edi
xorl %ecx,%ebx
- movdqa 16(%r11),%xmm9
- xorl %ecx,%edi
-.byte 102,69,15,56,220,223
- movups 144(%r15),%xmm14
- addl %ebp,%edx
- pxor %xmm8,%xmm6
+ psrld $30,%xmm13
+ addl %eax,%ebp
rorl $7,%eax
- addl %edi,%edx
- movdqa %xmm4,%xmm7
- addl 48(%rsp),%ecx
+ por %xmm3,%xmm10
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ addl 44(%rsp),%edx
+ pslld $2,%xmm12
+ pxor %xmm13,%xmm10
xorl %ebx,%eax
- movdqa %xmm6,%xmm8
-.byte 102,15,58,15,251,8
- movl %edx,%edi
- roll $5,%edx
- paddd %xmm6,%xmm9
+ movdqa 16(%r11),%xmm13
+ roll $5,%ebp
+ addl %edi,%edx
+ movups 32(%r15),%xmm0
+.byte 102,15,56,220,209
andl %eax,%esi
+ pxor %xmm12,%xmm10
xorl %ebx,%eax
- psrldq $4,%xmm8
- xorl %ebx,%esi
- addl %edx,%ecx
- pxor %xmm3,%xmm7
+ addl %ebp,%edx
rorl $7,%ebp
- addl %esi,%ecx
- pxor %xmm5,%xmm8
- addl 52(%rsp),%ebx
+ pshufd $238,%xmm7,%xmm11
+ xorl %ebx,%esi
+ movdqa %xmm10,%xmm12
+ paddd %xmm10,%xmm13
+ movl %edx,%edi
+ addl 48(%rsp),%ecx
+ punpcklqdq %xmm8,%xmm11
xorl %eax,%ebp
- movl %ecx,%esi
- roll $5,%ecx
-.byte 102,69,15,56,220,222
- movups 160(%r15),%xmm15
- pxor %xmm8,%xmm7
+ roll $5,%edx
+ addl %esi,%ecx
+ psrldq $4,%xmm12
andl %ebp,%edi
xorl %eax,%ebp
- movdqa %xmm9,32(%rsp)
- xorl %eax,%edi
- addl %ecx,%ebx
- movdqa %xmm7,%xmm10
- movdqa %xmm7,%xmm8
+ pxor %xmm7,%xmm11
+ addl %edx,%ecx
rorl $7,%edx
- addl %edi,%ebx
- addl 56(%rsp),%eax
+ pxor %xmm9,%xmm12
+ xorl %eax,%edi
+ movl %ecx,%esi
+ addl 52(%rsp),%ebx
+ movups 48(%r15),%xmm1
+.byte 102,15,56,220,208
+ pxor %xmm12,%xmm11
xorl %ebp,%edx
- pslldq $12,%xmm10
- paddd %xmm7,%xmm7
- movl %ebx,%edi
- roll $5,%ebx
+ roll $5,%ecx
+ movdqa %xmm13,32(%rsp)
+ addl %edi,%ebx
andl %edx,%esi
+ movdqa %xmm11,%xmm3
xorl %ebp,%edx
- psrld $31,%xmm8
- xorl %ebp,%esi
- addl %ebx,%eax
- movdqa %xmm10,%xmm9
+ addl %ecx,%ebx
rorl $7,%ecx
+ movdqa %xmm11,%xmm12
+ xorl %ebp,%esi
+ pslldq $12,%xmm3
+ paddd %xmm11,%xmm11
+ movl %ebx,%edi
+ addl 56(%rsp),%eax
+ psrld $31,%xmm12
+ xorl %edx,%ecx
+ roll $5,%ebx
addl %esi,%eax
- psrld $30,%xmm10
- por %xmm8,%xmm7
- addl 60(%rsp),%ebp
+ movdqa %xmm3,%xmm13
+ andl %ecx,%edi
+ xorl %edx,%ecx
+ psrld $30,%xmm3
+ addl %ebx,%eax
+ rorl $7,%ebx
cmpl $11,%r8d
jb .Laesenclast1
- movups 176(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 192(%r15),%xmm15
-.byte 102,69,15,56,220,222
+ movups 64(%r15),%xmm0
+.byte 102,15,56,220,209
+ movups 80(%r15),%xmm1
+.byte 102,15,56,220,208
je .Laesenclast1
- movups 208(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 224(%r15),%xmm15
-.byte 102,69,15,56,220,222
+ movups 96(%r15),%xmm0
+.byte 102,15,56,220,209
+ movups 112(%r15),%xmm1
+.byte 102,15,56,220,208
.Laesenclast1:
-.byte 102,69,15,56,221,223
- movups 16(%r15),%xmm14
- xorl %edx,%ecx
+.byte 102,15,56,221,209
+ movups 16-112(%r15),%xmm0
+ por %xmm12,%xmm11
+ xorl %edx,%edi
movl %eax,%esi
+ addl 60(%rsp),%ebp
+ pslld $2,%xmm13
+ pxor %xmm3,%xmm11
+ xorl %ecx,%ebx
+ movdqa 16(%r11),%xmm3
roll $5,%eax
- pslld $2,%xmm9
- pxor %xmm10,%xmm7
- andl %ecx,%edi
- xorl %edx,%ecx
- movdqa 16(%r11),%xmm10
- xorl %edx,%edi
- addl %eax,%ebp
- pxor %xmm9,%xmm7
- rorl $7,%ebx
addl %edi,%ebp
- movdqa %xmm7,%xmm9
- addl 0(%rsp),%edx
- pxor %xmm4,%xmm0
-.byte 102,68,15,58,15,206,8
- xorl %ecx,%ebx
- movl %ebp,%edi
- roll $5,%ebp
- pxor %xmm1,%xmm0
andl %ebx,%esi
+ pxor %xmm13,%xmm11
+ pshufd $238,%xmm10,%xmm13
xorl %ecx,%ebx
- movdqa %xmm10,%xmm8
- paddd %xmm7,%xmm10
- xorl %ecx,%esi
- movups 16(%r12),%xmm12
- xorps %xmm13,%xmm12
- movups %xmm11,0(%r13,%r12,1)
- xorps %xmm12,%xmm11
-.byte 102,69,15,56,220,222
- movups 32(%r15),%xmm15
- addl %ebp,%edx
- pxor %xmm9,%xmm0
+ addl %eax,%ebp
rorl $7,%eax
- addl %esi,%edx
- addl 4(%rsp),%ecx
+ pxor %xmm8,%xmm4
+ xorl %ecx,%esi
+ movl %ebp,%edi
+ addl 0(%rsp),%edx
+ punpcklqdq %xmm11,%xmm13
xorl %ebx,%eax
- movdqa %xmm0,%xmm9
- movdqa %xmm10,48(%rsp)
- movl %edx,%esi
- roll $5,%edx
+ roll $5,%ebp
+ pxor %xmm5,%xmm4
+ addl %esi,%edx
+ movups 16(%r12),%xmm14
+ xorps %xmm15,%xmm14
+ movups %xmm2,0(%r12,%r13,1)
+ xorps %xmm14,%xmm2
+ movups -80(%r15),%xmm1
+.byte 102,15,56,220,208
andl %eax,%edi
+ movdqa %xmm3,%xmm12
xorl %ebx,%eax
- pslld $2,%xmm0
- xorl %ebx,%edi
- addl %edx,%ecx
- psrld $30,%xmm9
+ paddd %xmm11,%xmm3
+ addl %ebp,%edx
+ pxor %xmm13,%xmm4
rorl $7,%ebp
- addl %edi,%ecx
- addl 8(%rsp),%ebx
+ xorl %ebx,%edi
+ movl %edx,%esi
+ addl 4(%rsp),%ecx
+ movdqa %xmm4,%xmm13
xorl %eax,%ebp
- movl %ecx,%edi
- roll $5,%ecx
-.byte 102,69,15,56,220,223
- movups 48(%r15),%xmm14
- por %xmm9,%xmm0
+ roll $5,%edx
+ movdqa %xmm3,48(%rsp)
+ addl %edi,%ecx
andl %ebp,%esi
xorl %eax,%ebp
- movdqa %xmm0,%xmm10
- xorl %eax,%esi
- addl %ecx,%ebx
+ pslld $2,%xmm4
+ addl %edx,%ecx
rorl $7,%edx
- addl %esi,%ebx
- addl 12(%rsp),%eax
+ psrld $30,%xmm13
+ xorl %eax,%esi
+ movl %ecx,%edi
+ addl 8(%rsp),%ebx
+ movups -64(%r15),%xmm0
+.byte 102,15,56,220,209
+ por %xmm13,%xmm4
xorl %ebp,%edx
- movl %ebx,%esi
- roll $5,%ebx
+ roll $5,%ecx
+ pshufd $238,%xmm11,%xmm3
+ addl %esi,%ebx
andl %edx,%edi
xorl %ebp,%edx
+ addl %ecx,%ebx
+ addl 12(%rsp),%eax
xorl %ebp,%edi
- addl %ebx,%eax
- rorl $7,%ecx
+ movl %ebx,%esi
+ roll $5,%ebx
addl %edi,%eax
- addl 16(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 64(%r15),%xmm15
- pxor %xmm5,%xmm1
-.byte 102,68,15,58,15,215,8
xorl %edx,%esi
+ rorl $7,%ecx
+ addl %ebx,%eax
+ pxor %xmm9,%xmm5
+ addl 16(%rsp),%ebp
+ movups -48(%r15),%xmm1
+.byte 102,15,56,220,208
+ xorl %ecx,%esi
+ punpcklqdq %xmm4,%xmm3
movl %eax,%edi
roll $5,%eax
- pxor %xmm2,%xmm1
- xorl %ecx,%esi
- addl %eax,%ebp
- movdqa %xmm8,%xmm9
- paddd %xmm0,%xmm8
- rorl $7,%ebx
+ pxor %xmm6,%xmm5
addl %esi,%ebp
- pxor %xmm10,%xmm1
- addl 20(%rsp),%edx
xorl %ecx,%edi
+ movdqa %xmm12,%xmm13
+ rorl $7,%ebx
+ paddd %xmm4,%xmm12
+ addl %eax,%ebp
+ pxor %xmm3,%xmm5
+ addl 20(%rsp),%edx
+ xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
- movdqa %xmm1,%xmm10
- movdqa %xmm8,0(%rsp)
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
+ movdqa %xmm5,%xmm3
addl %edi,%edx
- pslld $2,%xmm1
- addl 24(%rsp),%ecx
xorl %ebx,%esi
- psrld $30,%xmm10
+ movdqa %xmm12,0(%rsp)
+ rorl $7,%eax
+ addl %ebp,%edx
+ addl 24(%rsp),%ecx
+ pslld $2,%xmm5
+ xorl %eax,%esi
movl %edx,%edi
+ psrld $30,%xmm3
roll $5,%edx
- xorl %eax,%esi
-.byte 102,69,15,56,220,223
- movups 80(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
addl %esi,%ecx
- por %xmm10,%xmm1
- addl 28(%rsp),%ebx
+ movups -32(%r15),%xmm0
+.byte 102,15,56,220,209
xorl %eax,%edi
- movdqa %xmm1,%xmm8
+ rorl $7,%ebp
+ por %xmm3,%xmm5
+ addl %edx,%ecx
+ addl 28(%rsp),%ebx
+ pshufd $238,%xmm4,%xmm12
+ xorl %ebp,%edi
movl %ecx,%esi
roll $5,%ecx
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
addl %edi,%ebx
- addl 32(%rsp),%eax
- pxor %xmm6,%xmm2
-.byte 102,68,15,58,15,192,8
xorl %ebp,%esi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ pxor %xmm10,%xmm6
+ addl 32(%rsp),%eax
+ xorl %edx,%esi
+ punpcklqdq %xmm5,%xmm12
movl %ebx,%edi
roll $5,%ebx
- pxor %xmm3,%xmm2
- xorl %edx,%esi
- addl %ebx,%eax
- movdqa 32(%r11),%xmm10
- paddd %xmm1,%xmm9
- rorl $7,%ecx
+ pxor %xmm7,%xmm6
addl %esi,%eax
- pxor %xmm8,%xmm2
- addl 36(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 96(%r15),%xmm15
xorl %edx,%edi
+ movdqa 32(%r11),%xmm3
+ rorl $7,%ecx
+ paddd %xmm5,%xmm13
+ addl %ebx,%eax
+ pxor %xmm12,%xmm6
+ addl 36(%rsp),%ebp
+ movups -16(%r15),%xmm1
+.byte 102,15,56,220,208
+ xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
- movdqa %xmm2,%xmm8
- movdqa %xmm9,16(%rsp)
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
+ movdqa %xmm6,%xmm12
addl %edi,%ebp
- pslld $2,%xmm2
- addl 40(%rsp),%edx
xorl %ecx,%esi
- psrld $30,%xmm8
+ movdqa %xmm13,16(%rsp)
+ rorl $7,%ebx
+ addl %eax,%ebp
+ addl 40(%rsp),%edx
+ pslld $2,%xmm6
+ xorl %ebx,%esi
movl %ebp,%edi
+ psrld $30,%xmm12
roll $5,%ebp
- xorl %ebx,%esi
- addl %ebp,%edx
- rorl $7,%eax
addl %esi,%edx
- por %xmm8,%xmm2
- addl 44(%rsp),%ecx
xorl %ebx,%edi
- movdqa %xmm2,%xmm9
+ rorl $7,%eax
+ por %xmm12,%xmm6
+ addl %ebp,%edx
+ addl 44(%rsp),%ecx
+ pshufd $238,%xmm5,%xmm13
+ xorl %eax,%edi
movl %edx,%esi
roll $5,%edx
- xorl %eax,%edi
-.byte 102,69,15,56,220,223
- movups 112(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
addl %edi,%ecx
- addl 48(%rsp),%ebx
- pxor %xmm7,%xmm3
-.byte 102,68,15,58,15,201,8
+ movups 0(%r15),%xmm0
+.byte 102,15,56,220,209
xorl %eax,%esi
+ rorl $7,%ebp
+ addl %edx,%ecx
+ pxor %xmm11,%xmm7
+ addl 48(%rsp),%ebx
+ xorl %ebp,%esi
+ punpcklqdq %xmm6,%xmm13
movl %ecx,%edi
roll $5,%ecx
- pxor %xmm4,%xmm3
- xorl %ebp,%esi
- addl %ecx,%ebx
- movdqa %xmm10,%xmm8
- paddd %xmm2,%xmm10
- rorl $7,%edx
+ pxor %xmm8,%xmm7
addl %esi,%ebx
- pxor %xmm9,%xmm3
- addl 52(%rsp),%eax
xorl %ebp,%edi
+ movdqa %xmm3,%xmm12
+ rorl $7,%edx
+ paddd %xmm6,%xmm3
+ addl %ecx,%ebx
+ pxor %xmm13,%xmm7
+ addl 52(%rsp),%eax
+ xorl %edx,%edi
movl %ebx,%esi
roll $5,%ebx
- movdqa %xmm3,%xmm9
- movdqa %xmm10,32(%rsp)
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
+ movdqa %xmm7,%xmm13
addl %edi,%eax
- pslld $2,%xmm3
- addl 56(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 128(%r15),%xmm15
xorl %edx,%esi
- psrld $30,%xmm9
+ movdqa %xmm3,32(%rsp)
+ rorl $7,%ecx
+ addl %ebx,%eax
+ addl 56(%rsp),%ebp
+ movups 16(%r15),%xmm1
+.byte 102,15,56,220,208
+ pslld $2,%xmm7
+ xorl %ecx,%esi
movl %eax,%edi
+ psrld $30,%xmm13
roll $5,%eax
- xorl %ecx,%esi
- addl %eax,%ebp
- rorl $7,%ebx
addl %esi,%ebp
- por %xmm9,%xmm3
- addl 60(%rsp),%edx
xorl %ecx,%edi
- movdqa %xmm3,%xmm10
+ rorl $7,%ebx
+ por %xmm13,%xmm7
+ addl %eax,%ebp
+ addl 60(%rsp),%edx
+ pshufd $238,%xmm6,%xmm3
+ xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
addl %edi,%edx
- addl 0(%rsp),%ecx
- pxor %xmm0,%xmm4
-.byte 102,68,15,58,15,210,8
xorl %ebx,%esi
+ rorl $7,%eax
+ addl %ebp,%edx
+ pxor %xmm4,%xmm8
+ addl 0(%rsp),%ecx
+ xorl %eax,%esi
+ punpcklqdq %xmm7,%xmm3
movl %edx,%edi
roll $5,%edx
- pxor %xmm5,%xmm4
- xorl %eax,%esi
-.byte 102,69,15,56,220,223
- movups 144(%r15),%xmm14
- addl %edx,%ecx
- movdqa %xmm8,%xmm9
- paddd %xmm3,%xmm8
- rorl $7,%ebp
+ pxor %xmm9,%xmm8
addl %esi,%ecx
- pxor %xmm10,%xmm4
- addl 4(%rsp),%ebx
+ movups 32(%r15),%xmm0
+.byte 102,15,56,220,209
xorl %eax,%edi
+ movdqa %xmm12,%xmm13
+ rorl $7,%ebp
+ paddd %xmm7,%xmm12
+ addl %edx,%ecx
+ pxor %xmm3,%xmm8
+ addl 4(%rsp),%ebx
+ xorl %ebp,%edi
movl %ecx,%esi
roll $5,%ecx
- movdqa %xmm4,%xmm10
- movdqa %xmm8,48(%rsp)
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
+ movdqa %xmm8,%xmm3
addl %edi,%ebx
- pslld $2,%xmm4
- addl 8(%rsp),%eax
xorl %ebp,%esi
- psrld $30,%xmm10
+ movdqa %xmm12,48(%rsp)
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 8(%rsp),%eax
+ pslld $2,%xmm8
+ xorl %edx,%esi
movl %ebx,%edi
+ psrld $30,%xmm3
roll $5,%ebx
- xorl %edx,%esi
- addl %ebx,%eax
- rorl $7,%ecx
addl %esi,%eax
- por %xmm10,%xmm4
- addl 12(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 160(%r15),%xmm15
xorl %edx,%edi
- movdqa %xmm4,%xmm8
+ rorl $7,%ecx
+ por %xmm3,%xmm8
+ addl %ebx,%eax
+ addl 12(%rsp),%ebp
+ movups 48(%r15),%xmm1
+.byte 102,15,56,220,208
+ pshufd $238,%xmm7,%xmm12
+ xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
addl %edi,%ebp
- addl 16(%rsp),%edx
- pxor %xmm1,%xmm5
-.byte 102,68,15,58,15,195,8
xorl %ecx,%esi
+ rorl $7,%ebx
+ addl %eax,%ebp
+ pxor %xmm5,%xmm9
+ addl 16(%rsp),%edx
+ xorl %ebx,%esi
+ punpcklqdq %xmm8,%xmm12
movl %ebp,%edi
roll $5,%ebp
- pxor %xmm6,%xmm5
- xorl %ebx,%esi
- addl %ebp,%edx
- movdqa %xmm9,%xmm10
- paddd %xmm4,%xmm9
- rorl $7,%eax
+ pxor %xmm10,%xmm9
addl %esi,%edx
- pxor %xmm8,%xmm5
- addl 20(%rsp),%ecx
xorl %ebx,%edi
+ movdqa %xmm13,%xmm3
+ rorl $7,%eax
+ paddd %xmm8,%xmm13
+ addl %ebp,%edx
+ pxor %xmm12,%xmm9
+ addl 20(%rsp),%ecx
+ xorl %eax,%edi
movl %edx,%esi
roll $5,%edx
- movdqa %xmm5,%xmm8
- movdqa %xmm9,0(%rsp)
- xorl %eax,%edi
+ movdqa %xmm9,%xmm12
+ addl %edi,%ecx
cmpl $11,%r8d
jb .Laesenclast2
- movups 176(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 192(%r15),%xmm15
-.byte 102,69,15,56,220,222
+ movups 64(%r15),%xmm0
+.byte 102,15,56,220,209
+ movups 80(%r15),%xmm1
+.byte 102,15,56,220,208
je .Laesenclast2
- movups 208(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 224(%r15),%xmm15
-.byte 102,69,15,56,220,222
+ movups 96(%r15),%xmm0
+.byte 102,15,56,220,209
+ movups 112(%r15),%xmm1
+.byte 102,15,56,220,208
.Laesenclast2:
-.byte 102,69,15,56,221,223
- movups 16(%r15),%xmm14
- addl %edx,%ecx
+.byte 102,15,56,221,209
+ movups 16-112(%r15),%xmm0
+ xorl %eax,%esi
+ movdqa %xmm13,0(%rsp)
rorl $7,%ebp
- addl %edi,%ecx
- pslld $2,%xmm5
+ addl %edx,%ecx
addl 24(%rsp),%ebx
- xorl %eax,%esi
- psrld $30,%xmm8
+ pslld $2,%xmm9
+ xorl %ebp,%esi
movl %ecx,%edi
+ psrld $30,%xmm12
roll $5,%ecx
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
- por %xmm8,%xmm5
- addl 28(%rsp),%eax
xorl %ebp,%edi
- movdqa %xmm5,%xmm9
+ rorl $7,%edx
+ por %xmm12,%xmm9
+ addl %ecx,%ebx
+ addl 28(%rsp),%eax
+ pshufd $238,%xmm8,%xmm13
+ rorl $7,%ecx
movl %ebx,%esi
- roll $5,%ebx
xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
+ roll $5,%ebx
addl %edi,%eax
- movl %ecx,%edi
- movups 32(%r12),%xmm12
- xorps %xmm13,%xmm12
- movups %xmm11,16(%r13,%r12,1)
- xorps %xmm12,%xmm11
-.byte 102,69,15,56,220,222
- movups 32(%r15),%xmm15
- pxor %xmm2,%xmm6
-.byte 102,68,15,58,15,204,8
+ xorl %ecx,%esi
xorl %edx,%ecx
+ addl %ebx,%eax
+ pxor %xmm6,%xmm10
addl 32(%rsp),%ebp
- andl %edx,%edi
- pxor %xmm7,%xmm6
+ movups 32(%r12),%xmm14
+ xorps %xmm15,%xmm14
+ movups %xmm2,16(%r13,%r12,1)
+ xorps %xmm14,%xmm2
+ movups -80(%r15),%xmm1
+.byte 102,15,56,220,208
andl %ecx,%esi
+ xorl %edx,%ecx
rorl $7,%ebx
- movdqa %xmm10,%xmm8
- paddd %xmm5,%xmm10
- addl %edi,%ebp
+ punpcklqdq %xmm9,%xmm13
movl %eax,%edi
- pxor %xmm9,%xmm6
+ xorl %ecx,%esi
+ pxor %xmm11,%xmm10
roll $5,%eax
addl %esi,%ebp
- xorl %edx,%ecx
- addl %eax,%ebp
- movdqa %xmm6,%xmm9
- movdqa %xmm10,16(%rsp)
- movl %ebx,%esi
+ movdqa %xmm3,%xmm12
+ xorl %ebx,%edi
+ paddd %xmm9,%xmm3
xorl %ecx,%ebx
+ pxor %xmm13,%xmm10
+ addl %eax,%ebp
addl 36(%rsp),%edx
- andl %ecx,%esi
- pslld $2,%xmm6
andl %ebx,%edi
+ xorl %ecx,%ebx
rorl $7,%eax
- psrld $30,%xmm9
- addl %esi,%edx
+ movdqa %xmm10,%xmm13
movl %ebp,%esi
+ xorl %ebx,%edi
+ movdqa %xmm3,16(%rsp)
roll $5,%ebp
-.byte 102,69,15,56,220,223
- movups 48(%r15),%xmm14
addl %edi,%edx
- xorl %ecx,%ebx
- addl %ebp,%edx
- por %xmm9,%xmm6
- movl %eax,%edi
+ movups -64(%r15),%xmm0
+.byte 102,15,56,220,209
+ xorl %eax,%esi
+ pslld $2,%xmm10
xorl %ebx,%eax
- movdqa %xmm6,%xmm10
+ addl %ebp,%edx
+ psrld $30,%xmm13
addl 40(%rsp),%ecx
- andl %ebx,%edi
andl %eax,%esi
+ xorl %ebx,%eax
+ por %xmm13,%xmm10
rorl $7,%ebp
- addl %edi,%ecx
movl %edx,%edi
+ xorl %eax,%esi
roll $5,%edx
+ pshufd $238,%xmm9,%xmm3
addl %esi,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- movl %ebp,%esi
+ xorl %ebp,%edi
xorl %eax,%ebp
+ addl %edx,%ecx
addl 44(%rsp),%ebx
- andl %eax,%esi
andl %ebp,%edi
-.byte 102,69,15,56,220,222
- movups 64(%r15),%xmm15
+ xorl %eax,%ebp
rorl $7,%edx
- addl %esi,%ebx
+ movups -48(%r15),%xmm1
+.byte 102,15,56,220,208
movl %ecx,%esi
+ xorl %ebp,%edi
roll $5,%ecx
addl %edi,%ebx
- xorl %eax,%ebp
- addl %ecx,%ebx
- movl %edx,%edi
- pxor %xmm3,%xmm7
-.byte 102,68,15,58,15,213,8
+ xorl %edx,%esi
xorl %ebp,%edx
+ addl %ecx,%ebx
+ pxor %xmm7,%xmm11
addl 48(%rsp),%eax
- andl %ebp,%edi
- pxor %xmm0,%xmm7
andl %edx,%esi
+ xorl %ebp,%edx
rorl $7,%ecx
- movdqa 48(%r11),%xmm9
- paddd %xmm6,%xmm8
- addl %edi,%eax
+ punpcklqdq %xmm10,%xmm3
movl %ebx,%edi
- pxor %xmm10,%xmm7
+ xorl %edx,%esi
+ pxor %xmm4,%xmm11
roll $5,%ebx
addl %esi,%eax
- xorl %ebp,%edx
- addl %ebx,%eax
- movdqa %xmm7,%xmm10
- movdqa %xmm8,32(%rsp)
- movl %ecx,%esi
-.byte 102,69,15,56,220,223
- movups 80(%r15),%xmm14
+ movdqa 48(%r11),%xmm13
+ xorl %ecx,%edi
+ paddd %xmm10,%xmm12
xorl %edx,%ecx
+ pxor %xmm3,%xmm11
+ addl %ebx,%eax
addl 52(%rsp),%ebp
- andl %edx,%esi
- pslld $2,%xmm7
+ movups -32(%r15),%xmm0
+.byte 102,15,56,220,209
andl %ecx,%edi
+ xorl %edx,%ecx
rorl $7,%ebx
- psrld $30,%xmm10
- addl %esi,%ebp
+ movdqa %xmm11,%xmm3
movl %eax,%esi
+ xorl %ecx,%edi
+ movdqa %xmm12,32(%rsp)
roll $5,%eax
addl %edi,%ebp
- xorl %edx,%ecx
- addl %eax,%ebp
- por %xmm10,%xmm7
- movl %ebx,%edi
+ xorl %ebx,%esi
+ pslld $2,%xmm11
xorl %ecx,%ebx
- movdqa %xmm7,%xmm8
+ addl %eax,%ebp
+ psrld $30,%xmm3
addl 56(%rsp),%edx
- andl %ecx,%edi
andl %ebx,%esi
+ xorl %ecx,%ebx
+ por %xmm3,%xmm11
rorl $7,%eax
- addl %edi,%edx
movl %ebp,%edi
+ xorl %ebx,%esi
roll $5,%ebp
-.byte 102,69,15,56,220,222
- movups 96(%r15),%xmm15
+ pshufd $238,%xmm10,%xmm12
addl %esi,%edx
- xorl %ecx,%ebx
- addl %ebp,%edx
- movl %eax,%esi
+ movups -16(%r15),%xmm1
+.byte 102,15,56,220,208
+ xorl %eax,%edi
xorl %ebx,%eax
+ addl %ebp,%edx
addl 60(%rsp),%ecx
- andl %ebx,%esi
andl %eax,%edi
+ xorl %ebx,%eax
rorl $7,%ebp
- addl %esi,%ecx
movl %edx,%esi
+ xorl %eax,%edi
roll $5,%edx
addl %edi,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- movl %ebp,%edi
- pxor %xmm4,%xmm0
-.byte 102,68,15,58,15,198,8
+ xorl %ebp,%esi
xorl %eax,%ebp
+ addl %edx,%ecx
+ pxor %xmm8,%xmm4
addl 0(%rsp),%ebx
- andl %eax,%edi
- pxor %xmm1,%xmm0
andl %ebp,%esi
-.byte 102,69,15,56,220,223
- movups 112(%r15),%xmm14
+ xorl %eax,%ebp
rorl $7,%edx
- movdqa %xmm9,%xmm10
- paddd %xmm7,%xmm9
- addl %edi,%ebx
+ movups 0(%r15),%xmm0
+.byte 102,15,56,220,209
+ punpcklqdq %xmm11,%xmm12
movl %ecx,%edi
- pxor %xmm8,%xmm0
+ xorl %ebp,%esi
+ pxor %xmm5,%xmm4
roll $5,%ecx
addl %esi,%ebx
- xorl %eax,%ebp
- addl %ecx,%ebx
- movdqa %xmm0,%xmm8
- movdqa %xmm9,48(%rsp)
- movl %edx,%esi
+ movdqa %xmm13,%xmm3
+ xorl %edx,%edi
+ paddd %xmm11,%xmm13
xorl %ebp,%edx
+ pxor %xmm12,%xmm4
+ addl %ecx,%ebx
addl 4(%rsp),%eax
- andl %ebp,%esi
- pslld $2,%xmm0
andl %edx,%edi
+ xorl %ebp,%edx
rorl $7,%ecx
- psrld $30,%xmm8
- addl %esi,%eax
+ movdqa %xmm4,%xmm12
movl %ebx,%esi
+ xorl %edx,%edi
+ movdqa %xmm13,48(%rsp)
roll $5,%ebx
addl %edi,%eax
- xorl %ebp,%edx
- addl %ebx,%eax
- por %xmm8,%xmm0
- movl %ecx,%edi
-.byte 102,69,15,56,220,222
- movups 128(%r15),%xmm15
+ xorl %ecx,%esi
+ pslld $2,%xmm4
xorl %edx,%ecx
- movdqa %xmm0,%xmm9
+ addl %ebx,%eax
+ psrld $30,%xmm12
addl 8(%rsp),%ebp
- andl %edx,%edi
+ movups 16(%r15),%xmm1
+.byte 102,15,56,220,208
andl %ecx,%esi
+ xorl %edx,%ecx
+ por %xmm12,%xmm4
rorl $7,%ebx
- addl %edi,%ebp
movl %eax,%edi
+ xorl %ecx,%esi
roll $5,%eax
+ pshufd $238,%xmm11,%xmm13
addl %esi,%ebp
- xorl %edx,%ecx
- addl %eax,%ebp
- movl %ebx,%esi
+ xorl %ebx,%edi
xorl %ecx,%ebx
+ addl %eax,%ebp
addl 12(%rsp),%edx
- andl %ecx,%esi
andl %ebx,%edi
+ xorl %ecx,%ebx
rorl $7,%eax
- addl %esi,%edx
movl %ebp,%esi
+ xorl %ebx,%edi
roll $5,%ebp
-.byte 102,69,15,56,220,223
- movups 144(%r15),%xmm14
addl %edi,%edx
- xorl %ecx,%ebx
- addl %ebp,%edx
- movl %eax,%edi
- pxor %xmm5,%xmm1
-.byte 102,68,15,58,15,207,8
+ movups 32(%r15),%xmm0
+.byte 102,15,56,220,209
+ xorl %eax,%esi
xorl %ebx,%eax
+ addl %ebp,%edx
+ pxor %xmm9,%xmm5
addl 16(%rsp),%ecx
- andl %ebx,%edi
- pxor %xmm2,%xmm1
andl %eax,%esi
+ xorl %ebx,%eax
rorl $7,%ebp
- movdqa %xmm10,%xmm8
- paddd %xmm0,%xmm10
- addl %edi,%ecx
+ punpcklqdq %xmm4,%xmm13
movl %edx,%edi
- pxor %xmm9,%xmm1
+ xorl %eax,%esi
+ pxor %xmm6,%xmm5
roll $5,%edx
addl %esi,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- movdqa %xmm1,%xmm9
- movdqa %xmm10,0(%rsp)
- movl %ebp,%esi
+ movdqa %xmm3,%xmm12
+ xorl %ebp,%edi
+ paddd %xmm4,%xmm3
xorl %eax,%ebp
+ pxor %xmm13,%xmm5
+ addl %edx,%ecx
addl 20(%rsp),%ebx
- andl %eax,%esi
- pslld $2,%xmm1
andl %ebp,%edi
-.byte 102,69,15,56,220,222
- movups 160(%r15),%xmm15
+ xorl %eax,%ebp
rorl $7,%edx
- psrld $30,%xmm9
- addl %esi,%ebx
+ movups 48(%r15),%xmm1
+.byte 102,15,56,220,208
+ movdqa %xmm5,%xmm13
movl %ecx,%esi
+ xorl %ebp,%edi
+ movdqa %xmm3,0(%rsp)
roll $5,%ecx
addl %edi,%ebx
- xorl %eax,%ebp
- addl %ecx,%ebx
- por %xmm9,%xmm1
- movl %edx,%edi
+ xorl %edx,%esi
+ pslld $2,%xmm5
xorl %ebp,%edx
- movdqa %xmm1,%xmm10
+ addl %ecx,%ebx
+ psrld $30,%xmm13
addl 24(%rsp),%eax
- andl %ebp,%edi
andl %edx,%esi
+ xorl %ebp,%edx
+ por %xmm13,%xmm5
rorl $7,%ecx
- addl %edi,%eax
movl %ebx,%edi
+ xorl %edx,%esi
roll $5,%ebx
+ pshufd $238,%xmm4,%xmm3
addl %esi,%eax
- xorl %ebp,%edx
+ xorl %ecx,%edi
+ xorl %edx,%ecx
addl %ebx,%eax
- movl %ecx,%esi
+ addl 28(%rsp),%ebp
cmpl $11,%r8d
jb .Laesenclast3
- movups 176(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 192(%r15),%xmm15
-.byte 102,69,15,56,220,222
+ movups 64(%r15),%xmm0
+.byte 102,15,56,220,209
+ movups 80(%r15),%xmm1
+.byte 102,15,56,220,208
je .Laesenclast3
- movups 208(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 224(%r15),%xmm15
-.byte 102,69,15,56,220,222
+ movups 96(%r15),%xmm0
+.byte 102,15,56,220,209
+ movups 112(%r15),%xmm1
+.byte 102,15,56,220,208
.Laesenclast3:
-.byte 102,69,15,56,221,223
- movups 16(%r15),%xmm14
- xorl %edx,%ecx
- addl 28(%rsp),%ebp
- andl %edx,%esi
+.byte 102,15,56,221,209
+ movups 16-112(%r15),%xmm0
andl %ecx,%edi
+ xorl %edx,%ecx
rorl $7,%ebx
- addl %esi,%ebp
movl %eax,%esi
+ xorl %ecx,%edi
roll $5,%eax
addl %edi,%ebp
- xorl %edx,%ecx
- addl %eax,%ebp
- movl %ebx,%edi
- pxor %xmm6,%xmm2
-.byte 102,68,15,58,15,208,8
+ xorl %ebx,%esi
xorl %ecx,%ebx
+ addl %eax,%ebp
+ pxor %xmm10,%xmm6
addl 32(%rsp),%edx
- andl %ecx,%edi
- pxor %xmm3,%xmm2
andl %ebx,%esi
+ xorl %ecx,%ebx
rorl $7,%eax
- movdqa %xmm8,%xmm9
- paddd %xmm1,%xmm8
- addl %edi,%edx
+ punpcklqdq %xmm5,%xmm3
movl %ebp,%edi
- pxor %xmm10,%xmm2
+ xorl %ebx,%esi
+ pxor %xmm7,%xmm6
roll $5,%ebp
- movups 48(%r12),%xmm12
- xorps %xmm13,%xmm12
- movups %xmm11,32(%r13,%r12,1)
- xorps %xmm12,%xmm11
-.byte 102,69,15,56,220,222
- movups 32(%r15),%xmm15
addl %esi,%edx
- xorl %ecx,%ebx
- addl %ebp,%edx
- movdqa %xmm2,%xmm10
- movdqa %xmm8,16(%rsp)
- movl %eax,%esi
+ movups 48(%r12),%xmm14
+ xorps %xmm15,%xmm14
+ movups %xmm2,32(%r13,%r12,1)
+ xorps %xmm14,%xmm2
+ movups -80(%r15),%xmm1
+.byte 102,15,56,220,208
+ movdqa %xmm12,%xmm13
+ xorl %eax,%edi
+ paddd %xmm5,%xmm12
xorl %ebx,%eax
+ pxor %xmm3,%xmm6
+ addl %ebp,%edx
addl 36(%rsp),%ecx
- andl %ebx,%esi
- pslld $2,%xmm2
andl %eax,%edi
+ xorl %ebx,%eax
rorl $7,%ebp
- psrld $30,%xmm10
- addl %esi,%ecx
+ movdqa %xmm6,%xmm3
movl %edx,%esi
+ xorl %eax,%edi
+ movdqa %xmm12,16(%rsp)
roll $5,%edx
addl %edi,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- por %xmm10,%xmm2
- movl %ebp,%edi
+ xorl %ebp,%esi
+ pslld $2,%xmm6
xorl %eax,%ebp
- movdqa %xmm2,%xmm8
+ addl %edx,%ecx
+ psrld $30,%xmm3
addl 40(%rsp),%ebx
- andl %eax,%edi
andl %ebp,%esi
-.byte 102,69,15,56,220,223
- movups 48(%r15),%xmm14
+ xorl %eax,%ebp
+ por %xmm3,%xmm6
rorl $7,%edx
- addl %edi,%ebx
+ movups -64(%r15),%xmm0
+.byte 102,15,56,220,209
movl %ecx,%edi
+ xorl %ebp,%esi
roll $5,%ecx
+ pshufd $238,%xmm5,%xmm12
addl %esi,%ebx
- xorl %eax,%ebp
- addl %ecx,%ebx
- movl %edx,%esi
+ xorl %edx,%edi
xorl %ebp,%edx
+ addl %ecx,%ebx
addl 44(%rsp),%eax
- andl %ebp,%esi
andl %edx,%edi
+ xorl %ebp,%edx
rorl $7,%ecx
- addl %esi,%eax
movl %ebx,%esi
+ xorl %edx,%edi
roll $5,%ebx
addl %edi,%eax
- xorl %ebp,%edx
+ xorl %edx,%esi
addl %ebx,%eax
+ pxor %xmm11,%xmm7
addl 48(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 64(%r15),%xmm15
- pxor %xmm7,%xmm3
-.byte 102,68,15,58,15,193,8
- xorl %edx,%esi
+ movups -48(%r15),%xmm1
+.byte 102,15,56,220,208
+ xorl %ecx,%esi
+ punpcklqdq %xmm6,%xmm12
movl %eax,%edi
roll $5,%eax
- pxor %xmm4,%xmm3
- xorl %ecx,%esi
- addl %eax,%ebp
- movdqa %xmm9,%xmm10
- paddd %xmm2,%xmm9
- rorl $7,%ebx
+ pxor %xmm8,%xmm7
addl %esi,%ebp
- pxor %xmm8,%xmm3
- addl 52(%rsp),%edx
xorl %ecx,%edi
+ movdqa %xmm13,%xmm3
+ rorl $7,%ebx
+ paddd %xmm6,%xmm13
+ addl %eax,%ebp
+ pxor %xmm12,%xmm7
+ addl 52(%rsp),%edx
+ xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
- movdqa %xmm3,%xmm8
- movdqa %xmm9,32(%rsp)
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
+ movdqa %xmm7,%xmm12
addl %edi,%edx
- pslld $2,%xmm3
- addl 56(%rsp),%ecx
xorl %ebx,%esi
- psrld $30,%xmm8
+ movdqa %xmm13,32(%rsp)
+ rorl $7,%eax
+ addl %ebp,%edx
+ addl 56(%rsp),%ecx
+ pslld $2,%xmm7
+ xorl %eax,%esi
movl %edx,%edi
+ psrld $30,%xmm12
roll $5,%edx
- xorl %eax,%esi
-.byte 102,69,15,56,220,223
- movups 80(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
addl %esi,%ecx
- por %xmm8,%xmm3
- addl 60(%rsp),%ebx
+ movups -32(%r15),%xmm0
+.byte 102,15,56,220,209
xorl %eax,%edi
+ rorl $7,%ebp
+ por %xmm12,%xmm7
+ addl %edx,%ecx
+ addl 60(%rsp),%ebx
+ xorl %ebp,%edi
movl %ecx,%esi
roll $5,%ecx
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
addl %edi,%ebx
- addl 0(%rsp),%eax
- paddd %xmm3,%xmm10
xorl %ebp,%esi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 0(%rsp),%eax
+ xorl %edx,%esi
movl %ebx,%edi
roll $5,%ebx
- xorl %edx,%esi
- movdqa %xmm10,48(%rsp)
- addl %ebx,%eax
- rorl $7,%ecx
+ paddd %xmm7,%xmm3
addl %esi,%eax
- addl 4(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 96(%r15),%xmm15
xorl %edx,%edi
+ movdqa %xmm3,48(%rsp)
+ rorl $7,%ecx
+ addl %ebx,%eax
+ addl 4(%rsp),%ebp
+ movups -16(%r15),%xmm1
+.byte 102,15,56,220,208
+ xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
addl %edi,%ebp
- addl 8(%rsp),%edx
xorl %ecx,%esi
+ rorl $7,%ebx
+ addl %eax,%ebp
+ addl 8(%rsp),%edx
+ xorl %ebx,%esi
movl %ebp,%edi
roll $5,%ebp
- xorl %ebx,%esi
- addl %ebp,%edx
- rorl $7,%eax
addl %esi,%edx
- addl 12(%rsp),%ecx
xorl %ebx,%edi
+ rorl $7,%eax
+ addl %ebp,%edx
+ addl 12(%rsp),%ecx
+ xorl %eax,%edi
movl %edx,%esi
roll $5,%edx
- xorl %eax,%edi
-.byte 102,69,15,56,220,223
- movups 112(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
addl %edi,%ecx
+ movups 0(%r15),%xmm0
+.byte 102,15,56,220,209
+ xorl %eax,%esi
+ rorl $7,%ebp
+ addl %edx,%ecx
cmpq %r14,%r10
je .Ldone_ssse3
- movdqa 64(%r11),%xmm6
- movdqa 0(%r11),%xmm9
- movdqu 0(%r10),%xmm0
- movdqu 16(%r10),%xmm1
- movdqu 32(%r10),%xmm2
- movdqu 48(%r10),%xmm3
-.byte 102,15,56,0,198
+ movdqa 64(%r11),%xmm3
+ movdqa 0(%r11),%xmm13
+ movdqu 0(%r10),%xmm4
+ movdqu 16(%r10),%xmm5
+ movdqu 32(%r10),%xmm6
+ movdqu 48(%r10),%xmm7
+.byte 102,15,56,0,227
addq $64,%r10
addl 16(%rsp),%ebx
- xorl %eax,%esi
-.byte 102,15,56,0,206
+ xorl %ebp,%esi
movl %ecx,%edi
+.byte 102,15,56,0,235
roll $5,%ecx
- paddd %xmm9,%xmm0
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
- movdqa %xmm0,0(%rsp)
- addl 20(%rsp),%eax
xorl %ebp,%edi
- psubd %xmm9,%xmm0
+ rorl $7,%edx
+ paddd %xmm13,%xmm4
+ addl %ecx,%ebx
+ addl 20(%rsp),%eax
+ xorl %edx,%edi
movl %ebx,%esi
+ movdqa %xmm4,0(%rsp)
roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
addl %edi,%eax
- addl 24(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 128(%r15),%xmm15
xorl %edx,%esi
+ rorl $7,%ecx
+ psubd %xmm13,%xmm4
+ addl %ebx,%eax
+ addl 24(%rsp),%ebp
+ movups 16(%r15),%xmm1
+.byte 102,15,56,220,208
+ xorl %ecx,%esi
movl %eax,%edi
roll $5,%eax
- xorl %ecx,%esi
- addl %eax,%ebp
- rorl $7,%ebx
addl %esi,%ebp
- addl 28(%rsp),%edx
xorl %ecx,%edi
+ rorl $7,%ebx
+ addl %eax,%ebp
+ addl 28(%rsp),%edx
+ xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
addl %edi,%edx
- addl 32(%rsp),%ecx
xorl %ebx,%esi
-.byte 102,15,56,0,214
+ rorl $7,%eax
+ addl %ebp,%edx
+ addl 32(%rsp),%ecx
+ xorl %eax,%esi
movl %edx,%edi
+.byte 102,15,56,0,243
roll $5,%edx
- paddd %xmm9,%xmm1
- xorl %eax,%esi
-.byte 102,69,15,56,220,223
- movups 144(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
addl %esi,%ecx
- movdqa %xmm1,16(%rsp)
- addl 36(%rsp),%ebx
+ movups 32(%r15),%xmm0
+.byte 102,15,56,220,209
xorl %eax,%edi
- psubd %xmm9,%xmm1
+ rorl $7,%ebp
+ paddd %xmm13,%xmm5
+ addl %edx,%ecx
+ addl 36(%rsp),%ebx
+ xorl %ebp,%edi
movl %ecx,%esi
+ movdqa %xmm5,16(%rsp)
roll $5,%ecx
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
addl %edi,%ebx
- addl 40(%rsp),%eax
xorl %ebp,%esi
+ rorl $7,%edx
+ psubd %xmm13,%xmm5
+ addl %ecx,%ebx
+ addl 40(%rsp),%eax
+ xorl %edx,%esi
movl %ebx,%edi
roll $5,%ebx
- xorl %edx,%esi
- addl %ebx,%eax
- rorl $7,%ecx
addl %esi,%eax
- addl 44(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 160(%r15),%xmm15
xorl %edx,%edi
+ rorl $7,%ecx
+ addl %ebx,%eax
+ addl 44(%rsp),%ebp
+ movups 48(%r15),%xmm1
+.byte 102,15,56,220,208
+ xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
addl %edi,%ebp
- addl 48(%rsp),%edx
xorl %ecx,%esi
-.byte 102,15,56,0,222
+ rorl $7,%ebx
+ addl %eax,%ebp
+ addl 48(%rsp),%edx
+ xorl %ebx,%esi
movl %ebp,%edi
+.byte 102,15,56,0,251
roll $5,%ebp
- paddd %xmm9,%xmm2
- xorl %ebx,%esi
- addl %ebp,%edx
- rorl $7,%eax
addl %esi,%edx
- movdqa %xmm2,32(%rsp)
- addl 52(%rsp),%ecx
xorl %ebx,%edi
- psubd %xmm9,%xmm2
+ rorl $7,%eax
+ paddd %xmm13,%xmm6
+ addl %ebp,%edx
+ addl 52(%rsp),%ecx
+ xorl %eax,%edi
movl %edx,%esi
+ movdqa %xmm6,32(%rsp)
roll $5,%edx
- xorl %eax,%edi
+ addl %edi,%ecx
cmpl $11,%r8d
jb .Laesenclast4
- movups 176(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 192(%r15),%xmm15
-.byte 102,69,15,56,220,222
+ movups 64(%r15),%xmm0
+.byte 102,15,56,220,209
+ movups 80(%r15),%xmm1
+.byte 102,15,56,220,208
je .Laesenclast4
- movups 208(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 224(%r15),%xmm15
-.byte 102,69,15,56,220,222
+ movups 96(%r15),%xmm0
+.byte 102,15,56,220,209
+ movups 112(%r15),%xmm1
+.byte 102,15,56,220,208
.Laesenclast4:
-.byte 102,69,15,56,221,223
- movups 16(%r15),%xmm14
- addl %edx,%ecx
+.byte 102,15,56,221,209
+ movups 16-112(%r15),%xmm0
+ xorl %eax,%esi
rorl $7,%ebp
- addl %edi,%ecx
+ psubd %xmm13,%xmm6
+ addl %edx,%ecx
addl 56(%rsp),%ebx
- xorl %eax,%esi
+ xorl %ebp,%esi
movl %ecx,%edi
roll $5,%ecx
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
- addl 60(%rsp),%eax
xorl %ebp,%edi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 60(%rsp),%eax
+ xorl %edx,%edi
movl %ebx,%esi
roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
addl %edi,%eax
- movups %xmm11,48(%r13,%r12,1)
+ rorl $7,%ecx
+ addl %ebx,%eax
+ movups %xmm2,48(%r13,%r12,1)
leaq 64(%r12),%r12
addl 0(%r9),%eax
@@ -1238,129 +1220,130 @@ aesni_cbc_sha1_enc_ssse3:
movl %esi,4(%r9)
movl %esi,%ebx
movl %ecx,8(%r9)
+ movl %ecx,%edi
movl %edx,12(%r9)
+ xorl %edx,%edi
movl %ebp,16(%r9)
+ andl %edi,%esi
jmp .Loop_ssse3
-.align 16
.Ldone_ssse3:
addl 16(%rsp),%ebx
- xorl %eax,%esi
+ xorl %ebp,%esi
movl %ecx,%edi
roll $5,%ecx
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
- addl 20(%rsp),%eax
xorl %ebp,%edi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 20(%rsp),%eax
+ xorl %edx,%edi
movl %ebx,%esi
roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
addl %edi,%eax
- addl 24(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 128(%r15),%xmm15
xorl %edx,%esi
+ rorl $7,%ecx
+ addl %ebx,%eax
+ addl 24(%rsp),%ebp
+ movups 16(%r15),%xmm1
+.byte 102,15,56,220,208
+ xorl %ecx,%esi
movl %eax,%edi
roll $5,%eax
- xorl %ecx,%esi
- addl %eax,%ebp
- rorl $7,%ebx
addl %esi,%ebp
- addl 28(%rsp),%edx
xorl %ecx,%edi
+ rorl $7,%ebx
+ addl %eax,%ebp
+ addl 28(%rsp),%edx
+ xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
addl %edi,%edx
- addl 32(%rsp),%ecx
xorl %ebx,%esi
+ rorl $7,%eax
+ addl %ebp,%edx
+ addl 32(%rsp),%ecx
+ xorl %eax,%esi
movl %edx,%edi
roll $5,%edx
- xorl %eax,%esi
-.byte 102,69,15,56,220,223
- movups 144(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
addl %esi,%ecx
- addl 36(%rsp),%ebx
+ movups 32(%r15),%xmm0
+.byte 102,15,56,220,209
xorl %eax,%edi
+ rorl $7,%ebp
+ addl %edx,%ecx
+ addl 36(%rsp),%ebx
+ xorl %ebp,%edi
movl %ecx,%esi
roll $5,%ecx
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
addl %edi,%ebx
- addl 40(%rsp),%eax
xorl %ebp,%esi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 40(%rsp),%eax
+ xorl %edx,%esi
movl %ebx,%edi
roll $5,%ebx
- xorl %edx,%esi
- addl %ebx,%eax
- rorl $7,%ecx
addl %esi,%eax
- addl 44(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 160(%r15),%xmm15
xorl %edx,%edi
+ rorl $7,%ecx
+ addl %ebx,%eax
+ addl 44(%rsp),%ebp
+ movups 48(%r15),%xmm1
+.byte 102,15,56,220,208
+ xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
addl %edi,%ebp
- addl 48(%rsp),%edx
xorl %ecx,%esi
+ rorl $7,%ebx
+ addl %eax,%ebp
+ addl 48(%rsp),%edx
+ xorl %ebx,%esi
movl %ebp,%edi
roll $5,%ebp
- xorl %ebx,%esi
- addl %ebp,%edx
- rorl $7,%eax
addl %esi,%edx
- addl 52(%rsp),%ecx
xorl %ebx,%edi
+ rorl $7,%eax
+ addl %ebp,%edx
+ addl 52(%rsp),%ecx
+ xorl %eax,%edi
movl %edx,%esi
roll $5,%edx
- xorl %eax,%edi
+ addl %edi,%ecx
cmpl $11,%r8d
jb .Laesenclast5
- movups 176(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 192(%r15),%xmm15
-.byte 102,69,15,56,220,222
+ movups 64(%r15),%xmm0
+.byte 102,15,56,220,209
+ movups 80(%r15),%xmm1
+.byte 102,15,56,220,208
je .Laesenclast5
- movups 208(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 224(%r15),%xmm15
-.byte 102,69,15,56,220,222
+ movups 96(%r15),%xmm0
+.byte 102,15,56,220,209
+ movups 112(%r15),%xmm1
+.byte 102,15,56,220,208
.Laesenclast5:
-.byte 102,69,15,56,221,223
- movups 16(%r15),%xmm14
- addl %edx,%ecx
+.byte 102,15,56,221,209
+ movups 16-112(%r15),%xmm0
+ xorl %eax,%esi
rorl $7,%ebp
- addl %edi,%ecx
+ addl %edx,%ecx
addl 56(%rsp),%ebx
- xorl %eax,%esi
+ xorl %ebp,%esi
movl %ecx,%edi
roll $5,%ecx
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
- addl 60(%rsp),%eax
xorl %ebp,%edi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 60(%rsp),%eax
+ xorl %edx,%edi
movl %ebx,%esi
roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
addl %edi,%eax
- movups %xmm11,48(%r13,%r12,1)
+ rorl $7,%ecx
+ addl %ebx,%eax
+ movups %xmm2,48(%r13,%r12,1)
movq 88(%rsp),%r8
addl 0(%r9),%eax
@@ -1373,7 +1356,7 @@ aesni_cbc_sha1_enc_ssse3:
movl %ecx,8(%r9)
movl %edx,12(%r9)
movl %ebp,16(%r9)
- movups %xmm11,(%r8)
+ movups %xmm2,(%r8)
leaq 104(%rsp),%rsi
movq 0(%rsi),%r15
movq 8(%rsi),%r14
@@ -1387,11 +1370,313 @@ aesni_cbc_sha1_enc_ssse3:
.size aesni_cbc_sha1_enc_ssse3,.-aesni_cbc_sha1_enc_ssse3
.align 64
K_XX_XX:
-.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999
-.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1
-.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc
-.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6
-.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
.byte 65,69,83,78,73,45,67,66,67,43,83,72,65,49,32,115,116,105,116,99,104,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 64
+.type aesni_cbc_sha1_enc_shaext,@function
+.align 32
+aesni_cbc_sha1_enc_shaext:
+ movq 8(%rsp),%r10
+ movdqu (%r9),%xmm8
+ movd 16(%r9),%xmm9
+ movdqa K_XX_XX+80(%rip),%xmm7
+
+ movl 240(%rcx),%r11d
+ subq %rdi,%rsi
+ movups (%rcx),%xmm15
+ movups 16(%rcx),%xmm0
+ leaq 112(%rcx),%rcx
+
+ pshufd $27,%xmm8,%xmm8
+ pshufd $27,%xmm9,%xmm9
+ jmp .Loop_shaext
+
+.align 16
+.Loop_shaext:
+ movups 0(%rdi),%xmm14
+ xorps %xmm15,%xmm14
+ xorps %xmm14,%xmm2
+ movups -80(%rcx),%xmm1
+.byte 102,15,56,220,208
+ movdqu (%r10),%xmm3
+ movdqa %xmm9,%xmm12
+.byte 102,15,56,0,223
+ movdqu 16(%r10),%xmm4
+ movdqa %xmm8,%xmm11
+ movups -64(%rcx),%xmm0
+.byte 102,15,56,220,209
+.byte 102,15,56,0,231
+
+ paddd %xmm3,%xmm9
+ movdqu 32(%r10),%xmm5
+ leaq 64(%r10),%r10
+ pxor %xmm12,%xmm3
+ movups -48(%rcx),%xmm1
+.byte 102,15,56,220,208
+ pxor %xmm12,%xmm3
+ movdqa %xmm8,%xmm10
+.byte 102,15,56,0,239
+.byte 69,15,58,204,193,0
+.byte 68,15,56,200,212
+ movups -32(%rcx),%xmm0
+.byte 102,15,56,220,209
+.byte 15,56,201,220
+ movdqu -16(%r10),%xmm6
+ movdqa %xmm8,%xmm9
+.byte 102,15,56,0,247
+ movups -16(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 69,15,58,204,194,0
+.byte 68,15,56,200,205
+ pxor %xmm5,%xmm3
+.byte 15,56,201,229
+ movups 0(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm8,%xmm10
+.byte 69,15,58,204,193,0
+.byte 68,15,56,200,214
+ movups 16(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,222
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+ movups 32(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm8,%xmm9
+.byte 69,15,58,204,194,0
+.byte 68,15,56,200,203
+ movups 48(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,227
+ pxor %xmm3,%xmm5
+.byte 15,56,201,243
+ cmpl $11,%r11d
+ jb .Laesenclast6
+ movups 64(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movups 80(%rcx),%xmm1
+.byte 102,15,56,220,208
+ je .Laesenclast6
+ movups 96(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movups 112(%rcx),%xmm1
+.byte 102,15,56,220,208
+.Laesenclast6:
+.byte 102,15,56,221,209
+ movups 16-112(%rcx),%xmm0
+ movdqa %xmm8,%xmm10
+.byte 69,15,58,204,193,0
+.byte 68,15,56,200,212
+ movups 16(%rdi),%xmm14
+ xorps %xmm15,%xmm14
+ movups %xmm2,0(%rsi,%rdi,1)
+ xorps %xmm14,%xmm2
+ movups -80(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,236
+ pxor %xmm4,%xmm6
+.byte 15,56,201,220
+ movups -64(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm8,%xmm9
+.byte 69,15,58,204,194,1
+.byte 68,15,56,200,205
+ movups -48(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,245
+ pxor %xmm5,%xmm3
+.byte 15,56,201,229
+ movups -32(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm8,%xmm10
+.byte 69,15,58,204,193,1
+.byte 68,15,56,200,214
+ movups -16(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,222
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+ movups 0(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm8,%xmm9
+.byte 69,15,58,204,194,1
+.byte 68,15,56,200,203
+ movups 16(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,227
+ pxor %xmm3,%xmm5
+.byte 15,56,201,243
+ movups 32(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm8,%xmm10
+.byte 69,15,58,204,193,1
+.byte 68,15,56,200,212
+ movups 48(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,236
+ pxor %xmm4,%xmm6
+.byte 15,56,201,220
+ cmpl $11,%r11d
+ jb .Laesenclast7
+ movups 64(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movups 80(%rcx),%xmm1
+.byte 102,15,56,220,208
+ je .Laesenclast7
+ movups 96(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movups 112(%rcx),%xmm1
+.byte 102,15,56,220,208
+.Laesenclast7:
+.byte 102,15,56,221,209
+ movups 16-112(%rcx),%xmm0
+ movdqa %xmm8,%xmm9
+.byte 69,15,58,204,194,1
+.byte 68,15,56,200,205
+ movups 32(%rdi),%xmm14
+ xorps %xmm15,%xmm14
+ movups %xmm2,16(%rsi,%rdi,1)
+ xorps %xmm14,%xmm2
+ movups -80(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,245
+ pxor %xmm5,%xmm3
+.byte 15,56,201,229
+ movups -64(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm8,%xmm10
+.byte 69,15,58,204,193,2
+.byte 68,15,56,200,214
+ movups -48(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,222
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+ movups -32(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm8,%xmm9
+.byte 69,15,58,204,194,2
+.byte 68,15,56,200,203
+ movups -16(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,227
+ pxor %xmm3,%xmm5
+.byte 15,56,201,243
+ movups 0(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm8,%xmm10
+.byte 69,15,58,204,193,2
+.byte 68,15,56,200,212
+ movups 16(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,236
+ pxor %xmm4,%xmm6
+.byte 15,56,201,220
+ movups 32(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm8,%xmm9
+.byte 69,15,58,204,194,2
+.byte 68,15,56,200,205
+ movups 48(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,245
+ pxor %xmm5,%xmm3
+.byte 15,56,201,229
+ cmpl $11,%r11d
+ jb .Laesenclast8
+ movups 64(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movups 80(%rcx),%xmm1
+.byte 102,15,56,220,208
+ je .Laesenclast8
+ movups 96(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movups 112(%rcx),%xmm1
+.byte 102,15,56,220,208
+.Laesenclast8:
+.byte 102,15,56,221,209
+ movups 16-112(%rcx),%xmm0
+ movdqa %xmm8,%xmm10
+.byte 69,15,58,204,193,2
+.byte 68,15,56,200,214
+ movups 48(%rdi),%xmm14
+ xorps %xmm15,%xmm14
+ movups %xmm2,32(%rsi,%rdi,1)
+ xorps %xmm14,%xmm2
+ movups -80(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,222
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+ movups -64(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm8,%xmm9
+.byte 69,15,58,204,194,3
+.byte 68,15,56,200,203
+ movups -48(%rcx),%xmm1
+.byte 102,15,56,220,208
+.byte 15,56,202,227
+ pxor %xmm3,%xmm5
+.byte 15,56,201,243
+ movups -32(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm8,%xmm10
+.byte 69,15,58,204,193,3
+.byte 68,15,56,200,212
+.byte 15,56,202,236
+ pxor %xmm4,%xmm6
+ movups -16(%rcx),%xmm1
+.byte 102,15,56,220,208
+ movdqa %xmm8,%xmm9
+.byte 69,15,58,204,194,3
+.byte 68,15,56,200,205
+.byte 15,56,202,245
+ movups 0(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movdqa %xmm12,%xmm5
+ movdqa %xmm8,%xmm10
+.byte 69,15,58,204,193,3
+.byte 68,15,56,200,214
+ movups 16(%rcx),%xmm1
+.byte 102,15,56,220,208
+ movdqa %xmm8,%xmm9
+.byte 69,15,58,204,194,3
+.byte 68,15,56,200,205
+ movups 32(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movups 48(%rcx),%xmm1
+.byte 102,15,56,220,208
+ cmpl $11,%r11d
+ jb .Laesenclast9
+ movups 64(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movups 80(%rcx),%xmm1
+.byte 102,15,56,220,208
+ je .Laesenclast9
+ movups 96(%rcx),%xmm0
+.byte 102,15,56,220,209
+ movups 112(%rcx),%xmm1
+.byte 102,15,56,220,208
+.Laesenclast9:
+.byte 102,15,56,221,209
+ movups 16-112(%rcx),%xmm0
+ decq %rdx
+
+ paddd %xmm11,%xmm8
+ movups %xmm2,48(%rsi,%rdi,1)
+ leaq 64(%rdi),%rdi
+ jnz .Loop_shaext
+
+ pshufd $27,%xmm8,%xmm8
+ pshufd $27,%xmm9,%xmm9
+ movups %xmm2,(%r8)
+ movdqu %xmm8,(%r9)
+ movd %xmm9,16(%r9)
+ .byte 0xf3,0xc3
+.size aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext
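
The aesni_cbc_sha1_enc_shaext entry point added above stitches AES-NI rounds together with the SHA instruction-set extensions; assemblers of the day had no mnemonics for the latter, so sha1rnds4 (0f,3a,cc = 15,58,204), sha1nexte (15,56,200), sha1msg1 (15,56,201) and sha1msg2 (15,56,202) appear as raw .byte sequences. A minimal C sketch of the pairing follows; it is illustrative only (the names stitched_group, cbc, abcd, e and w are mine, and the real loop pipelines four message registers across twenty such groups per 64-byte block):

    #include <immintrin.h>

    /* One SHA-1 four-round group with one AES-CBC round issued in
       between, so the two instruction streams overlap in the
       pipeline; compile with -maes -msha.  The immediate 0 selects
       the rounds-0..19 boolean function, as in the
       .byte 69,15,58,204,193,0 sequences above. */
    static void stitched_group(__m128i *cbc, __m128i rk,
                               __m128i *abcd, __m128i *e, __m128i w)
    {
        *cbc  = _mm_aesenc_si128(*cbc, rk);        /* one AES round   */
        *e    = _mm_sha1nexte_epu32(*e, w);        /* fold E into W   */
        *abcd = _mm_sha1rnds4_epu32(*abcd, *e, 0); /* 4 SHA-1 rounds  */
    }
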
diff --git a/secure/lib/libcrypto/amd64/aesni-sha256-x86_64.S b/secure/lib/libcrypto/amd64/aesni-sha256-x86_64.S
new file mode 100644
index 0000000..a940892
--- /dev/null
+++ b/secure/lib/libcrypto/amd64/aesni-sha256-x86_64.S
@@ -0,0 +1,58 @@
+ # $FreeBSD$
+.text
+
+
+.globl aesni_cbc_sha256_enc
+.type aesni_cbc_sha256_enc,@function
+.align 16
+aesni_cbc_sha256_enc:
+ xorl %eax,%eax
+ cmpq $0,%rdi
+ je .Lprobe
+ ud2
+.Lprobe:
+ .byte 0xf3,0xc3
+.size aesni_cbc_sha256_enc,.-aesni_cbc_sha256_enc
+
+.align 64
+.type K256,@object
+K256:
+.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.long 0,0,0,0, 0,0,0,0, -1,-1,-1,-1
+.long 0,0,0,0, 0,0,0,0
+.byte 65,69,83,78,73,45,67,66,67,43,83,72,65,50,53,54,32,115,116,105,116,99,104,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
+.align 64
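
The aesni_cbc_sha256_enc body above is a capability probe rather than an implementation: it returns 0 when its first argument is NULL and traps (ud2) otherwise, because this build omits the AVX stitched path. A caller-side sketch of that convention, with the prototype assumed from OpenSSL's internal usage rather than taken from this diff:

    #include <stddef.h>

    /* Assumed prototype; against this stub only the NULL probe
       call is safe to make. */
    extern int aesni_cbc_sha256_enc(const void *inp, void *out,
                                    size_t blocks, const void *key,
                                    unsigned char iv[16],
                                    void *sha_ctx, const void *in0);

    static int have_stitched_sha256(void)
    {
        /* A NULL first argument asks "is the fast path compiled
           in?"; this non-AVX stub always answers 0. */
        return aesni_cbc_sha256_enc(NULL, NULL, 0, NULL, NULL,
                                    NULL, NULL);
    }
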
diff --git a/secure/lib/libcrypto/amd64/aesni-x86_64.S b/secure/lib/libcrypto/amd64/aesni-x86_64.S
index df677a7..082a306 100644
--- a/secure/lib/libcrypto/amd64/aesni-x86_64.S
+++ b/secure/lib/libcrypto/amd64/aesni-x86_64.S
@@ -1,5 +1,6 @@
# $FreeBSD$
.text
+
.globl aesni_encrypt
.type aesni_encrypt,@function
.align 16
@@ -15,9 +16,12 @@ aesni_encrypt:
decl %eax
movups (%rdx),%xmm1
leaq 16(%rdx),%rdx
- jnz .Loop_enc1_1
+ jnz .Loop_enc1_1
.byte 102,15,56,221,209
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
.byte 0xf3,0xc3
.size aesni_encrypt,.-aesni_encrypt
@@ -36,34 +40,96 @@ aesni_decrypt:
decl %eax
movups (%rdx),%xmm1
leaq 16(%rdx),%rdx
- jnz .Loop_dec1_2
+ jnz .Loop_dec1_2
.byte 102,15,56,223,209
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
.byte 0xf3,0xc3
.size aesni_decrypt, .-aesni_decrypt
+.type _aesni_encrypt2,@function
+.align 16
+_aesni_encrypt2:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+ xorps %xmm0,%xmm2
+ xorps %xmm0,%xmm3
+ movups 32(%rcx),%xmm0
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+ addq $16,%rax
+
+.Lenc_loop2:
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+ movups -16(%rcx,%rax,1),%xmm0
+ jnz .Lenc_loop2
+
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+.byte 102,15,56,221,208
+.byte 102,15,56,221,216
+ .byte 0xf3,0xc3
+.size _aesni_encrypt2,.-_aesni_encrypt2
+.type _aesni_decrypt2,@function
+.align 16
+_aesni_decrypt2:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+ xorps %xmm0,%xmm2
+ xorps %xmm0,%xmm3
+ movups 32(%rcx),%xmm0
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+ addq $16,%rax
+
+.Ldec_loop2:
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+ movups -16(%rcx,%rax,1),%xmm0
+ jnz .Ldec_loop2
+
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,223,208
+.byte 102,15,56,223,216
+ .byte 0xf3,0xc3
+.size _aesni_decrypt2,.-_aesni_decrypt2
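
OpenSSL 1.0.2 adds these dedicated two-block helpers (the 1.0.1 code padded two blocks out to _aesni_encrypt3) and, throughout the file, replaces the shrl $1/decl round counting with a shll $4 plus negative-offset scheme so one register serves as both loop counter and key-schedule offset. The point of every _aesni_encryptN helper is latency hiding: independent blocks move through aesenc in lockstep. A minimal intrinsics sketch of the two-block case, assuming rk[] holds the rounds+1 round keys from the AES-NI key schedule:

    #include <immintrin.h>

    /* Two AES blocks through the same key schedule in lockstep;
       interleaving lets the multi-cycle aesenc latencies of the
       two blocks overlap instead of serializing. */
    static void aesni_encrypt2_sketch(__m128i blk[2],
                                      const __m128i *rk, int rounds)
    {
        blk[0] = _mm_xor_si128(blk[0], rk[0]);   /* key whitening */
        blk[1] = _mm_xor_si128(blk[1], rk[0]);
        for (int r = 1; r < rounds; r++) {
            blk[0] = _mm_aesenc_si128(blk[0], rk[r]);
            blk[1] = _mm_aesenc_si128(blk[1], rk[r]);
        }
        blk[0] = _mm_aesenclast_si128(blk[0], rk[rounds]);
        blk[1] = _mm_aesenclast_si128(blk[1], rk[rounds]);
    }
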
.type _aesni_encrypt3,@function
.align 16
_aesni_encrypt3:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
- movups (%rcx),%xmm0
+ movups 32(%rcx),%xmm0
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+ addq $16,%rax
.Lenc_loop3:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %eax
.byte 102,15,56,220,225
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,224
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop3
.byte 102,15,56,220,209
@@ -78,25 +144,26 @@ _aesni_encrypt3:
.align 16
_aesni_decrypt3:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
- movups (%rcx),%xmm0
+ movups 32(%rcx),%xmm0
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+ addq $16,%rax
.Ldec_loop3:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %eax
.byte 102,15,56,222,225
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,222,224
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz .Ldec_loop3
.byte 102,15,56,222,209
@@ -111,28 +178,30 @@ _aesni_decrypt3:
.align 16
_aesni_encrypt4:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
xorps %xmm0,%xmm5
- movups (%rcx),%xmm0
+ movups 32(%rcx),%xmm0
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+.byte 0x0f,0x1f,0x00
+ addq $16,%rax
.Lenc_loop4:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %eax
.byte 102,15,56,220,225
.byte 102,15,56,220,233
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,224
.byte 102,15,56,220,232
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop4
.byte 102,15,56,220,209
@@ -149,28 +218,30 @@ _aesni_encrypt4:
.align 16
_aesni_decrypt4:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
xorps %xmm0,%xmm5
- movups (%rcx),%xmm0
+ movups 32(%rcx),%xmm0
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+.byte 0x0f,0x1f,0x00
+ addq $16,%rax
.Ldec_loop4:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %eax
.byte 102,15,56,222,225
.byte 102,15,56,222,233
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,222,224
.byte 102,15,56,222,232
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz .Ldec_loop4
.byte 102,15,56,222,209
@@ -187,43 +258,40 @@ _aesni_decrypt4:
.align 16
_aesni_encrypt6:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
-.byte 102,15,56,220,209
pxor %xmm0,%xmm4
+.byte 102,15,56,220,209
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
.byte 102,15,56,220,217
pxor %xmm0,%xmm5
-.byte 102,15,56,220,225
pxor %xmm0,%xmm6
-.byte 102,15,56,220,233
+.byte 102,15,56,220,225
pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,220,241
- movups (%rcx),%xmm0
-.byte 102,15,56,220,249
+ movups (%rcx,%rax,1),%xmm0
+ addq $16,%rax
jmp .Lenc_loop6_enter
.align 16
.Lenc_loop6:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %eax
.byte 102,15,56,220,225
+.Lenc_loop6_enter:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
-.Lenc_loop6_enter:
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop6
.byte 102,15,56,220,209
@@ -244,43 +312,40 @@ _aesni_encrypt6:
.align 16
_aesni_decrypt6:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
-.byte 102,15,56,222,209
pxor %xmm0,%xmm4
+.byte 102,15,56,222,209
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
.byte 102,15,56,222,217
pxor %xmm0,%xmm5
-.byte 102,15,56,222,225
pxor %xmm0,%xmm6
-.byte 102,15,56,222,233
+.byte 102,15,56,222,225
pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,222,241
- movups (%rcx),%xmm0
-.byte 102,15,56,222,249
+ movups (%rcx,%rax,1),%xmm0
+ addq $16,%rax
jmp .Ldec_loop6_enter
.align 16
.Ldec_loop6:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %eax
.byte 102,15,56,222,225
+.Ldec_loop6_enter:
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
-.Ldec_loop6_enter:
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,222,224
.byte 102,15,56,222,232
.byte 102,15,56,222,240
.byte 102,15,56,222,248
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz .Ldec_loop6
.byte 102,15,56,222,209
@@ -301,52 +366,46 @@ _aesni_decrypt6:
.align 16
_aesni_encrypt8:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
-.byte 102,15,56,220,209
pxor %xmm0,%xmm4
-.byte 102,15,56,220,217
pxor %xmm0,%xmm5
-.byte 102,15,56,220,225
pxor %xmm0,%xmm6
-.byte 102,15,56,220,233
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+.byte 102,15,56,220,209
pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,220,241
pxor %xmm0,%xmm8
-.byte 102,15,56,220,249
+.byte 102,15,56,220,217
pxor %xmm0,%xmm9
- movups (%rcx),%xmm0
-.byte 102,68,15,56,220,193
-.byte 102,68,15,56,220,201
- movups 16(%rcx),%xmm1
- jmp .Lenc_loop8_enter
+ movups (%rcx,%rax,1),%xmm0
+ addq $16,%rax
+ jmp .Lenc_loop8_inner
.align 16
.Lenc_loop8:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %eax
+.Lenc_loop8_inner:
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
- movups 16(%rcx),%xmm1
.Lenc_loop8_enter:
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz .Lenc_loop8
.byte 102,15,56,220,209
@@ -371,52 +430,46 @@ _aesni_encrypt8:
.align 16
_aesni_decrypt8:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
-.byte 102,15,56,222,209
pxor %xmm0,%xmm4
-.byte 102,15,56,222,217
pxor %xmm0,%xmm5
-.byte 102,15,56,222,225
pxor %xmm0,%xmm6
-.byte 102,15,56,222,233
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+.byte 102,15,56,222,209
pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,222,241
pxor %xmm0,%xmm8
-.byte 102,15,56,222,249
+.byte 102,15,56,222,217
pxor %xmm0,%xmm9
- movups (%rcx),%xmm0
-.byte 102,68,15,56,222,193
-.byte 102,68,15,56,222,201
- movups 16(%rcx),%xmm1
- jmp .Ldec_loop8_enter
+ movups (%rcx,%rax,1),%xmm0
+ addq $16,%rax
+ jmp .Ldec_loop8_inner
.align 16
.Ldec_loop8:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %eax
+.Ldec_loop8_inner:
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
.byte 102,68,15,56,222,193
.byte 102,68,15,56,222,201
- movups 16(%rcx),%xmm1
.Ldec_loop8_enter:
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,222,224
.byte 102,15,56,222,232
.byte 102,15,56,222,240
.byte 102,15,56,222,248
.byte 102,68,15,56,222,192
.byte 102,68,15,56,222,200
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz .Ldec_loop8
.byte 102,15,56,222,209
@@ -525,6 +578,7 @@ aesni_ecb_encrypt:
movups 80(%rdi),%xmm7
je .Lecb_enc_six
movdqu 96(%rdi),%xmm8
+ xorps %xmm9,%xmm9
call _aesni_encrypt8
movups %xmm2,(%rsi)
movups %xmm3,16(%rsi)
@@ -545,14 +599,13 @@ aesni_ecb_encrypt:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz .Loop_enc1_3
+ jnz .Loop_enc1_3
.byte 102,15,56,221,209
movups %xmm2,(%rsi)
jmp .Lecb_ret
.align 16
.Lecb_enc_two:
- xorps %xmm4,%xmm4
- call _aesni_encrypt3
+ call _aesni_encrypt2
movups %xmm2,(%rsi)
movups %xmm3,16(%rsi)
jmp .Lecb_ret
@@ -639,15 +692,23 @@ aesni_ecb_encrypt:
jnc .Lecb_dec_loop8
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movq %r11,%rcx
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movl %r10d,%eax
movups %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
movups %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
movups %xmm6,64(%rsi)
+ pxor %xmm6,%xmm6
movups %xmm7,80(%rsi)
+ pxor %xmm7,%xmm7
movups %xmm8,96(%rsi)
+ pxor %xmm8,%xmm8
movups %xmm9,112(%rsi)
+ pxor %xmm9,%xmm9
leaq 128(%rsi),%rsi
addq $128,%rdx
jz .Lecb_ret
@@ -670,14 +731,23 @@ aesni_ecb_encrypt:
je .Lecb_dec_six
movups 96(%rdi),%xmm8
movups (%rcx),%xmm0
+ xorps %xmm9,%xmm9
call _aesni_decrypt8
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movups %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
movups %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
movups %xmm6,64(%rsi)
+ pxor %xmm6,%xmm6
movups %xmm7,80(%rsi)
+ pxor %xmm7,%xmm7
movups %xmm8,96(%rsi)
+ pxor %xmm8,%xmm8
+ pxor %xmm9,%xmm9
jmp .Lecb_ret
.align 16
.Lecb_dec_one:
@@ -690,53 +760,76 @@ aesni_ecb_encrypt:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz .Loop_dec1_4
+ jnz .Loop_dec1_4
.byte 102,15,56,223,209
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
jmp .Lecb_ret
.align 16
.Lecb_dec_two:
- xorps %xmm4,%xmm4
- call _aesni_decrypt3
+ call _aesni_decrypt2
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
jmp .Lecb_ret
.align 16
.Lecb_dec_three:
call _aesni_decrypt3
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movups %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
jmp .Lecb_ret
.align 16
.Lecb_dec_four:
call _aesni_decrypt4
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movups %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
movups %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
jmp .Lecb_ret
.align 16
.Lecb_dec_five:
xorps %xmm7,%xmm7
call _aesni_decrypt6
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movups %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
movups %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
movups %xmm6,64(%rsi)
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
jmp .Lecb_ret
.align 16
.Lecb_dec_six:
call _aesni_decrypt6
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movups %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
movups %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
movups %xmm6,64(%rsi)
+ pxor %xmm6,%xmm6
movups %xmm7,80(%rsi)
+ pxor %xmm7,%xmm7
.Lecb_ret:
+ xorps %xmm0,%xmm0
+ pxor %xmm1,%xmm1
.byte 0xf3,0xc3
.size aesni_ecb_encrypt,.-aesni_ecb_encrypt
.globl aesni_ccm64_encrypt_blocks
@@ -744,56 +837,62 @@ aesni_ecb_encrypt:
.align 16
aesni_ccm64_encrypt_blocks:
movl 240(%rcx),%eax
- movdqu (%r8),%xmm9
- movdqa .Lincrement64(%rip),%xmm6
+ movdqu (%r8),%xmm6
+ movdqa .Lincrement64(%rip),%xmm9
movdqa .Lbswap_mask(%rip),%xmm7
- shrl $1,%eax
+ shll $4,%eax
+ movl $16,%r10d
leaq 0(%rcx),%r11
movdqu (%r9),%xmm3
- movdqa %xmm9,%xmm2
- movl %eax,%r10d
-.byte 102,68,15,56,0,207
+ movdqa %xmm6,%xmm2
+ leaq 32(%rcx,%rax,1),%rcx
+.byte 102,15,56,0,247
+ subq %rax,%r10
jmp .Lccm64_enc_outer
.align 16
.Lccm64_enc_outer:
movups (%r11),%xmm0
- movl %r10d,%eax
+ movq %r10,%rax
movups (%rdi),%xmm8
xorps %xmm0,%xmm2
movups 16(%r11),%xmm1
xorps %xmm8,%xmm0
- leaq 32(%r11),%rcx
xorps %xmm0,%xmm3
- movups (%rcx),%xmm0
+ movups 32(%r11),%xmm0
.Lccm64_enc2_loop:
.byte 102,15,56,220,209
- decl %eax
.byte 102,15,56,220,217
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,220,208
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,216
- movups 0(%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz .Lccm64_enc2_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- paddq %xmm6,%xmm9
+ paddq %xmm9,%xmm6
+ decq %rdx
.byte 102,15,56,221,208
.byte 102,15,56,221,216
- decq %rdx
leaq 16(%rdi),%rdi
xorps %xmm2,%xmm8
- movdqa %xmm9,%xmm2
+ movdqa %xmm6,%xmm2
movups %xmm8,(%rsi)
- leaq 16(%rsi),%rsi
.byte 102,15,56,0,215
+ leaq 16(%rsi),%rsi
jnz .Lccm64_enc_outer
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
movups %xmm3,(%r9)
+ pxor %xmm3,%xmm3
+ pxor %xmm8,%xmm8
+ pxor %xmm6,%xmm6
.byte 0xf3,0xc3
.size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
.globl aesni_ccm64_decrypt_blocks
@@ -801,15 +900,15 @@ aesni_ccm64_encrypt_blocks:
.align 16
aesni_ccm64_decrypt_blocks:
movl 240(%rcx),%eax
- movups (%r8),%xmm9
+ movups (%r8),%xmm6
movdqu (%r9),%xmm3
- movdqa .Lincrement64(%rip),%xmm6
+ movdqa .Lincrement64(%rip),%xmm9
movdqa .Lbswap_mask(%rip),%xmm7
- movaps %xmm9,%xmm2
+ movaps %xmm6,%xmm2
movl %eax,%r10d
movq %rcx,%r11
-.byte 102,68,15,56,0,207
+.byte 102,15,56,0,247
movups (%rcx),%xmm0
movups 16(%rcx),%xmm1
leaq 32(%rcx),%rcx
@@ -819,17 +918,21 @@ aesni_ccm64_decrypt_blocks:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz .Loop_enc1_5
+ jnz .Loop_enc1_5
.byte 102,15,56,221,209
+ shll $4,%r10d
+ movl $16,%eax
movups (%rdi),%xmm8
- paddq %xmm6,%xmm9
+ paddq %xmm9,%xmm6
leaq 16(%rdi),%rdi
+ subq %r10,%rax
+ leaq 32(%r11,%r10,1),%rcx
+ movq %rax,%r10
jmp .Lccm64_dec_outer
.align 16
.Lccm64_dec_outer:
xorps %xmm2,%xmm8
- movdqa %xmm9,%xmm2
- movl %r10d,%eax
+ movdqa %xmm6,%xmm2
movups %xmm8,(%rsi)
leaq 16(%rsi),%rsi
.byte 102,15,56,0,215
@@ -838,36 +941,36 @@ aesni_ccm64_decrypt_blocks:
jz .Lccm64_dec_break
movups (%r11),%xmm0
- shrl $1,%eax
+ movq %r10,%rax
movups 16(%r11),%xmm1
xorps %xmm0,%xmm8
- leaq 32(%r11),%rcx
xorps %xmm0,%xmm2
xorps %xmm8,%xmm3
- movups (%rcx),%xmm0
-
+ movups 32(%r11),%xmm0
+ jmp .Lccm64_dec2_loop
+.align 16
.Lccm64_dec2_loop:
.byte 102,15,56,220,209
- decl %eax
.byte 102,15,56,220,217
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,220,208
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,216
- movups 0(%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz .Lccm64_dec2_loop
movups (%rdi),%xmm8
- paddq %xmm6,%xmm9
+ paddq %xmm9,%xmm6
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- leaq 16(%rdi),%rdi
.byte 102,15,56,221,208
.byte 102,15,56,221,216
+ leaq 16(%rdi),%rdi
jmp .Lccm64_dec_outer
.align 16
.Lccm64_dec_break:
+ movl 240(%r11),%eax
movups (%r11),%xmm0
movups 16(%r11),%xmm1
xorps %xmm0,%xmm8
@@ -878,9 +981,15 @@ aesni_ccm64_decrypt_blocks:
decl %eax
movups (%r11),%xmm1
leaq 16(%r11),%r11
- jnz .Loop_enc1_6
+ jnz .Loop_enc1_6
.byte 102,15,56,221,217
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
movups %xmm3,(%r9)
+ pxor %xmm3,%xmm3
+ pxor %xmm8,%xmm8
+ pxor %xmm6,%xmm6
.byte 0xf3,0xc3
.size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
.globl aesni_ctr32_encrypt_blocks
@@ -888,490 +997,859 @@ aesni_ccm64_decrypt_blocks:
.align 16
aesni_ctr32_encrypt_blocks:
cmpq $1,%rdx
- je .Lctr32_one_shortcut
+ jne .Lctr32_bulk
- movdqu (%r8),%xmm14
- movdqa .Lbswap_mask(%rip),%xmm15
- xorl %eax,%eax
-.byte 102,69,15,58,22,242,3
-.byte 102,68,15,58,34,240,3
+
+ movups (%r8),%xmm2
+ movups (%rdi),%xmm3
+ movl 240(%rcx),%edx
+ movups (%rcx),%xmm0
+ movups 16(%rcx),%xmm1
+ leaq 32(%rcx),%rcx
+ xorps %xmm0,%xmm2
+.Loop_enc1_7:
+.byte 102,15,56,220,209
+ decl %edx
+ movups (%rcx),%xmm1
+ leaq 16(%rcx),%rcx
+ jnz .Loop_enc1_7
+.byte 102,15,56,221,209
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ xorps %xmm3,%xmm2
+ pxor %xmm3,%xmm3
+ movups %xmm2,(%rsi)
+ xorps %xmm2,%xmm2
+ jmp .Lctr32_epilogue
+
+.align 16
+.Lctr32_bulk:
+ leaq (%rsp),%rax
+ pushq %rbp
+ subq $128,%rsp
+ andq $-16,%rsp
+ leaq -8(%rax),%rbp
+
+
+
+
+ movdqu (%r8),%xmm2
+ movdqu (%rcx),%xmm0
+ movl 12(%r8),%r8d
+ pxor %xmm0,%xmm2
+ movl 12(%rcx),%r11d
+ movdqa %xmm2,0(%rsp)
+ bswapl %r8d
+ movdqa %xmm2,%xmm3
+ movdqa %xmm2,%xmm4
+ movdqa %xmm2,%xmm5
+ movdqa %xmm2,64(%rsp)
+ movdqa %xmm2,80(%rsp)
+ movdqa %xmm2,96(%rsp)
+ movq %rdx,%r10
+ movdqa %xmm2,112(%rsp)
+
+ leaq 1(%r8),%rax
+ leaq 2(%r8),%rdx
+ bswapl %eax
+ bswapl %edx
+ xorl %r11d,%eax
+ xorl %r11d,%edx
+.byte 102,15,58,34,216,3
+ leaq 3(%r8),%rax
+ movdqa %xmm3,16(%rsp)
+.byte 102,15,58,34,226,3
+ bswapl %eax
+ movq %r10,%rdx
+ leaq 4(%r8),%r10
+ movdqa %xmm4,32(%rsp)
+ xorl %r11d,%eax
+ bswapl %r10d
+.byte 102,15,58,34,232,3
+ xorl %r11d,%r10d
+ movdqa %xmm5,48(%rsp)
+ leaq 5(%r8),%r9
+ movl %r10d,64+12(%rsp)
+ bswapl %r9d
+ leaq 6(%r8),%r10
movl 240(%rcx),%eax
+ xorl %r11d,%r9d
bswapl %r10d
- pxor %xmm12,%xmm12
- pxor %xmm13,%xmm13
-.byte 102,69,15,58,34,226,0
- leaq 3(%r10),%r11
-.byte 102,69,15,58,34,235,0
- incl %r10d
-.byte 102,69,15,58,34,226,1
- incq %r11
-.byte 102,69,15,58,34,235,1
- incl %r10d
-.byte 102,69,15,58,34,226,2
- incq %r11
-.byte 102,69,15,58,34,235,2
- movdqa %xmm12,-40(%rsp)
-.byte 102,69,15,56,0,231
- movdqa %xmm13,-24(%rsp)
-.byte 102,69,15,56,0,239
-
- pshufd $192,%xmm12,%xmm2
- pshufd $128,%xmm12,%xmm3
- pshufd $64,%xmm12,%xmm4
- cmpq $6,%rdx
+ movl %r9d,80+12(%rsp)
+ xorl %r11d,%r10d
+ leaq 7(%r8),%r9
+ movl %r10d,96+12(%rsp)
+ bswapl %r9d
+ movl OPENSSL_ia32cap_P+4(%rip),%r10d
+ xorl %r11d,%r9d
+ andl $71303168,%r10d
+ movl %r9d,112+12(%rsp)
+
+ movups 16(%rcx),%xmm1
+
+ movdqa 64(%rsp),%xmm6
+ movdqa 80(%rsp),%xmm7
+
+ cmpq $8,%rdx
jb .Lctr32_tail
- shrl $1,%eax
- movq %rcx,%r11
- movl %eax,%r10d
+
subq $6,%rdx
+ cmpl $4194304,%r10d
+ je .Lctr32_6x
+
+ leaq 128(%rcx),%rcx
+ subq $2,%rdx
+ jmp .Lctr32_loop8
+
+.align 16
+.Lctr32_6x:
+ shll $4,%eax
+ movl $48,%r10d
+ bswapl %r11d
+ leaq 32(%rcx,%rax,1),%rcx
+ subq %rax,%r10
jmp .Lctr32_loop6
.align 16
.Lctr32_loop6:
- pshufd $192,%xmm13,%xmm5
- por %xmm14,%xmm2
- movups (%r11),%xmm0
- pshufd $128,%xmm13,%xmm6
- por %xmm14,%xmm3
- movups 16(%r11),%xmm1
- pshufd $64,%xmm13,%xmm7
- por %xmm14,%xmm4
- por %xmm14,%xmm5
- xorps %xmm0,%xmm2
- por %xmm14,%xmm6
- por %xmm14,%xmm7
+ addl $6,%r8d
+ movups -48(%rcx,%r10,1),%xmm0
+.byte 102,15,56,220,209
+ movl %r8d,%eax
+ xorl %r11d,%eax
+.byte 102,15,56,220,217
+.byte 0x0f,0x38,0xf1,0x44,0x24,12
+ leal 1(%r8),%eax
+.byte 102,15,56,220,225
+ xorl %r11d,%eax
+.byte 0x0f,0x38,0xf1,0x44,0x24,28
+.byte 102,15,56,220,233
+ leal 2(%r8),%eax
+ xorl %r11d,%eax
+.byte 102,15,56,220,241
+.byte 0x0f,0x38,0xf1,0x44,0x24,44
+ leal 3(%r8),%eax
+.byte 102,15,56,220,249
+ movups -32(%rcx,%r10,1),%xmm1
+ xorl %r11d,%eax
+.byte 102,15,56,220,208
+.byte 0x0f,0x38,0xf1,0x44,0x24,60
+ leal 4(%r8),%eax
+.byte 102,15,56,220,216
+ xorl %r11d,%eax
+.byte 0x0f,0x38,0xf1,0x44,0x24,76
+.byte 102,15,56,220,224
+ leal 5(%r8),%eax
+ xorl %r11d,%eax
+.byte 102,15,56,220,232
+.byte 0x0f,0x38,0xf1,0x44,0x24,92
+ movq %r10,%rax
+.byte 102,15,56,220,240
+.byte 102,15,56,220,248
+ movups -16(%rcx,%r10,1),%xmm0
+ call .Lenc_loop6
+ movdqu (%rdi),%xmm8
+ movdqu 16(%rdi),%xmm9
+ movdqu 32(%rdi),%xmm10
+ movdqu 48(%rdi),%xmm11
+ movdqu 64(%rdi),%xmm12
+ movdqu 80(%rdi),%xmm13
+ leaq 96(%rdi),%rdi
+ movups -64(%rcx,%r10,1),%xmm1
+ pxor %xmm2,%xmm8
+ movaps 0(%rsp),%xmm2
+ pxor %xmm3,%xmm9
+ movaps 16(%rsp),%xmm3
+ pxor %xmm4,%xmm10
+ movaps 32(%rsp),%xmm4
+ pxor %xmm5,%xmm11
+ movaps 48(%rsp),%xmm5
+ pxor %xmm6,%xmm12
+ movaps 64(%rsp),%xmm6
+ pxor %xmm7,%xmm13
+ movaps 80(%rsp),%xmm7
+ movdqu %xmm8,(%rsi)
+ movdqu %xmm9,16(%rsi)
+ movdqu %xmm10,32(%rsi)
+ movdqu %xmm11,48(%rsi)
+ movdqu %xmm12,64(%rsi)
+ movdqu %xmm13,80(%rsi)
+ leaq 96(%rsi),%rsi
- pxor %xmm0,%xmm3
+ subq $6,%rdx
+ jnc .Lctr32_loop6
+
+ addq $6,%rdx
+ jz .Lctr32_done
+
+ leal -48(%r10),%eax
+ leaq -80(%rcx,%r10,1),%rcx
+ negl %eax
+ shrl $4,%eax
+ jmp .Lctr32_tail
+
+.align 32
+.Lctr32_loop8:
+ addl $8,%r8d
+ movdqa 96(%rsp),%xmm8
.byte 102,15,56,220,209
- leaq 32(%r11),%rcx
- pxor %xmm0,%xmm4
+ movl %r8d,%r9d
+ movdqa 112(%rsp),%xmm9
.byte 102,15,56,220,217
- movdqa .Lincrement32(%rip),%xmm13
- pxor %xmm0,%xmm5
+ bswapl %r9d
+ movups 32-128(%rcx),%xmm0
.byte 102,15,56,220,225
- movdqa -40(%rsp),%xmm12
- pxor %xmm0,%xmm6
+ xorl %r11d,%r9d
+ nop
.byte 102,15,56,220,233
- pxor %xmm0,%xmm7
- movups (%rcx),%xmm0
- decl %eax
+ movl %r9d,0+12(%rsp)
+ leaq 1(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
- jmp .Lctr32_enc_loop6_enter
-.align 16
-.Lctr32_enc_loop6:
+.byte 102,68,15,56,220,193
+.byte 102,68,15,56,220,201
+ movups 48-128(%rcx),%xmm1
+ bswapl %r9d
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+ xorl %r11d,%r9d
+.byte 0x66,0x90
+.byte 102,15,56,220,224
+.byte 102,15,56,220,232
+ movl %r9d,16+12(%rsp)
+ leaq 2(%r8),%r9
+.byte 102,15,56,220,240
+.byte 102,15,56,220,248
+.byte 102,68,15,56,220,192
+.byte 102,68,15,56,220,200
+ movups 64-128(%rcx),%xmm0
+ bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %eax
+ xorl %r11d,%r9d
+.byte 0x66,0x90
.byte 102,15,56,220,225
.byte 102,15,56,220,233
+ movl %r9d,32+12(%rsp)
+ leaq 3(%r8),%r9
.byte 102,15,56,220,241
.byte 102,15,56,220,249
-.Lctr32_enc_loop6_enter:
- movups 16(%rcx),%xmm1
+.byte 102,68,15,56,220,193
+.byte 102,68,15,56,220,201
+ movups 80-128(%rcx),%xmm1
+ bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
+ xorl %r11d,%r9d
+.byte 0x66,0x90
.byte 102,15,56,220,224
.byte 102,15,56,220,232
+ movl %r9d,48+12(%rsp)
+ leaq 4(%r8),%r9
.byte 102,15,56,220,240
.byte 102,15,56,220,248
- movups (%rcx),%xmm0
- jnz .Lctr32_enc_loop6
+.byte 102,68,15,56,220,192
+.byte 102,68,15,56,220,200
+ movups 96-128(%rcx),%xmm0
+ bswapl %r9d
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+ xorl %r11d,%r9d
+.byte 0x66,0x90
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
+ movl %r9d,64+12(%rsp)
+ leaq 5(%r8),%r9
+.byte 102,15,56,220,241
+.byte 102,15,56,220,249
+.byte 102,68,15,56,220,193
+.byte 102,68,15,56,220,201
+ movups 112-128(%rcx),%xmm1
+ bswapl %r9d
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+ xorl %r11d,%r9d
+.byte 0x66,0x90
+.byte 102,15,56,220,224
+.byte 102,15,56,220,232
+ movl %r9d,80+12(%rsp)
+ leaq 6(%r8),%r9
+.byte 102,15,56,220,240
+.byte 102,15,56,220,248
+.byte 102,68,15,56,220,192
+.byte 102,68,15,56,220,200
+ movups 128-128(%rcx),%xmm0
+ bswapl %r9d
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+ xorl %r11d,%r9d
+.byte 0x66,0x90
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
+ movl %r9d,96+12(%rsp)
+ leaq 7(%r8),%r9
+.byte 102,15,56,220,241
+.byte 102,15,56,220,249
+.byte 102,68,15,56,220,193
+.byte 102,68,15,56,220,201
+ movups 144-128(%rcx),%xmm1
+ bswapl %r9d
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+.byte 102,15,56,220,224
+ xorl %r11d,%r9d
+ movdqu 0(%rdi),%xmm10
+.byte 102,15,56,220,232
+ movl %r9d,112+12(%rsp)
+ cmpl $11,%eax
+.byte 102,15,56,220,240
+.byte 102,15,56,220,248
+.byte 102,68,15,56,220,192
+.byte 102,68,15,56,220,200
+ movups 160-128(%rcx),%xmm0
+
+ jb .Lctr32_enc_done
.byte 102,15,56,220,209
- paddd %xmm13,%xmm12
.byte 102,15,56,220,217
- paddd -24(%rsp),%xmm13
.byte 102,15,56,220,225
- movdqa %xmm12,-40(%rsp)
.byte 102,15,56,220,233
- movdqa %xmm13,-24(%rsp)
.byte 102,15,56,220,241
-.byte 102,69,15,56,0,231
.byte 102,15,56,220,249
-.byte 102,69,15,56,0,239
+.byte 102,68,15,56,220,193
+.byte 102,68,15,56,220,201
+ movups 176-128(%rcx),%xmm1
-.byte 102,15,56,221,208
- movups (%rdi),%xmm8
-.byte 102,15,56,221,216
- movups 16(%rdi),%xmm9
-.byte 102,15,56,221,224
- movups 32(%rdi),%xmm10
-.byte 102,15,56,221,232
- movups 48(%rdi),%xmm11
-.byte 102,15,56,221,240
- movups 64(%rdi),%xmm1
-.byte 102,15,56,221,248
- movups 80(%rdi),%xmm0
- leaq 96(%rdi),%rdi
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+.byte 102,15,56,220,224
+.byte 102,15,56,220,232
+.byte 102,15,56,220,240
+.byte 102,15,56,220,248
+.byte 102,68,15,56,220,192
+.byte 102,68,15,56,220,200
+ movups 192-128(%rcx),%xmm0
+ je .Lctr32_enc_done
- xorps %xmm2,%xmm8
- pshufd $192,%xmm12,%xmm2
- xorps %xmm3,%xmm9
- pshufd $128,%xmm12,%xmm3
- movups %xmm8,(%rsi)
- xorps %xmm4,%xmm10
- pshufd $64,%xmm12,%xmm4
- movups %xmm9,16(%rsi)
- xorps %xmm5,%xmm11
- movups %xmm10,32(%rsi)
- xorps %xmm6,%xmm1
- movups %xmm11,48(%rsi)
- xorps %xmm7,%xmm0
- movups %xmm1,64(%rsi)
- movups %xmm0,80(%rsi)
- leaq 96(%rsi),%rsi
- movl %r10d,%eax
- subq $6,%rdx
- jnc .Lctr32_loop6
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
+.byte 102,15,56,220,241
+.byte 102,15,56,220,249
+.byte 102,68,15,56,220,193
+.byte 102,68,15,56,220,201
+ movups 208-128(%rcx),%xmm1
- addq $6,%rdx
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+.byte 102,15,56,220,224
+.byte 102,15,56,220,232
+.byte 102,15,56,220,240
+.byte 102,15,56,220,248
+.byte 102,68,15,56,220,192
+.byte 102,68,15,56,220,200
+ movups 224-128(%rcx),%xmm0
+ jmp .Lctr32_enc_done
+
+.align 16
+.Lctr32_enc_done:
+ movdqu 16(%rdi),%xmm11
+ pxor %xmm0,%xmm10
+ movdqu 32(%rdi),%xmm12
+ pxor %xmm0,%xmm11
+ movdqu 48(%rdi),%xmm13
+ pxor %xmm0,%xmm12
+ movdqu 64(%rdi),%xmm14
+ pxor %xmm0,%xmm13
+ movdqu 80(%rdi),%xmm15
+ pxor %xmm0,%xmm14
+ pxor %xmm0,%xmm15
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
+.byte 102,15,56,220,241
+.byte 102,15,56,220,249
+.byte 102,68,15,56,220,193
+.byte 102,68,15,56,220,201
+ movdqu 96(%rdi),%xmm1
+ leaq 128(%rdi),%rdi
+
+.byte 102,65,15,56,221,210
+ pxor %xmm0,%xmm1
+ movdqu 112-128(%rdi),%xmm10
+.byte 102,65,15,56,221,219
+ pxor %xmm0,%xmm10
+ movdqa 0(%rsp),%xmm11
+.byte 102,65,15,56,221,228
+.byte 102,65,15,56,221,237
+ movdqa 16(%rsp),%xmm12
+ movdqa 32(%rsp),%xmm13
+.byte 102,65,15,56,221,246
+.byte 102,65,15,56,221,255
+ movdqa 48(%rsp),%xmm14
+ movdqa 64(%rsp),%xmm15
+.byte 102,68,15,56,221,193
+ movdqa 80(%rsp),%xmm0
+ movups 16-128(%rcx),%xmm1
+.byte 102,69,15,56,221,202
+
+ movups %xmm2,(%rsi)
+ movdqa %xmm11,%xmm2
+ movups %xmm3,16(%rsi)
+ movdqa %xmm12,%xmm3
+ movups %xmm4,32(%rsi)
+ movdqa %xmm13,%xmm4
+ movups %xmm5,48(%rsi)
+ movdqa %xmm14,%xmm5
+ movups %xmm6,64(%rsi)
+ movdqa %xmm15,%xmm6
+ movups %xmm7,80(%rsi)
+ movdqa %xmm0,%xmm7
+ movups %xmm8,96(%rsi)
+ movups %xmm9,112(%rsi)
+ leaq 128(%rsi),%rsi
+
+ subq $8,%rdx
+ jnc .Lctr32_loop8
+
+ addq $8,%rdx
jz .Lctr32_done
- movq %r11,%rcx
- leal 1(%rax,%rax,1),%eax
+ leaq -128(%rcx),%rcx
.Lctr32_tail:
- por %xmm14,%xmm2
- movups (%rdi),%xmm8
- cmpq $2,%rdx
- jb .Lctr32_one
- por %xmm14,%xmm3
- movups 16(%rdi),%xmm9
- je .Lctr32_two
- pshufd $192,%xmm13,%xmm5
- por %xmm14,%xmm4
- movups 32(%rdi),%xmm10
+ leaq 16(%rcx),%rcx
cmpq $4,%rdx
- jb .Lctr32_three
+ jb .Lctr32_loop3
+ je .Lctr32_loop4
- pshufd $128,%xmm13,%xmm6
- por %xmm14,%xmm5
- movups 48(%rdi),%xmm11
- je .Lctr32_four
- por %xmm14,%xmm6
- xorps %xmm7,%xmm7
+ shll $4,%eax
+ movdqa 96(%rsp),%xmm8
+ pxor %xmm9,%xmm9
- call _aesni_encrypt6
+ movups 16(%rcx),%xmm0
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+ leaq 32-16(%rcx,%rax,1),%rcx
+ negq %rax
+.byte 102,15,56,220,225
+ addq $16,%rax
+ movups (%rdi),%xmm10
+.byte 102,15,56,220,233
+.byte 102,15,56,220,241
+ movups 16(%rdi),%xmm11
+ movups 32(%rdi),%xmm12
+.byte 102,15,56,220,249
+.byte 102,68,15,56,220,193
- movups 64(%rdi),%xmm1
- xorps %xmm2,%xmm8
- xorps %xmm3,%xmm9
- movups %xmm8,(%rsi)
- xorps %xmm4,%xmm10
- movups %xmm9,16(%rsi)
- xorps %xmm5,%xmm11
- movups %xmm10,32(%rsi)
- xorps %xmm6,%xmm1
- movups %xmm11,48(%rsi)
- movups %xmm1,64(%rsi)
+ call .Lenc_loop8_enter
+
+ movdqu 48(%rdi),%xmm13
+ pxor %xmm10,%xmm2
+ movdqu 64(%rdi),%xmm10
+ pxor %xmm11,%xmm3
+ movdqu %xmm2,(%rsi)
+ pxor %xmm12,%xmm4
+ movdqu %xmm3,16(%rsi)
+ pxor %xmm13,%xmm5
+ movdqu %xmm4,32(%rsi)
+ pxor %xmm10,%xmm6
+ movdqu %xmm5,48(%rsi)
+ movdqu %xmm6,64(%rsi)
+ cmpq $6,%rdx
+ jb .Lctr32_done
+
+ movups 80(%rdi),%xmm11
+ xorps %xmm11,%xmm7
+ movups %xmm7,80(%rsi)
+ je .Lctr32_done
+
+ movups 96(%rdi),%xmm12
+ xorps %xmm12,%xmm8
+ movups %xmm8,96(%rsi)
jmp .Lctr32_done
-.align 16
-.Lctr32_one_shortcut:
- movups (%r8),%xmm2
- movups (%rdi),%xmm8
- movl 240(%rcx),%eax
-.Lctr32_one:
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
-.Loop_enc1_7:
+.align 32
+.Lctr32_loop4:
.byte 102,15,56,220,209
+ leaq 16(%rcx),%rcx
decl %eax
+.byte 102,15,56,220,217
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz .Loop_enc1_7
+ jnz .Lctr32_loop4
.byte 102,15,56,221,209
- xorps %xmm2,%xmm8
- movups %xmm8,(%rsi)
- jmp .Lctr32_done
+.byte 102,15,56,221,217
+ movups (%rdi),%xmm10
+ movups 16(%rdi),%xmm11
+.byte 102,15,56,221,225
+.byte 102,15,56,221,233
+ movups 32(%rdi),%xmm12
+ movups 48(%rdi),%xmm13
-.align 16
-.Lctr32_two:
- xorps %xmm4,%xmm4
- call _aesni_encrypt3
- xorps %xmm2,%xmm8
- xorps %xmm3,%xmm9
- movups %xmm8,(%rsi)
- movups %xmm9,16(%rsi)
+ xorps %xmm10,%xmm2
+ movups %xmm2,(%rsi)
+ xorps %xmm11,%xmm3
+ movups %xmm3,16(%rsi)
+ pxor %xmm12,%xmm4
+ movdqu %xmm4,32(%rsi)
+ pxor %xmm13,%xmm5
+ movdqu %xmm5,48(%rsi)
jmp .Lctr32_done
-.align 16
-.Lctr32_three:
- call _aesni_encrypt3
- xorps %xmm2,%xmm8
- xorps %xmm3,%xmm9
- movups %xmm8,(%rsi)
- xorps %xmm4,%xmm10
- movups %xmm9,16(%rsi)
- movups %xmm10,32(%rsi)
- jmp .Lctr32_done
+.align 32
+.Lctr32_loop3:
+.byte 102,15,56,220,209
+ leaq 16(%rcx),%rcx
+ decl %eax
+.byte 102,15,56,220,217
+.byte 102,15,56,220,225
+ movups (%rcx),%xmm1
+ jnz .Lctr32_loop3
+.byte 102,15,56,221,209
+.byte 102,15,56,221,217
+.byte 102,15,56,221,225
-.align 16
-.Lctr32_four:
- call _aesni_encrypt4
- xorps %xmm2,%xmm8
- xorps %xmm3,%xmm9
- movups %xmm8,(%rsi)
- xorps %xmm4,%xmm10
- movups %xmm9,16(%rsi)
- xorps %xmm5,%xmm11
- movups %xmm10,32(%rsi)
- movups %xmm11,48(%rsi)
+ movups (%rdi),%xmm10
+ xorps %xmm10,%xmm2
+ movups %xmm2,(%rsi)
+ cmpq $2,%rdx
+ jb .Lctr32_done
+
+ movups 16(%rdi),%xmm11
+ xorps %xmm11,%xmm3
+ movups %xmm3,16(%rsi)
+ je .Lctr32_done
+
+ movups 32(%rdi),%xmm12
+ xorps %xmm12,%xmm4
+ movups %xmm4,32(%rsi)
.Lctr32_done:
+ xorps %xmm0,%xmm0
+ xorl %r11d,%r11d
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
+ movaps %xmm0,0(%rsp)
+ pxor %xmm8,%xmm8
+ movaps %xmm0,16(%rsp)
+ pxor %xmm9,%xmm9
+ movaps %xmm0,32(%rsp)
+ pxor %xmm10,%xmm10
+ movaps %xmm0,48(%rsp)
+ pxor %xmm11,%xmm11
+ movaps %xmm0,64(%rsp)
+ pxor %xmm12,%xmm12
+ movaps %xmm0,80(%rsp)
+ pxor %xmm13,%xmm13
+ movaps %xmm0,96(%rsp)
+ pxor %xmm14,%xmm14
+ movaps %xmm0,112(%rsp)
+ pxor %xmm15,%xmm15
+ leaq (%rbp),%rsp
+ popq %rbp
+.Lctr32_epilogue:
.byte 0xf3,0xc3
.size aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
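
As the name says, aesni_ctr32_encrypt_blocks treats only the last 32 bits of the counter block as a big-endian counter (the bswapl/leaq 1(%r8) sequences above); the rewrite also grows an eight-blocks-per-iteration path with the counter blocks staged on the stack. A sketch of just the counter convention, where bswap32 and the aes_encrypt callback are illustrative placeholders, not names from this file:

    #include <stdint.h>
    #include <string.h>

    static uint32_t bswap32(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0xff00u) |
               ((x << 8) & 0xff0000u) | (x << 24);
    }

    /* Produce one keystream block and step the counter: bytes
       12..15 of the IV are a big-endian counter, the first 12
       bytes are never touched (hence "ctr32"). */
    static void ctr32_step(uint8_t ivec[16], uint8_t keystream[16],
                           void (*aes_encrypt)(const uint8_t in[16],
                                               uint8_t out[16]))
    {
        uint32_t ctr;
        aes_encrypt(ivec, keystream);
        memcpy(&ctr, ivec + 12, 4);
        ctr = bswap32(bswap32(ctr) + 1);  /* increment as BE value */
        memcpy(ivec + 12, &ctr, 4);
    }
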
.globl aesni_xts_encrypt
.type aesni_xts_encrypt,@function
.align 16
aesni_xts_encrypt:
- leaq -104(%rsp),%rsp
- movups (%r9),%xmm15
+ leaq (%rsp),%rax
+ pushq %rbp
+ subq $112,%rsp
+ andq $-16,%rsp
+ leaq -8(%rax),%rbp
+ movups (%r9),%xmm2
movl 240(%r8),%eax
movl 240(%rcx),%r10d
movups (%r8),%xmm0
movups 16(%r8),%xmm1
leaq 32(%r8),%r8
- xorps %xmm0,%xmm15
+ xorps %xmm0,%xmm2
.Loop_enc1_8:
-.byte 102,68,15,56,220,249
+.byte 102,15,56,220,209
decl %eax
movups (%r8),%xmm1
leaq 16(%r8),%r8
- jnz .Loop_enc1_8
-.byte 102,68,15,56,221,249
+ jnz .Loop_enc1_8
+.byte 102,15,56,221,209
+ movups (%rcx),%xmm0
movq %rcx,%r11
movl %r10d,%eax
+ shll $4,%r10d
movq %rdx,%r9
andq $-16,%rdx
+ movups 16(%rcx,%r10,1),%xmm1
+
movdqa .Lxts_magic(%rip),%xmm8
- pxor %xmm14,%xmm14
- pcmpgtd %xmm15,%xmm14
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ movdqa %xmm2,%xmm15
+ pshufd $95,%xmm2,%xmm9
+ pxor %xmm0,%xmm1
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm10
+ psrad $31,%xmm14
paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm10
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm11
+ psrad $31,%xmm14
paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm11
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm12
+ psrad $31,%xmm14
paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm12
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm13
+ psrad $31,%xmm14
+ paddq %xmm15,%xmm15
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm13
+ pxor %xmm14,%xmm15
+ movdqa %xmm15,%xmm14
+ psrad $31,%xmm9
paddq %xmm15,%xmm15
pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
+ pxor %xmm0,%xmm14
pxor %xmm9,%xmm15
+ movaps %xmm1,96(%rsp)
+
subq $96,%rdx
jc .Lxts_enc_short
- shrl $1,%eax
- subl $1,%eax
- movl %eax,%r10d
+ movl $16+96,%eax
+ leaq 32(%r11,%r10,1),%rcx
+ subq %r10,%rax
+ movups 16(%r11),%xmm1
+ movq %rax,%r10
+ leaq .Lxts_magic(%rip),%r8
jmp .Lxts_enc_grandloop
-.align 16
+.align 32
.Lxts_enc_grandloop:
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
movdqu 0(%rdi),%xmm2
- pand %xmm8,%xmm9
+ movdqa %xmm0,%xmm8
movdqu 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
- movdqu 32(%rdi),%xmm4
pxor %xmm10,%xmm2
- movdqu 48(%rdi),%xmm5
+ movdqu 32(%rdi),%xmm4
pxor %xmm11,%xmm3
- movdqu 64(%rdi),%xmm6
+.byte 102,15,56,220,209
+ movdqu 48(%rdi),%xmm5
pxor %xmm12,%xmm4
- movdqu 80(%rdi),%xmm7
- leaq 96(%rdi),%rdi
+.byte 102,15,56,220,217
+ movdqu 64(%rdi),%xmm6
pxor %xmm13,%xmm5
- movups (%r11),%xmm0
+.byte 102,15,56,220,225
+ movdqu 80(%rdi),%xmm7
+ pxor %xmm15,%xmm8
+ movdqa 96(%rsp),%xmm9
pxor %xmm14,%xmm6
- pxor %xmm15,%xmm7
-
-
+.byte 102,15,56,220,233
+ movups 32(%r11),%xmm0
+ leaq 96(%rdi),%rdi
+ pxor %xmm8,%xmm7
- movups 16(%r11),%xmm1
- pxor %xmm0,%xmm2
- pxor %xmm0,%xmm3
+ pxor %xmm9,%xmm10
+.byte 102,15,56,220,241
+ pxor %xmm9,%xmm11
movdqa %xmm10,0(%rsp)
-.byte 102,15,56,220,209
- leaq 32(%r11),%rcx
- pxor %xmm0,%xmm4
+.byte 102,15,56,220,249
+ movups 48(%r11),%xmm1
+ pxor %xmm9,%xmm12
+
+.byte 102,15,56,220,208
+ pxor %xmm9,%xmm13
movdqa %xmm11,16(%rsp)
-.byte 102,15,56,220,217
- pxor %xmm0,%xmm5
+.byte 102,15,56,220,216
+ pxor %xmm9,%xmm14
movdqa %xmm12,32(%rsp)
-.byte 102,15,56,220,225
- pxor %xmm0,%xmm6
- movdqa %xmm13,48(%rsp)
-.byte 102,15,56,220,233
- pxor %xmm0,%xmm7
- movups (%rcx),%xmm0
- decl %eax
+.byte 102,15,56,220,224
+.byte 102,15,56,220,232
+ pxor %xmm9,%xmm8
movdqa %xmm14,64(%rsp)
-.byte 102,15,56,220,241
- movdqa %xmm15,80(%rsp)
-.byte 102,15,56,220,249
- pxor %xmm14,%xmm14
- pcmpgtd %xmm15,%xmm14
- jmp .Lxts_enc_loop6_enter
-
-.align 16
+.byte 102,15,56,220,240
+.byte 102,15,56,220,248
+ movups 64(%r11),%xmm0
+ movdqa %xmm8,80(%rsp)
+ pshufd $95,%xmm15,%xmm9
+ jmp .Lxts_enc_loop6
+.align 32
.Lxts_enc_loop6:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %eax
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
-.Lxts_enc_loop6_enter:
- movups 16(%rcx),%xmm1
+ movups -64(%rcx,%rax,1),%xmm1
+ addq $32,%rax
+
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
- movups (%rcx),%xmm0
+ movups -80(%rcx,%rax,1),%xmm0
jnz .Lxts_enc_loop6
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- paddq %xmm15,%xmm15
+ movdqa (%r8),%xmm8
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
.byte 102,15,56,220,209
- pand %xmm8,%xmm9
+ paddq %xmm15,%xmm15
+ psrad $31,%xmm14
.byte 102,15,56,220,217
- pcmpgtd %xmm15,%xmm14
+ pand %xmm8,%xmm14
+ movups (%r11),%xmm10
.byte 102,15,56,220,225
- pxor %xmm9,%xmm15
.byte 102,15,56,220,233
.byte 102,15,56,220,241
+ pxor %xmm14,%xmm15
+ movaps %xmm10,%xmm11
.byte 102,15,56,220,249
- movups 16(%rcx),%xmm1
+ movups -64(%rcx),%xmm1
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm10
- paddq %xmm15,%xmm15
+ movdqa %xmm9,%xmm14
.byte 102,15,56,220,208
- pand %xmm8,%xmm9
+ paddd %xmm9,%xmm9
+ pxor %xmm15,%xmm10
.byte 102,15,56,220,216
- pcmpgtd %xmm15,%xmm14
+ psrad $31,%xmm14
+ paddq %xmm15,%xmm15
.byte 102,15,56,220,224
- pxor %xmm9,%xmm15
.byte 102,15,56,220,232
+ pand %xmm8,%xmm14
+ movaps %xmm11,%xmm12
.byte 102,15,56,220,240
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
.byte 102,15,56,220,248
- movups 32(%rcx),%xmm0
+ movups -48(%rcx),%xmm0
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm11
- paddq %xmm15,%xmm15
+ paddd %xmm9,%xmm9
.byte 102,15,56,220,209
- pand %xmm8,%xmm9
+ pxor %xmm15,%xmm11
+ psrad $31,%xmm14
.byte 102,15,56,220,217
- pcmpgtd %xmm15,%xmm14
+ paddq %xmm15,%xmm15
+ pand %xmm8,%xmm14
.byte 102,15,56,220,225
- pxor %xmm9,%xmm15
.byte 102,15,56,220,233
+ movdqa %xmm13,48(%rsp)
+ pxor %xmm14,%xmm15
.byte 102,15,56,220,241
+ movaps %xmm12,%xmm13
+ movdqa %xmm9,%xmm14
.byte 102,15,56,220,249
+ movups -32(%rcx),%xmm1
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm12
+ paddd %xmm9,%xmm9
+.byte 102,15,56,220,208
+ pxor %xmm15,%xmm12
+ psrad $31,%xmm14
+.byte 102,15,56,220,216
paddq %xmm15,%xmm15
-.byte 102,15,56,221,208
- pand %xmm8,%xmm9
-.byte 102,15,56,221,216
- pcmpgtd %xmm15,%xmm14
-.byte 102,15,56,221,224
- pxor %xmm9,%xmm15
-.byte 102,15,56,221,232
-.byte 102,15,56,221,240
-.byte 102,15,56,221,248
+ pand %xmm8,%xmm14
+.byte 102,15,56,220,224
+.byte 102,15,56,220,232
+.byte 102,15,56,220,240
+ pxor %xmm14,%xmm15
+ movaps %xmm13,%xmm14
+.byte 102,15,56,220,248
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm13
+ movdqa %xmm9,%xmm0
+ paddd %xmm9,%xmm9
+.byte 102,15,56,220,209
+ pxor %xmm15,%xmm13
+ psrad $31,%xmm0
+.byte 102,15,56,220,217
+ paddq %xmm15,%xmm15
+ pand %xmm8,%xmm0
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
+ pxor %xmm0,%xmm15
+ movups (%r11),%xmm0
+.byte 102,15,56,220,241
+.byte 102,15,56,220,249
+ movups 16(%r11),%xmm1
+
+ pxor %xmm15,%xmm14
+.byte 102,15,56,221,84,36,0
+ psrad $31,%xmm9
paddq %xmm15,%xmm15
- xorps 0(%rsp),%xmm2
+.byte 102,15,56,221,92,36,16
+.byte 102,15,56,221,100,36,32
pand %xmm8,%xmm9
- xorps 16(%rsp),%xmm3
- pcmpgtd %xmm15,%xmm14
+ movq %r10,%rax
+.byte 102,15,56,221,108,36,48
+.byte 102,15,56,221,116,36,64
+.byte 102,15,56,221,124,36,80
pxor %xmm9,%xmm15
- xorps 32(%rsp),%xmm4
- movups %xmm2,0(%rsi)
- xorps 48(%rsp),%xmm5
- movups %xmm3,16(%rsi)
- xorps 64(%rsp),%xmm6
- movups %xmm4,32(%rsi)
- xorps 80(%rsp),%xmm7
- movups %xmm5,48(%rsi)
- movl %r10d,%eax
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
leaq 96(%rsi),%rsi
+ movups %xmm2,-96(%rsi)
+ movups %xmm3,-80(%rsi)
+ movups %xmm4,-64(%rsi)
+ movups %xmm5,-48(%rsi)
+ movups %xmm6,-32(%rsi)
+ movups %xmm7,-16(%rsi)
subq $96,%rdx
jnc .Lxts_enc_grandloop
- leal 3(%rax,%rax,1),%eax
+ movl $16+96,%eax
+ subl %r10d,%eax
movq %r11,%rcx
- movl %eax,%r10d
+ shrl $4,%eax
.Lxts_enc_short:
+
+ movl %eax,%r10d
+ pxor %xmm0,%xmm10
addq $96,%rdx
jz .Lxts_enc_done
+ pxor %xmm0,%xmm11
cmpq $32,%rdx
jb .Lxts_enc_one
+ pxor %xmm0,%xmm12
je .Lxts_enc_two
+ pxor %xmm0,%xmm13
cmpq $64,%rdx
jb .Lxts_enc_three
+ pxor %xmm0,%xmm14
je .Lxts_enc_four
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
movdqu (%rdi),%xmm2
- pand %xmm8,%xmm9
movdqu 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
movdqu 32(%rdi),%xmm4
pxor %xmm10,%xmm2
movdqu 48(%rdi),%xmm5
@@ -1381,6 +1859,7 @@ aesni_xts_encrypt:
pxor %xmm12,%xmm4
pxor %xmm13,%xmm5
pxor %xmm14,%xmm6
+ pxor %xmm7,%xmm7
call _aesni_encrypt6
@@ -1412,7 +1891,7 @@ aesni_xts_encrypt:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz .Loop_enc1_9
+ jnz .Loop_enc1_9
.byte 102,15,56,221,209
xorps %xmm10,%xmm2
movdqa %xmm11,%xmm10
@@ -1428,7 +1907,7 @@ aesni_xts_encrypt:
xorps %xmm10,%xmm2
xorps %xmm11,%xmm3
- call _aesni_encrypt3
+ call _aesni_encrypt2
xorps %xmm10,%xmm2
movdqa %xmm12,%xmm10
@@ -1474,15 +1953,15 @@ aesni_xts_encrypt:
call _aesni_encrypt4
- xorps %xmm10,%xmm2
- movdqa %xmm15,%xmm10
- xorps %xmm11,%xmm3
- xorps %xmm12,%xmm4
- movups %xmm2,(%rsi)
- xorps %xmm13,%xmm5
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
+ pxor %xmm10,%xmm2
+ movdqa %xmm14,%xmm10
+ pxor %xmm11,%xmm3
+ pxor %xmm12,%xmm4
+ movdqu %xmm2,(%rsi)
+ pxor %xmm13,%xmm5
+ movdqu %xmm3,16(%rsi)
+ movdqu %xmm4,32(%rsi)
+ movdqu %xmm5,48(%rsi)
leaq 64(%rsi),%rsi
jmp .Lxts_enc_done
@@ -1517,13 +1996,37 @@ aesni_xts_encrypt:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz .Loop_enc1_10
+ jnz .Loop_enc1_10
.byte 102,15,56,221,209
xorps %xmm10,%xmm2
movups %xmm2,-16(%rsi)
.Lxts_enc_ret:
- leaq 104(%rsp),%rsp
+ xorps %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
+ movaps %xmm0,0(%rsp)
+ pxor %xmm8,%xmm8
+ movaps %xmm0,16(%rsp)
+ pxor %xmm9,%xmm9
+ movaps %xmm0,32(%rsp)
+ pxor %xmm10,%xmm10
+ movaps %xmm0,48(%rsp)
+ pxor %xmm11,%xmm11
+ movaps %xmm0,64(%rsp)
+ pxor %xmm12,%xmm12
+ movaps %xmm0,80(%rsp)
+ pxor %xmm13,%xmm13
+ movaps %xmm0,96(%rsp)
+ pxor %xmm14,%xmm14
+ pxor %xmm15,%xmm15
+ leaq (%rbp),%rsp
+ popq %rbp
.Lxts_enc_epilogue:
.byte 0xf3,0xc3
.size aesni_xts_encrypt,.-aesni_xts_encrypt
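
The XTS rewrite above drops the old pcmpgtd carry test in favor of a branch-free tweak update: pshufd $95 / psrad $31 smear the sign bits of both qwords into full-lane masks, and a pand against .Lxts_magic (the 0x87 reduction constant plus a bit-64 carry) applies both the GF(2^128) reduction and the low-to-high qword carry that paddq alone would drop. The same computation in intrinsics, as a sketch:

    #include <immintrin.h>

    /* tweak' = tweak * x in GF(2^128) with the x^128+x^7+x^2+x+1
       reduction, computed without branches or pcmpgtd. */
    static __m128i xts_double_tweak(__m128i tweak)
    {
        /* dwords {0x87,0,1,0} in memory order: the reduction
           constant for the low dword and the carry into bit 64,
           cf. .Lxts_magic */
        const __m128i magic = _mm_set_epi32(0, 1, 0, 0x87);
        /* after the shuffle, dwords {0,1} hold the sign of bit 127
           and dwords {2,3} the sign of bit 63, i.e. each sign sits
           over the lane it must patch */
        __m128i carry = _mm_shuffle_epi32(tweak, 0x5f);
        carry = _mm_srai_epi32(carry, 31);
        tweak = _mm_add_epi64(tweak, tweak);  /* both qwords <<= 1 */
        carry = _mm_and_si128(carry, magic);  /* keep 0x87 / bit 64 */
        return _mm_xor_si128(tweak, carry);
    }
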
@@ -1531,249 +2034,293 @@ aesni_xts_encrypt:
.type aesni_xts_decrypt,@function
.align 16
aesni_xts_decrypt:
- leaq -104(%rsp),%rsp
- movups (%r9),%xmm15
+ leaq (%rsp),%rax
+ pushq %rbp
+ subq $112,%rsp
+ andq $-16,%rsp
+ leaq -8(%rax),%rbp
+ movups (%r9),%xmm2
movl 240(%r8),%eax
movl 240(%rcx),%r10d
movups (%r8),%xmm0
movups 16(%r8),%xmm1
leaq 32(%r8),%r8
- xorps %xmm0,%xmm15
+ xorps %xmm0,%xmm2
.Loop_enc1_11:
-.byte 102,68,15,56,220,249
+.byte 102,15,56,220,209
decl %eax
movups (%r8),%xmm1
leaq 16(%r8),%r8
- jnz .Loop_enc1_11
-.byte 102,68,15,56,221,249
+ jnz .Loop_enc1_11
+.byte 102,15,56,221,209
xorl %eax,%eax
testq $15,%rdx
setnz %al
shlq $4,%rax
subq %rax,%rdx
+ movups (%rcx),%xmm0
movq %rcx,%r11
movl %r10d,%eax
+ shll $4,%r10d
movq %rdx,%r9
andq $-16,%rdx
+ movups 16(%rcx,%r10,1),%xmm1
+
movdqa .Lxts_magic(%rip),%xmm8
- pxor %xmm14,%xmm14
- pcmpgtd %xmm15,%xmm14
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ movdqa %xmm2,%xmm15
+ pshufd $95,%xmm2,%xmm9
+ pxor %xmm0,%xmm1
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm10
+ psrad $31,%xmm14
paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm10
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm11
+ psrad $31,%xmm14
paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm11
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm12
+ psrad $31,%xmm14
paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm12
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm13
+ psrad $31,%xmm14
+ paddq %xmm15,%xmm15
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm13
+ pxor %xmm14,%xmm15
+ movdqa %xmm15,%xmm14
+ psrad $31,%xmm9
paddq %xmm15,%xmm15
pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
+ pxor %xmm0,%xmm14
pxor %xmm9,%xmm15
+ movaps %xmm1,96(%rsp)
+
subq $96,%rdx
jc .Lxts_dec_short
- shrl $1,%eax
- subl $1,%eax
- movl %eax,%r10d
+ movl $16+96,%eax
+ leaq 32(%r11,%r10,1),%rcx
+ subq %r10,%rax
+ movups 16(%r11),%xmm1
+ movq %rax,%r10
+ leaq .Lxts_magic(%rip),%r8
jmp .Lxts_dec_grandloop
-.align 16
+.align 32
.Lxts_dec_grandloop:
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
movdqu 0(%rdi),%xmm2
- pand %xmm8,%xmm9
+ movdqa %xmm0,%xmm8
movdqu 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
- movdqu 32(%rdi),%xmm4
pxor %xmm10,%xmm2
- movdqu 48(%rdi),%xmm5
+ movdqu 32(%rdi),%xmm4
pxor %xmm11,%xmm3
- movdqu 64(%rdi),%xmm6
+.byte 102,15,56,222,209
+ movdqu 48(%rdi),%xmm5
pxor %xmm12,%xmm4
- movdqu 80(%rdi),%xmm7
- leaq 96(%rdi),%rdi
+.byte 102,15,56,222,217
+ movdqu 64(%rdi),%xmm6
pxor %xmm13,%xmm5
- movups (%r11),%xmm0
+.byte 102,15,56,222,225
+ movdqu 80(%rdi),%xmm7
+ pxor %xmm15,%xmm8
+ movdqa 96(%rsp),%xmm9
pxor %xmm14,%xmm6
- pxor %xmm15,%xmm7
-
-
+.byte 102,15,56,222,233
+ movups 32(%r11),%xmm0
+ leaq 96(%rdi),%rdi
+ pxor %xmm8,%xmm7
- movups 16(%r11),%xmm1
- pxor %xmm0,%xmm2
- pxor %xmm0,%xmm3
+ pxor %xmm9,%xmm10
+.byte 102,15,56,222,241
+ pxor %xmm9,%xmm11
movdqa %xmm10,0(%rsp)
-.byte 102,15,56,222,209
- leaq 32(%r11),%rcx
- pxor %xmm0,%xmm4
+.byte 102,15,56,222,249
+ movups 48(%r11),%xmm1
+ pxor %xmm9,%xmm12
+
+.byte 102,15,56,222,208
+ pxor %xmm9,%xmm13
movdqa %xmm11,16(%rsp)
-.byte 102,15,56,222,217
- pxor %xmm0,%xmm5
+.byte 102,15,56,222,216
+ pxor %xmm9,%xmm14
movdqa %xmm12,32(%rsp)
-.byte 102,15,56,222,225
- pxor %xmm0,%xmm6
- movdqa %xmm13,48(%rsp)
-.byte 102,15,56,222,233
- pxor %xmm0,%xmm7
- movups (%rcx),%xmm0
- decl %eax
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+ pxor %xmm9,%xmm8
movdqa %xmm14,64(%rsp)
-.byte 102,15,56,222,241
- movdqa %xmm15,80(%rsp)
-.byte 102,15,56,222,249
- pxor %xmm14,%xmm14
- pcmpgtd %xmm15,%xmm14
- jmp .Lxts_dec_loop6_enter
-
-.align 16
+.byte 102,15,56,222,240
+.byte 102,15,56,222,248
+ movups 64(%r11),%xmm0
+ movdqa %xmm8,80(%rsp)
+ pshufd $95,%xmm15,%xmm9
+ jmp .Lxts_dec_loop6
+.align 32
.Lxts_dec_loop6:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %eax
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
-.Lxts_dec_loop6_enter:
- movups 16(%rcx),%xmm1
+ movups -64(%rcx,%rax,1),%xmm1
+ addq $32,%rax
+
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,222,224
.byte 102,15,56,222,232
.byte 102,15,56,222,240
.byte 102,15,56,222,248
- movups (%rcx),%xmm0
+ movups -80(%rcx,%rax,1),%xmm0
jnz .Lxts_dec_loop6
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- paddq %xmm15,%xmm15
+ movdqa (%r8),%xmm8
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
.byte 102,15,56,222,209
- pand %xmm8,%xmm9
+ paddq %xmm15,%xmm15
+ psrad $31,%xmm14
.byte 102,15,56,222,217
- pcmpgtd %xmm15,%xmm14
+ pand %xmm8,%xmm14
+ movups (%r11),%xmm10
.byte 102,15,56,222,225
- pxor %xmm9,%xmm15
.byte 102,15,56,222,233
.byte 102,15,56,222,241
+ pxor %xmm14,%xmm15
+ movaps %xmm10,%xmm11
.byte 102,15,56,222,249
- movups 16(%rcx),%xmm1
+ movups -64(%rcx),%xmm1
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm10
- paddq %xmm15,%xmm15
+ movdqa %xmm9,%xmm14
.byte 102,15,56,222,208
- pand %xmm8,%xmm9
+ paddd %xmm9,%xmm9
+ pxor %xmm15,%xmm10
.byte 102,15,56,222,216
- pcmpgtd %xmm15,%xmm14
+ psrad $31,%xmm14
+ paddq %xmm15,%xmm15
.byte 102,15,56,222,224
- pxor %xmm9,%xmm15
.byte 102,15,56,222,232
+ pand %xmm8,%xmm14
+ movaps %xmm11,%xmm12
.byte 102,15,56,222,240
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
.byte 102,15,56,222,248
- movups 32(%rcx),%xmm0
+ movups -48(%rcx),%xmm0
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm11
- paddq %xmm15,%xmm15
+ paddd %xmm9,%xmm9
.byte 102,15,56,222,209
- pand %xmm8,%xmm9
+ pxor %xmm15,%xmm11
+ psrad $31,%xmm14
.byte 102,15,56,222,217
- pcmpgtd %xmm15,%xmm14
+ paddq %xmm15,%xmm15
+ pand %xmm8,%xmm14
.byte 102,15,56,222,225
- pxor %xmm9,%xmm15
.byte 102,15,56,222,233
+ movdqa %xmm13,48(%rsp)
+ pxor %xmm14,%xmm15
.byte 102,15,56,222,241
+ movaps %xmm12,%xmm13
+ movdqa %xmm9,%xmm14
.byte 102,15,56,222,249
+ movups -32(%rcx),%xmm1
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm12
+ paddd %xmm9,%xmm9
+.byte 102,15,56,222,208
+ pxor %xmm15,%xmm12
+ psrad $31,%xmm14
+.byte 102,15,56,222,216
paddq %xmm15,%xmm15
-.byte 102,15,56,223,208
- pand %xmm8,%xmm9
-.byte 102,15,56,223,216
- pcmpgtd %xmm15,%xmm14
-.byte 102,15,56,223,224
- pxor %xmm9,%xmm15
-.byte 102,15,56,223,232
-.byte 102,15,56,223,240
-.byte 102,15,56,223,248
+ pand %xmm8,%xmm14
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+.byte 102,15,56,222,240
+ pxor %xmm14,%xmm15
+ movaps %xmm13,%xmm14
+.byte 102,15,56,222,248
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm13
+ movdqa %xmm9,%xmm0
+ paddd %xmm9,%xmm9
+.byte 102,15,56,222,209
+ pxor %xmm15,%xmm13
+ psrad $31,%xmm0
+.byte 102,15,56,222,217
+ paddq %xmm15,%xmm15
+ pand %xmm8,%xmm0
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
+ pxor %xmm0,%xmm15
+ movups (%r11),%xmm0
+.byte 102,15,56,222,241
+.byte 102,15,56,222,249
+ movups 16(%r11),%xmm1
+
+ pxor %xmm15,%xmm14
+.byte 102,15,56,223,84,36,0
+ psrad $31,%xmm9
paddq %xmm15,%xmm15
- xorps 0(%rsp),%xmm2
+.byte 102,15,56,223,92,36,16
+.byte 102,15,56,223,100,36,32
pand %xmm8,%xmm9
- xorps 16(%rsp),%xmm3
- pcmpgtd %xmm15,%xmm14
+ movq %r10,%rax
+.byte 102,15,56,223,108,36,48
+.byte 102,15,56,223,116,36,64
+.byte 102,15,56,223,124,36,80
pxor %xmm9,%xmm15
- xorps 32(%rsp),%xmm4
- movups %xmm2,0(%rsi)
- xorps 48(%rsp),%xmm5
- movups %xmm3,16(%rsi)
- xorps 64(%rsp),%xmm6
- movups %xmm4,32(%rsi)
- xorps 80(%rsp),%xmm7
- movups %xmm5,48(%rsi)
- movl %r10d,%eax
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
leaq 96(%rsi),%rsi
+ movups %xmm2,-96(%rsi)
+ movups %xmm3,-80(%rsi)
+ movups %xmm4,-64(%rsi)
+ movups %xmm5,-48(%rsi)
+ movups %xmm6,-32(%rsi)
+ movups %xmm7,-16(%rsi)
subq $96,%rdx
jnc .Lxts_dec_grandloop
- leal 3(%rax,%rax,1),%eax
+ movl $16+96,%eax
+ subl %r10d,%eax
movq %r11,%rcx
- movl %eax,%r10d
+ shrl $4,%eax
.Lxts_dec_short:
+
+ movl %eax,%r10d
+ pxor %xmm0,%xmm10
+ pxor %xmm0,%xmm11
addq $96,%rdx
jz .Lxts_dec_done
+ pxor %xmm0,%xmm12
cmpq $32,%rdx
jb .Lxts_dec_one
+ pxor %xmm0,%xmm13
je .Lxts_dec_two
+ pxor %xmm0,%xmm14
cmpq $64,%rdx
jb .Lxts_dec_three
je .Lxts_dec_four
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
movdqu (%rdi),%xmm2
- pand %xmm8,%xmm9
movdqu 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
movdqu 32(%rdi),%xmm4
pxor %xmm10,%xmm2
movdqu 48(%rdi),%xmm5
@@ -1823,7 +2370,7 @@ aesni_xts_decrypt:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz .Loop_dec1_12
+ jnz .Loop_dec1_12
.byte 102,15,56,223,209
xorps %xmm10,%xmm2
movdqa %xmm11,%xmm10
@@ -1840,7 +2387,7 @@ aesni_xts_decrypt:
xorps %xmm10,%xmm2
xorps %xmm11,%xmm3
- call _aesni_decrypt3
+ call _aesni_decrypt2
xorps %xmm10,%xmm2
movdqa %xmm12,%xmm10
@@ -1866,7 +2413,7 @@ aesni_xts_decrypt:
xorps %xmm10,%xmm2
movdqa %xmm13,%xmm10
xorps %xmm11,%xmm3
- movdqa %xmm15,%xmm11
+ movdqa %xmm14,%xmm11
xorps %xmm12,%xmm4
movups %xmm2,(%rsi)
movups %xmm3,16(%rsi)
@@ -1876,14 +2423,8 @@ aesni_xts_decrypt:
.align 16
.Lxts_dec_four:
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
movups (%rdi),%xmm2
- pand %xmm8,%xmm9
movups 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
movups 32(%rdi),%xmm4
xorps %xmm10,%xmm2
movups 48(%rdi),%xmm5
@@ -1894,16 +2435,16 @@ aesni_xts_decrypt:
call _aesni_decrypt4
- xorps %xmm10,%xmm2
+ pxor %xmm10,%xmm2
movdqa %xmm14,%xmm10
- xorps %xmm11,%xmm3
+ pxor %xmm11,%xmm3
movdqa %xmm15,%xmm11
- xorps %xmm12,%xmm4
- movups %xmm2,(%rsi)
- xorps %xmm13,%xmm5
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
+ pxor %xmm12,%xmm4
+ movdqu %xmm2,(%rsi)
+ pxor %xmm13,%xmm5
+ movdqu %xmm3,16(%rsi)
+ movdqu %xmm4,32(%rsi)
+ movdqu %xmm5,48(%rsi)
leaq 64(%rsi),%rsi
jmp .Lxts_dec_done
@@ -1927,7 +2468,7 @@ aesni_xts_decrypt:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz .Loop_dec1_13
+ jnz .Loop_dec1_13
.byte 102,15,56,223,209
xorps %xmm11,%xmm2
movups %xmm2,(%rsi)
@@ -1957,13 +2498,37 @@ aesni_xts_decrypt:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz .Loop_dec1_14
+ jnz .Loop_dec1_14
.byte 102,15,56,223,209
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
.Lxts_dec_ret:
- leaq 104(%rsp),%rsp
+ xorps %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
+ movaps %xmm0,0(%rsp)
+ pxor %xmm8,%xmm8
+ movaps %xmm0,16(%rsp)
+ pxor %xmm9,%xmm9
+ movaps %xmm0,32(%rsp)
+ pxor %xmm10,%xmm10
+ movaps %xmm0,48(%rsp)
+ pxor %xmm11,%xmm11
+ movaps %xmm0,64(%rsp)
+ pxor %xmm12,%xmm12
+ movaps %xmm0,80(%rsp)
+ pxor %xmm13,%xmm13
+ movaps %xmm0,96(%rsp)
+ pxor %xmm14,%xmm14
+ pxor %xmm15,%xmm15
+ leaq (%rbp),%rsp
+ popq %rbp
.Lxts_dec_epilogue:
.byte 0xf3,0xc3
.size aesni_xts_decrypt,.-aesni_xts_decrypt
@@ -2000,7 +2565,7 @@ aesni_cbc_encrypt:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz .Loop_enc1_15
+ jnz .Loop_enc1_15
.byte 102,15,56,221,209
movl %r10d,%eax
movq %r11,%rcx
@@ -2010,285 +2575,545 @@ aesni_cbc_encrypt:
jnc .Lcbc_enc_loop
addq $16,%rdx
jnz .Lcbc_enc_tail
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
movups %xmm2,(%r8)
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
jmp .Lcbc_ret
.Lcbc_enc_tail:
movq %rdx,%rcx
xchgq %rdi,%rsi
-.long 0x9066A4F3
+.long 0x9066A4F3
movl $16,%ecx
subq %rdx,%rcx
xorl %eax,%eax
-.long 0x9066AAF3
+.long 0x9066AAF3
leaq -16(%rdi),%rdi
movl %r10d,%eax
movq %rdi,%rsi
movq %r11,%rcx
xorq %rdx,%rdx
- jmp .Lcbc_enc_loop
+ jmp .Lcbc_enc_loop
.align 16
.Lcbc_decrypt:
- movups (%r8),%xmm9
+ cmpq $16,%rdx
+ jne .Lcbc_decrypt_bulk
+
+
+
+ movdqu (%rdi),%xmm2
+ movdqu (%r8),%xmm3
+ movdqa %xmm2,%xmm4
+ movups (%rcx),%xmm0
+ movups 16(%rcx),%xmm1
+ leaq 32(%rcx),%rcx
+ xorps %xmm0,%xmm2
+.Loop_dec1_16:
+.byte 102,15,56,222,209
+ decl %r10d
+ movups (%rcx),%xmm1
+ leaq 16(%rcx),%rcx
+ jnz .Loop_dec1_16
+.byte 102,15,56,223,209
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ movdqu %xmm4,(%r8)
+ xorps %xmm3,%xmm2
+ pxor %xmm3,%xmm3
+ movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
+ jmp .Lcbc_ret
+.align 16
+.Lcbc_decrypt_bulk:
+ leaq (%rsp),%rax
+ pushq %rbp
+ subq $16,%rsp
+ andq $-16,%rsp
+ leaq -8(%rax),%rbp
+ movups (%r8),%xmm10
movl %r10d,%eax
- cmpq $112,%rdx
+ cmpq $80,%rdx
jbe .Lcbc_dec_tail
- shrl $1,%r10d
- subq $112,%rdx
- movl %r10d,%eax
- movaps %xmm9,-24(%rsp)
+
+ movups (%rcx),%xmm0
+ movdqu 0(%rdi),%xmm2
+ movdqu 16(%rdi),%xmm3
+ movdqa %xmm2,%xmm11
+ movdqu 32(%rdi),%xmm4
+ movdqa %xmm3,%xmm12
+ movdqu 48(%rdi),%xmm5
+ movdqa %xmm4,%xmm13
+ movdqu 64(%rdi),%xmm6
+ movdqa %xmm5,%xmm14
+ movdqu 80(%rdi),%xmm7
+ movdqa %xmm6,%xmm15
+ movl OPENSSL_ia32cap_P+4(%rip),%r9d
+ cmpq $112,%rdx
+ jbe .Lcbc_dec_six_or_seven
+
+ andl $71303168,%r9d
+ subq $80,%rdx
+ cmpl $4194304,%r9d
+ je .Lcbc_dec_loop6_enter
+ subq $32,%rdx
+ leaq 112(%rcx),%rcx
jmp .Lcbc_dec_loop8_enter
.align 16
.Lcbc_dec_loop8:
- movaps %xmm0,-24(%rsp)
movups %xmm9,(%rsi)
leaq 16(%rsi),%rsi
.Lcbc_dec_loop8_enter:
- movups (%rcx),%xmm0
- movups (%rdi),%xmm2
- movups 16(%rdi),%xmm3
- movups 16(%rcx),%xmm1
+ movdqu 96(%rdi),%xmm8
+ pxor %xmm0,%xmm2
+ movdqu 112(%rdi),%xmm9
+ pxor %xmm0,%xmm3
+ movups 16-112(%rcx),%xmm1
+ pxor %xmm0,%xmm4
+ xorq %r11,%r11
+ cmpq $112,%rdx
+ pxor %xmm0,%xmm5
+ pxor %xmm0,%xmm6
+ pxor %xmm0,%xmm7
+ pxor %xmm0,%xmm8
- leaq 32(%rcx),%rcx
- movdqu 32(%rdi),%xmm4
- xorps %xmm0,%xmm2
- movdqu 48(%rdi),%xmm5
- xorps %xmm0,%xmm3
- movdqu 64(%rdi),%xmm6
.byte 102,15,56,222,209
- pxor %xmm0,%xmm4
- movdqu 80(%rdi),%xmm7
+ pxor %xmm0,%xmm9
+ movups 32-112(%rcx),%xmm0
.byte 102,15,56,222,217
- pxor %xmm0,%xmm5
- movdqu 96(%rdi),%xmm8
.byte 102,15,56,222,225
- pxor %xmm0,%xmm6
- movdqu 112(%rdi),%xmm9
.byte 102,15,56,222,233
- pxor %xmm0,%xmm7
- decl %eax
.byte 102,15,56,222,241
- pxor %xmm0,%xmm8
.byte 102,15,56,222,249
- pxor %xmm0,%xmm9
- movups (%rcx),%xmm0
.byte 102,68,15,56,222,193
+ setnc %r11b
+ shlq $7,%r11
.byte 102,68,15,56,222,201
- movups 16(%rcx),%xmm1
-
- call .Ldec_loop8_enter
+ addq %rdi,%r11
+ movups 48-112(%rcx),%xmm1
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+.byte 102,15,56,222,240
+.byte 102,15,56,222,248
+.byte 102,68,15,56,222,192
+.byte 102,68,15,56,222,200
+ movups 64-112(%rcx),%xmm0
+ nop
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
+.byte 102,15,56,222,241
+.byte 102,15,56,222,249
+.byte 102,68,15,56,222,193
+.byte 102,68,15,56,222,201
+ movups 80-112(%rcx),%xmm1
+ nop
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+.byte 102,15,56,222,240
+.byte 102,15,56,222,248
+.byte 102,68,15,56,222,192
+.byte 102,68,15,56,222,200
+ movups 96-112(%rcx),%xmm0
+ nop
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
+.byte 102,15,56,222,241
+.byte 102,15,56,222,249
+.byte 102,68,15,56,222,193
+.byte 102,68,15,56,222,201
+ movups 112-112(%rcx),%xmm1
+ nop
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+.byte 102,15,56,222,240
+.byte 102,15,56,222,248
+.byte 102,68,15,56,222,192
+.byte 102,68,15,56,222,200
+ movups 128-112(%rcx),%xmm0
+ nop
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
+.byte 102,15,56,222,241
+.byte 102,15,56,222,249
+.byte 102,68,15,56,222,193
+.byte 102,68,15,56,222,201
+ movups 144-112(%rcx),%xmm1
+ cmpl $11,%eax
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+.byte 102,15,56,222,240
+.byte 102,15,56,222,248
+.byte 102,68,15,56,222,192
+.byte 102,68,15,56,222,200
+ movups 160-112(%rcx),%xmm0
+ jb .Lcbc_dec_done
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
+.byte 102,15,56,222,241
+.byte 102,15,56,222,249
+.byte 102,68,15,56,222,193
+.byte 102,68,15,56,222,201
+ movups 176-112(%rcx),%xmm1
+ nop
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+.byte 102,15,56,222,240
+.byte 102,15,56,222,248
+.byte 102,68,15,56,222,192
+.byte 102,68,15,56,222,200
+ movups 192-112(%rcx),%xmm0
+ je .Lcbc_dec_done
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
+.byte 102,15,56,222,241
+.byte 102,15,56,222,249
+.byte 102,68,15,56,222,193
+.byte 102,68,15,56,222,201
+ movups 208-112(%rcx),%xmm1
+ nop
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+.byte 102,15,56,222,240
+.byte 102,15,56,222,248
+.byte 102,68,15,56,222,192
+.byte 102,68,15,56,222,200
+ movups 224-112(%rcx),%xmm0
+ jmp .Lcbc_dec_done
+.align 16
+.Lcbc_dec_done:
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+ pxor %xmm0,%xmm10
+ pxor %xmm0,%xmm11
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
+ pxor %xmm0,%xmm12
+ pxor %xmm0,%xmm13
+.byte 102,15,56,222,241
+.byte 102,15,56,222,249
+ pxor %xmm0,%xmm14
+ pxor %xmm0,%xmm15
+.byte 102,68,15,56,222,193
+.byte 102,68,15,56,222,201
+ movdqu 80(%rdi),%xmm1
+
+.byte 102,65,15,56,223,210
+ movdqu 96(%rdi),%xmm10
+ pxor %xmm0,%xmm1
+.byte 102,65,15,56,223,219
+ pxor %xmm0,%xmm10
+ movdqu 112(%rdi),%xmm0
+.byte 102,65,15,56,223,228
+ leaq 128(%rdi),%rdi
+ movdqu 0(%r11),%xmm11
+.byte 102,65,15,56,223,237
+.byte 102,65,15,56,223,246
+ movdqu 16(%r11),%xmm12
+ movdqu 32(%r11),%xmm13
+.byte 102,65,15,56,223,255
+.byte 102,68,15,56,223,193
+ movdqu 48(%r11),%xmm14
+ movdqu 64(%r11),%xmm15
+.byte 102,69,15,56,223,202
+ movdqa %xmm0,%xmm10
+ movdqu 80(%r11),%xmm1
+ movups -112(%rcx),%xmm0
- movups (%rdi),%xmm1
- movups 16(%rdi),%xmm0
- xorps -24(%rsp),%xmm2
- xorps %xmm1,%xmm3
- movups 32(%rdi),%xmm1
- xorps %xmm0,%xmm4
- movups 48(%rdi),%xmm0
- xorps %xmm1,%xmm5
- movups 64(%rdi),%xmm1
- xorps %xmm0,%xmm6
- movups 80(%rdi),%xmm0
- xorps %xmm1,%xmm7
- movups 96(%rdi),%xmm1
- xorps %xmm0,%xmm8
- movups 112(%rdi),%xmm0
- xorps %xmm1,%xmm9
movups %xmm2,(%rsi)
+ movdqa %xmm11,%xmm2
movups %xmm3,16(%rsi)
+ movdqa %xmm12,%xmm3
movups %xmm4,32(%rsi)
+ movdqa %xmm13,%xmm4
movups %xmm5,48(%rsi)
- movl %r10d,%eax
+ movdqa %xmm14,%xmm5
movups %xmm6,64(%rsi)
- movq %r11,%rcx
+ movdqa %xmm15,%xmm6
movups %xmm7,80(%rsi)
- leaq 128(%rdi),%rdi
+ movdqa %xmm1,%xmm7
movups %xmm8,96(%rsi)
leaq 112(%rsi),%rsi
+
subq $128,%rdx
ja .Lcbc_dec_loop8
movaps %xmm9,%xmm2
- movaps %xmm0,%xmm9
+ leaq -112(%rcx),%rcx
addq $112,%rdx
- jle .Lcbc_dec_tail_collected
- movups %xmm2,(%rsi)
- leal 1(%r10,%r10,1),%eax
+ jle .Lcbc_dec_clear_tail_collected
+ movups %xmm9,(%rsi)
leaq 16(%rsi),%rsi
+ cmpq $80,%rdx
+ jbe .Lcbc_dec_tail
+
+ movaps %xmm11,%xmm2
+.Lcbc_dec_six_or_seven:
+ cmpq $96,%rdx
+ ja .Lcbc_dec_seven
+
+ movaps %xmm7,%xmm8
+ call _aesni_decrypt6
+ pxor %xmm10,%xmm2
+ movaps %xmm8,%xmm10
+ pxor %xmm11,%xmm3
+ movdqu %xmm2,(%rsi)
+ pxor %xmm12,%xmm4
+ movdqu %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
+ pxor %xmm13,%xmm5
+ movdqu %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
+ pxor %xmm14,%xmm6
+ movdqu %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
+ pxor %xmm15,%xmm7
+ movdqu %xmm6,64(%rsi)
+ pxor %xmm6,%xmm6
+ leaq 80(%rsi),%rsi
+ movdqa %xmm7,%xmm2
+ pxor %xmm7,%xmm7
+ jmp .Lcbc_dec_tail_collected
+
+.align 16
+.Lcbc_dec_seven:
+ movups 96(%rdi),%xmm8
+ xorps %xmm9,%xmm9
+ call _aesni_decrypt8
+ movups 80(%rdi),%xmm9
+ pxor %xmm10,%xmm2
+ movups 96(%rdi),%xmm10
+ pxor %xmm11,%xmm3
+ movdqu %xmm2,(%rsi)
+ pxor %xmm12,%xmm4
+ movdqu %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
+ pxor %xmm13,%xmm5
+ movdqu %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
+ pxor %xmm14,%xmm6
+ movdqu %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
+ pxor %xmm15,%xmm7
+ movdqu %xmm6,64(%rsi)
+ pxor %xmm6,%xmm6
+ pxor %xmm9,%xmm8
+ movdqu %xmm7,80(%rsi)
+ pxor %xmm7,%xmm7
+ leaq 96(%rsi),%rsi
+ movdqa %xmm8,%xmm2
+ pxor %xmm8,%xmm8
+ pxor %xmm9,%xmm9
+ jmp .Lcbc_dec_tail_collected
+
+.align 16
+.Lcbc_dec_loop6:
+ movups %xmm7,(%rsi)
+ leaq 16(%rsi),%rsi
+ movdqu 0(%rdi),%xmm2
+ movdqu 16(%rdi),%xmm3
+ movdqa %xmm2,%xmm11
+ movdqu 32(%rdi),%xmm4
+ movdqa %xmm3,%xmm12
+ movdqu 48(%rdi),%xmm5
+ movdqa %xmm4,%xmm13
+ movdqu 64(%rdi),%xmm6
+ movdqa %xmm5,%xmm14
+ movdqu 80(%rdi),%xmm7
+ movdqa %xmm6,%xmm15
+.Lcbc_dec_loop6_enter:
+ leaq 96(%rdi),%rdi
+ movdqa %xmm7,%xmm8
+
+ call _aesni_decrypt6
+
+ pxor %xmm10,%xmm2
+ movdqa %xmm8,%xmm10
+ pxor %xmm11,%xmm3
+ movdqu %xmm2,(%rsi)
+ pxor %xmm12,%xmm4
+ movdqu %xmm3,16(%rsi)
+ pxor %xmm13,%xmm5
+ movdqu %xmm4,32(%rsi)
+ pxor %xmm14,%xmm6
+ movq %r11,%rcx
+ movdqu %xmm5,48(%rsi)
+ pxor %xmm15,%xmm7
+ movl %r10d,%eax
+ movdqu %xmm6,64(%rsi)
+ leaq 80(%rsi),%rsi
+ subq $96,%rdx
+ ja .Lcbc_dec_loop6
+
+ movdqa %xmm7,%xmm2
+ addq $80,%rdx
+ jle .Lcbc_dec_clear_tail_collected
+ movups %xmm7,(%rsi)
+ leaq 16(%rsi),%rsi
+
.Lcbc_dec_tail:
movups (%rdi),%xmm2
- movaps %xmm2,%xmm8
- cmpq $16,%rdx
+ subq $16,%rdx
jbe .Lcbc_dec_one
movups 16(%rdi),%xmm3
- movaps %xmm3,%xmm7
- cmpq $32,%rdx
+ movaps %xmm2,%xmm11
+ subq $16,%rdx
jbe .Lcbc_dec_two
movups 32(%rdi),%xmm4
- movaps %xmm4,%xmm6
- cmpq $48,%rdx
+ movaps %xmm3,%xmm12
+ subq $16,%rdx
jbe .Lcbc_dec_three
movups 48(%rdi),%xmm5
- cmpq $64,%rdx
+ movaps %xmm4,%xmm13
+ subq $16,%rdx
jbe .Lcbc_dec_four
movups 64(%rdi),%xmm6
- cmpq $80,%rdx
- jbe .Lcbc_dec_five
-
- movups 80(%rdi),%xmm7
- cmpq $96,%rdx
- jbe .Lcbc_dec_six
-
- movups 96(%rdi),%xmm8
- movaps %xmm9,-24(%rsp)
- call _aesni_decrypt8
- movups (%rdi),%xmm1
- movups 16(%rdi),%xmm0
- xorps -24(%rsp),%xmm2
- xorps %xmm1,%xmm3
- movups 32(%rdi),%xmm1
- xorps %xmm0,%xmm4
- movups 48(%rdi),%xmm0
- xorps %xmm1,%xmm5
- movups 64(%rdi),%xmm1
- xorps %xmm0,%xmm6
- movups 80(%rdi),%xmm0
- xorps %xmm1,%xmm7
- movups 96(%rdi),%xmm9
- xorps %xmm0,%xmm8
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
- leaq 96(%rsi),%rsi
- movaps %xmm8,%xmm2
- subq $112,%rdx
+ movaps %xmm5,%xmm14
+ movaps %xmm6,%xmm15
+ xorps %xmm7,%xmm7
+ call _aesni_decrypt6
+ pxor %xmm10,%xmm2
+ movaps %xmm15,%xmm10
+ pxor %xmm11,%xmm3
+ movdqu %xmm2,(%rsi)
+ pxor %xmm12,%xmm4
+ movdqu %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
+ pxor %xmm13,%xmm5
+ movdqu %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
+ pxor %xmm14,%xmm6
+ movdqu %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
+ leaq 64(%rsi),%rsi
+ movdqa %xmm6,%xmm2
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
+ subq $16,%rdx
jmp .Lcbc_dec_tail_collected
+
.align 16
.Lcbc_dec_one:
+ movaps %xmm2,%xmm11
movups (%rcx),%xmm0
movups 16(%rcx),%xmm1
leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
-.Loop_dec1_16:
+.Loop_dec1_17:
.byte 102,15,56,222,209
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz .Loop_dec1_16
+ jnz .Loop_dec1_17
.byte 102,15,56,223,209
- xorps %xmm9,%xmm2
- movaps %xmm8,%xmm9
- subq $16,%rdx
+ xorps %xmm10,%xmm2
+ movaps %xmm11,%xmm10
jmp .Lcbc_dec_tail_collected
.align 16
.Lcbc_dec_two:
- xorps %xmm4,%xmm4
- call _aesni_decrypt3
- xorps %xmm9,%xmm2
- xorps %xmm8,%xmm3
- movups %xmm2,(%rsi)
- movaps %xmm7,%xmm9
- movaps %xmm3,%xmm2
+ movaps %xmm3,%xmm12
+ call _aesni_decrypt2
+ pxor %xmm10,%xmm2
+ movaps %xmm12,%xmm10
+ pxor %xmm11,%xmm3
+ movdqu %xmm2,(%rsi)
+ movdqa %xmm3,%xmm2
+ pxor %xmm3,%xmm3
leaq 16(%rsi),%rsi
- subq $32,%rdx
jmp .Lcbc_dec_tail_collected
.align 16
.Lcbc_dec_three:
+ movaps %xmm4,%xmm13
call _aesni_decrypt3
- xorps %xmm9,%xmm2
- xorps %xmm8,%xmm3
- movups %xmm2,(%rsi)
- xorps %xmm7,%xmm4
- movups %xmm3,16(%rsi)
- movaps %xmm6,%xmm9
- movaps %xmm4,%xmm2
+ pxor %xmm10,%xmm2
+ movaps %xmm13,%xmm10
+ pxor %xmm11,%xmm3
+ movdqu %xmm2,(%rsi)
+ pxor %xmm12,%xmm4
+ movdqu %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
+ movdqa %xmm4,%xmm2
+ pxor %xmm4,%xmm4
leaq 32(%rsi),%rsi
- subq $48,%rdx
jmp .Lcbc_dec_tail_collected
.align 16
.Lcbc_dec_four:
+ movaps %xmm5,%xmm14
call _aesni_decrypt4
- xorps %xmm9,%xmm2
- movups 48(%rdi),%xmm9
- xorps %xmm8,%xmm3
- movups %xmm2,(%rsi)
- xorps %xmm7,%xmm4
- movups %xmm3,16(%rsi)
- xorps %xmm6,%xmm5
- movups %xmm4,32(%rsi)
- movaps %xmm5,%xmm2
+ pxor %xmm10,%xmm2
+ movaps %xmm14,%xmm10
+ pxor %xmm11,%xmm3
+ movdqu %xmm2,(%rsi)
+ pxor %xmm12,%xmm4
+ movdqu %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
+ pxor %xmm13,%xmm5
+ movdqu %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
+ movdqa %xmm5,%xmm2
+ pxor %xmm5,%xmm5
leaq 48(%rsi),%rsi
- subq $64,%rdx
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_five:
- xorps %xmm7,%xmm7
- call _aesni_decrypt6
- movups 16(%rdi),%xmm1
- movups 32(%rdi),%xmm0
- xorps %xmm9,%xmm2
- xorps %xmm8,%xmm3
- xorps %xmm1,%xmm4
- movups 48(%rdi),%xmm1
- xorps %xmm0,%xmm5
- movups 64(%rdi),%xmm9
- xorps %xmm1,%xmm6
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- leaq 64(%rsi),%rsi
- movaps %xmm6,%xmm2
- subq $80,%rdx
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_six:
- call _aesni_decrypt6
- movups 16(%rdi),%xmm1
- movups 32(%rdi),%xmm0
- xorps %xmm9,%xmm2
- xorps %xmm8,%xmm3
- xorps %xmm1,%xmm4
- movups 48(%rdi),%xmm1
- xorps %xmm0,%xmm5
- movups 64(%rdi),%xmm0
- xorps %xmm1,%xmm6
- movups 80(%rdi),%xmm9
- xorps %xmm0,%xmm7
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movups %xmm6,64(%rsi)
- leaq 80(%rsi),%rsi
- movaps %xmm7,%xmm2
- subq $96,%rdx
jmp .Lcbc_dec_tail_collected
+
.align 16
+.Lcbc_dec_clear_tail_collected:
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
+ pxor %xmm8,%xmm8
+ pxor %xmm9,%xmm9
.Lcbc_dec_tail_collected:
+ movups %xmm10,(%r8)
andq $15,%rdx
- movups %xmm9,(%r8)
jnz .Lcbc_dec_tail_partial
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
jmp .Lcbc_dec_ret
.align 16
.Lcbc_dec_tail_partial:
- movaps %xmm2,-24(%rsp)
+ movaps %xmm2,(%rsp)
+ pxor %xmm2,%xmm2
movq $16,%rcx
movq %rsi,%rdi
subq %rdx,%rcx
- leaq -24(%rsp),%rsi
-.long 0x9066A4F3
+ leaq (%rsp),%rsi
+.long 0x9066A4F3
+ movdqa %xmm2,(%rsp)
.Lcbc_dec_ret:
+ xorps %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ leaq (%rbp),%rsp
+ popq %rbp
.Lcbc_ret:
.byte 0xf3,0xc3
.size aesni_cbc_encrypt,.-aesni_cbc_encrypt
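
A note on the restructured bulk path above: CBC decryption computes P[i] = Dec(K, C[i]) ^ C[i-1], so the loop must save copies of the ciphertext blocks (the movdqa spills into xmm10 through xmm15) before decrypting in place, and the last ciphertext block is written back through %r8 as the chaining value for the next call. A scalar model, with aes_decrypt_block() as a hypothetical stand-in for the aesdec round sequence:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical one-block primitive standing in for the AESNI rounds. */
void aes_decrypt_block(const void *key, const uint8_t in[16],
                       uint8_t out[16]);

/* Scalar model of the chaining done by the bulk loop above. */
void cbc_decrypt(const void *key, uint8_t iv[16],
                 const uint8_t *in, uint8_t *out, size_t blocks)
{
    uint8_t prev[16], save[16];

    memcpy(prev, iv, 16);
    while (blocks--) {
        memcpy(save, in, 16);        /* keep C[i]; in/out may alias */
        aes_decrypt_block(key, in, out);
        for (int i = 0; i < 16; i++)
            out[i] ^= prev[i];       /* P[i] = Dec(C[i]) ^ C[i-1]   */
        memcpy(prev, save, 16);
        in += 16;
        out += 16;
    }
    memcpy(iv, prev, 16);            /* new IV = last ciphertext block */
}
```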
@@ -2296,7 +3121,7 @@ aesni_cbc_encrypt:
.type aesni_set_decrypt_key,@function
.align 16
aesni_set_decrypt_key:
-.byte 0x48,0x83,0xEC,0x08
+.byte 0x48,0x83,0xEC,0x08
call __aesni_set_encrypt_key
shll $4,%esi
testl %eax,%eax
@@ -2324,7 +3149,9 @@ aesni_set_decrypt_key:
movups (%rdx),%xmm0
.byte 102,15,56,219,192
+ pxor %xmm1,%xmm1
movups %xmm0,(%rdi)
+ pxor %xmm0,%xmm0
.Ldec_key_ret:
addq $8,%rsp
.byte 0xf3,0xc3
@@ -2335,15 +3162,17 @@ aesni_set_decrypt_key:
.align 16
aesni_set_encrypt_key:
__aesni_set_encrypt_key:
-.byte 0x48,0x83,0xEC,0x08
+.byte 0x48,0x83,0xEC,0x08
movq $-1,%rax
testq %rdi,%rdi
jz .Lenc_key_ret
testq %rdx,%rdx
jz .Lenc_key_ret
+ movl $268437504,%r10d
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
+ andl OPENSSL_ia32cap_P+4(%rip),%r10d
leaq 16(%rdx),%rax
cmpl $256,%esi
je .L14rounds
@@ -2354,6 +3183,9 @@ __aesni_set_encrypt_key:
.L10rounds:
movl $9,%esi
+ cmpl $268435456,%r10d
+ je .L10rounds_alt
+
movups %xmm0,(%rdx)
.byte 102,15,58,223,200,1
call .Lkey_expansion_128_cold
@@ -2381,9 +3213,79 @@ __aesni_set_encrypt_key:
jmp .Lenc_key_ret
.align 16
+.L10rounds_alt:
+ movdqa .Lkey_rotate(%rip),%xmm5
+ movl $8,%r10d
+ movdqa .Lkey_rcon1(%rip),%xmm4
+ movdqa %xmm0,%xmm2
+ movdqu %xmm0,(%rdx)
+ jmp .Loop_key128
+
+.align 16
+.Loop_key128:
+.byte 102,15,56,0,197
+.byte 102,15,56,221,196
+ pslld $1,%xmm4
+ leaq 16(%rax),%rax
+
+ movdqa %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm3,%xmm2
+
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,-16(%rax)
+ movdqa %xmm0,%xmm2
+
+ decl %r10d
+ jnz .Loop_key128
+
+ movdqa .Lkey_rcon1b(%rip),%xmm4
+
+.byte 102,15,56,0,197
+.byte 102,15,56,221,196
+ pslld $1,%xmm4
+
+ movdqa %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm3,%xmm2
+
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,(%rax)
+
+ movdqa %xmm0,%xmm2
+.byte 102,15,56,0,197
+.byte 102,15,56,221,196
+
+ movdqa %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm3,%xmm2
+
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,16(%rax)
+
+ movl %esi,96(%rax)
+ xorl %eax,%eax
+ jmp .Lenc_key_ret
+
+.align 16
.L12rounds:
movq 16(%rdi),%xmm2
movl $11,%esi
+ cmpl $268435456,%r10d
+ je .L12rounds_alt
+
movups %xmm0,(%rdx)
.byte 102,15,58,223,202,1
call .Lkey_expansion_192a_cold
@@ -2407,10 +3309,54 @@ __aesni_set_encrypt_key:
jmp .Lenc_key_ret
.align 16
+.L12rounds_alt:
+ movdqa .Lkey_rotate192(%rip),%xmm5
+ movdqa .Lkey_rcon1(%rip),%xmm4
+ movl $8,%r10d
+ movdqu %xmm0,(%rdx)
+ jmp .Loop_key192
+
+.align 16
+.Loop_key192:
+ movq %xmm2,0(%rax)
+ movdqa %xmm2,%xmm1
+.byte 102,15,56,0,213
+.byte 102,15,56,221,212
+ pslld $1,%xmm4
+ leaq 24(%rax),%rax
+
+ movdqa %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm3,%xmm0
+
+ pshufd $255,%xmm0,%xmm3
+ pxor %xmm1,%xmm3
+ pslldq $4,%xmm1
+ pxor %xmm1,%xmm3
+
+ pxor %xmm2,%xmm0
+ pxor %xmm3,%xmm2
+ movdqu %xmm0,-16(%rax)
+
+ decl %r10d
+ jnz .Loop_key192
+
+ movl %esi,32(%rax)
+ xorl %eax,%eax
+ jmp .Lenc_key_ret
+
+.align 16
.L14rounds:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
+ cmpl $268435456,%r10d
+ je .L14rounds_alt
+
movups %xmm0,(%rdx)
movups %xmm2,16(%rdx)
.byte 102,15,58,223,202,1
@@ -2445,9 +3391,69 @@ __aesni_set_encrypt_key:
jmp .Lenc_key_ret
.align 16
+.L14rounds_alt:
+ movdqa .Lkey_rotate(%rip),%xmm5
+ movdqa .Lkey_rcon1(%rip),%xmm4
+ movl $7,%r10d
+ movdqu %xmm0,0(%rdx)
+ movdqa %xmm2,%xmm1
+ movdqu %xmm2,16(%rdx)
+ jmp .Loop_key256
+
+.align 16
+.Loop_key256:
+.byte 102,15,56,0,213
+.byte 102,15,56,221,212
+
+ movdqa %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm3,%xmm0
+ pslld $1,%xmm4
+
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,(%rax)
+
+ decl %r10d
+ jz .Ldone_key256
+
+ pshufd $255,%xmm0,%xmm2
+ pxor %xmm3,%xmm3
+.byte 102,15,56,221,211
+
+ movdqa %xmm1,%xmm3
+ pslldq $4,%xmm1
+ pxor %xmm1,%xmm3
+ pslldq $4,%xmm1
+ pxor %xmm1,%xmm3
+ pslldq $4,%xmm1
+ pxor %xmm3,%xmm1
+
+ pxor %xmm1,%xmm2
+ movdqu %xmm2,16(%rax)
+ leaq 32(%rax),%rax
+ movdqa %xmm2,%xmm1
+
+ jmp .Loop_key256
+
+.Ldone_key256:
+ movl %esi,16(%rax)
+ xorl %eax,%eax
+ jmp .Lenc_key_ret
+
+.align 16
.Lbad_keybits:
movq $-2,%rax
.Lenc_key_ret:
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
addq $8,%rsp
.byte 0xf3,0xc3
.LSEH_end_set_encrypt_key:
@@ -2531,6 +3537,16 @@ __aesni_set_encrypt_key:
.long 1,0,0,0
.Lxts_magic:
.long 0x87,0,1,0
+.Lincrement1:
+.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
+.Lkey_rotate:
+.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
+.Lkey_rotate192:
+.long 0x04070605,0x04070605,0x04070605,0x04070605
+.Lkey_rcon1:
+.long 1,1,1,1
+.Lkey_rcon1b:
+.long 0x1b,0x1b,0x1b,0x1b
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 64
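
The `.L10rounds_alt` path added above builds the AES-128 schedule without aeskeygenassist: pshufb with `.Lkey_rotate` broadcasts the rotated top word, aesenclast against the round-constant vector performs the SubWord step, `.Lkey_rcon1`/`.Lkey_rcon1b` with pslld $1 walk the rcon sequence 0x01..0x80, 0x1b, 0x36, and the pslldq/pxor ladder propagates each new word across the round key. A scalar sketch of the schedule it computes, with sub_word() as a hypothetical stand-in for the S-box step:

```c
#include <stdint.h>

/* Hypothetical stand-in for SubWord (AES S-box on each byte); the
 * assembly realizes it with aesenclast on a broadcast word. */
uint32_t sub_word(uint32_t w);

static uint32_t rot_word(uint32_t w) { return (w >> 8) | (w << 24); }

/* Scalar sketch of the AES-128 key schedule built by .Loop_key128. */
void aes128_expand(const uint32_t key[4], uint32_t rk[44])
{
    uint32_t rcon = 0x01;

    for (int i = 0; i < 4; i++)
        rk[i] = key[i];
    for (int i = 4; i < 44; i++) {
        uint32_t t = rk[i - 1];
        if (i % 4 == 0) {
            t = sub_word(rot_word(t)) ^ rcon;
            /* xtime step; matches pslld $1 plus the .Lkey_rcon1b fixup */
            rcon = (rcon << 1) ^ ((rcon & 0x80) ? 0x11b : 0);
        }
        rk[i] = rk[i - 4] ^ t;    /* the pslldq/pxor ladder, unrolled */
    }
}
```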
diff --git a/secure/lib/libcrypto/amd64/bsaes-x86_64.S b/secure/lib/libcrypto/amd64/bsaes-x86_64.S
index 5588ef5..be410de 100644
--- a/secure/lib/libcrypto/amd64/bsaes-x86_64.S
+++ b/secure/lib/libcrypto/amd64/bsaes-x86_64.S
@@ -14,18 +14,18 @@ _bsaes_encrypt8:
movdqa 80(%r11),%xmm7
pxor %xmm8,%xmm15
pxor %xmm8,%xmm0
-.byte 102,68,15,56,0,255
pxor %xmm8,%xmm1
-.byte 102,15,56,0,199
pxor %xmm8,%xmm2
-.byte 102,15,56,0,207
+.byte 102,68,15,56,0,255
+.byte 102,15,56,0,199
pxor %xmm8,%xmm3
-.byte 102,15,56,0,215
pxor %xmm8,%xmm4
-.byte 102,15,56,0,223
+.byte 102,15,56,0,207
+.byte 102,15,56,0,215
pxor %xmm8,%xmm5
-.byte 102,15,56,0,231
pxor %xmm8,%xmm6
+.byte 102,15,56,0,223
+.byte 102,15,56,0,231
.byte 102,15,56,0,239
.byte 102,15,56,0,247
_bsaes_encrypt8_bitslice:
@@ -122,21 +122,21 @@ _bsaes_encrypt8_bitslice:
.Lenc_loop:
pxor 0(%rax),%xmm15
pxor 16(%rax),%xmm0
-.byte 102,68,15,56,0,255
pxor 32(%rax),%xmm1
-.byte 102,15,56,0,199
pxor 48(%rax),%xmm2
-.byte 102,15,56,0,207
+.byte 102,68,15,56,0,255
+.byte 102,15,56,0,199
pxor 64(%rax),%xmm3
-.byte 102,15,56,0,215
pxor 80(%rax),%xmm4
-.byte 102,15,56,0,223
+.byte 102,15,56,0,207
+.byte 102,15,56,0,215
pxor 96(%rax),%xmm5
-.byte 102,15,56,0,231
pxor 112(%rax),%xmm6
+.byte 102,15,56,0,223
+.byte 102,15,56,0,231
.byte 102,15,56,0,239
- leaq 128(%rax),%rax
.byte 102,15,56,0,247
+ leaq 128(%rax),%rax
.Lenc_sbox:
pxor %xmm5,%xmm4
pxor %xmm0,%xmm1
@@ -486,18 +486,18 @@ _bsaes_decrypt8:
movdqa -48(%r11),%xmm7
pxor %xmm8,%xmm15
pxor %xmm8,%xmm0
-.byte 102,68,15,56,0,255
pxor %xmm8,%xmm1
-.byte 102,15,56,0,199
pxor %xmm8,%xmm2
-.byte 102,15,56,0,207
+.byte 102,68,15,56,0,255
+.byte 102,15,56,0,199
pxor %xmm8,%xmm3
-.byte 102,15,56,0,215
pxor %xmm8,%xmm4
-.byte 102,15,56,0,223
+.byte 102,15,56,0,207
+.byte 102,15,56,0,215
pxor %xmm8,%xmm5
-.byte 102,15,56,0,231
pxor %xmm8,%xmm6
+.byte 102,15,56,0,223
+.byte 102,15,56,0,231
.byte 102,15,56,0,239
.byte 102,15,56,0,247
movdqa 0(%r11),%xmm7
@@ -593,21 +593,21 @@ _bsaes_decrypt8:
.Ldec_loop:
pxor 0(%rax),%xmm15
pxor 16(%rax),%xmm0
-.byte 102,68,15,56,0,255
pxor 32(%rax),%xmm1
-.byte 102,15,56,0,199
pxor 48(%rax),%xmm2
-.byte 102,15,56,0,207
+.byte 102,68,15,56,0,255
+.byte 102,15,56,0,199
pxor 64(%rax),%xmm3
-.byte 102,15,56,0,215
pxor 80(%rax),%xmm4
-.byte 102,15,56,0,223
+.byte 102,15,56,0,207
+.byte 102,15,56,0,215
pxor 96(%rax),%xmm5
-.byte 102,15,56,0,231
pxor 112(%rax),%xmm6
+.byte 102,15,56,0,223
+.byte 102,15,56,0,231
.byte 102,15,56,0,239
- leaq 128(%rax),%rax
.byte 102,15,56,0,247
+ leaq 128(%rax),%rax
.Ldec_sbox:
pxor %xmm3,%xmm2
@@ -1285,7 +1285,7 @@ bsaes_cbc_encrypt:
leaq (%r12),%rdi
leaq 32(%rbp),%rsi
leaq (%r15),%rdx
- call asm_AES_decrypt
+ call asm_AES_decrypt
pxor 32(%rbp),%xmm14
movdqu %xmm14,(%r13)
movdqa %xmm15,%xmm14
@@ -1383,21 +1383,21 @@ bsaes_ctr32_encrypt_blocks:
movdqa -16(%r11),%xmm7
pxor %xmm8,%xmm15
pxor %xmm8,%xmm0
-.byte 102,68,15,56,0,255
pxor %xmm8,%xmm1
-.byte 102,15,56,0,199
pxor %xmm8,%xmm2
-.byte 102,15,56,0,207
+.byte 102,68,15,56,0,255
+.byte 102,15,56,0,199
pxor %xmm8,%xmm3
-.byte 102,15,56,0,215
pxor %xmm8,%xmm4
-.byte 102,15,56,0,223
+.byte 102,15,56,0,207
+.byte 102,15,56,0,215
pxor %xmm8,%xmm5
-.byte 102,15,56,0,231
pxor %xmm8,%xmm6
+.byte 102,15,56,0,223
+.byte 102,15,56,0,231
.byte 102,15,56,0,239
- leaq .LBS0(%rip),%r11
.byte 102,15,56,0,247
+ leaq .LBS0(%rip),%r11
movl %ebx,%r10d
call _bsaes_encrypt8_bitslice
@@ -1535,7 +1535,7 @@ bsaes_xts_encrypt:
leaq (%r9),%rdi
leaq 32(%rbp),%rsi
leaq (%r8),%rdx
- call asm_AES_encrypt
+ call asm_AES_encrypt
movl 240(%r15),%eax
movq %r14,%rbx
@@ -1905,7 +1905,7 @@ bsaes_xts_encrypt:
leaq 32(%rbp),%rdi
leaq 32(%rbp),%rsi
leaq (%r15),%rdx
- call asm_AES_encrypt
+ call asm_AES_encrypt
pxor 32(%rbp),%xmm15
@@ -1938,7 +1938,7 @@ bsaes_xts_encrypt:
leaq 32(%rbp),%rsi
movdqa %xmm15,32(%rbp)
leaq (%r15),%rdx
- call asm_AES_encrypt
+ call asm_AES_encrypt
pxor 32(%rbp),%xmm6
movdqu %xmm6,-16(%r13)
@@ -1987,7 +1987,7 @@ bsaes_xts_decrypt:
leaq (%r9),%rdi
leaq 32(%rbp),%rsi
leaq (%r8),%rdx
- call asm_AES_encrypt
+ call asm_AES_encrypt
movl 240(%r15),%eax
movq %r14,%rbx
@@ -2364,7 +2364,7 @@ bsaes_xts_decrypt:
leaq 32(%rbp),%rdi
leaq 32(%rbp),%rsi
leaq (%r15),%rdx
- call asm_AES_decrypt
+ call asm_AES_decrypt
pxor 32(%rbp),%xmm15
@@ -2395,7 +2395,7 @@ bsaes_xts_decrypt:
leaq 32(%rbp),%rsi
movdqa %xmm15,32(%rbp)
leaq (%r15),%rdx
- call asm_AES_decrypt
+ call asm_AES_decrypt
pxor 32(%rbp),%xmm6
movq %r13,%rdx
movdqu %xmm6,(%r13)
@@ -2416,7 +2416,7 @@ bsaes_xts_decrypt:
leaq 32(%rbp),%rsi
movdqa %xmm15,32(%rbp)
leaq (%r15),%rdx
- call asm_AES_decrypt
+ call asm_AES_decrypt
pxor 32(%rbp),%xmm5
movdqu %xmm5,(%r13)
diff --git a/secure/lib/libcrypto/amd64/cmll-x86_64.S b/secure/lib/libcrypto/amd64/cmll-x86_64.S
index f42203c..ecd33f1 100644
--- a/secure/lib/libcrypto/amd64/cmll-x86_64.S
+++ b/secure/lib/libcrypto/amd64/cmll-x86_64.S
@@ -267,7 +267,7 @@ _x86_64_Camellia_encrypt:
movl %ecx,%r10d
movl %edx,%r11d
-.byte 0xf3,0xc3
+.byte 0xf3,0xc3
.size _x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt
@@ -537,7 +537,7 @@ _x86_64_Camellia_decrypt:
movl %eax,%r10d
movl %ebx,%r11d
-.byte 0xf3,0xc3
+.byte 0xf3,0xc3
.size _x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt
.globl Camellia_Ekeygen
.type Camellia_Ekeygen,@function
@@ -550,7 +550,7 @@ Camellia_Ekeygen:
pushq %r15
.Lkey_prologue:
- movq %rdi,%r15
+ movl %edi,%r15d
movq %rdx,%r13
movl 0(%rsi),%r8d
@@ -1724,14 +1724,14 @@ Camellia_cbc_encrypt:
cld
movq %r12,%rsi
leaq 8+24(%rsp),%rdi
-.long 0x9066A4F3
+.long 0x9066A4F3
popfq
.Lcbc_enc_popf:
leaq 24(%rsp),%r12
leaq 16+24(%rsp),%rax
movq %rax,8(%rsp)
- jmp .Lcbc_eloop
+ jmp .Lcbc_eloop
.align 16
.LCBC_DECRYPT:
@@ -1814,7 +1814,7 @@ Camellia_cbc_encrypt:
cld
leaq 8+24(%rsp),%rsi
leaq (%r13),%rdi
-.long 0x9066A4F3
+.long 0x9066A4F3
popfq
.Lcbc_dec_popf:
diff --git a/secure/lib/libcrypto/amd64/ecp_nistz256-x86_64.S b/secure/lib/libcrypto/amd64/ecp_nistz256-x86_64.S
new file mode 100644
index 0000000..c5875d7
--- /dev/null
+++ b/secure/lib/libcrypto/amd64/ecp_nistz256-x86_64.S
@@ -0,0 +1,2005 @@
+ # $FreeBSD$
+.text
+
+
+
+.align 64
+.Lpoly:
+.quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001
+
+
+.LRR:
+.quad 0x0000000000000003, 0xfffffffbffffffff, 0xfffffffffffffffe, 0x00000004fffffffd
+
+.LOne:
+.long 1,1,1,1,1,1,1,1
+.LTwo:
+.long 2,2,2,2,2,2,2,2
+.LThree:
+.long 3,3,3,3,3,3,3,3
+.LONE_mont:
+.quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
+
+.globl ecp_nistz256_mul_by_2
+.type ecp_nistz256_mul_by_2,@function
+.align 64
+ecp_nistz256_mul_by_2:
+ pushq %r12
+ pushq %r13
+
+ movq 0(%rsi),%r8
+ movq 8(%rsi),%r9
+ addq %r8,%r8
+ movq 16(%rsi),%r10
+ adcq %r9,%r9
+ movq 24(%rsi),%r11
+ leaq .Lpoly(%rip),%rsi
+ movq %r8,%rax
+ adcq %r10,%r10
+ adcq %r11,%r11
+ movq %r9,%rdx
+ sbbq %r13,%r13
+
+ subq 0(%rsi),%r8
+ movq %r10,%rcx
+ sbbq 8(%rsi),%r9
+ sbbq 16(%rsi),%r10
+ movq %r11,%r12
+ sbbq 24(%rsi),%r11
+ testq %r13,%r13
+
+ cmovzq %rax,%r8
+ cmovzq %rdx,%r9
+ movq %r8,0(%rdi)
+ cmovzq %rcx,%r10
+ movq %r9,8(%rdi)
+ cmovzq %r12,%r11
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+
+ popq %r13
+ popq %r12
+ .byte 0xf3,0xc3
+.size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
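+
+# All of the small field helpers in this file follow the same branch-free
+# shape seen in ecp_nistz256_mul_by_2 above: compute the raw sum, trial-
+# subtract the prime, and pick the reduced result with cmov rather than a
+# branch. A portable scalar model of that pattern (a sketch, assuming a
+# compiler with unsigned __int128):
+#
+#   /* P-256 prime, little-endian 64-bit limbs, matching .Lpoly above. */
+#   static const uint64_t P256[4] = {
+#       0xffffffffffffffffULL, 0x00000000ffffffffULL,
+#       0x0000000000000000ULL, 0xffffffff00000001ULL
+#   };
+#
+#   /* Mask select at the end plays the role of the sbbq/cmovzq chain. */
+#   void p256_add(uint64_t r[4], const uint64_t a[4], const uint64_t b[4])
+#   {
+#       uint64_t s[4], d[4], carry, borrow = 0;
+#       unsigned __int128 t = 0;
+#
+#       for (int i = 0; i < 4; i++) {              /* s = a + b */
+#           t += (unsigned __int128)a[i] + b[i];
+#           s[i] = (uint64_t)t;
+#           t >>= 64;
+#       }
+#       carry = (uint64_t)t;
+#
+#       for (int i = 0; i < 4; i++) {              /* d = s - p */
+#           unsigned __int128 u = (unsigned __int128)s[i] - P256[i] - borrow;
+#           d[i] = (uint64_t)u;
+#           borrow = (uint64_t)(u >> 64) & 1;
+#       }
+#
+#       /* keep d when the 257-bit sum was >= p: carry out of the add,
+#        * or no borrow out of the subtract */
+#       uint64_t mask = 0 - (carry | (1 - borrow));
+#       for (int i = 0; i < 4; i++)
+#           r[i] = (d[i] & mask) | (s[i] & ~mask);
+#   }
+#
+# Doubling is the same with b = a; sub/neg run the carry chain the other
+# way around.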
+
+
+
+.globl ecp_nistz256_div_by_2
+.type ecp_nistz256_div_by_2,@function
+.align 32
+ecp_nistz256_div_by_2:
+ pushq %r12
+ pushq %r13
+
+ movq 0(%rsi),%r8
+ movq 8(%rsi),%r9
+ movq 16(%rsi),%r10
+ movq %r8,%rax
+ movq 24(%rsi),%r11
+ leaq .Lpoly(%rip),%rsi
+
+ movq %r9,%rdx
+ xorq %r13,%r13
+ addq 0(%rsi),%r8
+ movq %r10,%rcx
+ adcq 8(%rsi),%r9
+ adcq 16(%rsi),%r10
+ movq %r11,%r12
+ adcq 24(%rsi),%r11
+ adcq $0,%r13
+ xorq %rsi,%rsi
+ testq $1,%rax
+
+ cmovzq %rax,%r8
+ cmovzq %rdx,%r9
+ cmovzq %rcx,%r10
+ cmovzq %r12,%r11
+ cmovzq %rsi,%r13
+
+ movq %r9,%rax
+ shrq $1,%r8
+ shlq $63,%rax
+ movq %r10,%rdx
+ shrq $1,%r9
+ orq %rax,%r8
+ shlq $63,%rdx
+ movq %r11,%rcx
+ shrq $1,%r10
+ orq %rdx,%r9
+ shlq $63,%rcx
+ shrq $1,%r11
+ shlq $63,%r13
+ orq %rcx,%r10
+ orq %r13,%r11
+
+ movq %r8,0(%rdi)
+ movq %r9,8(%rdi)
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+
+ popq %r13
+ popq %r12
+ .byte 0xf3,0xc3
+.size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
+
+
+
+.globl ecp_nistz256_mul_by_3
+.type ecp_nistz256_mul_by_3,@function
+.align 32
+ecp_nistz256_mul_by_3:
+ pushq %r12
+ pushq %r13
+
+ movq 0(%rsi),%r8
+ xorq %r13,%r13
+ movq 8(%rsi),%r9
+ addq %r8,%r8
+ movq 16(%rsi),%r10
+ adcq %r9,%r9
+ movq 24(%rsi),%r11
+ movq %r8,%rax
+ adcq %r10,%r10
+ adcq %r11,%r11
+ movq %r9,%rdx
+ adcq $0,%r13
+
+ subq $-1,%r8
+ movq %r10,%rcx
+ sbbq .Lpoly+8(%rip),%r9
+ sbbq $0,%r10
+ movq %r11,%r12
+ sbbq .Lpoly+24(%rip),%r11
+ testq %r13,%r13
+
+ cmovzq %rax,%r8
+ cmovzq %rdx,%r9
+ cmovzq %rcx,%r10
+ cmovzq %r12,%r11
+
+ xorq %r13,%r13
+ addq 0(%rsi),%r8
+ adcq 8(%rsi),%r9
+ movq %r8,%rax
+ adcq 16(%rsi),%r10
+ adcq 24(%rsi),%r11
+ movq %r9,%rdx
+ adcq $0,%r13
+
+ subq $-1,%r8
+ movq %r10,%rcx
+ sbbq .Lpoly+8(%rip),%r9
+ sbbq $0,%r10
+ movq %r11,%r12
+ sbbq .Lpoly+24(%rip),%r11
+ testq %r13,%r13
+
+ cmovzq %rax,%r8
+ cmovzq %rdx,%r9
+ movq %r8,0(%rdi)
+ cmovzq %rcx,%r10
+ movq %r9,8(%rdi)
+ cmovzq %r12,%r11
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+
+ popq %r13
+ popq %r12
+ .byte 0xf3,0xc3
+.size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
+
+
+
+.globl ecp_nistz256_add
+.type ecp_nistz256_add,@function
+.align 32
+ecp_nistz256_add:
+ pushq %r12
+ pushq %r13
+
+ movq 0(%rsi),%r8
+ xorq %r13,%r13
+ movq 8(%rsi),%r9
+ movq 16(%rsi),%r10
+ movq 24(%rsi),%r11
+ leaq .Lpoly(%rip),%rsi
+
+ addq 0(%rdx),%r8
+ adcq 8(%rdx),%r9
+ movq %r8,%rax
+ adcq 16(%rdx),%r10
+ adcq 24(%rdx),%r11
+ movq %r9,%rdx
+ adcq $0,%r13
+
+ subq 0(%rsi),%r8
+ movq %r10,%rcx
+ sbbq 8(%rsi),%r9
+ sbbq 16(%rsi),%r10
+ movq %r11,%r12
+ sbbq 24(%rsi),%r11
+ testq %r13,%r13
+
+ cmovzq %rax,%r8
+ cmovzq %rdx,%r9
+ movq %r8,0(%rdi)
+ cmovzq %rcx,%r10
+ movq %r9,8(%rdi)
+ cmovzq %r12,%r11
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+
+ popq %r13
+ popq %r12
+ .byte 0xf3,0xc3
+.size ecp_nistz256_add,.-ecp_nistz256_add
+
+
+
+.globl ecp_nistz256_sub
+.type ecp_nistz256_sub,@function
+.align 32
+ecp_nistz256_sub:
+ pushq %r12
+ pushq %r13
+
+ movq 0(%rsi),%r8
+ xorq %r13,%r13
+ movq 8(%rsi),%r9
+ movq 16(%rsi),%r10
+ movq 24(%rsi),%r11
+ leaq .Lpoly(%rip),%rsi
+
+ subq 0(%rdx),%r8
+ sbbq 8(%rdx),%r9
+ movq %r8,%rax
+ sbbq 16(%rdx),%r10
+ sbbq 24(%rdx),%r11
+ movq %r9,%rdx
+ sbbq $0,%r13
+
+ addq 0(%rsi),%r8
+ movq %r10,%rcx
+ adcq 8(%rsi),%r9
+ adcq 16(%rsi),%r10
+ movq %r11,%r12
+ adcq 24(%rsi),%r11
+ testq %r13,%r13
+
+ cmovzq %rax,%r8
+ cmovzq %rdx,%r9
+ movq %r8,0(%rdi)
+ cmovzq %rcx,%r10
+ movq %r9,8(%rdi)
+ cmovzq %r12,%r11
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+
+ popq %r13
+ popq %r12
+ .byte 0xf3,0xc3
+.size ecp_nistz256_sub,.-ecp_nistz256_sub
+
+
+
+.globl ecp_nistz256_neg
+.type ecp_nistz256_neg,@function
+.align 32
+ecp_nistz256_neg:
+ pushq %r12
+ pushq %r13
+
+ xorq %r8,%r8
+ xorq %r9,%r9
+ xorq %r10,%r10
+ xorq %r11,%r11
+ xorq %r13,%r13
+
+ subq 0(%rsi),%r8
+ sbbq 8(%rsi),%r9
+ sbbq 16(%rsi),%r10
+ movq %r8,%rax
+ sbbq 24(%rsi),%r11
+ leaq .Lpoly(%rip),%rsi
+ movq %r9,%rdx
+ sbbq $0,%r13
+
+ addq 0(%rsi),%r8
+ movq %r10,%rcx
+ adcq 8(%rsi),%r9
+ adcq 16(%rsi),%r10
+ movq %r11,%r12
+ adcq 24(%rsi),%r11
+ testq %r13,%r13
+
+ cmovzq %rax,%r8
+ cmovzq %rdx,%r9
+ movq %r8,0(%rdi)
+ cmovzq %rcx,%r10
+ movq %r9,8(%rdi)
+ cmovzq %r12,%r11
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+
+ popq %r13
+ popq %r12
+ .byte 0xf3,0xc3
+.size ecp_nistz256_neg,.-ecp_nistz256_neg
+
+
+
+
+.globl ecp_nistz256_to_mont
+.type ecp_nistz256_to_mont,@function
+.align 32
+ecp_nistz256_to_mont:
+ leaq .LRR(%rip),%rdx
+ jmp .Lmul_mont
+.size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
+
+
+
+
+
+
+
+.globl ecp_nistz256_mul_mont
+.type ecp_nistz256_mul_mont,@function
+.align 32
+ecp_nistz256_mul_mont:
+.Lmul_mont:
+ pushq %rbp
+ pushq %rbx
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ movq %rdx,%rbx
+ movq 0(%rdx),%rax
+ movq 0(%rsi),%r9
+ movq 8(%rsi),%r10
+ movq 16(%rsi),%r11
+ movq 24(%rsi),%r12
+
+ call __ecp_nistz256_mul_montq
+.Lmul_mont_done:
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbx
+ popq %rbp
+ .byte 0xf3,0xc3
+.size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
+
+.type __ecp_nistz256_mul_montq,@function
+.align 32
+__ecp_nistz256_mul_montq:
+
+
+ movq %rax,%rbp
+ mulq %r9
+ movq .Lpoly+8(%rip),%r14
+ movq %rax,%r8
+ movq %rbp,%rax
+ movq %rdx,%r9
+
+ mulq %r10
+ movq .Lpoly+24(%rip),%r15
+ addq %rax,%r9
+ movq %rbp,%rax
+ adcq $0,%rdx
+ movq %rdx,%r10
+
+ mulq %r11
+ addq %rax,%r10
+ movq %rbp,%rax
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %r12
+ addq %rax,%r11
+ movq %r8,%rax
+ adcq $0,%rdx
+ xorq %r13,%r13
+ movq %rdx,%r12
+
+
+
+
+
+
+
+
+
+
+ movq %r8,%rbp
+ shlq $32,%r8
+ mulq %r15
+ shrq $32,%rbp
+ addq %r8,%r9
+ adcq %rbp,%r10
+ adcq %rax,%r11
+ movq 8(%rbx),%rax
+ adcq %rdx,%r12
+ adcq $0,%r13
+ xorq %r8,%r8
+
+
+
+ movq %rax,%rbp
+ mulq 0(%rsi)
+ addq %rax,%r9
+ movq %rbp,%rax
+ adcq $0,%rdx
+ movq %rdx,%rcx
+
+ mulq 8(%rsi)
+ addq %rcx,%r10
+ adcq $0,%rdx
+ addq %rax,%r10
+ movq %rbp,%rax
+ adcq $0,%rdx
+ movq %rdx,%rcx
+
+ mulq 16(%rsi)
+ addq %rcx,%r11
+ adcq $0,%rdx
+ addq %rax,%r11
+ movq %rbp,%rax
+ adcq $0,%rdx
+ movq %rdx,%rcx
+
+ mulq 24(%rsi)
+ addq %rcx,%r12
+ adcq $0,%rdx
+ addq %rax,%r12
+ movq %r9,%rax
+ adcq %rdx,%r13
+ adcq $0,%r8
+
+
+
+ movq %r9,%rbp
+ shlq $32,%r9
+ mulq %r15
+ shrq $32,%rbp
+ addq %r9,%r10
+ adcq %rbp,%r11
+ adcq %rax,%r12
+ movq 16(%rbx),%rax
+ adcq %rdx,%r13
+ adcq $0,%r8
+ xorq %r9,%r9
+
+
+
+ movq %rax,%rbp
+ mulq 0(%rsi)
+ addq %rax,%r10
+ movq %rbp,%rax
+ adcq $0,%rdx
+ movq %rdx,%rcx
+
+ mulq 8(%rsi)
+ addq %rcx,%r11
+ adcq $0,%rdx
+ addq %rax,%r11
+ movq %rbp,%rax
+ adcq $0,%rdx
+ movq %rdx,%rcx
+
+ mulq 16(%rsi)
+ addq %rcx,%r12
+ adcq $0,%rdx
+ addq %rax,%r12
+ movq %rbp,%rax
+ adcq $0,%rdx
+ movq %rdx,%rcx
+
+ mulq 24(%rsi)
+ addq %rcx,%r13
+ adcq $0,%rdx
+ addq %rax,%r13
+ movq %r10,%rax
+ adcq %rdx,%r8
+ adcq $0,%r9
+
+
+
+ movq %r10,%rbp
+ shlq $32,%r10
+ mulq %r15
+ shrq $32,%rbp
+ addq %r10,%r11
+ adcq %rbp,%r12
+ adcq %rax,%r13
+ movq 24(%rbx),%rax
+ adcq %rdx,%r8
+ adcq $0,%r9
+ xorq %r10,%r10
+
+
+
+ movq %rax,%rbp
+ mulq 0(%rsi)
+ addq %rax,%r11
+ movq %rbp,%rax
+ adcq $0,%rdx
+ movq %rdx,%rcx
+
+ mulq 8(%rsi)
+ addq %rcx,%r12
+ adcq $0,%rdx
+ addq %rax,%r12
+ movq %rbp,%rax
+ adcq $0,%rdx
+ movq %rdx,%rcx
+
+ mulq 16(%rsi)
+ addq %rcx,%r13
+ adcq $0,%rdx
+ addq %rax,%r13
+ movq %rbp,%rax
+ adcq $0,%rdx
+ movq %rdx,%rcx
+
+ mulq 24(%rsi)
+ addq %rcx,%r8
+ adcq $0,%rdx
+ addq %rax,%r8
+ movq %r11,%rax
+ adcq %rdx,%r9
+ adcq $0,%r10
+
+
+
+ movq %r11,%rbp
+ shlq $32,%r11
+ mulq %r15
+ shrq $32,%rbp
+ addq %r11,%r12
+ adcq %rbp,%r13
+ movq %r12,%rcx
+ adcq %rax,%r8
+ adcq %rdx,%r9
+ movq %r13,%rbp
+ adcq $0,%r10
+
+
+
+ subq $-1,%r12
+ movq %r8,%rbx
+ sbbq %r14,%r13
+ sbbq $0,%r8
+ movq %r9,%rdx
+ sbbq %r15,%r9
+ sbbq $0,%r10
+
+ cmovcq %rcx,%r12
+ cmovcq %rbp,%r13
+ movq %r12,0(%rdi)
+ cmovcq %rbx,%r8
+ movq %r13,8(%rdi)
+ cmovcq %rdx,%r9
+ movq %r8,16(%rdi)
+ movq %r9,24(%rdi)
+
+ .byte 0xf3,0xc3
+.size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq
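+
+# For reference, the shlq $32/shrq $32 pairs and the single mulq by
+# .Lpoly+24 in __ecp_nistz256_mul_montq implement one word-by-word
+# Montgomery reduction step, exploiting the special form of the P-256
+# prime. The identity being used, in LaTeX:
+#
+#   p = 2^{256} - 2^{224} + 2^{192} + 2^{96} - 1
+#     \;\Longrightarrow\; p \equiv -1 \pmod{2^{64}},
+#   \quad \mu = -p^{-1} \bmod 2^{64} = 1,
+#
+#   t \;\leftarrow\; \frac{t + t_0\,p}{2^{64}},
+#   \qquad
+#   t_0\,p = t_0 2^{256} - t_0 2^{224} + t_0 2^{192} + t_0 2^{96} - t_0 .
+#
+# So the per-limb factor is the accumulator's low limb t_0 itself; only
+# the top limb of p (0xffffffff00000001, i.e. the 2^{256}-2^{224}+2^{192}
+# part) needs the real 64x64 mulq, the 2^{96} term comes from the 32-bit
+# shifts, and the low limb cancels, zeroing t_0.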
+
+
+
+
+
+
+
+
+.globl ecp_nistz256_sqr_mont
+.type ecp_nistz256_sqr_mont,@function
+.align 32
+ecp_nistz256_sqr_mont:
+ pushq %rbp
+ pushq %rbx
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ movq 0(%rsi),%rax
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r15
+ movq 24(%rsi),%r8
+
+ call __ecp_nistz256_sqr_montq
+.Lsqr_mont_done:
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbx
+ popq %rbp
+ .byte 0xf3,0xc3
+.size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
+
+.type __ecp_nistz256_sqr_montq,@function
+.align 32
+__ecp_nistz256_sqr_montq:
+ movq %rax,%r13
+ mulq %r14
+ movq %rax,%r9
+ movq %r15,%rax
+ movq %rdx,%r10
+
+ mulq %r13
+ addq %rax,%r10
+ movq %r8,%rax
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %r13
+ addq %rax,%r11
+ movq %r15,%rax
+ adcq $0,%rdx
+ movq %rdx,%r12
+
+
+ mulq %r14
+ addq %rax,%r11
+ movq %r8,%rax
+ adcq $0,%rdx
+ movq %rdx,%rbp
+
+ mulq %r14
+ addq %rax,%r12
+ movq %r8,%rax
+ adcq $0,%rdx
+ addq %rbp,%r12
+ movq %rdx,%r13
+ adcq $0,%r13
+
+
+ mulq %r15
+ xorq %r15,%r15
+ addq %rax,%r13
+ movq 0(%rsi),%rax
+ movq %rdx,%r14
+ adcq $0,%r14
+
+ addq %r9,%r9
+ adcq %r10,%r10
+ adcq %r11,%r11
+ adcq %r12,%r12
+ adcq %r13,%r13
+ adcq %r14,%r14
+ adcq $0,%r15
+
+ mulq %rax
+ movq %rax,%r8
+ movq 8(%rsi),%rax
+ movq %rdx,%rcx
+
+ mulq %rax
+ addq %rcx,%r9
+ adcq %rax,%r10
+ movq 16(%rsi),%rax
+ adcq $0,%rdx
+ movq %rdx,%rcx
+
+ mulq %rax
+ addq %rcx,%r11
+ adcq %rax,%r12
+ movq 24(%rsi),%rax
+ adcq $0,%rdx
+ movq %rdx,%rcx
+
+ mulq %rax
+ addq %rcx,%r13
+ adcq %rax,%r14
+ movq %r8,%rax
+ adcq %rdx,%r15
+
+ movq .Lpoly+8(%rip),%rsi
+ movq .Lpoly+24(%rip),%rbp
+
+
+
+
+ movq %r8,%rcx
+ shlq $32,%r8
+ mulq %rbp
+ shrq $32,%rcx
+ addq %r8,%r9
+ adcq %rcx,%r10
+ adcq %rax,%r11
+ movq %r9,%rax
+ adcq $0,%rdx
+
+
+
+ movq %r9,%rcx
+ shlq $32,%r9
+ movq %rdx,%r8
+ mulq %rbp
+ shrq $32,%rcx
+ addq %r9,%r10
+ adcq %rcx,%r11
+ adcq %rax,%r8
+ movq %r10,%rax
+ adcq $0,%rdx
+
+
+
+ movq %r10,%rcx
+ shlq $32,%r10
+ movq %rdx,%r9
+ mulq %rbp
+ shrq $32,%rcx
+ addq %r10,%r11
+ adcq %rcx,%r8
+ adcq %rax,%r9
+ movq %r11,%rax
+ adcq $0,%rdx
+
+
+
+ movq %r11,%rcx
+ shlq $32,%r11
+ movq %rdx,%r10
+ mulq %rbp
+ shrq $32,%rcx
+ addq %r11,%r8
+ adcq %rcx,%r9
+ adcq %rax,%r10
+ adcq $0,%rdx
+ xorq %r11,%r11
+
+
+
+ addq %r8,%r12
+ adcq %r9,%r13
+ movq %r12,%r8
+ adcq %r10,%r14
+ adcq %rdx,%r15
+ movq %r13,%r9
+ adcq $0,%r11
+
+ subq $-1,%r12
+ movq %r14,%r10
+ sbbq %rsi,%r13
+ sbbq $0,%r14
+ movq %r15,%rcx
+ sbbq %rbp,%r15
+ sbbq $0,%r11
+
+ cmovcq %r8,%r12
+ cmovcq %r9,%r13
+ movq %r12,0(%rdi)
+ cmovcq %r10,%r14
+ movq %r13,8(%rdi)
+ cmovcq %rcx,%r15
+ movq %r14,16(%rdi)
+ movq %r15,24(%rdi)
+
+ .byte 0xf3,0xc3
+.size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq
+
+
+
+
+
+
+.globl ecp_nistz256_from_mont
+.type ecp_nistz256_from_mont,@function
+.align 32
+ecp_nistz256_from_mont:
+ pushq %r12
+ pushq %r13
+
+ movq 0(%rsi),%rax
+ movq .Lpoly+24(%rip),%r13
+ movq 8(%rsi),%r9
+ movq 16(%rsi),%r10
+ movq 24(%rsi),%r11
+ movq %rax,%r8
+ movq .Lpoly+8(%rip),%r12
+
+
+
+ movq %rax,%rcx
+ shlq $32,%r8
+ mulq %r13
+ shrq $32,%rcx
+ addq %r8,%r9
+ adcq %rcx,%r10
+ adcq %rax,%r11
+ movq %r9,%rax
+ adcq $0,%rdx
+
+
+
+ movq %r9,%rcx
+ shlq $32,%r9
+ movq %rdx,%r8
+ mulq %r13
+ shrq $32,%rcx
+ addq %r9,%r10
+ adcq %rcx,%r11
+ adcq %rax,%r8
+ movq %r10,%rax
+ adcq $0,%rdx
+
+
+
+ movq %r10,%rcx
+ shlq $32,%r10
+ movq %rdx,%r9
+ mulq %r13
+ shrq $32,%rcx
+ addq %r10,%r11
+ adcq %rcx,%r8
+ adcq %rax,%r9
+ movq %r11,%rax
+ adcq $0,%rdx
+
+
+
+ movq %r11,%rcx
+ shlq $32,%r11
+ movq %rdx,%r10
+ mulq %r13
+ shrq $32,%rcx
+ addq %r11,%r8
+ adcq %rcx,%r9
+ movq %r8,%rcx
+ adcq %rax,%r10
+ movq %r9,%rsi
+ adcq $0,%rdx
+
+
+
+ subq $-1,%r8
+ movq %r10,%rax
+ sbbq %r12,%r9
+ sbbq $0,%r10
+ movq %rdx,%r11
+ sbbq %r13,%rdx
+ sbbq %r13,%r13
+
+ cmovnzq %rcx,%r8
+ cmovnzq %rsi,%r9
+ movq %r8,0(%rdi)
+ cmovnzq %rax,%r10
+ movq %r9,8(%rdi)
+ cmovzq %rdx,%r11
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+
+ popq %r13
+ popq %r12
+ .byte 0xf3,0xc3
+.size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
+
+
+.globl ecp_nistz256_select_w5
+.type ecp_nistz256_select_w5,@function
+.align 32
+ecp_nistz256_select_w5:
+ movdqa .LOne(%rip),%xmm0
+ movd %edx,%xmm1
+
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
+
+ movdqa %xmm0,%xmm8
+ pshufd $0,%xmm1,%xmm1
+
+ movq $16,%rax
+.Lselect_loop_sse_w5:
+
+ movdqa %xmm8,%xmm15
+ paddd %xmm0,%xmm8
+ pcmpeqd %xmm1,%xmm15
+
+ movdqa 0(%rsi),%xmm9
+ movdqa 16(%rsi),%xmm10
+ movdqa 32(%rsi),%xmm11
+ movdqa 48(%rsi),%xmm12
+ movdqa 64(%rsi),%xmm13
+ movdqa 80(%rsi),%xmm14
+ leaq 96(%rsi),%rsi
+
+ pand %xmm15,%xmm9
+ pand %xmm15,%xmm10
+ por %xmm9,%xmm2
+ pand %xmm15,%xmm11
+ por %xmm10,%xmm3
+ pand %xmm15,%xmm12
+ por %xmm11,%xmm4
+ pand %xmm15,%xmm13
+ por %xmm12,%xmm5
+ pand %xmm15,%xmm14
+ por %xmm13,%xmm6
+ por %xmm14,%xmm7
+
+ decq %rax
+ jnz .Lselect_loop_sse_w5
+
+ movdqu %xmm2,0(%rdi)
+ movdqu %xmm3,16(%rdi)
+ movdqu %xmm4,32(%rdi)
+ movdqu %xmm5,48(%rdi)
+ movdqu %xmm6,64(%rdi)
+ movdqu %xmm7,80(%rdi)
+ .byte 0xf3,0xc3
+.size ecp_nistz256_select_w5,.-ecp_nistz256_select_w5
+
+
+
+.globl ecp_nistz256_select_w7
+.type ecp_nistz256_select_w7,@function
+.align 32
+ecp_nistz256_select_w7:
+ movdqa .LOne(%rip),%xmm8
+ movd %edx,%xmm1
+
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+
+ movdqa %xmm8,%xmm0
+ pshufd $0,%xmm1,%xmm1
+ movq $64,%rax
+
+.Lselect_loop_sse_w7:
+ movdqa %xmm8,%xmm15
+ paddd %xmm0,%xmm8
+ movdqa 0(%rsi),%xmm9
+ movdqa 16(%rsi),%xmm10
+ pcmpeqd %xmm1,%xmm15
+ movdqa 32(%rsi),%xmm11
+ movdqa 48(%rsi),%xmm12
+ leaq 64(%rsi),%rsi
+
+ pand %xmm15,%xmm9
+ pand %xmm15,%xmm10
+ por %xmm9,%xmm2
+ pand %xmm15,%xmm11
+ por %xmm10,%xmm3
+ pand %xmm15,%xmm12
+ por %xmm11,%xmm4
+ prefetcht0 255(%rsi)
+ por %xmm12,%xmm5
+
+ decq %rax
+ jnz .Lselect_loop_sse_w7
+
+ movdqu %xmm2,0(%rdi)
+ movdqu %xmm3,16(%rdi)
+ movdqu %xmm4,32(%rdi)
+ movdqu %xmm5,48(%rdi)
+ .byte 0xf3,0xc3
+.size ecp_nistz256_select_w7,.-ecp_nistz256_select_w7
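+
+# Both select routines above read every table entry and accumulate the
+# wanted one through pcmpeqd/pand/por, so the memory access pattern does
+# not depend on the secret index. A functional scalar model of
+# ecp_nistz256_select_w7 (64 entries of 64 bytes, 1-based index, index 0
+# yields all zeros):
+#
+#   /* Every entry is touched; a mask (the pcmpeqd result) decides
+#    * what is kept. */
+#   void select_w7(uint8_t out[64], const uint8_t table[64][64],
+#                  unsigned idx)
+#   {
+#       for (size_t i = 0; i < 64; i++)
+#           out[i] = 0;
+#       for (unsigned k = 1; k <= 64; k++) {
+#           uint8_t mask = (uint8_t)(0 - (uint64_t)(k == idx));
+#           for (size_t i = 0; i < 64; i++)
+#               out[i] |= table[k - 1][i] & mask;
+#       }
+#   }
+#
+# Whether a C compiler preserves the branch-free comparison is not
+# guaranteed; the assembly avoids the question by using pcmpeqd directly.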
+.globl ecp_nistz256_avx2_select_w7
+.type ecp_nistz256_avx2_select_w7,@function
+.align 32
+ecp_nistz256_avx2_select_w7:
+.byte 0x0f,0x0b
+ .byte 0xf3,0xc3
+.size ecp_nistz256_avx2_select_w7,.-ecp_nistz256_avx2_select_w7
+.type __ecp_nistz256_add_toq,@function
+.align 32
+__ecp_nistz256_add_toq:
+ addq 0(%rbx),%r12
+ adcq 8(%rbx),%r13
+ movq %r12,%rax
+ adcq 16(%rbx),%r8
+ adcq 24(%rbx),%r9
+ movq %r13,%rbp
+ sbbq %r11,%r11
+
+ subq $-1,%r12
+ movq %r8,%rcx
+ sbbq %r14,%r13
+ sbbq $0,%r8
+ movq %r9,%r10
+ sbbq %r15,%r9
+ testq %r11,%r11
+
+ cmovzq %rax,%r12
+ cmovzq %rbp,%r13
+ movq %r12,0(%rdi)
+ cmovzq %rcx,%r8
+ movq %r13,8(%rdi)
+ cmovzq %r10,%r9
+ movq %r8,16(%rdi)
+ movq %r9,24(%rdi)
+
+ .byte 0xf3,0xc3
+.size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq
+
+.type __ecp_nistz256_sub_fromq,@function
+.align 32
+__ecp_nistz256_sub_fromq:
+ subq 0(%rbx),%r12
+ sbbq 8(%rbx),%r13
+ movq %r12,%rax
+ sbbq 16(%rbx),%r8
+ sbbq 24(%rbx),%r9
+ movq %r13,%rbp
+ sbbq %r11,%r11
+
+ addq $-1,%r12
+ movq %r8,%rcx
+ adcq %r14,%r13
+ adcq $0,%r8
+ movq %r9,%r10
+ adcq %r15,%r9
+ testq %r11,%r11
+
+ cmovzq %rax,%r12
+ cmovzq %rbp,%r13
+ movq %r12,0(%rdi)
+ cmovzq %rcx,%r8
+ movq %r13,8(%rdi)
+ cmovzq %r10,%r9
+ movq %r8,16(%rdi)
+ movq %r9,24(%rdi)
+
+ .byte 0xf3,0xc3
+.size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq
+
+.type __ecp_nistz256_subq,@function
+.align 32
+__ecp_nistz256_subq:
+ subq %r12,%rax
+ sbbq %r13,%rbp
+ movq %rax,%r12
+ sbbq %r8,%rcx
+ sbbq %r9,%r10
+ movq %rbp,%r13
+ sbbq %r11,%r11
+
+ addq $-1,%rax
+ movq %rcx,%r8
+ adcq %r14,%rbp
+ adcq $0,%rcx
+ movq %r10,%r9
+ adcq %r15,%r10
+ testq %r11,%r11
+
+ cmovnzq %rax,%r12
+ cmovnzq %rbp,%r13
+ cmovnzq %rcx,%r8
+ cmovnzq %r10,%r9
+
+ .byte 0xf3,0xc3
+.size __ecp_nistz256_subq,.-__ecp_nistz256_subq
+
+.type __ecp_nistz256_mul_by_2q,@function
+.align 32
+__ecp_nistz256_mul_by_2q:
+ addq %r12,%r12
+ adcq %r13,%r13
+ movq %r12,%rax
+ adcq %r8,%r8
+ adcq %r9,%r9
+ movq %r13,%rbp
+ sbbq %r11,%r11
+
+ subq $-1,%r12
+ movq %r8,%rcx
+ sbbq %r14,%r13
+ sbbq $0,%r8
+ movq %r9,%r10
+ sbbq %r15,%r9
+ testq %r11,%r11
+
+ cmovzq %rax,%r12
+ cmovzq %rbp,%r13
+ movq %r12,0(%rdi)
+ cmovzq %rcx,%r8
+ movq %r13,8(%rdi)
+ cmovzq %r10,%r9
+ movq %r8,16(%rdi)
+ movq %r9,24(%rdi)
+
+ .byte 0xf3,0xc3
+.size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q
+.globl ecp_nistz256_point_double
+.type ecp_nistz256_point_double,@function
+.align 32
+ecp_nistz256_point_double:
+ pushq %rbp
+ pushq %rbx
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ subq $160+8,%rsp
+
+ movdqu 0(%rsi),%xmm0
+ movq %rsi,%rbx
+ movdqu 16(%rsi),%xmm1
+ movq 32+0(%rsi),%r12
+ movq 32+8(%rsi),%r13
+ movq 32+16(%rsi),%r8
+ movq 32+24(%rsi),%r9
+ movq .Lpoly+8(%rip),%r14
+ movq .Lpoly+24(%rip),%r15
+ movdqa %xmm0,96(%rsp)
+ movdqa %xmm1,96+16(%rsp)
+ leaq 32(%rdi),%r10
+ leaq 64(%rdi),%r11
+.byte 102,72,15,110,199
+.byte 102,73,15,110,202
+.byte 102,73,15,110,211
+
+ leaq 0(%rsp),%rdi
+ call __ecp_nistz256_mul_by_2q
+
+ movq 64+0(%rsi),%rax
+ movq 64+8(%rsi),%r14
+ movq 64+16(%rsi),%r15
+ movq 64+24(%rsi),%r8
+ leaq 64-0(%rsi),%rsi
+ leaq 64(%rsp),%rdi
+ call __ecp_nistz256_sqr_montq
+
+ movq 0+0(%rsp),%rax
+ movq 8+0(%rsp),%r14
+ leaq 0+0(%rsp),%rsi
+ movq 16+0(%rsp),%r15
+ movq 24+0(%rsp),%r8
+ leaq 0(%rsp),%rdi
+ call __ecp_nistz256_sqr_montq
+
+ movq 32(%rbx),%rax
+ movq 64+0(%rbx),%r9
+ movq 64+8(%rbx),%r10
+ movq 64+16(%rbx),%r11
+ movq 64+24(%rbx),%r12
+ leaq 64-0(%rbx),%rsi
+ leaq 32(%rbx),%rbx
+.byte 102,72,15,126,215
+ call __ecp_nistz256_mul_montq
+ call __ecp_nistz256_mul_by_2q
+
+ movq 96+0(%rsp),%r12
+ movq 96+8(%rsp),%r13
+ leaq 64(%rsp),%rbx
+ movq 96+16(%rsp),%r8
+ movq 96+24(%rsp),%r9
+ leaq 32(%rsp),%rdi
+ call __ecp_nistz256_add_toq
+
+ movq 96+0(%rsp),%r12
+ movq 96+8(%rsp),%r13
+ leaq 64(%rsp),%rbx
+ movq 96+16(%rsp),%r8
+ movq 96+24(%rsp),%r9
+ leaq 64(%rsp),%rdi
+ call __ecp_nistz256_sub_fromq
+
+ movq 0+0(%rsp),%rax
+ movq 8+0(%rsp),%r14
+ leaq 0+0(%rsp),%rsi
+ movq 16+0(%rsp),%r15
+ movq 24+0(%rsp),%r8
+.byte 102,72,15,126,207
+ call __ecp_nistz256_sqr_montq
+ xorq %r9,%r9
+ movq %r12,%rax
+ addq $-1,%r12
+ movq %r13,%r10
+ adcq %rsi,%r13
+ movq %r14,%rcx
+ adcq $0,%r14
+ movq %r15,%r8
+ adcq %rbp,%r15
+ adcq $0,%r9
+ xorq %rsi,%rsi
+ testq $1,%rax
+
+ cmovzq %rax,%r12
+ cmovzq %r10,%r13
+ cmovzq %rcx,%r14
+ cmovzq %r8,%r15
+ cmovzq %rsi,%r9
+
+ movq %r13,%rax
+ shrq $1,%r12
+ shlq $63,%rax
+ movq %r14,%r10
+ shrq $1,%r13
+ orq %rax,%r12
+ shlq $63,%r10
+ movq %r15,%rcx
+ shrq $1,%r14
+ orq %r10,%r13
+ shlq $63,%rcx
+ movq %r12,0(%rdi)
+ shrq $1,%r15
+ movq %r13,8(%rdi)
+ shlq $63,%r9
+ orq %rcx,%r14
+ orq %r9,%r15
+ movq %r14,16(%rdi)
+ movq %r15,24(%rdi)
+ movq 64(%rsp),%rax
+ leaq 64(%rsp),%rbx
+ movq 0+32(%rsp),%r9
+ movq 8+32(%rsp),%r10
+ leaq 0+32(%rsp),%rsi
+ movq 16+32(%rsp),%r11
+ movq 24+32(%rsp),%r12
+ leaq 32(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ leaq 128(%rsp),%rdi
+ call __ecp_nistz256_mul_by_2q
+
+ leaq 32(%rsp),%rbx
+ leaq 32(%rsp),%rdi
+ call __ecp_nistz256_add_toq
+
+ movq 96(%rsp),%rax
+ leaq 96(%rsp),%rbx
+ movq 0+0(%rsp),%r9
+ movq 8+0(%rsp),%r10
+ leaq 0+0(%rsp),%rsi
+ movq 16+0(%rsp),%r11
+ movq 24+0(%rsp),%r12
+ leaq 0(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ leaq 128(%rsp),%rdi
+ call __ecp_nistz256_mul_by_2q
+
+ movq 0+32(%rsp),%rax
+ movq 8+32(%rsp),%r14
+ leaq 0+32(%rsp),%rsi
+ movq 16+32(%rsp),%r15
+ movq 24+32(%rsp),%r8
+.byte 102,72,15,126,199
+ call __ecp_nistz256_sqr_montq
+
+ leaq 128(%rsp),%rbx
+ movq %r14,%r8
+ movq %r15,%r9
+ movq %rsi,%r14
+ movq %rbp,%r15
+ call __ecp_nistz256_sub_fromq
+
+ movq 0+0(%rsp),%rax
+ movq 0+8(%rsp),%rbp
+ movq 0+16(%rsp),%rcx
+ movq 0+24(%rsp),%r10
+ leaq 0(%rsp),%rdi
+ call __ecp_nistz256_subq
+
+ movq 32(%rsp),%rax
+ leaq 32(%rsp),%rbx
+ movq %r12,%r14
+ xorl %ecx,%ecx
+ movq %r12,0+0(%rsp)
+ movq %r13,%r10
+ movq %r13,0+8(%rsp)
+ cmovzq %r8,%r11
+ movq %r8,0+16(%rsp)
+ leaq 0-0(%rsp),%rsi
+ cmovzq %r9,%r12
+ movq %r9,0+24(%rsp)
+ movq %r14,%r9
+ leaq 0(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+.byte 102,72,15,126,203
+.byte 102,72,15,126,207
+ call __ecp_nistz256_sub_fromq
+
+ addq $160+8,%rsp
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbx
+ popq %rbp
+ .byte 0xf3,0xc3
+.size ecp_nistz256_point_double,.-ecp_nistz256_point_double
+.globl ecp_nistz256_point_add
+.type ecp_nistz256_point_add,@function
+.align 32
+ecp_nistz256_point_add:
+ pushq %rbp
+ pushq %rbx
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ subq $576+8,%rsp
+
+ movdqu 0(%rsi),%xmm0
+ movdqu 16(%rsi),%xmm1
+ movdqu 32(%rsi),%xmm2
+ movdqu 48(%rsi),%xmm3
+ movdqu 64(%rsi),%xmm4
+ movdqu 80(%rsi),%xmm5
+ movq %rsi,%rbx
+ movq %rdx,%rsi
+ movdqa %xmm0,384(%rsp)
+ movdqa %xmm1,384+16(%rsp)
+ por %xmm0,%xmm1
+ movdqa %xmm2,416(%rsp)
+ movdqa %xmm3,416+16(%rsp)
+ por %xmm2,%xmm3
+ movdqa %xmm4,448(%rsp)
+ movdqa %xmm5,448+16(%rsp)
+ por %xmm1,%xmm3
+
+ movdqu 0(%rsi),%xmm0
+ pshufd $177,%xmm3,%xmm5
+ movdqu 16(%rsi),%xmm1
+ movdqu 32(%rsi),%xmm2
+ por %xmm3,%xmm5
+ movdqu 48(%rsi),%xmm3
+ movq 64+0(%rsi),%rax
+ movq 64+8(%rsi),%r14
+ movq 64+16(%rsi),%r15
+ movq 64+24(%rsi),%r8
+ movdqa %xmm0,480(%rsp)
+ pshufd $30,%xmm5,%xmm4
+ movdqa %xmm1,480+16(%rsp)
+ por %xmm0,%xmm1
+.byte 102,72,15,110,199
+ movdqa %xmm2,512(%rsp)
+ movdqa %xmm3,512+16(%rsp)
+ por %xmm2,%xmm3
+ por %xmm4,%xmm5
+ pxor %xmm4,%xmm4
+ por %xmm1,%xmm3
+
+ leaq 64-0(%rsi),%rsi
+ movq %rax,544+0(%rsp)
+ movq %r14,544+8(%rsp)
+ movq %r15,544+16(%rsp)
+ movq %r8,544+24(%rsp)
+ leaq 96(%rsp),%rdi
+ call __ecp_nistz256_sqr_montq
+
+ pcmpeqd %xmm4,%xmm5
+ pshufd $177,%xmm3,%xmm4
+ por %xmm3,%xmm4
+ pshufd $0,%xmm5,%xmm5
+ pshufd $30,%xmm4,%xmm3
+ por %xmm3,%xmm4
+ pxor %xmm3,%xmm3
+ pcmpeqd %xmm3,%xmm4
+ pshufd $0,%xmm4,%xmm4
+ movq 64+0(%rbx),%rax
+ movq 64+8(%rbx),%r14
+ movq 64+16(%rbx),%r15
+ movq 64+24(%rbx),%r8
+
+ leaq 64-0(%rbx),%rsi
+ leaq 32(%rsp),%rdi
+ call __ecp_nistz256_sqr_montq
+
+ movq 544(%rsp),%rax
+ leaq 544(%rsp),%rbx
+ movq 0+96(%rsp),%r9
+ movq 8+96(%rsp),%r10
+ leaq 0+96(%rsp),%rsi
+ movq 16+96(%rsp),%r11
+ movq 24+96(%rsp),%r12
+ leaq 224(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ movq 448(%rsp),%rax
+ leaq 448(%rsp),%rbx
+ movq 0+32(%rsp),%r9
+ movq 8+32(%rsp),%r10
+ leaq 0+32(%rsp),%rsi
+ movq 16+32(%rsp),%r11
+ movq 24+32(%rsp),%r12
+ leaq 256(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ movq 416(%rsp),%rax
+ leaq 416(%rsp),%rbx
+ movq 0+224(%rsp),%r9
+ movq 8+224(%rsp),%r10
+ leaq 0+224(%rsp),%rsi
+ movq 16+224(%rsp),%r11
+ movq 24+224(%rsp),%r12
+ leaq 224(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ movq 512(%rsp),%rax
+ leaq 512(%rsp),%rbx
+ movq 0+256(%rsp),%r9
+ movq 8+256(%rsp),%r10
+ leaq 0+256(%rsp),%rsi
+ movq 16+256(%rsp),%r11
+ movq 24+256(%rsp),%r12
+ leaq 256(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ leaq 224(%rsp),%rbx
+ leaq 64(%rsp),%rdi
+ call __ecp_nistz256_sub_fromq
+
+ orq %r13,%r12
+ movdqa %xmm4,%xmm2
+ orq %r8,%r12
+ orq %r9,%r12
+ por %xmm5,%xmm2
+.byte 102,73,15,110,220
+
+ movq 384(%rsp),%rax
+ leaq 384(%rsp),%rbx
+ movq 0+96(%rsp),%r9
+ movq 8+96(%rsp),%r10
+ leaq 0+96(%rsp),%rsi
+ movq 16+96(%rsp),%r11
+ movq 24+96(%rsp),%r12
+ leaq 160(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ movq 480(%rsp),%rax
+ leaq 480(%rsp),%rbx
+ movq 0+32(%rsp),%r9
+ movq 8+32(%rsp),%r10
+ leaq 0+32(%rsp),%rsi
+ movq 16+32(%rsp),%r11
+ movq 24+32(%rsp),%r12
+ leaq 192(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ leaq 160(%rsp),%rbx
+ leaq 0(%rsp),%rdi
+ call __ecp_nistz256_sub_fromq
+
+ orq %r13,%r12
+ orq %r8,%r12
+ orq %r9,%r12
+
+.byte 0x3e
+ jnz .Ladd_proceedq
+.byte 102,73,15,126,208
+.byte 102,73,15,126,217
+ testq %r8,%r8
+ jnz .Ladd_proceedq
+ testq %r9,%r9
+ jz .Ladd_proceedq
+
+.byte 102,72,15,126,199
+ pxor %xmm0,%xmm0
+ movdqu %xmm0,0(%rdi)
+ movdqu %xmm0,16(%rdi)
+ movdqu %xmm0,32(%rdi)
+ movdqu %xmm0,48(%rdi)
+ movdqu %xmm0,64(%rdi)
+ movdqu %xmm0,80(%rdi)
+ jmp .Ladd_doneq
+
+.align 32
+.Ladd_proceedq:
+ movq 0+64(%rsp),%rax
+ movq 8+64(%rsp),%r14
+ leaq 0+64(%rsp),%rsi
+ movq 16+64(%rsp),%r15
+ movq 24+64(%rsp),%r8
+ leaq 96(%rsp),%rdi
+ call __ecp_nistz256_sqr_montq
+
+ movq 448(%rsp),%rax
+ leaq 448(%rsp),%rbx
+ movq 0+0(%rsp),%r9
+ movq 8+0(%rsp),%r10
+ leaq 0+0(%rsp),%rsi
+ movq 16+0(%rsp),%r11
+ movq 24+0(%rsp),%r12
+ leaq 352(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ movq 0+0(%rsp),%rax
+ movq 8+0(%rsp),%r14
+ leaq 0+0(%rsp),%rsi
+ movq 16+0(%rsp),%r15
+ movq 24+0(%rsp),%r8
+ leaq 32(%rsp),%rdi
+ call __ecp_nistz256_sqr_montq
+
+ movq 544(%rsp),%rax
+ leaq 544(%rsp),%rbx
+ movq 0+352(%rsp),%r9
+ movq 8+352(%rsp),%r10
+ leaq 0+352(%rsp),%rsi
+ movq 16+352(%rsp),%r11
+ movq 24+352(%rsp),%r12
+ leaq 352(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ movq 0(%rsp),%rax
+ leaq 0(%rsp),%rbx
+ movq 0+32(%rsp),%r9
+ movq 8+32(%rsp),%r10
+ leaq 0+32(%rsp),%rsi
+ movq 16+32(%rsp),%r11
+ movq 24+32(%rsp),%r12
+ leaq 128(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ movq 160(%rsp),%rax
+ leaq 160(%rsp),%rbx
+ movq 0+32(%rsp),%r9
+ movq 8+32(%rsp),%r10
+ leaq 0+32(%rsp),%rsi
+ movq 16+32(%rsp),%r11
+ movq 24+32(%rsp),%r12
+ leaq 192(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+
+
+
+ addq %r12,%r12
+ leaq 96(%rsp),%rsi
+ adcq %r13,%r13
+ movq %r12,%rax
+ adcq %r8,%r8
+ adcq %r9,%r9
+ movq %r13,%rbp
+ sbbq %r11,%r11
+
+ subq $-1,%r12
+ movq %r8,%rcx
+ sbbq %r14,%r13
+ sbbq $0,%r8
+ movq %r9,%r10
+ sbbq %r15,%r9
+ testq %r11,%r11
+
+ cmovzq %rax,%r12
+ movq 0(%rsi),%rax
+ cmovzq %rbp,%r13
+ movq 8(%rsi),%rbp
+ cmovzq %rcx,%r8
+ movq 16(%rsi),%rcx
+ cmovzq %r10,%r9
+ movq 24(%rsi),%r10
+
+ call __ecp_nistz256_subq
+
+ leaq 128(%rsp),%rbx
+ leaq 288(%rsp),%rdi
+ call __ecp_nistz256_sub_fromq
+
+ movq 192+0(%rsp),%rax
+ movq 192+8(%rsp),%rbp
+ movq 192+16(%rsp),%rcx
+ movq 192+24(%rsp),%r10
+ leaq 320(%rsp),%rdi
+
+ call __ecp_nistz256_subq
+
+ movq %r12,0(%rdi)
+ movq %r13,8(%rdi)
+ movq %r8,16(%rdi)
+ movq %r9,24(%rdi)
+ movq 128(%rsp),%rax
+ leaq 128(%rsp),%rbx
+ movq 0+224(%rsp),%r9
+ movq 8+224(%rsp),%r10
+ leaq 0+224(%rsp),%rsi
+ movq 16+224(%rsp),%r11
+ movq 24+224(%rsp),%r12
+ leaq 256(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ movq 320(%rsp),%rax
+ leaq 320(%rsp),%rbx
+ movq 0+64(%rsp),%r9
+ movq 8+64(%rsp),%r10
+ leaq 0+64(%rsp),%rsi
+ movq 16+64(%rsp),%r11
+ movq 24+64(%rsp),%r12
+ leaq 320(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ leaq 256(%rsp),%rbx
+ leaq 320(%rsp),%rdi
+ call __ecp_nistz256_sub_fromq
+
+.byte 102,72,15,126,199
+
+ movdqa %xmm5,%xmm0
+ movdqa %xmm5,%xmm1
+ pandn 352(%rsp),%xmm0
+ movdqa %xmm5,%xmm2
+ pandn 352+16(%rsp),%xmm1
+ movdqa %xmm5,%xmm3
+ pand 544(%rsp),%xmm2
+ pand 544+16(%rsp),%xmm3
+ por %xmm0,%xmm2
+ por %xmm1,%xmm3
+
+ movdqa %xmm4,%xmm0
+ movdqa %xmm4,%xmm1
+ pandn %xmm2,%xmm0
+ movdqa %xmm4,%xmm2
+ pandn %xmm3,%xmm1
+ movdqa %xmm4,%xmm3
+ pand 448(%rsp),%xmm2
+ pand 448+16(%rsp),%xmm3
+ por %xmm0,%xmm2
+ por %xmm1,%xmm3
+ movdqu %xmm2,64(%rdi)
+ movdqu %xmm3,80(%rdi)
+
+ movdqa %xmm5,%xmm0
+ movdqa %xmm5,%xmm1
+ pandn 288(%rsp),%xmm0
+ movdqa %xmm5,%xmm2
+ pandn 288+16(%rsp),%xmm1
+ movdqa %xmm5,%xmm3
+ pand 480(%rsp),%xmm2
+ pand 480+16(%rsp),%xmm3
+ por %xmm0,%xmm2
+ por %xmm1,%xmm3
+
+ movdqa %xmm4,%xmm0
+ movdqa %xmm4,%xmm1
+ pandn %xmm2,%xmm0
+ movdqa %xmm4,%xmm2
+ pandn %xmm3,%xmm1
+ movdqa %xmm4,%xmm3
+ pand 384(%rsp),%xmm2
+ pand 384+16(%rsp),%xmm3
+ por %xmm0,%xmm2
+ por %xmm1,%xmm3
+ movdqu %xmm2,0(%rdi)
+ movdqu %xmm3,16(%rdi)
+
+ movdqa %xmm5,%xmm0
+ movdqa %xmm5,%xmm1
+ pandn 320(%rsp),%xmm0
+ movdqa %xmm5,%xmm2
+ pandn 320+16(%rsp),%xmm1
+ movdqa %xmm5,%xmm3
+ pand 512(%rsp),%xmm2
+ pand 512+16(%rsp),%xmm3
+ por %xmm0,%xmm2
+ por %xmm1,%xmm3
+
+ movdqa %xmm4,%xmm0
+ movdqa %xmm4,%xmm1
+ pandn %xmm2,%xmm0
+ movdqa %xmm4,%xmm2
+ pandn %xmm3,%xmm1
+ movdqa %xmm4,%xmm3
+ pand 416(%rsp),%xmm2
+ pand 416+16(%rsp),%xmm3
+ por %xmm0,%xmm2
+ por %xmm1,%xmm3
+ movdqu %xmm2,32(%rdi)
+ movdqu %xmm3,48(%rdi)
+
+.Ladd_doneq:
+ addq $576+8,%rsp
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbx
+ popq %rbp
+ .byte 0xf3,0xc3
+.size ecp_nistz256_point_add,.-ecp_nistz256_point_add
+.globl ecp_nistz256_point_add_affine
+.type ecp_nistz256_point_add_affine,@function
+.align 32
+ecp_nistz256_point_add_affine:
+ pushq %rbp
+ pushq %rbx
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ subq $480+8,%rsp
+
+ movdqu 0(%rsi),%xmm0
+ movq %rdx,%rbx
+ movdqu 16(%rsi),%xmm1
+ movdqu 32(%rsi),%xmm2
+ movdqu 48(%rsi),%xmm3
+ movdqu 64(%rsi),%xmm4
+ movdqu 80(%rsi),%xmm5
+ movq 64+0(%rsi),%rax
+ movq 64+8(%rsi),%r14
+ movq 64+16(%rsi),%r15
+ movq 64+24(%rsi),%r8
+ movdqa %xmm0,320(%rsp)
+ movdqa %xmm1,320+16(%rsp)
+ por %xmm0,%xmm1
+ movdqa %xmm2,352(%rsp)
+ movdqa %xmm3,352+16(%rsp)
+ por %xmm2,%xmm3
+ movdqa %xmm4,384(%rsp)
+ movdqa %xmm5,384+16(%rsp)
+ por %xmm1,%xmm3
+
+ movdqu 0(%rbx),%xmm0
+ pshufd $177,%xmm3,%xmm5
+ movdqu 16(%rbx),%xmm1
+ movdqu 32(%rbx),%xmm2
+ por %xmm3,%xmm5
+ movdqu 48(%rbx),%xmm3
+ movdqa %xmm0,416(%rsp)
+ pshufd $30,%xmm5,%xmm4
+ movdqa %xmm1,416+16(%rsp)
+ por %xmm0,%xmm1
+.byte 102,72,15,110,199
+ movdqa %xmm2,448(%rsp)
+ movdqa %xmm3,448+16(%rsp)
+ por %xmm2,%xmm3
+ por %xmm4,%xmm5
+ pxor %xmm4,%xmm4
+ por %xmm1,%xmm3
+
+ leaq 64-0(%rsi),%rsi
+ leaq 32(%rsp),%rdi
+ call __ecp_nistz256_sqr_montq
+
+ pcmpeqd %xmm4,%xmm5
+ pshufd $177,%xmm3,%xmm4
+ movq 0(%rbx),%rax
+
+ movq %r12,%r9
+ por %xmm3,%xmm4
+ pshufd $0,%xmm5,%xmm5
+ pshufd $30,%xmm4,%xmm3
+ movq %r13,%r10
+ por %xmm3,%xmm4
+ pxor %xmm3,%xmm3
+ movq %r14,%r11
+ pcmpeqd %xmm3,%xmm4
+ pshufd $0,%xmm4,%xmm4
+
+ leaq 32-0(%rsp),%rsi
+ movq %r15,%r12
+ leaq 0(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ leaq 320(%rsp),%rbx
+ leaq 64(%rsp),%rdi
+ call __ecp_nistz256_sub_fromq
+
+ movq 384(%rsp),%rax
+ leaq 384(%rsp),%rbx
+ movq 0+32(%rsp),%r9
+ movq 8+32(%rsp),%r10
+ leaq 0+32(%rsp),%rsi
+ movq 16+32(%rsp),%r11
+ movq 24+32(%rsp),%r12
+ leaq 32(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ movq 384(%rsp),%rax
+ leaq 384(%rsp),%rbx
+ movq 0+64(%rsp),%r9
+ movq 8+64(%rsp),%r10
+ leaq 0+64(%rsp),%rsi
+ movq 16+64(%rsp),%r11
+ movq 24+64(%rsp),%r12
+ leaq 288(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ movq 448(%rsp),%rax
+ leaq 448(%rsp),%rbx
+ movq 0+32(%rsp),%r9
+ movq 8+32(%rsp),%r10
+ leaq 0+32(%rsp),%rsi
+ movq 16+32(%rsp),%r11
+ movq 24+32(%rsp),%r12
+ leaq 32(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ leaq 352(%rsp),%rbx
+ leaq 96(%rsp),%rdi
+ call __ecp_nistz256_sub_fromq
+
+ movq 0+64(%rsp),%rax
+ movq 8+64(%rsp),%r14
+ leaq 0+64(%rsp),%rsi
+ movq 16+64(%rsp),%r15
+ movq 24+64(%rsp),%r8
+ leaq 128(%rsp),%rdi
+ call __ecp_nistz256_sqr_montq
+
+ movq 0+96(%rsp),%rax
+ movq 8+96(%rsp),%r14
+ leaq 0+96(%rsp),%rsi
+ movq 16+96(%rsp),%r15
+ movq 24+96(%rsp),%r8
+ leaq 192(%rsp),%rdi
+ call __ecp_nistz256_sqr_montq
+
+ movq 128(%rsp),%rax
+ leaq 128(%rsp),%rbx
+ movq 0+64(%rsp),%r9
+ movq 8+64(%rsp),%r10
+ leaq 0+64(%rsp),%rsi
+ movq 16+64(%rsp),%r11
+ movq 24+64(%rsp),%r12
+ leaq 160(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ movq 320(%rsp),%rax
+ leaq 320(%rsp),%rbx
+ movq 0+128(%rsp),%r9
+ movq 8+128(%rsp),%r10
+ leaq 0+128(%rsp),%rsi
+ movq 16+128(%rsp),%r11
+ movq 24+128(%rsp),%r12
+ leaq 0(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+
+
+
+ addq %r12,%r12
+ leaq 192(%rsp),%rsi
+ adcq %r13,%r13
+ movq %r12,%rax
+ adcq %r8,%r8
+ adcq %r9,%r9
+ movq %r13,%rbp
+ sbbq %r11,%r11
+
+ subq $-1,%r12
+ movq %r8,%rcx
+ sbbq %r14,%r13
+ sbbq $0,%r8
+ movq %r9,%r10
+ sbbq %r15,%r9
+ testq %r11,%r11
+
+ cmovzq %rax,%r12
+ movq 0(%rsi),%rax
+ cmovzq %rbp,%r13
+ movq 8(%rsi),%rbp
+ cmovzq %rcx,%r8
+ movq 16(%rsi),%rcx
+ cmovzq %r10,%r9
+ movq 24(%rsi),%r10
+
+ call __ecp_nistz256_subq
+
+ leaq 160(%rsp),%rbx
+ leaq 224(%rsp),%rdi
+ call __ecp_nistz256_sub_fromq
+
+ movq 0+0(%rsp),%rax
+ movq 0+8(%rsp),%rbp
+ movq 0+16(%rsp),%rcx
+ movq 0+24(%rsp),%r10
+ leaq 64(%rsp),%rdi
+
+ call __ecp_nistz256_subq
+
+ movq %r12,0(%rdi)
+ movq %r13,8(%rdi)
+ movq %r8,16(%rdi)
+ movq %r9,24(%rdi)
+ movq 352(%rsp),%rax
+ leaq 352(%rsp),%rbx
+ movq 0+160(%rsp),%r9
+ movq 8+160(%rsp),%r10
+ leaq 0+160(%rsp),%rsi
+ movq 16+160(%rsp),%r11
+ movq 24+160(%rsp),%r12
+ leaq 32(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ movq 96(%rsp),%rax
+ leaq 96(%rsp),%rbx
+ movq 0+64(%rsp),%r9
+ movq 8+64(%rsp),%r10
+ leaq 0+64(%rsp),%rsi
+ movq 16+64(%rsp),%r11
+ movq 24+64(%rsp),%r12
+ leaq 64(%rsp),%rdi
+ call __ecp_nistz256_mul_montq
+
+ leaq 32(%rsp),%rbx
+ leaq 256(%rsp),%rdi
+ call __ecp_nistz256_sub_fromq
+
+.byte 102,72,15,126,199
+
+ movdqa %xmm5,%xmm0
+ movdqa %xmm5,%xmm1
+ pandn 288(%rsp),%xmm0
+ movdqa %xmm5,%xmm2
+ pandn 288+16(%rsp),%xmm1
+ movdqa %xmm5,%xmm3
+ pand .LONE_mont(%rip),%xmm2
+ pand .LONE_mont+16(%rip),%xmm3
+ por %xmm0,%xmm2
+ por %xmm1,%xmm3
+
+ movdqa %xmm4,%xmm0
+ movdqa %xmm4,%xmm1
+ pandn %xmm2,%xmm0
+ movdqa %xmm4,%xmm2
+ pandn %xmm3,%xmm1
+ movdqa %xmm4,%xmm3
+ pand 384(%rsp),%xmm2
+ pand 384+16(%rsp),%xmm3
+ por %xmm0,%xmm2
+ por %xmm1,%xmm3
+ movdqu %xmm2,64(%rdi)
+ movdqu %xmm3,80(%rdi)
+
+ movdqa %xmm5,%xmm0
+ movdqa %xmm5,%xmm1
+ pandn 224(%rsp),%xmm0
+ movdqa %xmm5,%xmm2
+ pandn 224+16(%rsp),%xmm1
+ movdqa %xmm5,%xmm3
+ pand 416(%rsp),%xmm2
+ pand 416+16(%rsp),%xmm3
+ por %xmm0,%xmm2
+ por %xmm1,%xmm3
+
+ movdqa %xmm4,%xmm0
+ movdqa %xmm4,%xmm1
+ pandn %xmm2,%xmm0
+ movdqa %xmm4,%xmm2
+ pandn %xmm3,%xmm1
+ movdqa %xmm4,%xmm3
+ pand 320(%rsp),%xmm2
+ pand 320+16(%rsp),%xmm3
+ por %xmm0,%xmm2
+ por %xmm1,%xmm3
+ movdqu %xmm2,0(%rdi)
+ movdqu %xmm3,16(%rdi)
+
+ movdqa %xmm5,%xmm0
+ movdqa %xmm5,%xmm1
+ pandn 256(%rsp),%xmm0
+ movdqa %xmm5,%xmm2
+ pandn 256+16(%rsp),%xmm1
+ movdqa %xmm5,%xmm3
+ pand 448(%rsp),%xmm2
+ pand 448+16(%rsp),%xmm3
+ por %xmm0,%xmm2
+ por %xmm1,%xmm3
+
+ movdqa %xmm4,%xmm0
+ movdqa %xmm4,%xmm1
+ pandn %xmm2,%xmm0
+ movdqa %xmm4,%xmm2
+ pandn %xmm3,%xmm1
+ movdqa %xmm4,%xmm3
+ pand 352(%rsp),%xmm2
+ pand 352+16(%rsp),%xmm3
+ por %xmm0,%xmm2
+ por %xmm1,%xmm3
+ movdqu %xmm2,32(%rdi)
+ movdqu %xmm3,48(%rdi)
+
+ addq $480+8,%rsp
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbx
+ popq %rbp
+ .byte 0xf3,0xc3
+.size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
diff --git a/secure/lib/libcrypto/amd64/ghash-x86_64.S b/secure/lib/libcrypto/amd64/ghash-x86_64.S
index d7ea764..aa93c80 100644
--- a/secure/lib/libcrypto/amd64/ghash-x86_64.S
+++ b/secure/lib/libcrypto/amd64/ghash-x86_64.S
@@ -1,6 +1,7 @@
# $FreeBSD$
.text
+
.globl gcm_gmult_4bit
.type gcm_gmult_4bit,@function
.align 16
@@ -659,6 +660,7 @@ gcm_ghash_4bit:
.type gcm_init_clmul,@function
.align 16
gcm_init_clmul:
+.L_init_clmul:
movdqu (%rsi),%xmm2
pshufd $78,%xmm2,%xmm2
@@ -677,15 +679,15 @@ gcm_init_clmul:
pxor %xmm5,%xmm2
+ pshufd $78,%xmm2,%xmm6
movdqa %xmm2,%xmm0
+ pxor %xmm2,%xmm6
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
- pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
- pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
-.byte 102,15,58,68,220,0
+.byte 102,15,58,68,222,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
@@ -695,44 +697,134 @@ gcm_init_clmul:
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
+ movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
+ psllq $5,%xmm0
+ pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
+ psllq $57,%xmm0
+ movdqa %xmm0,%xmm3
+ pslldq $8,%xmm0
+ psrldq $8,%xmm3
+ pxor %xmm4,%xmm0
+ pxor %xmm3,%xmm1
+
+
+ movdqa %xmm0,%xmm4
+ psrlq $1,%xmm0
+ pxor %xmm4,%xmm1
+ pxor %xmm0,%xmm4
+ psrlq $5,%xmm0
+ pxor %xmm4,%xmm0
+ psrlq $1,%xmm0
+ pxor %xmm1,%xmm0
+ pshufd $78,%xmm2,%xmm3
+ pshufd $78,%xmm0,%xmm4
+ pxor %xmm2,%xmm3
+ movdqu %xmm2,0(%rdi)
+ pxor %xmm0,%xmm4
+ movdqu %xmm0,16(%rdi)
+.byte 102,15,58,15,227,8
+ movdqu %xmm4,32(%rdi)
+ movdqa %xmm0,%xmm1
+ pshufd $78,%xmm0,%xmm3
+ pxor %xmm0,%xmm3
+.byte 102,15,58,68,194,0
+.byte 102,15,58,68,202,17
+.byte 102,15,58,68,222,0
+ pxor %xmm0,%xmm3
+ pxor %xmm1,%xmm3
+
+ movdqa %xmm3,%xmm4
+ psrldq $8,%xmm3
+ pslldq $8,%xmm4
+ pxor %xmm3,%xmm1
+ pxor %xmm4,%xmm0
+
+ movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
psllq $5,%xmm0
+ pxor %xmm0,%xmm3
+ psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
- movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
pslldq $8,%xmm0
- psrldq $8,%xmm4
- pxor %xmm3,%xmm0
- pxor %xmm4,%xmm1
+ psrldq $8,%xmm3
+ pxor %xmm4,%xmm0
+ pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
+ psrlq $1,%xmm0
+ pxor %xmm4,%xmm1
+ pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
+ pxor %xmm1,%xmm0
+ movdqa %xmm0,%xmm5
+ movdqa %xmm0,%xmm1
+ pshufd $78,%xmm0,%xmm3
+ pxor %xmm0,%xmm3
+.byte 102,15,58,68,194,0
+.byte 102,15,58,68,202,17
+.byte 102,15,58,68,222,0
+ pxor %xmm0,%xmm3
+ pxor %xmm1,%xmm3
+
+ movdqa %xmm3,%xmm4
+ psrldq $8,%xmm3
+ pslldq $8,%xmm4
+ pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
- pxor %xmm1,%xmm4
+
+ movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
+ psllq $5,%xmm0
+ pxor %xmm0,%xmm3
+ psllq $1,%xmm0
+ pxor %xmm3,%xmm0
+ psllq $57,%xmm0
+ movdqa %xmm0,%xmm3
+ pslldq $8,%xmm0
+ psrldq $8,%xmm3
+ pxor %xmm4,%xmm0
+ pxor %xmm3,%xmm1
+
+
+ movdqa %xmm0,%xmm4
psrlq $1,%xmm0
+ pxor %xmm4,%xmm1
+ pxor %xmm0,%xmm4
+ psrlq $5,%xmm0
pxor %xmm4,%xmm0
- movdqu %xmm2,(%rdi)
- movdqu %xmm0,16(%rdi)
+ psrlq $1,%xmm0
+ pxor %xmm1,%xmm0
+ pshufd $78,%xmm5,%xmm3
+ pshufd $78,%xmm0,%xmm4
+ pxor %xmm5,%xmm3
+ movdqu %xmm5,48(%rdi)
+ pxor %xmm0,%xmm4
+ movdqu %xmm0,64(%rdi)
+.byte 102,15,58,15,227,8
+ movdqu %xmm4,80(%rdi)
.byte 0xf3,0xc3
.size gcm_init_clmul,.-gcm_init_clmul
.globl gcm_gmult_clmul
.type gcm_gmult_clmul,@function
.align 16
gcm_gmult_clmul:
+.L_gmult_clmul:
movdqu (%rdi),%xmm0
movdqa .Lbswap_mask(%rip),%xmm5
movdqu (%rsi),%xmm2
+ movdqu 32(%rsi),%xmm4
.byte 102,15,56,0,197
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
- pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
- pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
@@ -745,201 +837,379 @@ gcm_gmult_clmul:
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
+ movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
- psllq $1,%xmm0
- pxor %xmm3,%xmm0
psllq $5,%xmm0
+ pxor %xmm0,%xmm3
+ psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
- movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
pslldq $8,%xmm0
- psrldq $8,%xmm4
- pxor %xmm3,%xmm0
- pxor %xmm4,%xmm1
+ psrldq $8,%xmm3
+ pxor %xmm4,%xmm0
+ pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
- psrlq $5,%xmm0
- pxor %xmm4,%xmm0
psrlq $1,%xmm0
+ pxor %xmm4,%xmm1
+ pxor %xmm0,%xmm4
+ psrlq $5,%xmm0
pxor %xmm4,%xmm0
- pxor %xmm1,%xmm4
psrlq $1,%xmm0
- pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
.byte 102,15,56,0,197
movdqu %xmm0,(%rdi)
.byte 0xf3,0xc3
.size gcm_gmult_clmul,.-gcm_gmult_clmul
.globl gcm_ghash_clmul
.type gcm_ghash_clmul,@function
-.align 16
+.align 32
gcm_ghash_clmul:
- movdqa .Lbswap_mask(%rip),%xmm5
+.L_ghash_clmul:
+ movdqa .Lbswap_mask(%rip),%xmm10
movdqu (%rdi),%xmm0
movdqu (%rsi),%xmm2
-.byte 102,15,56,0,197
+ movdqu 32(%rsi),%xmm7
+.byte 102,65,15,56,0,194
subq $16,%rcx
jz .Lodd_tail
- movdqu 16(%rsi),%xmm8
+ movdqu 16(%rsi),%xmm6
+ movl OPENSSL_ia32cap_P+4(%rip),%eax
+ cmpq $48,%rcx
+ jb .Lskip4x
+ andl $71303168,%eax
+ cmpl $4194304,%eax
+ je .Lskip4x
+ subq $48,%rcx
+ movq $11547335547999543296,%rax
+ movdqu 48(%rsi),%xmm14
+ movdqu 64(%rsi),%xmm15
- movdqu (%rdx),%xmm3
- movdqu 16(%rdx),%xmm6
-.byte 102,15,56,0,221
-.byte 102,15,56,0,245
- pxor %xmm3,%xmm0
- movdqa %xmm6,%xmm7
- pshufd $78,%xmm6,%xmm3
- pshufd $78,%xmm2,%xmm4
- pxor %xmm6,%xmm3
- pxor %xmm2,%xmm4
-.byte 102,15,58,68,242,0
-.byte 102,15,58,68,250,17
-.byte 102,15,58,68,220,0
- pxor %xmm6,%xmm3
- pxor %xmm7,%xmm3
- movdqa %xmm3,%xmm4
- psrldq $8,%xmm3
- pslldq $8,%xmm4
- pxor %xmm3,%xmm7
- pxor %xmm4,%xmm6
+ movdqu 48(%rdx),%xmm3
+ movdqu 32(%rdx),%xmm11
+.byte 102,65,15,56,0,218
+.byte 102,69,15,56,0,218
+ movdqa %xmm3,%xmm5
+ pshufd $78,%xmm3,%xmm4
+ pxor %xmm3,%xmm4
+.byte 102,15,58,68,218,0
+.byte 102,15,58,68,234,17
+.byte 102,15,58,68,231,0
+
+ movdqa %xmm11,%xmm13
+ pshufd $78,%xmm11,%xmm12
+ pxor %xmm11,%xmm12
+.byte 102,68,15,58,68,222,0
+.byte 102,68,15,58,68,238,17
+.byte 102,68,15,58,68,231,16
+ xorps %xmm11,%xmm3
+ xorps %xmm13,%xmm5
+ movups 80(%rsi),%xmm7
+ xorps %xmm12,%xmm4
+
+ movdqu 16(%rdx),%xmm11
+ movdqu 0(%rdx),%xmm8
+.byte 102,69,15,56,0,218
+.byte 102,69,15,56,0,194
+ movdqa %xmm11,%xmm13
+ pshufd $78,%xmm11,%xmm12
+ pxor %xmm8,%xmm0
+ pxor %xmm11,%xmm12
+.byte 102,69,15,58,68,222,0
movdqa %xmm0,%xmm1
- pshufd $78,%xmm0,%xmm3
- pshufd $78,%xmm8,%xmm4
- pxor %xmm0,%xmm3
- pxor %xmm8,%xmm4
+ pshufd $78,%xmm0,%xmm8
+ pxor %xmm0,%xmm8
+.byte 102,69,15,58,68,238,17
+.byte 102,68,15,58,68,231,0
+ xorps %xmm11,%xmm3
+ xorps %xmm13,%xmm5
- leaq 32(%rdx),%rdx
- subq $32,%rcx
- jbe .Leven_tail
+ leaq 64(%rdx),%rdx
+ subq $64,%rcx
+ jc .Ltail4x
-.Lmod_loop:
-.byte 102,65,15,58,68,192,0
-.byte 102,65,15,58,68,200,17
-.byte 102,15,58,68,220,0
- pxor %xmm0,%xmm3
- pxor %xmm1,%xmm3
+ jmp .Lmod4_loop
+.align 32
+.Lmod4_loop:
+.byte 102,65,15,58,68,199,0
+ xorps %xmm12,%xmm4
+ movdqu 48(%rdx),%xmm11
+.byte 102,69,15,56,0,218
+.byte 102,65,15,58,68,207,17
+ xorps %xmm3,%xmm0
+ movdqu 32(%rdx),%xmm3
+ movdqa %xmm11,%xmm13
+.byte 102,68,15,58,68,199,16
+ pshufd $78,%xmm11,%xmm12
+ xorps %xmm5,%xmm1
+ pxor %xmm11,%xmm12
+.byte 102,65,15,56,0,218
+ movups 32(%rsi),%xmm7
+ xorps %xmm4,%xmm8
+.byte 102,68,15,58,68,218,0
+ pshufd $78,%xmm3,%xmm4
- movdqa %xmm3,%xmm4
- psrldq $8,%xmm3
- pslldq $8,%xmm4
- pxor %xmm3,%xmm1
- pxor %xmm4,%xmm0
- movdqu (%rdx),%xmm3
- pxor %xmm6,%xmm0
- pxor %xmm7,%xmm1
-
- movdqu 16(%rdx),%xmm6
-.byte 102,15,56,0,221
-.byte 102,15,56,0,245
-
- movdqa %xmm6,%xmm7
- pshufd $78,%xmm6,%xmm9
- pshufd $78,%xmm2,%xmm10
- pxor %xmm6,%xmm9
- pxor %xmm2,%xmm10
- pxor %xmm3,%xmm1
+ pxor %xmm0,%xmm8
+ movdqa %xmm3,%xmm5
+ pxor %xmm1,%xmm8
+ pxor %xmm3,%xmm4
+ movdqa %xmm8,%xmm9
+.byte 102,68,15,58,68,234,17
+ pslldq $8,%xmm8
+ psrldq $8,%xmm9
+ pxor %xmm8,%xmm0
+ movdqa .L7_mask(%rip),%xmm8
+ pxor %xmm9,%xmm1
+.byte 102,76,15,110,200
+
+ pand %xmm0,%xmm8
+.byte 102,69,15,56,0,200
+ pxor %xmm0,%xmm9
+.byte 102,68,15,58,68,231,0
+ psllq $57,%xmm9
+ movdqa %xmm9,%xmm8
+ pslldq $8,%xmm9
+.byte 102,15,58,68,222,0
+ psrldq $8,%xmm8
+ pxor %xmm9,%xmm0
+ pxor %xmm8,%xmm1
+ movdqu 0(%rdx),%xmm8
+
+ movdqa %xmm0,%xmm9
+ psrlq $1,%xmm0
+.byte 102,15,58,68,238,17
+ xorps %xmm11,%xmm3
+ movdqu 16(%rdx),%xmm11
+.byte 102,69,15,56,0,218
+.byte 102,15,58,68,231,16
+ xorps %xmm13,%xmm5
+ movups 80(%rsi),%xmm7
+.byte 102,69,15,56,0,194
+ pxor %xmm9,%xmm1
+ pxor %xmm0,%xmm9
+ psrlq $5,%xmm0
+
+ movdqa %xmm11,%xmm13
+ pxor %xmm12,%xmm4
+ pshufd $78,%xmm11,%xmm12
+ pxor %xmm9,%xmm0
+ pxor %xmm8,%xmm1
+ pxor %xmm11,%xmm12
+.byte 102,69,15,58,68,222,0
+ psrlq $1,%xmm0
+ pxor %xmm1,%xmm0
+ movdqa %xmm0,%xmm1
+.byte 102,69,15,58,68,238,17
+ xorps %xmm11,%xmm3
+ pshufd $78,%xmm0,%xmm8
+ pxor %xmm0,%xmm8
+
+.byte 102,68,15,58,68,231,0
+ xorps %xmm13,%xmm5
+ leaq 64(%rdx),%rdx
+ subq $64,%rcx
+ jnc .Lmod4_loop
+
+.Ltail4x:
+.byte 102,65,15,58,68,199,0
+.byte 102,65,15,58,68,207,17
+.byte 102,68,15,58,68,199,16
+ xorps %xmm12,%xmm4
+ xorps %xmm3,%xmm0
+ xorps %xmm5,%xmm1
+ pxor %xmm0,%xmm1
+ pxor %xmm4,%xmm8
+
+ pxor %xmm1,%xmm8
+ pxor %xmm0,%xmm1
+
+ movdqa %xmm8,%xmm9
+ psrldq $8,%xmm8
+ pslldq $8,%xmm9
+ pxor %xmm8,%xmm1
+ pxor %xmm9,%xmm0
+
+ movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
- psllq $1,%xmm0
- pxor %xmm3,%xmm0
psllq $5,%xmm0
+ pxor %xmm0,%xmm3
+ psllq $1,%xmm0
pxor %xmm3,%xmm0
-.byte 102,15,58,68,242,0
psllq $57,%xmm0
- movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
pslldq $8,%xmm0
- psrldq $8,%xmm4
- pxor %xmm3,%xmm0
- pxor %xmm4,%xmm1
+ psrldq $8,%xmm3
+ pxor %xmm4,%xmm0
+ pxor %xmm3,%xmm1
+
-.byte 102,15,58,68,250,17
movdqa %xmm0,%xmm4
- psrlq $5,%xmm0
- pxor %xmm4,%xmm0
psrlq $1,%xmm0
+ pxor %xmm4,%xmm1
+ pxor %xmm0,%xmm4
+ psrlq $5,%xmm0
pxor %xmm4,%xmm0
- pxor %xmm1,%xmm4
psrlq $1,%xmm0
- pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
+ addq $64,%rcx
+ jz .Ldone
+ movdqu 32(%rsi),%xmm7
+ subq $16,%rcx
+ jz .Lodd_tail
+.Lskip4x:
+
+
+
+
+
+ movdqu (%rdx),%xmm8
+ movdqu 16(%rdx),%xmm3
+.byte 102,69,15,56,0,194
+.byte 102,65,15,56,0,218
+ pxor %xmm8,%xmm0
-.byte 102,69,15,58,68,202,0
+ movdqa %xmm3,%xmm5
+ pshufd $78,%xmm3,%xmm4
+ pxor %xmm3,%xmm4
+.byte 102,15,58,68,218,0
+.byte 102,15,58,68,234,17
+.byte 102,15,58,68,231,0
+
+ leaq 32(%rdx),%rdx
+ nop
+ subq $32,%rcx
+ jbe .Leven_tail
+ nop
+ jmp .Lmod_loop
+
+.align 32
+.Lmod_loop:
movdqa %xmm0,%xmm1
- pshufd $78,%xmm0,%xmm3
- pshufd $78,%xmm8,%xmm4
- pxor %xmm0,%xmm3
+ movdqa %xmm4,%xmm8
+ pshufd $78,%xmm0,%xmm4
+ pxor %xmm0,%xmm4
+
+.byte 102,15,58,68,198,0
+.byte 102,15,58,68,206,17
+.byte 102,15,58,68,231,16
+
+ pxor %xmm3,%xmm0
+ pxor %xmm5,%xmm1
+ movdqu (%rdx),%xmm9
+ pxor %xmm0,%xmm8
+.byte 102,69,15,56,0,202
+ movdqu 16(%rdx),%xmm3
+
+ pxor %xmm1,%xmm8
+ pxor %xmm9,%xmm1
pxor %xmm8,%xmm4
+.byte 102,65,15,56,0,218
+ movdqa %xmm4,%xmm8
+ psrldq $8,%xmm8
+ pslldq $8,%xmm4
+ pxor %xmm8,%xmm1
+ pxor %xmm4,%xmm0
- pxor %xmm6,%xmm9
- pxor %xmm7,%xmm9
- movdqa %xmm9,%xmm10
- psrldq $8,%xmm9
- pslldq $8,%xmm10
- pxor %xmm9,%xmm7
- pxor %xmm10,%xmm6
+ movdqa %xmm3,%xmm5
+
+ movdqa %xmm0,%xmm9
+ movdqa %xmm0,%xmm8
+ psllq $5,%xmm0
+ pxor %xmm0,%xmm8
+.byte 102,15,58,68,218,0
+ psllq $1,%xmm0
+ pxor %xmm8,%xmm0
+ psllq $57,%xmm0
+ movdqa %xmm0,%xmm8
+ pslldq $8,%xmm0
+ psrldq $8,%xmm8
+ pxor %xmm9,%xmm0
+ pshufd $78,%xmm5,%xmm4
+ pxor %xmm8,%xmm1
+ pxor %xmm5,%xmm4
+ movdqa %xmm0,%xmm9
+ psrlq $1,%xmm0
+.byte 102,15,58,68,234,17
+ pxor %xmm9,%xmm1
+ pxor %xmm0,%xmm9
+ psrlq $5,%xmm0
+ pxor %xmm9,%xmm0
leaq 32(%rdx),%rdx
+ psrlq $1,%xmm0
+.byte 102,15,58,68,231,0
+ pxor %xmm1,%xmm0
+
subq $32,%rcx
ja .Lmod_loop
.Leven_tail:
-.byte 102,65,15,58,68,192,0
-.byte 102,65,15,58,68,200,17
-.byte 102,15,58,68,220,0
- pxor %xmm0,%xmm3
- pxor %xmm1,%xmm3
+ movdqa %xmm0,%xmm1
+ movdqa %xmm4,%xmm8
+ pshufd $78,%xmm0,%xmm4
+ pxor %xmm0,%xmm4
- movdqa %xmm3,%xmm4
- psrldq $8,%xmm3
+.byte 102,15,58,68,198,0
+.byte 102,15,58,68,206,17
+.byte 102,15,58,68,231,16
+
+ pxor %xmm3,%xmm0
+ pxor %xmm5,%xmm1
+ pxor %xmm0,%xmm8
+ pxor %xmm1,%xmm8
+ pxor %xmm8,%xmm4
+ movdqa %xmm4,%xmm8
+ psrldq $8,%xmm8
pslldq $8,%xmm4
- pxor %xmm3,%xmm1
+ pxor %xmm8,%xmm1
pxor %xmm4,%xmm0
- pxor %xmm6,%xmm0
- pxor %xmm7,%xmm1
+ movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
- psllq $1,%xmm0
- pxor %xmm3,%xmm0
psllq $5,%xmm0
+ pxor %xmm0,%xmm3
+ psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
- movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
pslldq $8,%xmm0
- psrldq $8,%xmm4
- pxor %xmm3,%xmm0
- pxor %xmm4,%xmm1
+ psrldq $8,%xmm3
+ pxor %xmm4,%xmm0
+ pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
- psrlq $5,%xmm0
- pxor %xmm4,%xmm0
psrlq $1,%xmm0
+ pxor %xmm4,%xmm1
+ pxor %xmm0,%xmm4
+ psrlq $5,%xmm0
pxor %xmm4,%xmm0
- pxor %xmm1,%xmm4
psrlq $1,%xmm0
- pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
testq %rcx,%rcx
jnz .Ldone
.Lodd_tail:
- movdqu (%rdx),%xmm3
-.byte 102,15,56,0,221
- pxor %xmm3,%xmm0
+ movdqu (%rdx),%xmm8
+.byte 102,69,15,56,0,194
+ pxor %xmm8,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
- pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
- pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
-.byte 102,15,58,68,220,0
+.byte 102,15,58,68,223,0
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
@@ -949,38 +1219,60 @@ gcm_ghash_clmul:
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
+ movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
- psllq $1,%xmm0
- pxor %xmm3,%xmm0
psllq $5,%xmm0
+ pxor %xmm0,%xmm3
+ psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
- movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
pslldq $8,%xmm0
- psrldq $8,%xmm4
- pxor %xmm3,%xmm0
- pxor %xmm4,%xmm1
+ psrldq $8,%xmm3
+ pxor %xmm4,%xmm0
+ pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
- psrlq $5,%xmm0
- pxor %xmm4,%xmm0
psrlq $1,%xmm0
+ pxor %xmm4,%xmm1
+ pxor %xmm0,%xmm4
+ psrlq $5,%xmm0
pxor %xmm4,%xmm0
- pxor %xmm1,%xmm4
psrlq $1,%xmm0
- pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
.Ldone:
-.byte 102,15,56,0,197
+.byte 102,65,15,56,0,194
movdqu %xmm0,(%rdi)
.byte 0xf3,0xc3
-.LSEH_end_gcm_ghash_clmul:
.size gcm_ghash_clmul,.-gcm_ghash_clmul
+.globl gcm_init_avx
+.type gcm_init_avx,@function
+.align 32
+gcm_init_avx:
+ jmp .L_init_clmul
+.size gcm_init_avx,.-gcm_init_avx
+.globl gcm_gmult_avx
+.type gcm_gmult_avx,@function
+.align 32
+gcm_gmult_avx:
+ jmp .L_gmult_clmul
+.size gcm_gmult_avx,.-gcm_gmult_avx
+.globl gcm_ghash_avx
+.type gcm_ghash_avx,@function
+.align 32
+gcm_ghash_avx:
+ jmp .L_ghash_clmul
+.size gcm_ghash_avx,.-gcm_ghash_avx
.align 64
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
+.L7_mask:
+.long 7,0,7,0
+.L7_mask_poly:
+.long 7,0,450,0
.align 64
.type .Lrem_4bit,@object
.Lrem_4bit:
diff --git a/secure/lib/libcrypto/amd64/md5-x86_64.S b/secure/lib/libcrypto/amd64/md5-x86_64.S
index c592dcc..94fb761 100644
--- a/secure/lib/libcrypto/amd64/md5-x86_64.S
+++ b/secure/lib/libcrypto/amd64/md5-x86_64.S
@@ -30,7 +30,7 @@ md5_block_asm_data_order:
cmpq %rdi,%rsi
- je .Lend
+ je .Lend
.Lloop:
@@ -649,7 +649,7 @@ md5_block_asm_data_order:
addq $64,%rsi
cmpq %rdi,%rsi
- jb .Lloop
+ jb .Lloop
.Lend:
diff --git a/secure/lib/libcrypto/amd64/modexp512-x86_64.S b/secure/lib/libcrypto/amd64/modexp512-x86_64.S
deleted file mode 100644
index 71072ad..0000000
--- a/secure/lib/libcrypto/amd64/modexp512-x86_64.S
+++ /dev/null
@@ -1,1774 +0,0 @@
- # $FreeBSD$
-.text
-
-.type MULADD_128x512,@function
-.align 16
-MULADD_128x512:
- movq 0(%rsi),%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- movq %r8,0(%rcx)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- addq %rax,%r9
- adcq $0,%rdx
- addq %rbx,%r9
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- addq %rbx,%r10
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- addq %rbx,%r11
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- addq %rbx,%r12
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- addq %rbx,%r13
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- addq %rbx,%r14
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- addq %rbx,%r15
- adcq $0,%rdx
- movq %rdx,%r8
- movq 8(%rdi),%rbp
- movq 0(%rsi),%rax
- mulq %rbp
- addq %rax,%r9
- adcq $0,%rdx
- movq %r9,8(%rcx)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- addq %rbx,%r10
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- addq %rbx,%r11
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- addq %rbx,%r12
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- addq %rbx,%r13
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- addq %rbx,%r14
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- addq %rbx,%r15
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- addq %rbx,%r8
- adcq $0,%rdx
- movq %rdx,%r9
- .byte 0xf3,0xc3
-.size MULADD_128x512,.-MULADD_128x512
-.type mont_reduce,@function
-.align 16
-mont_reduce:
- leaq 192(%rsp),%rdi
- movq 32(%rsp),%rsi
- addq $576,%rsi
- leaq 520(%rsp),%rcx
-
- movq 96(%rcx),%rbp
- movq 0(%rsi),%rax
- mulq %rbp
- movq (%rcx),%r8
- addq %rax,%r8
- adcq $0,%rdx
- movq %r8,0(%rdi)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- movq 8(%rcx),%r9
- addq %rax,%r9
- adcq $0,%rdx
- addq %rbx,%r9
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- movq 16(%rcx),%r10
- addq %rax,%r10
- adcq $0,%rdx
- addq %rbx,%r10
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- movq 24(%rcx),%r11
- addq %rax,%r11
- adcq $0,%rdx
- addq %rbx,%r11
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- movq 32(%rcx),%r12
- addq %rax,%r12
- adcq $0,%rdx
- addq %rbx,%r12
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- movq 40(%rcx),%r13
- addq %rax,%r13
- adcq $0,%rdx
- addq %rbx,%r13
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- movq 48(%rcx),%r14
- addq %rax,%r14
- adcq $0,%rdx
- addq %rbx,%r14
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- movq 56(%rcx),%r15
- addq %rax,%r15
- adcq $0,%rdx
- addq %rbx,%r15
- adcq $0,%rdx
- movq %rdx,%r8
- movq 104(%rcx),%rbp
- movq 0(%rsi),%rax
- mulq %rbp
- addq %rax,%r9
- adcq $0,%rdx
- movq %r9,8(%rdi)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- addq %rbx,%r10
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- addq %rbx,%r11
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- addq %rbx,%r12
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- addq %rbx,%r13
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- addq %rbx,%r14
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- addq %rbx,%r15
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- addq %rbx,%r8
- adcq $0,%rdx
- movq %rdx,%r9
- movq 112(%rcx),%rbp
- movq 0(%rsi),%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- movq %r10,16(%rdi)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- addq %rbx,%r11
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- addq %rbx,%r12
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- addq %rbx,%r13
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- addq %rbx,%r14
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- addq %rbx,%r15
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- addq %rbx,%r8
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- addq %rax,%r9
- adcq $0,%rdx
- addq %rbx,%r9
- adcq $0,%rdx
- movq %rdx,%r10
- movq 120(%rcx),%rbp
- movq 0(%rsi),%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- movq %r11,24(%rdi)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- addq %rbx,%r12
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- addq %rbx,%r13
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- addq %rbx,%r14
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- addq %rbx,%r15
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- addq %rbx,%r8
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- addq %rax,%r9
- adcq $0,%rdx
- addq %rbx,%r9
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- addq %rbx,%r10
- adcq $0,%rdx
- movq %rdx,%r11
- xorq %rax,%rax
-
- addq 64(%rcx),%r8
- adcq 72(%rcx),%r9
- adcq 80(%rcx),%r10
- adcq 88(%rcx),%r11
- adcq $0,%rax
-
-
-
-
- movq %r8,64(%rdi)
- movq %r9,72(%rdi)
- movq %r10,%rbp
- movq %r11,88(%rdi)
-
- movq %rax,384(%rsp)
-
- movq 0(%rdi),%r8
- movq 8(%rdi),%r9
- movq 16(%rdi),%r10
- movq 24(%rdi),%r11
-
-
-
-
-
-
-
-
- addq $80,%rdi
-
- addq $64,%rsi
- leaq 296(%rsp),%rcx
-
- call MULADD_128x512
-
- movq 384(%rsp),%rax
-
-
- addq -16(%rdi),%r8
- adcq -8(%rdi),%r9
- movq %r8,64(%rcx)
- movq %r9,72(%rcx)
-
- adcq %rax,%rax
- movq %rax,384(%rsp)
-
- leaq 192(%rsp),%rdi
- addq $64,%rsi
-
-
-
-
-
- movq (%rsi),%r8
- movq 8(%rsi),%rbx
-
- movq (%rcx),%rax
- mulq %r8
- movq %rax,%rbp
- movq %rdx,%r9
-
- movq 8(%rcx),%rax
- mulq %r8
- addq %rax,%r9
-
- movq (%rcx),%rax
- mulq %rbx
- addq %rax,%r9
-
- movq %r9,8(%rdi)
-
-
- subq $192,%rsi
-
- movq (%rcx),%r8
- movq 8(%rcx),%r9
-
- call MULADD_128x512
-
-
-
-
- movq 0(%rsi),%rax
- movq 8(%rsi),%rbx
- movq 16(%rsi),%rdi
- movq 24(%rsi),%rdx
-
-
- movq 384(%rsp),%rbp
-
- addq 64(%rcx),%r8
- adcq 72(%rcx),%r9
-
-
- adcq %rbp,%rbp
-
-
-
- shlq $3,%rbp
- movq 32(%rsp),%rcx
- addq %rcx,%rbp
-
-
- xorq %rsi,%rsi
-
- addq 0(%rbp),%r10
- adcq 64(%rbp),%r11
- adcq 128(%rbp),%r12
- adcq 192(%rbp),%r13
- adcq 256(%rbp),%r14
- adcq 320(%rbp),%r15
- adcq 384(%rbp),%r8
- adcq 448(%rbp),%r9
-
-
-
- sbbq $0,%rsi
-
-
- andq %rsi,%rax
- andq %rsi,%rbx
- andq %rsi,%rdi
- andq %rsi,%rdx
-
- movq $1,%rbp
- subq %rax,%r10
- sbbq %rbx,%r11
- sbbq %rdi,%r12
- sbbq %rdx,%r13
-
-
-
-
- sbbq $0,%rbp
-
-
-
- addq $512,%rcx
- movq 32(%rcx),%rax
- movq 40(%rcx),%rbx
- movq 48(%rcx),%rdi
- movq 56(%rcx),%rdx
-
-
-
- andq %rsi,%rax
- andq %rsi,%rbx
- andq %rsi,%rdi
- andq %rsi,%rdx
-
-
-
- subq $1,%rbp
-
- sbbq %rax,%r14
- sbbq %rbx,%r15
- sbbq %rdi,%r8
- sbbq %rdx,%r9
-
-
-
- movq 144(%rsp),%rsi
- movq %r10,0(%rsi)
- movq %r11,8(%rsi)
- movq %r12,16(%rsi)
- movq %r13,24(%rsi)
- movq %r14,32(%rsi)
- movq %r15,40(%rsi)
- movq %r8,48(%rsi)
- movq %r9,56(%rsi)
-
- .byte 0xf3,0xc3
-.size mont_reduce,.-mont_reduce
-.type mont_mul_a3b,@function
-.align 16
-mont_mul_a3b:
-
-
-
-
- movq 0(%rdi),%rbp
-
- movq %r10,%rax
- mulq %rbp
- movq %rax,520(%rsp)
- movq %rdx,%r10
- movq %r11,%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- movq %rdx,%r11
- movq %r12,%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- movq %rdx,%r12
- movq %r13,%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- movq %rdx,%r13
- movq %r14,%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- movq %rdx,%r14
- movq %r15,%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- movq %rdx,%r15
- movq %r8,%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- movq %rdx,%r8
- movq %r9,%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- movq %rdx,%r9
- movq 8(%rdi),%rbp
- movq 0(%rsi),%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- movq %r10,528(%rsp)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- addq %rbx,%r11
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- addq %rbx,%r12
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- addq %rbx,%r13
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- addq %rbx,%r14
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- addq %rbx,%r15
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- addq %rbx,%r8
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- addq %rax,%r9
- adcq $0,%rdx
- addq %rbx,%r9
- adcq $0,%rdx
- movq %rdx,%r10
- movq 16(%rdi),%rbp
- movq 0(%rsi),%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- movq %r11,536(%rsp)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- addq %rbx,%r12
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- addq %rbx,%r13
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- addq %rbx,%r14
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- addq %rbx,%r15
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- addq %rbx,%r8
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- addq %rax,%r9
- adcq $0,%rdx
- addq %rbx,%r9
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- addq %rbx,%r10
- adcq $0,%rdx
- movq %rdx,%r11
- movq 24(%rdi),%rbp
- movq 0(%rsi),%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- movq %r12,544(%rsp)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- addq %rbx,%r13
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- addq %rbx,%r14
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- addq %rbx,%r15
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- addq %rbx,%r8
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- addq %rax,%r9
- adcq $0,%rdx
- addq %rbx,%r9
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- addq %rbx,%r10
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- addq %rbx,%r11
- adcq $0,%rdx
- movq %rdx,%r12
- movq 32(%rdi),%rbp
- movq 0(%rsi),%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- movq %r13,552(%rsp)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- addq %rbx,%r14
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- addq %rbx,%r15
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- addq %rbx,%r8
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- addq %rax,%r9
- adcq $0,%rdx
- addq %rbx,%r9
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- addq %rbx,%r10
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- addq %rbx,%r11
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- addq %rbx,%r12
- adcq $0,%rdx
- movq %rdx,%r13
- movq 40(%rdi),%rbp
- movq 0(%rsi),%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- movq %r14,560(%rsp)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- addq %rbx,%r15
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- addq %rbx,%r8
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- addq %rax,%r9
- adcq $0,%rdx
- addq %rbx,%r9
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- addq %rbx,%r10
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- addq %rbx,%r11
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- addq %rbx,%r12
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- addq %rbx,%r13
- adcq $0,%rdx
- movq %rdx,%r14
- movq 48(%rdi),%rbp
- movq 0(%rsi),%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- movq %r15,568(%rsp)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- addq %rbx,%r8
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- addq %rax,%r9
- adcq $0,%rdx
- addq %rbx,%r9
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- addq %rbx,%r10
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- addq %rbx,%r11
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- addq %rbx,%r12
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- addq %rbx,%r13
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- addq %rbx,%r14
- adcq $0,%rdx
- movq %rdx,%r15
- movq 56(%rdi),%rbp
- movq 0(%rsi),%rax
- mulq %rbp
- addq %rax,%r8
- adcq $0,%rdx
- movq %r8,576(%rsp)
- movq %rdx,%rbx
-
- movq 8(%rsi),%rax
- mulq %rbp
- addq %rax,%r9
- adcq $0,%rdx
- addq %rbx,%r9
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 16(%rsi),%rax
- mulq %rbp
- addq %rax,%r10
- adcq $0,%rdx
- addq %rbx,%r10
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 24(%rsi),%rax
- mulq %rbp
- addq %rax,%r11
- adcq $0,%rdx
- addq %rbx,%r11
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 32(%rsi),%rax
- mulq %rbp
- addq %rax,%r12
- adcq $0,%rdx
- addq %rbx,%r12
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 40(%rsi),%rax
- mulq %rbp
- addq %rax,%r13
- adcq $0,%rdx
- addq %rbx,%r13
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 48(%rsi),%rax
- mulq %rbp
- addq %rax,%r14
- adcq $0,%rdx
- addq %rbx,%r14
- adcq $0,%rdx
- movq %rdx,%rbx
-
- movq 56(%rsi),%rax
- mulq %rbp
- addq %rax,%r15
- adcq $0,%rdx
- addq %rbx,%r15
- adcq $0,%rdx
- movq %rdx,%r8
- movq %r9,584(%rsp)
- movq %r10,592(%rsp)
- movq %r11,600(%rsp)
- movq %r12,608(%rsp)
- movq %r13,616(%rsp)
- movq %r14,624(%rsp)
- movq %r15,632(%rsp)
- movq %r8,640(%rsp)
-
-
-
-
-
- jmp mont_reduce
-
-
-.size mont_mul_a3b,.-mont_mul_a3b
-.type sqr_reduce,@function
-.align 16
-sqr_reduce:
- movq 16(%rsp),%rcx
-
-
-
- movq %r10,%rbx
-
- movq %r11,%rax
- mulq %rbx
- movq %rax,528(%rsp)
- movq %rdx,%r10
- movq %r12,%rax
- mulq %rbx
- addq %rax,%r10
- adcq $0,%rdx
- movq %rdx,%r11
- movq %r13,%rax
- mulq %rbx
- addq %rax,%r11
- adcq $0,%rdx
- movq %rdx,%r12
- movq %r14,%rax
- mulq %rbx
- addq %rax,%r12
- adcq $0,%rdx
- movq %rdx,%r13
- movq %r15,%rax
- mulq %rbx
- addq %rax,%r13
- adcq $0,%rdx
- movq %rdx,%r14
- movq %r8,%rax
- mulq %rbx
- addq %rax,%r14
- adcq $0,%rdx
- movq %rdx,%r15
- movq %r9,%rax
- mulq %rbx
- addq %rax,%r15
- adcq $0,%rdx
- movq %rdx,%rsi
-
- movq %r10,536(%rsp)
-
-
-
-
-
- movq 8(%rcx),%rbx
-
- movq 16(%rcx),%rax
- mulq %rbx
- addq %rax,%r11
- adcq $0,%rdx
- movq %r11,544(%rsp)
-
- movq %rdx,%r10
- movq 24(%rcx),%rax
- mulq %rbx
- addq %rax,%r12
- adcq $0,%rdx
- addq %r10,%r12
- adcq $0,%rdx
- movq %r12,552(%rsp)
-
- movq %rdx,%r10
- movq 32(%rcx),%rax
- mulq %rbx
- addq %rax,%r13
- adcq $0,%rdx
- addq %r10,%r13
- adcq $0,%rdx
-
- movq %rdx,%r10
- movq 40(%rcx),%rax
- mulq %rbx
- addq %rax,%r14
- adcq $0,%rdx
- addq %r10,%r14
- adcq $0,%rdx
-
- movq %rdx,%r10
- movq %r8,%rax
- mulq %rbx
- addq %rax,%r15
- adcq $0,%rdx
- addq %r10,%r15
- adcq $0,%rdx
-
- movq %rdx,%r10
- movq %r9,%rax
- mulq %rbx
- addq %rax,%rsi
- adcq $0,%rdx
- addq %r10,%rsi
- adcq $0,%rdx
-
- movq %rdx,%r11
-
-
-
-
- movq 16(%rcx),%rbx
-
- movq 24(%rcx),%rax
- mulq %rbx
- addq %rax,%r13
- adcq $0,%rdx
- movq %r13,560(%rsp)
-
- movq %rdx,%r10
- movq 32(%rcx),%rax
- mulq %rbx
- addq %rax,%r14
- adcq $0,%rdx
- addq %r10,%r14
- adcq $0,%rdx
- movq %r14,568(%rsp)
-
- movq %rdx,%r10
- movq 40(%rcx),%rax
- mulq %rbx
- addq %rax,%r15
- adcq $0,%rdx
- addq %r10,%r15
- adcq $0,%rdx
-
- movq %rdx,%r10
- movq %r8,%rax
- mulq %rbx
- addq %rax,%rsi
- adcq $0,%rdx
- addq %r10,%rsi
- adcq $0,%rdx
-
- movq %rdx,%r10
- movq %r9,%rax
- mulq %rbx
- addq %rax,%r11
- adcq $0,%rdx
- addq %r10,%r11
- adcq $0,%rdx
-
- movq %rdx,%r12
-
-
-
-
-
- movq 24(%rcx),%rbx
-
- movq 32(%rcx),%rax
- mulq %rbx
- addq %rax,%r15
- adcq $0,%rdx
- movq %r15,576(%rsp)
-
- movq %rdx,%r10
- movq 40(%rcx),%rax
- mulq %rbx
- addq %rax,%rsi
- adcq $0,%rdx
- addq %r10,%rsi
- adcq $0,%rdx
- movq %rsi,584(%rsp)
-
- movq %rdx,%r10
- movq %r8,%rax
- mulq %rbx
- addq %rax,%r11
- adcq $0,%rdx
- addq %r10,%r11
- adcq $0,%rdx
-
- movq %rdx,%r10
- movq %r9,%rax
- mulq %rbx
- addq %rax,%r12
- adcq $0,%rdx
- addq %r10,%r12
- adcq $0,%rdx
-
- movq %rdx,%r15
-
-
-
-
- movq 32(%rcx),%rbx
-
- movq 40(%rcx),%rax
- mulq %rbx
- addq %rax,%r11
- adcq $0,%rdx
- movq %r11,592(%rsp)
-
- movq %rdx,%r10
- movq %r8,%rax
- mulq %rbx
- addq %rax,%r12
- adcq $0,%rdx
- addq %r10,%r12
- adcq $0,%rdx
- movq %r12,600(%rsp)
-
- movq %rdx,%r10
- movq %r9,%rax
- mulq %rbx
- addq %rax,%r15
- adcq $0,%rdx
- addq %r10,%r15
- adcq $0,%rdx
-
- movq %rdx,%r11
-
-
-
-
- movq 40(%rcx),%rbx
-
- movq %r8,%rax
- mulq %rbx
- addq %rax,%r15
- adcq $0,%rdx
- movq %r15,608(%rsp)
-
- movq %rdx,%r10
- movq %r9,%rax
- mulq %rbx
- addq %rax,%r11
- adcq $0,%rdx
- addq %r10,%r11
- adcq $0,%rdx
- movq %r11,616(%rsp)
-
- movq %rdx,%r12
-
-
-
-
- movq %r8,%rbx
-
- movq %r9,%rax
- mulq %rbx
- addq %rax,%r12
- adcq $0,%rdx
- movq %r12,624(%rsp)
-
- movq %rdx,632(%rsp)
-
-
- movq 528(%rsp),%r10
- movq 536(%rsp),%r11
- movq 544(%rsp),%r12
- movq 552(%rsp),%r13
- movq 560(%rsp),%r14
- movq 568(%rsp),%r15
-
- movq 24(%rcx),%rax
- mulq %rax
- movq %rax,%rdi
- movq %rdx,%r8
-
- addq %r10,%r10
- adcq %r11,%r11
- adcq %r12,%r12
- adcq %r13,%r13
- adcq %r14,%r14
- adcq %r15,%r15
- adcq $0,%r8
-
- movq 0(%rcx),%rax
- mulq %rax
- movq %rax,520(%rsp)
- movq %rdx,%rbx
-
- movq 8(%rcx),%rax
- mulq %rax
-
- addq %rbx,%r10
- adcq %rax,%r11
- adcq $0,%rdx
-
- movq %rdx,%rbx
- movq %r10,528(%rsp)
- movq %r11,536(%rsp)
-
- movq 16(%rcx),%rax
- mulq %rax
-
- addq %rbx,%r12
- adcq %rax,%r13
- adcq $0,%rdx
-
- movq %rdx,%rbx
-
- movq %r12,544(%rsp)
- movq %r13,552(%rsp)
-
- xorq %rbp,%rbp
- addq %rbx,%r14
- adcq %rdi,%r15
- adcq $0,%rbp
-
- movq %r14,560(%rsp)
- movq %r15,568(%rsp)
-
-
-
-
- movq 576(%rsp),%r10
- movq 584(%rsp),%r11
- movq 592(%rsp),%r12
- movq 600(%rsp),%r13
- movq 608(%rsp),%r14
- movq 616(%rsp),%r15
- movq 624(%rsp),%rdi
- movq 632(%rsp),%rsi
-
- movq %r9,%rax
- mulq %rax
- movq %rax,%r9
- movq %rdx,%rbx
-
- addq %r10,%r10
- adcq %r11,%r11
- adcq %r12,%r12
- adcq %r13,%r13
- adcq %r14,%r14
- adcq %r15,%r15
- adcq %rdi,%rdi
- adcq %rsi,%rsi
- adcq $0,%rbx
-
- addq %rbp,%r10
-
- movq 32(%rcx),%rax
- mulq %rax
-
- addq %r8,%r10
- adcq %rax,%r11
- adcq $0,%rdx
-
- movq %rdx,%rbp
-
- movq %r10,576(%rsp)
- movq %r11,584(%rsp)
-
- movq 40(%rcx),%rax
- mulq %rax
-
- addq %rbp,%r12
- adcq %rax,%r13
- adcq $0,%rdx
-
- movq %rdx,%rbp
-
- movq %r12,592(%rsp)
- movq %r13,600(%rsp)
-
- movq 48(%rcx),%rax
- mulq %rax
-
- addq %rbp,%r14
- adcq %rax,%r15
- adcq $0,%rdx
-
- movq %r14,608(%rsp)
- movq %r15,616(%rsp)
-
- addq %rdx,%rdi
- adcq %r9,%rsi
- adcq $0,%rbx
-
- movq %rdi,624(%rsp)
- movq %rsi,632(%rsp)
- movq %rbx,640(%rsp)
-
- jmp mont_reduce
-
-
-.size sqr_reduce,.-sqr_reduce
-.globl mod_exp_512
-.type mod_exp_512,@function
-mod_exp_512:
- pushq %rbp
- pushq %rbx
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
-
-
- movq %rsp,%r8
- subq $2688,%rsp
- andq $-64,%rsp
-
-
- movq %r8,0(%rsp)
- movq %rdi,8(%rsp)
- movq %rsi,16(%rsp)
- movq %rcx,24(%rsp)
-.Lbody:
-
-
-
- pxor %xmm4,%xmm4
- movdqu 0(%rsi),%xmm0
- movdqu 16(%rsi),%xmm1
- movdqu 32(%rsi),%xmm2
- movdqu 48(%rsi),%xmm3
- movdqa %xmm4,512(%rsp)
- movdqa %xmm4,528(%rsp)
- movdqa %xmm4,608(%rsp)
- movdqa %xmm4,624(%rsp)
- movdqa %xmm0,544(%rsp)
- movdqa %xmm1,560(%rsp)
- movdqa %xmm2,576(%rsp)
- movdqa %xmm3,592(%rsp)
-
-
- movdqu 0(%rdx),%xmm0
- movdqu 16(%rdx),%xmm1
- movdqu 32(%rdx),%xmm2
- movdqu 48(%rdx),%xmm3
-
- leaq 384(%rsp),%rbx
- movq %rbx,136(%rsp)
- call mont_reduce
-
-
- leaq 448(%rsp),%rcx
- xorq %rax,%rax
- movq %rax,0(%rcx)
- movq %rax,8(%rcx)
- movq %rax,24(%rcx)
- movq %rax,32(%rcx)
- movq %rax,40(%rcx)
- movq %rax,48(%rcx)
- movq %rax,56(%rcx)
- movq %rax,128(%rsp)
- movq $1,16(%rcx)
-
- leaq 640(%rsp),%rbp
- movq %rcx,%rsi
- movq %rbp,%rdi
- movq $8,%rax
-loop_0:
- movq (%rcx),%rbx
- movw %bx,(%rdi)
- shrq $16,%rbx
- movw %bx,64(%rdi)
- shrq $16,%rbx
- movw %bx,128(%rdi)
- shrq $16,%rbx
- movw %bx,192(%rdi)
- leaq 8(%rcx),%rcx
- leaq 256(%rdi),%rdi
- decq %rax
- jnz loop_0
- movq $31,%rax
- movq %rax,32(%rsp)
- movq %rbp,40(%rsp)
-
- movq %rsi,136(%rsp)
- movq 0(%rsi),%r10
- movq 8(%rsi),%r11
- movq 16(%rsi),%r12
- movq 24(%rsi),%r13
- movq 32(%rsi),%r14
- movq 40(%rsi),%r15
- movq 48(%rsi),%r8
- movq 56(%rsi),%r9
-init_loop:
- leaq 384(%rsp),%rdi
- call mont_mul_a3b
- leaq 448(%rsp),%rsi
- movq 40(%rsp),%rbp
- addq $2,%rbp
- movq %rbp,40(%rsp)
- movq %rsi,%rcx
- movq $8,%rax
-loop_1:
- movq (%rcx),%rbx
- movw %bx,(%rbp)
- shrq $16,%rbx
- movw %bx,64(%rbp)
- shrq $16,%rbx
- movw %bx,128(%rbp)
- shrq $16,%rbx
- movw %bx,192(%rbp)
- leaq 8(%rcx),%rcx
- leaq 256(%rbp),%rbp
- decq %rax
- jnz loop_1
- movq 32(%rsp),%rax
- subq $1,%rax
- movq %rax,32(%rsp)
- jne init_loop
-
-
-
- movdqa %xmm0,64(%rsp)
- movdqa %xmm1,80(%rsp)
- movdqa %xmm2,96(%rsp)
- movdqa %xmm3,112(%rsp)
-
-
-
-
-
- movl 126(%rsp),%eax
- movq %rax,%rdx
- shrq $11,%rax
- andl $2047,%edx
- movl %edx,126(%rsp)
- leaq 640(%rsp,%rax,2),%rsi
- movq 8(%rsp),%rdx
- movq $4,%rbp
-loop_2:
- movzwq 192(%rsi),%rbx
- movzwq 448(%rsi),%rax
- shlq $16,%rbx
- shlq $16,%rax
- movw 128(%rsi),%bx
- movw 384(%rsi),%ax
- shlq $16,%rbx
- shlq $16,%rax
- movw 64(%rsi),%bx
- movw 320(%rsi),%ax
- shlq $16,%rbx
- shlq $16,%rax
- movw 0(%rsi),%bx
- movw 256(%rsi),%ax
- movq %rbx,0(%rdx)
- movq %rax,8(%rdx)
- leaq 512(%rsi),%rsi
- leaq 16(%rdx),%rdx
- subq $1,%rbp
- jnz loop_2
- movq $505,48(%rsp)
-
- movq 8(%rsp),%rcx
- movq %rcx,136(%rsp)
- movq 0(%rcx),%r10
- movq 8(%rcx),%r11
- movq 16(%rcx),%r12
- movq 24(%rcx),%r13
- movq 32(%rcx),%r14
- movq 40(%rcx),%r15
- movq 48(%rcx),%r8
- movq 56(%rcx),%r9
- jmp sqr_2
-
-main_loop_a3b:
- call sqr_reduce
- call sqr_reduce
- call sqr_reduce
-sqr_2:
- call sqr_reduce
- call sqr_reduce
-
-
-
- movq 48(%rsp),%rcx
- movq %rcx,%rax
- shrq $4,%rax
- movl 64(%rsp,%rax,2),%edx
- andq $15,%rcx
- shrq %cl,%rdx
- andq $31,%rdx
-
- leaq 640(%rsp,%rdx,2),%rsi
- leaq 448(%rsp),%rdx
- movq %rdx,%rdi
- movq $4,%rbp
-loop_3:
- movzwq 192(%rsi),%rbx
- movzwq 448(%rsi),%rax
- shlq $16,%rbx
- shlq $16,%rax
- movw 128(%rsi),%bx
- movw 384(%rsi),%ax
- shlq $16,%rbx
- shlq $16,%rax
- movw 64(%rsi),%bx
- movw 320(%rsi),%ax
- shlq $16,%rbx
- shlq $16,%rax
- movw 0(%rsi),%bx
- movw 256(%rsi),%ax
- movq %rbx,0(%rdx)
- movq %rax,8(%rdx)
- leaq 512(%rsi),%rsi
- leaq 16(%rdx),%rdx
- subq $1,%rbp
- jnz loop_3
- movq 8(%rsp),%rsi
- call mont_mul_a3b
-
-
-
- movq 48(%rsp),%rcx
- subq $5,%rcx
- movq %rcx,48(%rsp)
- jge main_loop_a3b
-
-
-
-end_main_loop_a3b:
-
-
- movq 8(%rsp),%rdx
- pxor %xmm4,%xmm4
- movdqu 0(%rdx),%xmm0
- movdqu 16(%rdx),%xmm1
- movdqu 32(%rdx),%xmm2
- movdqu 48(%rdx),%xmm3
- movdqa %xmm4,576(%rsp)
- movdqa %xmm4,592(%rsp)
- movdqa %xmm4,608(%rsp)
- movdqa %xmm4,624(%rsp)
- movdqa %xmm0,512(%rsp)
- movdqa %xmm1,528(%rsp)
- movdqa %xmm2,544(%rsp)
- movdqa %xmm3,560(%rsp)
- call mont_reduce
-
-
-
- movq 8(%rsp),%rax
- movq 0(%rax),%r8
- movq 8(%rax),%r9
- movq 16(%rax),%r10
- movq 24(%rax),%r11
- movq 32(%rax),%r12
- movq 40(%rax),%r13
- movq 48(%rax),%r14
- movq 56(%rax),%r15
-
-
- movq 24(%rsp),%rbx
- addq $512,%rbx
-
- subq 0(%rbx),%r8
- sbbq 8(%rbx),%r9
- sbbq 16(%rbx),%r10
- sbbq 24(%rbx),%r11
- sbbq 32(%rbx),%r12
- sbbq 40(%rbx),%r13
- sbbq 48(%rbx),%r14
- sbbq 56(%rbx),%r15
-
-
- movq 0(%rax),%rsi
- movq 8(%rax),%rdi
- movq 16(%rax),%rcx
- movq 24(%rax),%rdx
- cmovncq %r8,%rsi
- cmovncq %r9,%rdi
- cmovncq %r10,%rcx
- cmovncq %r11,%rdx
- movq %rsi,0(%rax)
- movq %rdi,8(%rax)
- movq %rcx,16(%rax)
- movq %rdx,24(%rax)
-
- movq 32(%rax),%rsi
- movq 40(%rax),%rdi
- movq 48(%rax),%rcx
- movq 56(%rax),%rdx
- cmovncq %r12,%rsi
- cmovncq %r13,%rdi
- cmovncq %r14,%rcx
- cmovncq %r15,%rdx
- movq %rsi,32(%rax)
- movq %rdi,40(%rax)
- movq %rcx,48(%rax)
- movq %rdx,56(%rax)
-
- movq 0(%rsp),%rsi
- movq 0(%rsi),%r15
- movq 8(%rsi),%r14
- movq 16(%rsi),%r13
- movq 24(%rsi),%r12
- movq 32(%rsi),%rbx
- movq 40(%rsi),%rbp
- leaq 48(%rsi),%rsp
-.Lepilogue:
- .byte 0xf3,0xc3
-.size mod_exp_512, . - mod_exp_512
diff --git a/secure/lib/libcrypto/amd64/rc4-x86_64.S b/secure/lib/libcrypto/amd64/rc4-x86_64.S
index c561af7..c51ca89 100644
--- a/secure/lib/libcrypto/amd64/rc4-x86_64.S
+++ b/secure/lib/libcrypto/amd64/rc4-x86_64.S
@@ -48,7 +48,7 @@ RC4: orq %rsi,%rsi
movl (%rdi,%rax,4),%edx
movl (%rdi,%r10,4),%eax
xorb (%r12),%dl
- movb %dl,(%r13,%r12,1)
+ movb %dl,(%r12,%r13,1)
leaq 1(%r12),%r12
decq %rbx
jnz .Loop8_warmup
@@ -127,7 +127,7 @@ RC4: orq %rsi,%rsi
subq $8,%r11
xorq (%r12),%r8
- movq %r8,(%r13,%r12,1)
+ movq %r8,(%r12,%r13,1)
leaq 8(%r12),%r12
testq $-8,%r11
@@ -153,7 +153,7 @@ RC4: orq %rsi,%rsi
movl (%rdi,%rax,4),%edx
movl (%rdi,%r10,4),%eax
xorb (%r12),%dl
- movb %dl,(%r13,%r12,1)
+ movb %dl,(%r12,%r13,1)
leaq 1(%r12),%r12
decq %rbx
jnz .Loop16_warmup
@@ -190,7 +190,7 @@ RC4: orq %rsi,%rsi
pxor %xmm1,%xmm2
addb %bl,%cl
pinsrw $0,(%rdi,%rax,4),%xmm0
- movdqu %xmm2,(%r13,%r12,1)
+ movdqu %xmm2,(%r12,%r13,1)
leaq 16(%r12),%r12
.Loop16_enter:
movl (%rdi,%rcx,4),%edx
@@ -326,7 +326,7 @@ RC4: orq %rsi,%rsi
psllq $8,%xmm1
pxor %xmm0,%xmm2
pxor %xmm1,%xmm2
- movdqu %xmm2,(%r13,%r12,1)
+ movdqu %xmm2,(%r12,%r13,1)
leaq 16(%r12),%r12
cmpq $0,%r11
@@ -344,7 +344,7 @@ RC4: orq %rsi,%rsi
movl (%rdi,%rax,4),%edx
movl (%rdi,%r10,4),%eax
xorb (%r12),%dl
- movb %dl,(%r13,%r12,1)
+ movb %dl,(%r12,%r13,1)
leaq 1(%r12),%r12
decq %r11
jnz .Lloop1
@@ -369,7 +369,7 @@ RC4: orq %rsi,%rsi
movb %al,(%rdi,%rcx,1)
cmpq %rsi,%rcx
movb %dl,(%rdi,%r10,1)
- jne .Lcmov0
+ jne .Lcmov0
movq %rax,%rbx
.Lcmov0:
addb %al,%dl
@@ -383,7 +383,7 @@ RC4: orq %rsi,%rsi
movb %bl,(%rdi,%rcx,1)
cmpq %r10,%rcx
movb %dl,(%rdi,%rsi,1)
- jne .Lcmov1
+ jne .Lcmov1
movq %rbx,%rax
.Lcmov1:
addb %bl,%dl
@@ -397,7 +397,7 @@ RC4: orq %rsi,%rsi
movb %al,(%rdi,%rcx,1)
cmpq %rsi,%rcx
movb %dl,(%rdi,%r10,1)
- jne .Lcmov2
+ jne .Lcmov2
movq %rax,%rbx
.Lcmov2:
addb %al,%dl
@@ -411,7 +411,7 @@ RC4: orq %rsi,%rsi
movb %bl,(%rdi,%rcx,1)
cmpq %r10,%rcx
movb %dl,(%rdi,%rsi,1)
- jne .Lcmov3
+ jne .Lcmov3
movq %rbx,%rax
.Lcmov3:
addb %bl,%dl
@@ -425,7 +425,7 @@ RC4: orq %rsi,%rsi
movb %al,(%rdi,%rcx,1)
cmpq %rsi,%rcx
movb %dl,(%rdi,%r10,1)
- jne .Lcmov4
+ jne .Lcmov4
movq %rax,%rbx
.Lcmov4:
addb %al,%dl
@@ -439,7 +439,7 @@ RC4: orq %rsi,%rsi
movb %bl,(%rdi,%rcx,1)
cmpq %r10,%rcx
movb %dl,(%rdi,%rsi,1)
- jne .Lcmov5
+ jne .Lcmov5
movq %rbx,%rax
.Lcmov5:
addb %bl,%dl
@@ -453,7 +453,7 @@ RC4: orq %rsi,%rsi
movb %al,(%rdi,%rcx,1)
cmpq %rsi,%rcx
movb %dl,(%rdi,%r10,1)
- jne .Lcmov6
+ jne .Lcmov6
movq %rax,%rbx
.Lcmov6:
addb %al,%dl
@@ -467,7 +467,7 @@ RC4: orq %rsi,%rsi
movb %bl,(%rdi,%rcx,1)
cmpq %r10,%rcx
movb %dl,(%rdi,%rsi,1)
- jne .Lcmov7
+ jne .Lcmov7
movq %rbx,%rax
.Lcmov7:
addb %bl,%dl
diff --git a/secure/lib/libcrypto/amd64/rsaz-avx2.S b/secure/lib/libcrypto/amd64/rsaz-avx2.S
new file mode 100644
index 0000000..ba13765
--- /dev/null
+++ b/secure/lib/libcrypto/amd64/rsaz-avx2.S
@@ -0,0 +1,26 @@
+ # $FreeBSD$
+.text
+
+.globl rsaz_avx2_eligible
+.type rsaz_avx2_eligible,@function
+rsaz_avx2_eligible:
+ xorl %eax,%eax
+ .byte 0xf3,0xc3
+.size rsaz_avx2_eligible,.-rsaz_avx2_eligible
+
+.globl rsaz_1024_sqr_avx2
+.globl rsaz_1024_mul_avx2
+.globl rsaz_1024_norm2red_avx2
+.globl rsaz_1024_red2norm_avx2
+.globl rsaz_1024_scatter5_avx2
+.globl rsaz_1024_gather5_avx2
+.type rsaz_1024_sqr_avx2,@function
+rsaz_1024_sqr_avx2:
+rsaz_1024_mul_avx2:
+rsaz_1024_norm2red_avx2:
+rsaz_1024_red2norm_avx2:
+rsaz_1024_scatter5_avx2:
+rsaz_1024_gather5_avx2:
+.byte 0x0f,0x0b
+ .byte 0xf3,0xc3
+.size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
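The new rsaz-avx2.S is the fallback flavor of the module: rsaz_avx2_eligible returns 0 (xorl %eax,%eax) and the six rsaz_1024_*_avx2 entry points share one body consisting of ud2 (the .byte 0x0f,0x0b) followed by ret, so they fault if ever reached. This variant is presumably what the generator emits when the assembler cannot handle the AVX2 code; callers are expected to gate on the eligibility probe. A C rendering of that contract, with the _sketch suffix marking illustrative stand-ins:

#include <stdio.h>

int rsaz_avx2_eligible_sketch(void)
{
    return 0;                       /* xorl %eax,%eax; ret */
}

void rsaz_1024_sqr_avx2_sketch(void)
{
    __builtin_trap();               /* ud2: must never be reached */
}

int main(void)
{
    if (rsaz_avx2_eligible_sketch())
        rsaz_1024_sqr_avx2_sketch();    /* never taken with this stub */
    else
        puts("AVX2 RSAZ disabled; generic rsaz-x86_64 path is used");
    return 0;
}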
diff --git a/secure/lib/libcrypto/amd64/rsaz-x86_64.S b/secure/lib/libcrypto/amd64/rsaz-x86_64.S
new file mode 100644
index 0000000..efd229a
--- /dev/null
+++ b/secure/lib/libcrypto/amd64/rsaz-x86_64.S
@@ -0,0 +1,1118 @@
+ # $FreeBSD$
+.text
+
+
+
+.globl rsaz_512_sqr
+.type rsaz_512_sqr,@function
+.align 32
+rsaz_512_sqr:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+ subq $128+24,%rsp
+.Lsqr_body:
+ movq %rdx,%rbp
+ movq (%rsi),%rdx
+ movq 8(%rsi),%rax
+ movq %rcx,128(%rsp)
+ jmp .Loop_sqr
+
+.align 32
+.Loop_sqr:
+ movl %r8d,128+8(%rsp)
+
+ movq %rdx,%rbx
+ mulq %rdx
+ movq %rax,%r8
+ movq 16(%rsi),%rax
+ movq %rdx,%r9
+
+ mulq %rbx
+ addq %rax,%r9
+ movq 24(%rsi),%rax
+ movq %rdx,%r10
+ adcq $0,%r10
+
+ mulq %rbx
+ addq %rax,%r10
+ movq 32(%rsi),%rax
+ movq %rdx,%r11
+ adcq $0,%r11
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 40(%rsi),%rax
+ movq %rdx,%r12
+ adcq $0,%r12
+
+ mulq %rbx
+ addq %rax,%r12
+ movq 48(%rsi),%rax
+ movq %rdx,%r13
+ adcq $0,%r13
+
+ mulq %rbx
+ addq %rax,%r13
+ movq 56(%rsi),%rax
+ movq %rdx,%r14
+ adcq $0,%r14
+
+ mulq %rbx
+ addq %rax,%r14
+ movq %rbx,%rax
+ movq %rdx,%r15
+ adcq $0,%r15
+
+ addq %r8,%r8
+ movq %r9,%rcx
+ adcq %r9,%r9
+
+ mulq %rax
+ movq %rax,(%rsp)
+ addq %rdx,%r8
+ adcq $0,%r9
+
+ movq %r8,8(%rsp)
+ shrq $63,%rcx
+
+
+ movq 8(%rsi),%r8
+ movq 16(%rsi),%rax
+ mulq %r8
+ addq %rax,%r10
+ movq 24(%rsi),%rax
+ movq %rdx,%rbx
+ adcq $0,%rbx
+
+ mulq %r8
+ addq %rax,%r11
+ movq 32(%rsi),%rax
+ adcq $0,%rdx
+ addq %rbx,%r11
+ movq %rdx,%rbx
+ adcq $0,%rbx
+
+ mulq %r8
+ addq %rax,%r12
+ movq 40(%rsi),%rax
+ adcq $0,%rdx
+ addq %rbx,%r12
+ movq %rdx,%rbx
+ adcq $0,%rbx
+
+ mulq %r8
+ addq %rax,%r13
+ movq 48(%rsi),%rax
+ adcq $0,%rdx
+ addq %rbx,%r13
+ movq %rdx,%rbx
+ adcq $0,%rbx
+
+ mulq %r8
+ addq %rax,%r14
+ movq 56(%rsi),%rax
+ adcq $0,%rdx
+ addq %rbx,%r14
+ movq %rdx,%rbx
+ adcq $0,%rbx
+
+ mulq %r8
+ addq %rax,%r15
+ movq %r8,%rax
+ adcq $0,%rdx
+ addq %rbx,%r15
+ movq %rdx,%r8
+ movq %r10,%rdx
+ adcq $0,%r8
+
+ addq %rdx,%rdx
+ leaq (%rcx,%r10,2),%r10
+ movq %r11,%rbx
+ adcq %r11,%r11
+
+ mulq %rax
+ addq %rax,%r9
+ adcq %rdx,%r10
+ adcq $0,%r11
+
+ movq %r9,16(%rsp)
+ movq %r10,24(%rsp)
+ shrq $63,%rbx
+
+
+ movq 16(%rsi),%r9
+ movq 24(%rsi),%rax
+ mulq %r9
+ addq %rax,%r12
+ movq 32(%rsi),%rax
+ movq %rdx,%rcx
+ adcq $0,%rcx
+
+ mulq %r9
+ addq %rax,%r13
+ movq 40(%rsi),%rax
+ adcq $0,%rdx
+ addq %rcx,%r13
+ movq %rdx,%rcx
+ adcq $0,%rcx
+
+ mulq %r9
+ addq %rax,%r14
+ movq 48(%rsi),%rax
+ adcq $0,%rdx
+ addq %rcx,%r14
+ movq %rdx,%rcx
+ adcq $0,%rcx
+
+ mulq %r9
+ movq %r12,%r10
+ leaq (%rbx,%r12,2),%r12
+ addq %rax,%r15
+ movq 56(%rsi),%rax
+ adcq $0,%rdx
+ addq %rcx,%r15
+ movq %rdx,%rcx
+ adcq $0,%rcx
+
+ mulq %r9
+ shrq $63,%r10
+ addq %rax,%r8
+ movq %r9,%rax
+ adcq $0,%rdx
+ addq %rcx,%r8
+ movq %rdx,%r9
+ adcq $0,%r9
+
+ movq %r13,%rcx
+ leaq (%r10,%r13,2),%r13
+
+ mulq %rax
+ addq %rax,%r11
+ adcq %rdx,%r12
+ adcq $0,%r13
+
+ movq %r11,32(%rsp)
+ movq %r12,40(%rsp)
+ shrq $63,%rcx
+
+
+ movq 24(%rsi),%r10
+ movq 32(%rsi),%rax
+ mulq %r10
+ addq %rax,%r14
+ movq 40(%rsi),%rax
+ movq %rdx,%rbx
+ adcq $0,%rbx
+
+ mulq %r10
+ addq %rax,%r15
+ movq 48(%rsi),%rax
+ adcq $0,%rdx
+ addq %rbx,%r15
+ movq %rdx,%rbx
+ adcq $0,%rbx
+
+ mulq %r10
+ movq %r14,%r12
+ leaq (%rcx,%r14,2),%r14
+ addq %rax,%r8
+ movq 56(%rsi),%rax
+ adcq $0,%rdx
+ addq %rbx,%r8
+ movq %rdx,%rbx
+ adcq $0,%rbx
+
+ mulq %r10
+ shrq $63,%r12
+ addq %rax,%r9
+ movq %r10,%rax
+ adcq $0,%rdx
+ addq %rbx,%r9
+ movq %rdx,%r10
+ adcq $0,%r10
+
+ movq %r15,%rbx
+ leaq (%r12,%r15,2),%r15
+
+ mulq %rax
+ addq %rax,%r13
+ adcq %rdx,%r14
+ adcq $0,%r15
+
+ movq %r13,48(%rsp)
+ movq %r14,56(%rsp)
+ shrq $63,%rbx
+
+
+ movq 32(%rsi),%r11
+ movq 40(%rsi),%rax
+ mulq %r11
+ addq %rax,%r8
+ movq 48(%rsi),%rax
+ movq %rdx,%rcx
+ adcq $0,%rcx
+
+ mulq %r11
+ addq %rax,%r9
+ movq 56(%rsi),%rax
+ adcq $0,%rdx
+ movq %r8,%r12
+ leaq (%rbx,%r8,2),%r8
+ addq %rcx,%r9
+ movq %rdx,%rcx
+ adcq $0,%rcx
+
+ mulq %r11
+ shrq $63,%r12
+ addq %rax,%r10
+ movq %r11,%rax
+ adcq $0,%rdx
+ addq %rcx,%r10
+ movq %rdx,%r11
+ adcq $0,%r11
+
+ movq %r9,%rcx
+ leaq (%r12,%r9,2),%r9
+
+ mulq %rax
+ addq %rax,%r15
+ adcq %rdx,%r8
+ adcq $0,%r9
+
+ movq %r15,64(%rsp)
+ movq %r8,72(%rsp)
+ shrq $63,%rcx
+
+
+ movq 40(%rsi),%r12
+ movq 48(%rsi),%rax
+ mulq %r12
+ addq %rax,%r10
+ movq 56(%rsi),%rax
+ movq %rdx,%rbx
+ adcq $0,%rbx
+
+ mulq %r12
+ addq %rax,%r11
+ movq %r12,%rax
+ movq %r10,%r15
+ leaq (%rcx,%r10,2),%r10
+ adcq $0,%rdx
+ shrq $63,%r15
+ addq %rbx,%r11
+ movq %rdx,%r12
+ adcq $0,%r12
+
+ movq %r11,%rbx
+ leaq (%r15,%r11,2),%r11
+
+ mulq %rax
+ addq %rax,%r9
+ adcq %rdx,%r10
+ adcq $0,%r11
+
+ movq %r9,80(%rsp)
+ movq %r10,88(%rsp)
+
+
+ movq 48(%rsi),%r13
+ movq 56(%rsi),%rax
+ mulq %r13
+ addq %rax,%r12
+ movq %r13,%rax
+ movq %rdx,%r13
+ adcq $0,%r13
+
+ xorq %r14,%r14
+ shlq $1,%rbx
+ adcq %r12,%r12
+ adcq %r13,%r13
+ adcq %r14,%r14
+
+ mulq %rax
+ addq %rax,%r11
+ adcq %rdx,%r12
+ adcq $0,%r13
+
+ movq %r11,96(%rsp)
+ movq %r12,104(%rsp)
+
+
+ movq 56(%rsi),%rax
+ mulq %rax
+ addq %rax,%r13
+ adcq $0,%rdx
+
+ addq %rdx,%r14
+
+ movq %r13,112(%rsp)
+ movq %r14,120(%rsp)
+
+ movq (%rsp),%r8
+ movq 8(%rsp),%r9
+ movq 16(%rsp),%r10
+ movq 24(%rsp),%r11
+ movq 32(%rsp),%r12
+ movq 40(%rsp),%r13
+ movq 48(%rsp),%r14
+ movq 56(%rsp),%r15
+
+ call __rsaz_512_reduce
+
+ addq 64(%rsp),%r8
+ adcq 72(%rsp),%r9
+ adcq 80(%rsp),%r10
+ adcq 88(%rsp),%r11
+ adcq 96(%rsp),%r12
+ adcq 104(%rsp),%r13
+ adcq 112(%rsp),%r14
+ adcq 120(%rsp),%r15
+ sbbq %rcx,%rcx
+
+ call __rsaz_512_subtract
+
+ movq %r8,%rdx
+ movq %r9,%rax
+ movl 128+8(%rsp),%r8d
+ movq %rdi,%rsi
+
+ decl %r8d
+ jnz .Loop_sqr
+
+ leaq 128+24+48(%rsp),%rax
+ movq -48(%rax),%r15
+ movq -40(%rax),%r14
+ movq -32(%rax),%r13
+ movq -24(%rax),%r12
+ movq -16(%rax),%rbp
+ movq -8(%rax),%rbx
+ leaq (%rax),%rsp
+.Lsqr_epilogue:
+ .byte 0xf3,0xc3
+.size rsaz_512_sqr,.-rsaz_512_sqr
+.globl rsaz_512_mul
+.type rsaz_512_mul,@function
+.align 32
+rsaz_512_mul:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+ subq $128+24,%rsp
+.Lmul_body:
+.byte 102,72,15,110,199
+.byte 102,72,15,110,201
+ movq %r8,128(%rsp)
+ movq (%rdx),%rbx
+ movq %rdx,%rbp
+ call __rsaz_512_mul
+
+.byte 102,72,15,126,199
+.byte 102,72,15,126,205
+
+ movq (%rsp),%r8
+ movq 8(%rsp),%r9
+ movq 16(%rsp),%r10
+ movq 24(%rsp),%r11
+ movq 32(%rsp),%r12
+ movq 40(%rsp),%r13
+ movq 48(%rsp),%r14
+ movq 56(%rsp),%r15
+
+ call __rsaz_512_reduce
+ addq 64(%rsp),%r8
+ adcq 72(%rsp),%r9
+ adcq 80(%rsp),%r10
+ adcq 88(%rsp),%r11
+ adcq 96(%rsp),%r12
+ adcq 104(%rsp),%r13
+ adcq 112(%rsp),%r14
+ adcq 120(%rsp),%r15
+ sbbq %rcx,%rcx
+
+ call __rsaz_512_subtract
+
+ leaq 128+24+48(%rsp),%rax
+ movq -48(%rax),%r15
+ movq -40(%rax),%r14
+ movq -32(%rax),%r13
+ movq -24(%rax),%r12
+ movq -16(%rax),%rbp
+ movq -8(%rax),%rbx
+ leaq (%rax),%rsp
+.Lmul_epilogue:
+ .byte 0xf3,0xc3
+.size rsaz_512_mul,.-rsaz_512_mul
+.globl rsaz_512_mul_gather4
+.type rsaz_512_mul_gather4,@function
+.align 32
+rsaz_512_mul_gather4:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+ movl %r9d,%r9d
+ subq $128+24,%rsp
+.Lmul_gather4_body:
+ movl 64(%rdx,%r9,4),%eax
+.byte 102,72,15,110,199
+ movl (%rdx,%r9,4),%ebx
+.byte 102,72,15,110,201
+ movq %r8,128(%rsp)
+
+ shlq $32,%rax
+ orq %rax,%rbx
+ movq (%rsi),%rax
+ movq 8(%rsi),%rcx
+ leaq 128(%rdx,%r9,4),%rbp
+ mulq %rbx
+ movq %rax,(%rsp)
+ movq %rcx,%rax
+ movq %rdx,%r8
+
+ mulq %rbx
+ movd (%rbp),%xmm4
+ addq %rax,%r8
+ movq 16(%rsi),%rax
+ movq %rdx,%r9
+ adcq $0,%r9
+
+ mulq %rbx
+ movd 64(%rbp),%xmm5
+ addq %rax,%r9
+ movq 24(%rsi),%rax
+ movq %rdx,%r10
+ adcq $0,%r10
+
+ mulq %rbx
+ pslldq $4,%xmm5
+ addq %rax,%r10
+ movq 32(%rsi),%rax
+ movq %rdx,%r11
+ adcq $0,%r11
+
+ mulq %rbx
+ por %xmm5,%xmm4
+ addq %rax,%r11
+ movq 40(%rsi),%rax
+ movq %rdx,%r12
+ adcq $0,%r12
+
+ mulq %rbx
+ addq %rax,%r12
+ movq 48(%rsi),%rax
+ movq %rdx,%r13
+ adcq $0,%r13
+
+ mulq %rbx
+ leaq 128(%rbp),%rbp
+ addq %rax,%r13
+ movq 56(%rsi),%rax
+ movq %rdx,%r14
+ adcq $0,%r14
+
+ mulq %rbx
+.byte 102,72,15,126,227
+ addq %rax,%r14
+ movq (%rsi),%rax
+ movq %rdx,%r15
+ adcq $0,%r15
+
+ leaq 8(%rsp),%rdi
+ movl $7,%ecx
+ jmp .Loop_mul_gather
+
+.align 32
+.Loop_mul_gather:
+ mulq %rbx
+ addq %rax,%r8
+ movq 8(%rsi),%rax
+ movq %r8,(%rdi)
+ movq %rdx,%r8
+ adcq $0,%r8
+
+ mulq %rbx
+ movd (%rbp),%xmm4
+ addq %rax,%r9
+ movq 16(%rsi),%rax
+ adcq $0,%rdx
+ addq %r9,%r8
+ movq %rdx,%r9
+ adcq $0,%r9
+
+ mulq %rbx
+ movd 64(%rbp),%xmm5
+ addq %rax,%r10
+ movq 24(%rsi),%rax
+ adcq $0,%rdx
+ addq %r10,%r9
+ movq %rdx,%r10
+ adcq $0,%r10
+
+ mulq %rbx
+ pslldq $4,%xmm5
+ addq %rax,%r11
+ movq 32(%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%r10
+ movq %rdx,%r11
+ adcq $0,%r11
+
+ mulq %rbx
+ por %xmm5,%xmm4
+ addq %rax,%r12
+ movq 40(%rsi),%rax
+ adcq $0,%rdx
+ addq %r12,%r11
+ movq %rdx,%r12
+ adcq $0,%r12
+
+ mulq %rbx
+ addq %rax,%r13
+ movq 48(%rsi),%rax
+ adcq $0,%rdx
+ addq %r13,%r12
+ movq %rdx,%r13
+ adcq $0,%r13
+
+ mulq %rbx
+ addq %rax,%r14
+ movq 56(%rsi),%rax
+ adcq $0,%rdx
+ addq %r14,%r13
+ movq %rdx,%r14
+ adcq $0,%r14
+
+ mulq %rbx
+.byte 102,72,15,126,227
+ addq %rax,%r15
+ movq (%rsi),%rax
+ adcq $0,%rdx
+ addq %r15,%r14
+ movq %rdx,%r15
+ adcq $0,%r15
+
+ leaq 128(%rbp),%rbp
+ leaq 8(%rdi),%rdi
+
+ decl %ecx
+ jnz .Loop_mul_gather
+
+ movq %r8,(%rdi)
+ movq %r9,8(%rdi)
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+ movq %r12,32(%rdi)
+ movq %r13,40(%rdi)
+ movq %r14,48(%rdi)
+ movq %r15,56(%rdi)
+
+.byte 102,72,15,126,199
+.byte 102,72,15,126,205
+
+ movq (%rsp),%r8
+ movq 8(%rsp),%r9
+ movq 16(%rsp),%r10
+ movq 24(%rsp),%r11
+ movq 32(%rsp),%r12
+ movq 40(%rsp),%r13
+ movq 48(%rsp),%r14
+ movq 56(%rsp),%r15
+
+ call __rsaz_512_reduce
+ addq 64(%rsp),%r8
+ adcq 72(%rsp),%r9
+ adcq 80(%rsp),%r10
+ adcq 88(%rsp),%r11
+ adcq 96(%rsp),%r12
+ adcq 104(%rsp),%r13
+ adcq 112(%rsp),%r14
+ adcq 120(%rsp),%r15
+ sbbq %rcx,%rcx
+
+ call __rsaz_512_subtract
+
+ leaq 128+24+48(%rsp),%rax
+ movq -48(%rax),%r15
+ movq -40(%rax),%r14
+ movq -32(%rax),%r13
+ movq -24(%rax),%r12
+ movq -16(%rax),%rbp
+ movq -8(%rax),%rbx
+ leaq (%rax),%rsp
+.Lmul_gather4_epilogue:
+ .byte 0xf3,0xc3
+.size rsaz_512_mul_gather4,.-rsaz_512_mul_gather4
+.globl rsaz_512_mul_scatter4
+.type rsaz_512_mul_scatter4,@function
+.align 32
+rsaz_512_mul_scatter4:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+ movl %r9d,%r9d
+ subq $128+24,%rsp
+.Lmul_scatter4_body:
+ leaq (%r8,%r9,4),%r8
+.byte 102,72,15,110,199
+.byte 102,72,15,110,202
+.byte 102,73,15,110,208
+ movq %rcx,128(%rsp)
+
+ movq %rdi,%rbp
+ movq (%rdi),%rbx
+ call __rsaz_512_mul
+
+.byte 102,72,15,126,199
+.byte 102,72,15,126,205
+
+ movq (%rsp),%r8
+ movq 8(%rsp),%r9
+ movq 16(%rsp),%r10
+ movq 24(%rsp),%r11
+ movq 32(%rsp),%r12
+ movq 40(%rsp),%r13
+ movq 48(%rsp),%r14
+ movq 56(%rsp),%r15
+
+ call __rsaz_512_reduce
+ addq 64(%rsp),%r8
+ adcq 72(%rsp),%r9
+ adcq 80(%rsp),%r10
+ adcq 88(%rsp),%r11
+ adcq 96(%rsp),%r12
+ adcq 104(%rsp),%r13
+ adcq 112(%rsp),%r14
+ adcq 120(%rsp),%r15
+.byte 102,72,15,126,214
+ sbbq %rcx,%rcx
+
+ call __rsaz_512_subtract
+
+ movl %r8d,0(%rsi)
+ shrq $32,%r8
+ movl %r9d,128(%rsi)
+ shrq $32,%r9
+ movl %r10d,256(%rsi)
+ shrq $32,%r10
+ movl %r11d,384(%rsi)
+ shrq $32,%r11
+ movl %r12d,512(%rsi)
+ shrq $32,%r12
+ movl %r13d,640(%rsi)
+ shrq $32,%r13
+ movl %r14d,768(%rsi)
+ shrq $32,%r14
+ movl %r15d,896(%rsi)
+ shrq $32,%r15
+ movl %r8d,64(%rsi)
+ movl %r9d,192(%rsi)
+ movl %r10d,320(%rsi)
+ movl %r11d,448(%rsi)
+ movl %r12d,576(%rsi)
+ movl %r13d,704(%rsi)
+ movl %r14d,832(%rsi)
+ movl %r15d,960(%rsi)
+
+ leaq 128+24+48(%rsp),%rax
+ movq -48(%rax),%r15
+ movq -40(%rax),%r14
+ movq -32(%rax),%r13
+ movq -24(%rax),%r12
+ movq -16(%rax),%rbp
+ movq -8(%rax),%rbx
+ leaq (%rax),%rsp
+.Lmul_scatter4_epilogue:
+ .byte 0xf3,0xc3
+.size rsaz_512_mul_scatter4,.-rsaz_512_mul_scatter4
+.globl rsaz_512_mul_by_one
+.type rsaz_512_mul_by_one,@function
+.align 32
+rsaz_512_mul_by_one:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+ subq $128+24,%rsp
+.Lmul_by_one_body:
+ movq %rdx,%rbp
+ movq %rcx,128(%rsp)
+
+ movq (%rsi),%r8
+ pxor %xmm0,%xmm0
+ movq 8(%rsi),%r9
+ movq 16(%rsi),%r10
+ movq 24(%rsi),%r11
+ movq 32(%rsi),%r12
+ movq 40(%rsi),%r13
+ movq 48(%rsi),%r14
+ movq 56(%rsi),%r15
+
+ movdqa %xmm0,(%rsp)
+ movdqa %xmm0,16(%rsp)
+ movdqa %xmm0,32(%rsp)
+ movdqa %xmm0,48(%rsp)
+ movdqa %xmm0,64(%rsp)
+ movdqa %xmm0,80(%rsp)
+ movdqa %xmm0,96(%rsp)
+ call __rsaz_512_reduce
+ movq %r8,(%rdi)
+ movq %r9,8(%rdi)
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+ movq %r12,32(%rdi)
+ movq %r13,40(%rdi)
+ movq %r14,48(%rdi)
+ movq %r15,56(%rdi)
+
+ leaq 128+24+48(%rsp),%rax
+ movq -48(%rax),%r15
+ movq -40(%rax),%r14
+ movq -32(%rax),%r13
+ movq -24(%rax),%r12
+ movq -16(%rax),%rbp
+ movq -8(%rax),%rbx
+ leaq (%rax),%rsp
+.Lmul_by_one_epilogue:
+ .byte 0xf3,0xc3
+.size rsaz_512_mul_by_one,.-rsaz_512_mul_by_one
+.type __rsaz_512_reduce,@function
+.align 32
+__rsaz_512_reduce:
+ movq %r8,%rbx
+ imulq 128+8(%rsp),%rbx
+ movq 0(%rbp),%rax
+ movl $8,%ecx
+ jmp .Lreduction_loop
+
+.align 32
+.Lreduction_loop:
+ mulq %rbx
+ movq 8(%rbp),%rax
+ negq %r8
+ movq %rdx,%r8
+ adcq $0,%r8
+
+ mulq %rbx
+ addq %rax,%r9
+ movq 16(%rbp),%rax
+ adcq $0,%rdx
+ addq %r9,%r8
+ movq %rdx,%r9
+ adcq $0,%r9
+
+ mulq %rbx
+ addq %rax,%r10
+ movq 24(%rbp),%rax
+ adcq $0,%rdx
+ addq %r10,%r9
+ movq %rdx,%r10
+ adcq $0,%r10
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 32(%rbp),%rax
+ adcq $0,%rdx
+ addq %r11,%r10
+ movq 128+8(%rsp),%rsi
+
+
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbx
+ addq %rax,%r12
+ movq 40(%rbp),%rax
+ adcq $0,%rdx
+ imulq %r8,%rsi
+ addq %r12,%r11
+ movq %rdx,%r12
+ adcq $0,%r12
+
+ mulq %rbx
+ addq %rax,%r13
+ movq 48(%rbp),%rax
+ adcq $0,%rdx
+ addq %r13,%r12
+ movq %rdx,%r13
+ adcq $0,%r13
+
+ mulq %rbx
+ addq %rax,%r14
+ movq 56(%rbp),%rax
+ adcq $0,%rdx
+ addq %r14,%r13
+ movq %rdx,%r14
+ adcq $0,%r14
+
+ mulq %rbx
+ movq %rsi,%rbx
+ addq %rax,%r15
+ movq 0(%rbp),%rax
+ adcq $0,%rdx
+ addq %r15,%r14
+ movq %rdx,%r15
+ adcq $0,%r15
+
+ decl %ecx
+ jne .Lreduction_loop
+
+ .byte 0xf3,0xc3
+.size __rsaz_512_reduce,.-__rsaz_512_reduce
+.type __rsaz_512_subtract,@function
+.align 32
+__rsaz_512_subtract:
+ movq %r8,(%rdi)
+ movq %r9,8(%rdi)
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+ movq %r12,32(%rdi)
+ movq %r13,40(%rdi)
+ movq %r14,48(%rdi)
+ movq %r15,56(%rdi)
+
+ movq 0(%rbp),%r8
+ movq 8(%rbp),%r9
+ negq %r8
+ notq %r9
+ andq %rcx,%r8
+ movq 16(%rbp),%r10
+ andq %rcx,%r9
+ notq %r10
+ movq 24(%rbp),%r11
+ andq %rcx,%r10
+ notq %r11
+ movq 32(%rbp),%r12
+ andq %rcx,%r11
+ notq %r12
+ movq 40(%rbp),%r13
+ andq %rcx,%r12
+ notq %r13
+ movq 48(%rbp),%r14
+ andq %rcx,%r13
+ notq %r14
+ movq 56(%rbp),%r15
+ andq %rcx,%r14
+ notq %r15
+ andq %rcx,%r15
+
+ addq (%rdi),%r8
+ adcq 8(%rdi),%r9
+ adcq 16(%rdi),%r10
+ adcq 24(%rdi),%r11
+ adcq 32(%rdi),%r12
+ adcq 40(%rdi),%r13
+ adcq 48(%rdi),%r14
+ adcq 56(%rdi),%r15
+
+ movq %r8,(%rdi)
+ movq %r9,8(%rdi)
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+ movq %r12,32(%rdi)
+ movq %r13,40(%rdi)
+ movq %r14,48(%rdi)
+ movq %r15,56(%rdi)
+
+ .byte 0xf3,0xc3
+.size __rsaz_512_subtract,.-__rsaz_512_subtract
+.type __rsaz_512_mul,@function
+.align 32
+__rsaz_512_mul:
+ leaq 8(%rsp),%rdi
+
+ movq (%rsi),%rax
+ mulq %rbx
+ movq %rax,(%rdi)
+ movq 8(%rsi),%rax
+ movq %rdx,%r8
+
+ mulq %rbx
+ addq %rax,%r8
+ movq 16(%rsi),%rax
+ movq %rdx,%r9
+ adcq $0,%r9
+
+ mulq %rbx
+ addq %rax,%r9
+ movq 24(%rsi),%rax
+ movq %rdx,%r10
+ adcq $0,%r10
+
+ mulq %rbx
+ addq %rax,%r10
+ movq 32(%rsi),%rax
+ movq %rdx,%r11
+ adcq $0,%r11
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 40(%rsi),%rax
+ movq %rdx,%r12
+ adcq $0,%r12
+
+ mulq %rbx
+ addq %rax,%r12
+ movq 48(%rsi),%rax
+ movq %rdx,%r13
+ adcq $0,%r13
+
+ mulq %rbx
+ addq %rax,%r13
+ movq 56(%rsi),%rax
+ movq %rdx,%r14
+ adcq $0,%r14
+
+ mulq %rbx
+ addq %rax,%r14
+ movq (%rsi),%rax
+ movq %rdx,%r15
+ adcq $0,%r15
+
+ leaq 8(%rbp),%rbp
+ leaq 8(%rdi),%rdi
+
+ movl $7,%ecx
+ jmp .Loop_mul
+
+.align 32
+.Loop_mul:
+ movq (%rbp),%rbx
+ mulq %rbx
+ addq %rax,%r8
+ movq 8(%rsi),%rax
+ movq %r8,(%rdi)
+ movq %rdx,%r8
+ adcq $0,%r8
+
+ mulq %rbx
+ addq %rax,%r9
+ movq 16(%rsi),%rax
+ adcq $0,%rdx
+ addq %r9,%r8
+ movq %rdx,%r9
+ adcq $0,%r9
+
+ mulq %rbx
+ addq %rax,%r10
+ movq 24(%rsi),%rax
+ adcq $0,%rdx
+ addq %r10,%r9
+ movq %rdx,%r10
+ adcq $0,%r10
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 32(%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%r10
+ movq %rdx,%r11
+ adcq $0,%r11
+
+ mulq %rbx
+ addq %rax,%r12
+ movq 40(%rsi),%rax
+ adcq $0,%rdx
+ addq %r12,%r11
+ movq %rdx,%r12
+ adcq $0,%r12
+
+ mulq %rbx
+ addq %rax,%r13
+ movq 48(%rsi),%rax
+ adcq $0,%rdx
+ addq %r13,%r12
+ movq %rdx,%r13
+ adcq $0,%r13
+
+ mulq %rbx
+ addq %rax,%r14
+ movq 56(%rsi),%rax
+ adcq $0,%rdx
+ addq %r14,%r13
+ movq %rdx,%r14
+ leaq 8(%rbp),%rbp
+ adcq $0,%r14
+
+ mulq %rbx
+ addq %rax,%r15
+ movq (%rsi),%rax
+ adcq $0,%rdx
+ addq %r15,%r14
+ movq %rdx,%r15
+ adcq $0,%r15
+
+ leaq 8(%rdi),%rdi
+
+ decl %ecx
+ jnz .Loop_mul
+
+ movq %r8,(%rdi)
+ movq %r9,8(%rdi)
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+ movq %r12,32(%rdi)
+ movq %r13,40(%rdi)
+ movq %r14,48(%rdi)
+ movq %r15,56(%rdi)
+
+ .byte 0xf3,0xc3
+.size __rsaz_512_mul,.-__rsaz_512_mul
+.globl rsaz_512_scatter4
+.type rsaz_512_scatter4,@function
+.align 16
+rsaz_512_scatter4:
+ leaq (%rdi,%rdx,4),%rdi
+ movl $8,%r9d
+ jmp .Loop_scatter
+.align 16
+.Loop_scatter:
+ movq (%rsi),%rax
+ leaq 8(%rsi),%rsi
+ movl %eax,(%rdi)
+ shrq $32,%rax
+ movl %eax,64(%rdi)
+ leaq 128(%rdi),%rdi
+ decl %r9d
+ jnz .Loop_scatter
+ .byte 0xf3,0xc3
+.size rsaz_512_scatter4,.-rsaz_512_scatter4
+
+.globl rsaz_512_gather4
+.type rsaz_512_gather4,@function
+.align 16
+rsaz_512_gather4:
+ leaq (%rsi,%rdx,4),%rsi
+ movl $8,%r9d
+ jmp .Loop_gather
+.align 16
+.Loop_gather:
+ movl (%rsi),%eax
+ movl 64(%rsi),%r8d
+ leaq 128(%rsi),%rsi
+ shlq $32,%r8
+ orq %r8,%rax
+ movq %rax,(%rdi)
+ leaq 8(%rdi),%rdi
+ decl %r9d
+ jnz .Loop_gather
+ .byte 0xf3,0xc3
+.size rsaz_512_gather4,.-rsaz_512_gather4
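rsaz_512_scatter4 and rsaz_512_gather4 above fix the layout of the 16-entry power table also used by rsaz_512_mul_gather4 and rsaz_512_mul_scatter4: each 64-bit limb is split into 32-bit halves stored 64 bytes apart, consecutive limbs sit 128 bytes apart, and the power index selects a 4-byte column, so every 64-byte cache line holds one word from each of the 16 powers and the set of lines touched by a gather does not depend on which power is read. A C model of that addressing (scatter4/gather4 are illustrative names, not the exported symbols):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* 16 powers x 8 limbs x 2 half-words = 1024 bytes, as in the assembly. */
void scatter4(uint32_t tbl[256], const uint64_t val[8], int power)
{
    for (int i = 0; i < 8; i++) {
        tbl[32 * i + power]      = (uint32_t)val[i];         /* (%rdi)   */
        tbl[32 * i + 16 + power] = (uint32_t)(val[i] >> 32); /* 64(%rdi) */
    }
}

void gather4(uint64_t val[8], const uint32_t tbl[256], int power)
{
    for (int i = 0; i < 8; i++)
        val[i] = (uint64_t)tbl[32 * i + power] |
                 ((uint64_t)tbl[32 * i + 16 + power] << 32);
}

int main(void)
{
    uint32_t tbl[256] = { 0 };
    uint64_t in[8] = { 1, 2, 3, 0x123456789abcdef0ULL, 5, 6, 7, 8 }, out[8];

    scatter4(tbl, in, 11);
    gather4(out, tbl, 11);
    assert(memcmp(in, out, sizeof(in)) == 0);    /* layout round-trips */
    return 0;
}

The round trip mirrors how rsaz_512_mul_scatter4 writes a result into the table that a later rsaz_512_mul_gather4 reads back while multiplying.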
diff --git a/secure/lib/libcrypto/amd64/sha1-mb-x86_64.S b/secure/lib/libcrypto/amd64/sha1-mb-x86_64.S
new file mode 100644
index 0000000..6c7cd2f
--- /dev/null
+++ b/secure/lib/libcrypto/amd64/sha1-mb-x86_64.S
@@ -0,0 +1,2935 @@
+ # $FreeBSD$
+.text
+
+
+
+.globl sha1_multi_block
+.type sha1_multi_block,@function
+.align 32
+sha1_multi_block:
+ movq OPENSSL_ia32cap_P+4(%rip),%rcx
+ btq $61,%rcx
+ jc _shaext_shortcut
+ movq %rsp,%rax
+ pushq %rbx
+ pushq %rbp
+ subq $288,%rsp
+ andq $-256,%rsp
+ movq %rax,272(%rsp)
+.Lbody:
+ leaq K_XX_XX(%rip),%rbp
+ leaq 256(%rsp),%rbx
+
+.Loop_grande:
+ movl %edx,280(%rsp)
+ xorl %edx,%edx
+ movq 0(%rsi),%r8
+ movl 8(%rsi),%ecx
+ cmpl %edx,%ecx
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movl %ecx,0(%rbx)
+ cmovleq %rbp,%r8
+ movq 16(%rsi),%r9
+ movl 24(%rsi),%ecx
+ cmpl %edx,%ecx
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movl %ecx,4(%rbx)
+ cmovleq %rbp,%r9
+ movq 32(%rsi),%r10
+ movl 40(%rsi),%ecx
+ cmpl %edx,%ecx
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movl %ecx,8(%rbx)
+ cmovleq %rbp,%r10
+ movq 48(%rsi),%r11
+ movl 56(%rsi),%ecx
+ cmpl %edx,%ecx
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movl %ecx,12(%rbx)
+ cmovleq %rbp,%r11
+ testl %edx,%edx
+ jz .Ldone
+
+ movdqu 0(%rdi),%xmm10
+ leaq 128(%rsp),%rax
+ movdqu 32(%rdi),%xmm11
+ movdqu 64(%rdi),%xmm12
+ movdqu 96(%rdi),%xmm13
+ movdqu 128(%rdi),%xmm14
+ movdqa 96(%rbp),%xmm5
+ movdqa -32(%rbp),%xmm15
+ jmp .Loop
+
+.align 32
+.Loop:
+ movd (%r8),%xmm0
+ leaq 64(%r8),%r8
+ movd (%r9),%xmm2
+ leaq 64(%r9),%r9
+ movd (%r10),%xmm3
+ leaq 64(%r10),%r10
+ movd (%r11),%xmm4
+ leaq 64(%r11),%r11
+ punpckldq %xmm3,%xmm0
+ movd -60(%r8),%xmm1
+ punpckldq %xmm4,%xmm2
+ movd -60(%r9),%xmm9
+ punpckldq %xmm2,%xmm0
+ movd -60(%r10),%xmm8
+.byte 102,15,56,0,197
+ movd -60(%r11),%xmm7
+ punpckldq %xmm8,%xmm1
+ movdqa %xmm10,%xmm8
+ paddd %xmm15,%xmm14
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm11,%xmm7
+ movdqa %xmm11,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm13,%xmm7
+ pand %xmm12,%xmm6
+ punpckldq %xmm9,%xmm1
+ movdqa %xmm10,%xmm9
+
+ movdqa %xmm0,0-128(%rax)
+ paddd %xmm0,%xmm14
+ movd -56(%r8),%xmm2
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm11,%xmm7
+
+ por %xmm9,%xmm8
+ movd -56(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm14
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+.byte 102,15,56,0,205
+ movd -56(%r10),%xmm8
+ por %xmm7,%xmm11
+ movd -56(%r11),%xmm7
+ punpckldq %xmm8,%xmm2
+ movdqa %xmm14,%xmm8
+ paddd %xmm15,%xmm13
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm10,%xmm7
+ movdqa %xmm10,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm12,%xmm7
+ pand %xmm11,%xmm6
+ punpckldq %xmm9,%xmm2
+ movdqa %xmm14,%xmm9
+
+ movdqa %xmm1,16-128(%rax)
+ paddd %xmm1,%xmm13
+ movd -52(%r8),%xmm3
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm10,%xmm7
+
+ por %xmm9,%xmm8
+ movd -52(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm13
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+.byte 102,15,56,0,213
+ movd -52(%r10),%xmm8
+ por %xmm7,%xmm10
+ movd -52(%r11),%xmm7
+ punpckldq %xmm8,%xmm3
+ movdqa %xmm13,%xmm8
+ paddd %xmm15,%xmm12
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm14,%xmm7
+ movdqa %xmm14,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm11,%xmm7
+ pand %xmm10,%xmm6
+ punpckldq %xmm9,%xmm3
+ movdqa %xmm13,%xmm9
+
+ movdqa %xmm2,32-128(%rax)
+ paddd %xmm2,%xmm12
+ movd -48(%r8),%xmm4
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm14,%xmm7
+
+ por %xmm9,%xmm8
+ movd -48(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm12
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+.byte 102,15,56,0,221
+ movd -48(%r10),%xmm8
+ por %xmm7,%xmm14
+ movd -48(%r11),%xmm7
+ punpckldq %xmm8,%xmm4
+ movdqa %xmm12,%xmm8
+ paddd %xmm15,%xmm11
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm13,%xmm7
+ movdqa %xmm13,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm10,%xmm7
+ pand %xmm14,%xmm6
+ punpckldq %xmm9,%xmm4
+ movdqa %xmm12,%xmm9
+
+ movdqa %xmm3,48-128(%rax)
+ paddd %xmm3,%xmm11
+ movd -44(%r8),%xmm0
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm13,%xmm7
+
+ por %xmm9,%xmm8
+ movd -44(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm11
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+.byte 102,15,56,0,229
+ movd -44(%r10),%xmm8
+ por %xmm7,%xmm13
+ movd -44(%r11),%xmm7
+ punpckldq %xmm8,%xmm0
+ movdqa %xmm11,%xmm8
+ paddd %xmm15,%xmm10
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm12,%xmm7
+ movdqa %xmm12,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm14,%xmm7
+ pand %xmm13,%xmm6
+ punpckldq %xmm9,%xmm0
+ movdqa %xmm11,%xmm9
+
+ movdqa %xmm4,64-128(%rax)
+ paddd %xmm4,%xmm10
+ movd -40(%r8),%xmm1
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm12,%xmm7
+
+ por %xmm9,%xmm8
+ movd -40(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm10
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+.byte 102,15,56,0,197
+ movd -40(%r10),%xmm8
+ por %xmm7,%xmm12
+ movd -40(%r11),%xmm7
+ punpckldq %xmm8,%xmm1
+ movdqa %xmm10,%xmm8
+ paddd %xmm15,%xmm14
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm11,%xmm7
+ movdqa %xmm11,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm13,%xmm7
+ pand %xmm12,%xmm6
+ punpckldq %xmm9,%xmm1
+ movdqa %xmm10,%xmm9
+
+ movdqa %xmm0,80-128(%rax)
+ paddd %xmm0,%xmm14
+ movd -36(%r8),%xmm2
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm11,%xmm7
+
+ por %xmm9,%xmm8
+ movd -36(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm14
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+.byte 102,15,56,0,205
+ movd -36(%r10),%xmm8
+ por %xmm7,%xmm11
+ movd -36(%r11),%xmm7
+ punpckldq %xmm8,%xmm2
+ movdqa %xmm14,%xmm8
+ paddd %xmm15,%xmm13
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm10,%xmm7
+ movdqa %xmm10,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm12,%xmm7
+ pand %xmm11,%xmm6
+ punpckldq %xmm9,%xmm2
+ movdqa %xmm14,%xmm9
+
+ movdqa %xmm1,96-128(%rax)
+ paddd %xmm1,%xmm13
+ movd -32(%r8),%xmm3
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm10,%xmm7
+
+ por %xmm9,%xmm8
+ movd -32(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm13
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+.byte 102,15,56,0,213
+ movd -32(%r10),%xmm8
+ por %xmm7,%xmm10
+ movd -32(%r11),%xmm7
+ punpckldq %xmm8,%xmm3
+ movdqa %xmm13,%xmm8
+ paddd %xmm15,%xmm12
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm14,%xmm7
+ movdqa %xmm14,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm11,%xmm7
+ pand %xmm10,%xmm6
+ punpckldq %xmm9,%xmm3
+ movdqa %xmm13,%xmm9
+
+ movdqa %xmm2,112-128(%rax)
+ paddd %xmm2,%xmm12
+ movd -28(%r8),%xmm4
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm14,%xmm7
+
+ por %xmm9,%xmm8
+ movd -28(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm12
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+.byte 102,15,56,0,221
+ movd -28(%r10),%xmm8
+ por %xmm7,%xmm14
+ movd -28(%r11),%xmm7
+ punpckldq %xmm8,%xmm4
+ movdqa %xmm12,%xmm8
+ paddd %xmm15,%xmm11
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm13,%xmm7
+ movdqa %xmm13,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm10,%xmm7
+ pand %xmm14,%xmm6
+ punpckldq %xmm9,%xmm4
+ movdqa %xmm12,%xmm9
+
+ movdqa %xmm3,128-128(%rax)
+ paddd %xmm3,%xmm11
+ movd -24(%r8),%xmm0
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm13,%xmm7
+
+ por %xmm9,%xmm8
+ movd -24(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm11
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+.byte 102,15,56,0,229
+ movd -24(%r10),%xmm8
+ por %xmm7,%xmm13
+ movd -24(%r11),%xmm7
+ punpckldq %xmm8,%xmm0
+ movdqa %xmm11,%xmm8
+ paddd %xmm15,%xmm10
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm12,%xmm7
+ movdqa %xmm12,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm14,%xmm7
+ pand %xmm13,%xmm6
+ punpckldq %xmm9,%xmm0
+ movdqa %xmm11,%xmm9
+
+ movdqa %xmm4,144-128(%rax)
+ paddd %xmm4,%xmm10
+ movd -20(%r8),%xmm1
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm12,%xmm7
+
+ por %xmm9,%xmm8
+ movd -20(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm10
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+.byte 102,15,56,0,197
+ movd -20(%r10),%xmm8
+ por %xmm7,%xmm12
+ movd -20(%r11),%xmm7
+ punpckldq %xmm8,%xmm1
+ movdqa %xmm10,%xmm8
+ paddd %xmm15,%xmm14
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm11,%xmm7
+ movdqa %xmm11,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm13,%xmm7
+ pand %xmm12,%xmm6
+ punpckldq %xmm9,%xmm1
+ movdqa %xmm10,%xmm9
+
+ movdqa %xmm0,160-128(%rax)
+ paddd %xmm0,%xmm14
+ movd -16(%r8),%xmm2
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm11,%xmm7
+
+ por %xmm9,%xmm8
+ movd -16(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm14
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+.byte 102,15,56,0,205
+ movd -16(%r10),%xmm8
+ por %xmm7,%xmm11
+ movd -16(%r11),%xmm7
+ punpckldq %xmm8,%xmm2
+ movdqa %xmm14,%xmm8
+ paddd %xmm15,%xmm13
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm10,%xmm7
+ movdqa %xmm10,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm12,%xmm7
+ pand %xmm11,%xmm6
+ punpckldq %xmm9,%xmm2
+ movdqa %xmm14,%xmm9
+
+ movdqa %xmm1,176-128(%rax)
+ paddd %xmm1,%xmm13
+ movd -12(%r8),%xmm3
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm10,%xmm7
+
+ por %xmm9,%xmm8
+ movd -12(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm13
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+.byte 102,15,56,0,213
+ movd -12(%r10),%xmm8
+ por %xmm7,%xmm10
+ movd -12(%r11),%xmm7
+ punpckldq %xmm8,%xmm3
+ movdqa %xmm13,%xmm8
+ paddd %xmm15,%xmm12
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm14,%xmm7
+ movdqa %xmm14,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm11,%xmm7
+ pand %xmm10,%xmm6
+ punpckldq %xmm9,%xmm3
+ movdqa %xmm13,%xmm9
+
+ movdqa %xmm2,192-128(%rax)
+ paddd %xmm2,%xmm12
+ movd -8(%r8),%xmm4
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm14,%xmm7
+
+ por %xmm9,%xmm8
+ movd -8(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm12
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+.byte 102,15,56,0,221
+ movd -8(%r10),%xmm8
+ por %xmm7,%xmm14
+ movd -8(%r11),%xmm7
+ punpckldq %xmm8,%xmm4
+ movdqa %xmm12,%xmm8
+ paddd %xmm15,%xmm11
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm13,%xmm7
+ movdqa %xmm13,%xmm6
+ pslld $5,%xmm8
+ pandn %xmm10,%xmm7
+ pand %xmm14,%xmm6
+ punpckldq %xmm9,%xmm4
+ movdqa %xmm12,%xmm9
+
+ movdqa %xmm3,208-128(%rax)
+ paddd %xmm3,%xmm11
+ movd -4(%r8),%xmm0
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm13,%xmm7
+
+ por %xmm9,%xmm8
+ movd -4(%r9),%xmm9
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm11
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+.byte 102,15,56,0,229
+ movd -4(%r10),%xmm8
+ por %xmm7,%xmm13
+ movdqa 0-128(%rax),%xmm1
+ movd -4(%r11),%xmm7
+ punpckldq %xmm8,%xmm0
+ movdqa %xmm11,%xmm8
+ paddd %xmm15,%xmm10
+ punpckldq %xmm7,%xmm9
+ movdqa %xmm12,%xmm7
+ movdqa %xmm12,%xmm6
+ pslld $5,%xmm8
+ prefetcht0 63(%r8)
+ pandn %xmm14,%xmm7
+ pand %xmm13,%xmm6
+ punpckldq %xmm9,%xmm0
+ movdqa %xmm11,%xmm9
+
+ movdqa %xmm4,224-128(%rax)
+ paddd %xmm4,%xmm10
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+ movdqa %xmm12,%xmm7
+ prefetcht0 63(%r9)
+
+ por %xmm9,%xmm8
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm10
+ prefetcht0 63(%r10)
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+.byte 102,15,56,0,197
+ prefetcht0 63(%r11)
+ por %xmm7,%xmm12
+ movdqa 16-128(%rax),%xmm2
+ pxor %xmm3,%xmm1
+ movdqa 32-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ pxor 128-128(%rax),%xmm1
+ paddd %xmm15,%xmm14
+ movdqa %xmm11,%xmm7
+ pslld $5,%xmm8
+ pxor %xmm3,%xmm1
+ movdqa %xmm11,%xmm6
+ pandn %xmm13,%xmm7
+ movdqa %xmm1,%xmm5
+ pand %xmm12,%xmm6
+ movdqa %xmm10,%xmm9
+ psrld $31,%xmm5
+ paddd %xmm1,%xmm1
+
+ movdqa %xmm0,240-128(%rax)
+ paddd %xmm0,%xmm14
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+
+ movdqa %xmm11,%xmm7
+ por %xmm9,%xmm8
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm14
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 48-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ pxor 144-128(%rax),%xmm2
+ paddd %xmm15,%xmm13
+ movdqa %xmm10,%xmm7
+ pslld $5,%xmm8
+ pxor %xmm4,%xmm2
+ movdqa %xmm10,%xmm6
+ pandn %xmm12,%xmm7
+ movdqa %xmm2,%xmm5
+ pand %xmm11,%xmm6
+ movdqa %xmm14,%xmm9
+ psrld $31,%xmm5
+ paddd %xmm2,%xmm2
+
+ movdqa %xmm1,0-128(%rax)
+ paddd %xmm1,%xmm13
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+
+ movdqa %xmm10,%xmm7
+ por %xmm9,%xmm8
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm13
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 64-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ pxor 160-128(%rax),%xmm3
+ paddd %xmm15,%xmm12
+ movdqa %xmm14,%xmm7
+ pslld $5,%xmm8
+ pxor %xmm0,%xmm3
+ movdqa %xmm14,%xmm6
+ pandn %xmm11,%xmm7
+ movdqa %xmm3,%xmm5
+ pand %xmm10,%xmm6
+ movdqa %xmm13,%xmm9
+ psrld $31,%xmm5
+ paddd %xmm3,%xmm3
+
+ movdqa %xmm2,16-128(%rax)
+ paddd %xmm2,%xmm12
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+
+ movdqa %xmm14,%xmm7
+ por %xmm9,%xmm8
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm12
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 80-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ pxor 176-128(%rax),%xmm4
+ paddd %xmm15,%xmm11
+ movdqa %xmm13,%xmm7
+ pslld $5,%xmm8
+ pxor %xmm1,%xmm4
+ movdqa %xmm13,%xmm6
+ pandn %xmm10,%xmm7
+ movdqa %xmm4,%xmm5
+ pand %xmm14,%xmm6
+ movdqa %xmm12,%xmm9
+ psrld $31,%xmm5
+ paddd %xmm4,%xmm4
+
+ movdqa %xmm3,32-128(%rax)
+ paddd %xmm3,%xmm11
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+
+ movdqa %xmm13,%xmm7
+ por %xmm9,%xmm8
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm11
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ pxor %xmm2,%xmm0
+ movdqa 96-128(%rax),%xmm2
+
+ movdqa %xmm11,%xmm8
+ pxor 192-128(%rax),%xmm0
+ paddd %xmm15,%xmm10
+ movdqa %xmm12,%xmm7
+ pslld $5,%xmm8
+ pxor %xmm2,%xmm0
+ movdqa %xmm12,%xmm6
+ pandn %xmm14,%xmm7
+ movdqa %xmm0,%xmm5
+ pand %xmm13,%xmm6
+ movdqa %xmm11,%xmm9
+ psrld $31,%xmm5
+ paddd %xmm0,%xmm0
+
+ movdqa %xmm4,48-128(%rax)
+ paddd %xmm4,%xmm10
+ psrld $27,%xmm9
+ pxor %xmm7,%xmm6
+
+ movdqa %xmm12,%xmm7
+ por %xmm9,%xmm8
+ pslld $30,%xmm7
+ paddd %xmm6,%xmm10
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm5,%xmm0
+ por %xmm7,%xmm12
+ movdqa 0(%rbp),%xmm15
+ pxor %xmm3,%xmm1
+ movdqa 112-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ movdqa %xmm13,%xmm6
+ pxor 208-128(%rax),%xmm1
+ paddd %xmm15,%xmm14
+ pslld $5,%xmm8
+ pxor %xmm11,%xmm6
+
+ movdqa %xmm10,%xmm9
+ movdqa %xmm0,64-128(%rax)
+ paddd %xmm0,%xmm14
+ pxor %xmm3,%xmm1
+ psrld $27,%xmm9
+ pxor %xmm12,%xmm6
+ movdqa %xmm11,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm1,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm14
+ paddd %xmm1,%xmm1
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 128-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ movdqa %xmm12,%xmm6
+ pxor 224-128(%rax),%xmm2
+ paddd %xmm15,%xmm13
+ pslld $5,%xmm8
+ pxor %xmm10,%xmm6
+
+ movdqa %xmm14,%xmm9
+ movdqa %xmm1,80-128(%rax)
+ paddd %xmm1,%xmm13
+ pxor %xmm4,%xmm2
+ psrld $27,%xmm9
+ pxor %xmm11,%xmm6
+ movdqa %xmm10,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm2,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm13
+ paddd %xmm2,%xmm2
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 144-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ movdqa %xmm11,%xmm6
+ pxor 240-128(%rax),%xmm3
+ paddd %xmm15,%xmm12
+ pslld $5,%xmm8
+ pxor %xmm14,%xmm6
+
+ movdqa %xmm13,%xmm9
+ movdqa %xmm2,96-128(%rax)
+ paddd %xmm2,%xmm12
+ pxor %xmm0,%xmm3
+ psrld $27,%xmm9
+ pxor %xmm10,%xmm6
+ movdqa %xmm14,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm3,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm12
+ paddd %xmm3,%xmm3
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 160-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ movdqa %xmm10,%xmm6
+ pxor 0-128(%rax),%xmm4
+ paddd %xmm15,%xmm11
+ pslld $5,%xmm8
+ pxor %xmm13,%xmm6
+
+ movdqa %xmm12,%xmm9
+ movdqa %xmm3,112-128(%rax)
+ paddd %xmm3,%xmm11
+ pxor %xmm1,%xmm4
+ psrld $27,%xmm9
+ pxor %xmm14,%xmm6
+ movdqa %xmm13,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm4,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm11
+ paddd %xmm4,%xmm4
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ pxor %xmm2,%xmm0
+ movdqa 176-128(%rax),%xmm2
+
+ movdqa %xmm11,%xmm8
+ movdqa %xmm14,%xmm6
+ pxor 16-128(%rax),%xmm0
+ paddd %xmm15,%xmm10
+ pslld $5,%xmm8
+ pxor %xmm12,%xmm6
+
+ movdqa %xmm11,%xmm9
+ movdqa %xmm4,128-128(%rax)
+ paddd %xmm4,%xmm10
+ pxor %xmm2,%xmm0
+ psrld $27,%xmm9
+ pxor %xmm13,%xmm6
+ movdqa %xmm12,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm0,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm10
+ paddd %xmm0,%xmm0
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm5,%xmm0
+ por %xmm7,%xmm12
+ pxor %xmm3,%xmm1
+ movdqa 192-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ movdqa %xmm13,%xmm6
+ pxor 32-128(%rax),%xmm1
+ paddd %xmm15,%xmm14
+ pslld $5,%xmm8
+ pxor %xmm11,%xmm6
+
+ movdqa %xmm10,%xmm9
+ movdqa %xmm0,144-128(%rax)
+ paddd %xmm0,%xmm14
+ pxor %xmm3,%xmm1
+ psrld $27,%xmm9
+ pxor %xmm12,%xmm6
+ movdqa %xmm11,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm1,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm14
+ paddd %xmm1,%xmm1
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 208-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ movdqa %xmm12,%xmm6
+ pxor 48-128(%rax),%xmm2
+ paddd %xmm15,%xmm13
+ pslld $5,%xmm8
+ pxor %xmm10,%xmm6
+
+ movdqa %xmm14,%xmm9
+ movdqa %xmm1,160-128(%rax)
+ paddd %xmm1,%xmm13
+ pxor %xmm4,%xmm2
+ psrld $27,%xmm9
+ pxor %xmm11,%xmm6
+ movdqa %xmm10,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm2,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm13
+ paddd %xmm2,%xmm2
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 224-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ movdqa %xmm11,%xmm6
+ pxor 64-128(%rax),%xmm3
+ paddd %xmm15,%xmm12
+ pslld $5,%xmm8
+ pxor %xmm14,%xmm6
+
+ movdqa %xmm13,%xmm9
+ movdqa %xmm2,176-128(%rax)
+ paddd %xmm2,%xmm12
+ pxor %xmm0,%xmm3
+ psrld $27,%xmm9
+ pxor %xmm10,%xmm6
+ movdqa %xmm14,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm3,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm12
+ paddd %xmm3,%xmm3
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 240-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ movdqa %xmm10,%xmm6
+ pxor 80-128(%rax),%xmm4
+ paddd %xmm15,%xmm11
+ pslld $5,%xmm8
+ pxor %xmm13,%xmm6
+
+ movdqa %xmm12,%xmm9
+ movdqa %xmm3,192-128(%rax)
+ paddd %xmm3,%xmm11
+ pxor %xmm1,%xmm4
+ psrld $27,%xmm9
+ pxor %xmm14,%xmm6
+ movdqa %xmm13,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm4,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm11
+ paddd %xmm4,%xmm4
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ pxor %xmm2,%xmm0
+ movdqa 0-128(%rax),%xmm2
+
+ movdqa %xmm11,%xmm8
+ movdqa %xmm14,%xmm6
+ pxor 96-128(%rax),%xmm0
+ paddd %xmm15,%xmm10
+ pslld $5,%xmm8
+ pxor %xmm12,%xmm6
+
+ movdqa %xmm11,%xmm9
+ movdqa %xmm4,208-128(%rax)
+ paddd %xmm4,%xmm10
+ pxor %xmm2,%xmm0
+ psrld $27,%xmm9
+ pxor %xmm13,%xmm6
+ movdqa %xmm12,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm0,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm10
+ paddd %xmm0,%xmm0
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm5,%xmm0
+ por %xmm7,%xmm12
+ pxor %xmm3,%xmm1
+ movdqa 16-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ movdqa %xmm13,%xmm6
+ pxor 112-128(%rax),%xmm1
+ paddd %xmm15,%xmm14
+ pslld $5,%xmm8
+ pxor %xmm11,%xmm6
+
+ movdqa %xmm10,%xmm9
+ movdqa %xmm0,224-128(%rax)
+ paddd %xmm0,%xmm14
+ pxor %xmm3,%xmm1
+ psrld $27,%xmm9
+ pxor %xmm12,%xmm6
+ movdqa %xmm11,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm1,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm14
+ paddd %xmm1,%xmm1
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 32-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ movdqa %xmm12,%xmm6
+ pxor 128-128(%rax),%xmm2
+ paddd %xmm15,%xmm13
+ pslld $5,%xmm8
+ pxor %xmm10,%xmm6
+
+ movdqa %xmm14,%xmm9
+ movdqa %xmm1,240-128(%rax)
+ paddd %xmm1,%xmm13
+ pxor %xmm4,%xmm2
+ psrld $27,%xmm9
+ pxor %xmm11,%xmm6
+ movdqa %xmm10,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm2,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm13
+ paddd %xmm2,%xmm2
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 48-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ movdqa %xmm11,%xmm6
+ pxor 144-128(%rax),%xmm3
+ paddd %xmm15,%xmm12
+ pslld $5,%xmm8
+ pxor %xmm14,%xmm6
+
+ movdqa %xmm13,%xmm9
+ movdqa %xmm2,0-128(%rax)
+ paddd %xmm2,%xmm12
+ pxor %xmm0,%xmm3
+ psrld $27,%xmm9
+ pxor %xmm10,%xmm6
+ movdqa %xmm14,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm3,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm12
+ paddd %xmm3,%xmm3
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 64-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ movdqa %xmm10,%xmm6
+ pxor 160-128(%rax),%xmm4
+ paddd %xmm15,%xmm11
+ pslld $5,%xmm8
+ pxor %xmm13,%xmm6
+
+ movdqa %xmm12,%xmm9
+ movdqa %xmm3,16-128(%rax)
+ paddd %xmm3,%xmm11
+ pxor %xmm1,%xmm4
+ psrld $27,%xmm9
+ pxor %xmm14,%xmm6
+ movdqa %xmm13,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm4,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm11
+ paddd %xmm4,%xmm4
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ pxor %xmm2,%xmm0
+ movdqa 80-128(%rax),%xmm2
+
+ movdqa %xmm11,%xmm8
+ movdqa %xmm14,%xmm6
+ pxor 176-128(%rax),%xmm0
+ paddd %xmm15,%xmm10
+ pslld $5,%xmm8
+ pxor %xmm12,%xmm6
+
+ movdqa %xmm11,%xmm9
+ movdqa %xmm4,32-128(%rax)
+ paddd %xmm4,%xmm10
+ pxor %xmm2,%xmm0
+ psrld $27,%xmm9
+ pxor %xmm13,%xmm6
+ movdqa %xmm12,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm0,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm10
+ paddd %xmm0,%xmm0
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm5,%xmm0
+ por %xmm7,%xmm12
+ pxor %xmm3,%xmm1
+ movdqa 96-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ movdqa %xmm13,%xmm6
+ pxor 192-128(%rax),%xmm1
+ paddd %xmm15,%xmm14
+ pslld $5,%xmm8
+ pxor %xmm11,%xmm6
+
+ movdqa %xmm10,%xmm9
+ movdqa %xmm0,48-128(%rax)
+ paddd %xmm0,%xmm14
+ pxor %xmm3,%xmm1
+ psrld $27,%xmm9
+ pxor %xmm12,%xmm6
+ movdqa %xmm11,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm1,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm14
+ paddd %xmm1,%xmm1
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 112-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ movdqa %xmm12,%xmm6
+ pxor 208-128(%rax),%xmm2
+ paddd %xmm15,%xmm13
+ pslld $5,%xmm8
+ pxor %xmm10,%xmm6
+
+ movdqa %xmm14,%xmm9
+ movdqa %xmm1,64-128(%rax)
+ paddd %xmm1,%xmm13
+ pxor %xmm4,%xmm2
+ psrld $27,%xmm9
+ pxor %xmm11,%xmm6
+ movdqa %xmm10,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm2,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm13
+ paddd %xmm2,%xmm2
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 128-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ movdqa %xmm11,%xmm6
+ pxor 224-128(%rax),%xmm3
+ paddd %xmm15,%xmm12
+ pslld $5,%xmm8
+ pxor %xmm14,%xmm6
+
+ movdqa %xmm13,%xmm9
+ movdqa %xmm2,80-128(%rax)
+ paddd %xmm2,%xmm12
+ pxor %xmm0,%xmm3
+ psrld $27,%xmm9
+ pxor %xmm10,%xmm6
+ movdqa %xmm14,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm3,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm12
+ paddd %xmm3,%xmm3
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 144-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ movdqa %xmm10,%xmm6
+ pxor 240-128(%rax),%xmm4
+ paddd %xmm15,%xmm11
+ pslld $5,%xmm8
+ pxor %xmm13,%xmm6
+
+ movdqa %xmm12,%xmm9
+ movdqa %xmm3,96-128(%rax)
+ paddd %xmm3,%xmm11
+ pxor %xmm1,%xmm4
+ psrld $27,%xmm9
+ pxor %xmm14,%xmm6
+ movdqa %xmm13,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm4,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm11
+ paddd %xmm4,%xmm4
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ pxor %xmm2,%xmm0
+ movdqa 160-128(%rax),%xmm2
+
+ movdqa %xmm11,%xmm8
+ movdqa %xmm14,%xmm6
+ pxor 0-128(%rax),%xmm0
+ paddd %xmm15,%xmm10
+ pslld $5,%xmm8
+ pxor %xmm12,%xmm6
+
+ movdqa %xmm11,%xmm9
+ movdqa %xmm4,112-128(%rax)
+ paddd %xmm4,%xmm10
+ pxor %xmm2,%xmm0
+ psrld $27,%xmm9
+ pxor %xmm13,%xmm6
+ movdqa %xmm12,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm0,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm10
+ paddd %xmm0,%xmm0
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm5,%xmm0
+ por %xmm7,%xmm12
+ movdqa 32(%rbp),%xmm15
+ pxor %xmm3,%xmm1
+ movdqa 176-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ movdqa %xmm13,%xmm7
+ pxor 16-128(%rax),%xmm1
+ pxor %xmm3,%xmm1
+ paddd %xmm15,%xmm14
+ pslld $5,%xmm8
+ movdqa %xmm10,%xmm9
+ pand %xmm12,%xmm7
+
+ movdqa %xmm13,%xmm6
+ movdqa %xmm1,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm14
+ pxor %xmm12,%xmm6
+
+ movdqa %xmm0,128-128(%rax)
+ paddd %xmm0,%xmm14
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm11,%xmm6
+ movdqa %xmm11,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm1,%xmm1
+ paddd %xmm6,%xmm14
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 192-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ movdqa %xmm12,%xmm7
+ pxor 32-128(%rax),%xmm2
+ pxor %xmm4,%xmm2
+ paddd %xmm15,%xmm13
+ pslld $5,%xmm8
+ movdqa %xmm14,%xmm9
+ pand %xmm11,%xmm7
+
+ movdqa %xmm12,%xmm6
+ movdqa %xmm2,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm13
+ pxor %xmm11,%xmm6
+
+ movdqa %xmm1,144-128(%rax)
+ paddd %xmm1,%xmm13
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm10,%xmm6
+ movdqa %xmm10,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm2,%xmm2
+ paddd %xmm6,%xmm13
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 208-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ movdqa %xmm11,%xmm7
+ pxor 48-128(%rax),%xmm3
+ pxor %xmm0,%xmm3
+ paddd %xmm15,%xmm12
+ pslld $5,%xmm8
+ movdqa %xmm13,%xmm9
+ pand %xmm10,%xmm7
+
+ movdqa %xmm11,%xmm6
+ movdqa %xmm3,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm12
+ pxor %xmm10,%xmm6
+
+ movdqa %xmm2,160-128(%rax)
+ paddd %xmm2,%xmm12
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm14,%xmm6
+ movdqa %xmm14,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm3,%xmm3
+ paddd %xmm6,%xmm12
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 224-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ movdqa %xmm10,%xmm7
+ pxor 64-128(%rax),%xmm4
+ pxor %xmm1,%xmm4
+ paddd %xmm15,%xmm11
+ pslld $5,%xmm8
+ movdqa %xmm12,%xmm9
+ pand %xmm14,%xmm7
+
+ movdqa %xmm10,%xmm6
+ movdqa %xmm4,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm11
+ pxor %xmm14,%xmm6
+
+ movdqa %xmm3,176-128(%rax)
+ paddd %xmm3,%xmm11
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm13,%xmm6
+ movdqa %xmm13,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm4,%xmm4
+ paddd %xmm6,%xmm11
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ pxor %xmm2,%xmm0
+ movdqa 240-128(%rax),%xmm2
+
+ movdqa %xmm11,%xmm8
+ movdqa %xmm14,%xmm7
+ pxor 80-128(%rax),%xmm0
+ pxor %xmm2,%xmm0
+ paddd %xmm15,%xmm10
+ pslld $5,%xmm8
+ movdqa %xmm11,%xmm9
+ pand %xmm13,%xmm7
+
+ movdqa %xmm14,%xmm6
+ movdqa %xmm0,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm10
+ pxor %xmm13,%xmm6
+
+ movdqa %xmm4,192-128(%rax)
+ paddd %xmm4,%xmm10
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm12,%xmm6
+ movdqa %xmm12,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm0,%xmm0
+ paddd %xmm6,%xmm10
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm5,%xmm0
+ por %xmm7,%xmm12
+ pxor %xmm3,%xmm1
+ movdqa 0-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ movdqa %xmm13,%xmm7
+ pxor 96-128(%rax),%xmm1
+ pxor %xmm3,%xmm1
+ paddd %xmm15,%xmm14
+ pslld $5,%xmm8
+ movdqa %xmm10,%xmm9
+ pand %xmm12,%xmm7
+
+ movdqa %xmm13,%xmm6
+ movdqa %xmm1,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm14
+ pxor %xmm12,%xmm6
+
+ movdqa %xmm0,208-128(%rax)
+ paddd %xmm0,%xmm14
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm11,%xmm6
+ movdqa %xmm11,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm1,%xmm1
+ paddd %xmm6,%xmm14
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 16-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ movdqa %xmm12,%xmm7
+ pxor 112-128(%rax),%xmm2
+ pxor %xmm4,%xmm2
+ paddd %xmm15,%xmm13
+ pslld $5,%xmm8
+ movdqa %xmm14,%xmm9
+ pand %xmm11,%xmm7
+
+ movdqa %xmm12,%xmm6
+ movdqa %xmm2,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm13
+ pxor %xmm11,%xmm6
+
+ movdqa %xmm1,224-128(%rax)
+ paddd %xmm1,%xmm13
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm10,%xmm6
+ movdqa %xmm10,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm2,%xmm2
+ paddd %xmm6,%xmm13
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 32-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ movdqa %xmm11,%xmm7
+ pxor 128-128(%rax),%xmm3
+ pxor %xmm0,%xmm3
+ paddd %xmm15,%xmm12
+ pslld $5,%xmm8
+ movdqa %xmm13,%xmm9
+ pand %xmm10,%xmm7
+
+ movdqa %xmm11,%xmm6
+ movdqa %xmm3,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm12
+ pxor %xmm10,%xmm6
+
+ movdqa %xmm2,240-128(%rax)
+ paddd %xmm2,%xmm12
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm14,%xmm6
+ movdqa %xmm14,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm3,%xmm3
+ paddd %xmm6,%xmm12
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 48-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ movdqa %xmm10,%xmm7
+ pxor 144-128(%rax),%xmm4
+ pxor %xmm1,%xmm4
+ paddd %xmm15,%xmm11
+ pslld $5,%xmm8
+ movdqa %xmm12,%xmm9
+ pand %xmm14,%xmm7
+
+ movdqa %xmm10,%xmm6
+ movdqa %xmm4,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm11
+ pxor %xmm14,%xmm6
+
+ movdqa %xmm3,0-128(%rax)
+ paddd %xmm3,%xmm11
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm13,%xmm6
+ movdqa %xmm13,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm4,%xmm4
+ paddd %xmm6,%xmm11
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ pxor %xmm2,%xmm0
+ movdqa 64-128(%rax),%xmm2
+
+ movdqa %xmm11,%xmm8
+ movdqa %xmm14,%xmm7
+ pxor 160-128(%rax),%xmm0
+ pxor %xmm2,%xmm0
+ paddd %xmm15,%xmm10
+ pslld $5,%xmm8
+ movdqa %xmm11,%xmm9
+ pand %xmm13,%xmm7
+
+ movdqa %xmm14,%xmm6
+ movdqa %xmm0,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm10
+ pxor %xmm13,%xmm6
+
+ movdqa %xmm4,16-128(%rax)
+ paddd %xmm4,%xmm10
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm12,%xmm6
+ movdqa %xmm12,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm0,%xmm0
+ paddd %xmm6,%xmm10
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm5,%xmm0
+ por %xmm7,%xmm12
+ pxor %xmm3,%xmm1
+ movdqa 80-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ movdqa %xmm13,%xmm7
+ pxor 176-128(%rax),%xmm1
+ pxor %xmm3,%xmm1
+ paddd %xmm15,%xmm14
+ pslld $5,%xmm8
+ movdqa %xmm10,%xmm9
+ pand %xmm12,%xmm7
+
+ movdqa %xmm13,%xmm6
+ movdqa %xmm1,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm14
+ pxor %xmm12,%xmm6
+
+ movdqa %xmm0,32-128(%rax)
+ paddd %xmm0,%xmm14
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm11,%xmm6
+ movdqa %xmm11,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm1,%xmm1
+ paddd %xmm6,%xmm14
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 96-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ movdqa %xmm12,%xmm7
+ pxor 192-128(%rax),%xmm2
+ pxor %xmm4,%xmm2
+ paddd %xmm15,%xmm13
+ pslld $5,%xmm8
+ movdqa %xmm14,%xmm9
+ pand %xmm11,%xmm7
+
+ movdqa %xmm12,%xmm6
+ movdqa %xmm2,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm13
+ pxor %xmm11,%xmm6
+
+ movdqa %xmm1,48-128(%rax)
+ paddd %xmm1,%xmm13
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm10,%xmm6
+ movdqa %xmm10,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm2,%xmm2
+ paddd %xmm6,%xmm13
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 112-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ movdqa %xmm11,%xmm7
+ pxor 208-128(%rax),%xmm3
+ pxor %xmm0,%xmm3
+ paddd %xmm15,%xmm12
+ pslld $5,%xmm8
+ movdqa %xmm13,%xmm9
+ pand %xmm10,%xmm7
+
+ movdqa %xmm11,%xmm6
+ movdqa %xmm3,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm12
+ pxor %xmm10,%xmm6
+
+ movdqa %xmm2,64-128(%rax)
+ paddd %xmm2,%xmm12
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm14,%xmm6
+ movdqa %xmm14,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm3,%xmm3
+ paddd %xmm6,%xmm12
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 128-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ movdqa %xmm10,%xmm7
+ pxor 224-128(%rax),%xmm4
+ pxor %xmm1,%xmm4
+ paddd %xmm15,%xmm11
+ pslld $5,%xmm8
+ movdqa %xmm12,%xmm9
+ pand %xmm14,%xmm7
+
+ movdqa %xmm10,%xmm6
+ movdqa %xmm4,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm11
+ pxor %xmm14,%xmm6
+
+ movdqa %xmm3,80-128(%rax)
+ paddd %xmm3,%xmm11
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm13,%xmm6
+ movdqa %xmm13,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm4,%xmm4
+ paddd %xmm6,%xmm11
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ pxor %xmm2,%xmm0
+ movdqa 144-128(%rax),%xmm2
+
+ movdqa %xmm11,%xmm8
+ movdqa %xmm14,%xmm7
+ pxor 240-128(%rax),%xmm0
+ pxor %xmm2,%xmm0
+ paddd %xmm15,%xmm10
+ pslld $5,%xmm8
+ movdqa %xmm11,%xmm9
+ pand %xmm13,%xmm7
+
+ movdqa %xmm14,%xmm6
+ movdqa %xmm0,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm10
+ pxor %xmm13,%xmm6
+
+ movdqa %xmm4,96-128(%rax)
+ paddd %xmm4,%xmm10
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm12,%xmm6
+ movdqa %xmm12,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm0,%xmm0
+ paddd %xmm6,%xmm10
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm5,%xmm0
+ por %xmm7,%xmm12
+ pxor %xmm3,%xmm1
+ movdqa 160-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ movdqa %xmm13,%xmm7
+ pxor 0-128(%rax),%xmm1
+ pxor %xmm3,%xmm1
+ paddd %xmm15,%xmm14
+ pslld $5,%xmm8
+ movdqa %xmm10,%xmm9
+ pand %xmm12,%xmm7
+
+ movdqa %xmm13,%xmm6
+ movdqa %xmm1,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm14
+ pxor %xmm12,%xmm6
+
+ movdqa %xmm0,112-128(%rax)
+ paddd %xmm0,%xmm14
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm11,%xmm6
+ movdqa %xmm11,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm1,%xmm1
+ paddd %xmm6,%xmm14
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 176-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ movdqa %xmm12,%xmm7
+ pxor 16-128(%rax),%xmm2
+ pxor %xmm4,%xmm2
+ paddd %xmm15,%xmm13
+ pslld $5,%xmm8
+ movdqa %xmm14,%xmm9
+ pand %xmm11,%xmm7
+
+ movdqa %xmm12,%xmm6
+ movdqa %xmm2,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm13
+ pxor %xmm11,%xmm6
+
+ movdqa %xmm1,128-128(%rax)
+ paddd %xmm1,%xmm13
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm10,%xmm6
+ movdqa %xmm10,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm2,%xmm2
+ paddd %xmm6,%xmm13
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 192-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ movdqa %xmm11,%xmm7
+ pxor 32-128(%rax),%xmm3
+ pxor %xmm0,%xmm3
+ paddd %xmm15,%xmm12
+ pslld $5,%xmm8
+ movdqa %xmm13,%xmm9
+ pand %xmm10,%xmm7
+
+ movdqa %xmm11,%xmm6
+ movdqa %xmm3,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm12
+ pxor %xmm10,%xmm6
+
+ movdqa %xmm2,144-128(%rax)
+ paddd %xmm2,%xmm12
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm14,%xmm6
+ movdqa %xmm14,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm3,%xmm3
+ paddd %xmm6,%xmm12
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 208-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ movdqa %xmm10,%xmm7
+ pxor 48-128(%rax),%xmm4
+ pxor %xmm1,%xmm4
+ paddd %xmm15,%xmm11
+ pslld $5,%xmm8
+ movdqa %xmm12,%xmm9
+ pand %xmm14,%xmm7
+
+ movdqa %xmm10,%xmm6
+ movdqa %xmm4,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm11
+ pxor %xmm14,%xmm6
+
+ movdqa %xmm3,160-128(%rax)
+ paddd %xmm3,%xmm11
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm13,%xmm6
+ movdqa %xmm13,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm4,%xmm4
+ paddd %xmm6,%xmm11
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ pxor %xmm2,%xmm0
+ movdqa 224-128(%rax),%xmm2
+
+ movdqa %xmm11,%xmm8
+ movdqa %xmm14,%xmm7
+ pxor 64-128(%rax),%xmm0
+ pxor %xmm2,%xmm0
+ paddd %xmm15,%xmm10
+ pslld $5,%xmm8
+ movdqa %xmm11,%xmm9
+ pand %xmm13,%xmm7
+
+ movdqa %xmm14,%xmm6
+ movdqa %xmm0,%xmm5
+ psrld $27,%xmm9
+ paddd %xmm7,%xmm10
+ pxor %xmm13,%xmm6
+
+ movdqa %xmm4,176-128(%rax)
+ paddd %xmm4,%xmm10
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ pand %xmm12,%xmm6
+ movdqa %xmm12,%xmm7
+
+ pslld $30,%xmm7
+ paddd %xmm0,%xmm0
+ paddd %xmm6,%xmm10
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm5,%xmm0
+ por %xmm7,%xmm12
+ movdqa 64(%rbp),%xmm15
+ pxor %xmm3,%xmm1
+ movdqa 240-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ movdqa %xmm13,%xmm6
+ pxor 80-128(%rax),%xmm1
+ paddd %xmm15,%xmm14
+ pslld $5,%xmm8
+ pxor %xmm11,%xmm6
+
+ movdqa %xmm10,%xmm9
+ movdqa %xmm0,192-128(%rax)
+ paddd %xmm0,%xmm14
+ pxor %xmm3,%xmm1
+ psrld $27,%xmm9
+ pxor %xmm12,%xmm6
+ movdqa %xmm11,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm1,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm14
+ paddd %xmm1,%xmm1
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 0-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ movdqa %xmm12,%xmm6
+ pxor 96-128(%rax),%xmm2
+ paddd %xmm15,%xmm13
+ pslld $5,%xmm8
+ pxor %xmm10,%xmm6
+
+ movdqa %xmm14,%xmm9
+ movdqa %xmm1,208-128(%rax)
+ paddd %xmm1,%xmm13
+ pxor %xmm4,%xmm2
+ psrld $27,%xmm9
+ pxor %xmm11,%xmm6
+ movdqa %xmm10,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm2,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm13
+ paddd %xmm2,%xmm2
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 16-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ movdqa %xmm11,%xmm6
+ pxor 112-128(%rax),%xmm3
+ paddd %xmm15,%xmm12
+ pslld $5,%xmm8
+ pxor %xmm14,%xmm6
+
+ movdqa %xmm13,%xmm9
+ movdqa %xmm2,224-128(%rax)
+ paddd %xmm2,%xmm12
+ pxor %xmm0,%xmm3
+ psrld $27,%xmm9
+ pxor %xmm10,%xmm6
+ movdqa %xmm14,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm3,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm12
+ paddd %xmm3,%xmm3
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 32-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ movdqa %xmm10,%xmm6
+ pxor 128-128(%rax),%xmm4
+ paddd %xmm15,%xmm11
+ pslld $5,%xmm8
+ pxor %xmm13,%xmm6
+
+ movdqa %xmm12,%xmm9
+ movdqa %xmm3,240-128(%rax)
+ paddd %xmm3,%xmm11
+ pxor %xmm1,%xmm4
+ psrld $27,%xmm9
+ pxor %xmm14,%xmm6
+ movdqa %xmm13,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm4,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm11
+ paddd %xmm4,%xmm4
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ pxor %xmm2,%xmm0
+ movdqa 48-128(%rax),%xmm2
+
+ movdqa %xmm11,%xmm8
+ movdqa %xmm14,%xmm6
+ pxor 144-128(%rax),%xmm0
+ paddd %xmm15,%xmm10
+ pslld $5,%xmm8
+ pxor %xmm12,%xmm6
+
+ movdqa %xmm11,%xmm9
+ movdqa %xmm4,0-128(%rax)
+ paddd %xmm4,%xmm10
+ pxor %xmm2,%xmm0
+ psrld $27,%xmm9
+ pxor %xmm13,%xmm6
+ movdqa %xmm12,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm0,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm10
+ paddd %xmm0,%xmm0
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm5,%xmm0
+ por %xmm7,%xmm12
+ pxor %xmm3,%xmm1
+ movdqa 64-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ movdqa %xmm13,%xmm6
+ pxor 160-128(%rax),%xmm1
+ paddd %xmm15,%xmm14
+ pslld $5,%xmm8
+ pxor %xmm11,%xmm6
+
+ movdqa %xmm10,%xmm9
+ movdqa %xmm0,16-128(%rax)
+ paddd %xmm0,%xmm14
+ pxor %xmm3,%xmm1
+ psrld $27,%xmm9
+ pxor %xmm12,%xmm6
+ movdqa %xmm11,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm1,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm14
+ paddd %xmm1,%xmm1
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 80-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ movdqa %xmm12,%xmm6
+ pxor 176-128(%rax),%xmm2
+ paddd %xmm15,%xmm13
+ pslld $5,%xmm8
+ pxor %xmm10,%xmm6
+
+ movdqa %xmm14,%xmm9
+ movdqa %xmm1,32-128(%rax)
+ paddd %xmm1,%xmm13
+ pxor %xmm4,%xmm2
+ psrld $27,%xmm9
+ pxor %xmm11,%xmm6
+ movdqa %xmm10,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm2,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm13
+ paddd %xmm2,%xmm2
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 96-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ movdqa %xmm11,%xmm6
+ pxor 192-128(%rax),%xmm3
+ paddd %xmm15,%xmm12
+ pslld $5,%xmm8
+ pxor %xmm14,%xmm6
+
+ movdqa %xmm13,%xmm9
+ movdqa %xmm2,48-128(%rax)
+ paddd %xmm2,%xmm12
+ pxor %xmm0,%xmm3
+ psrld $27,%xmm9
+ pxor %xmm10,%xmm6
+ movdqa %xmm14,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm3,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm12
+ paddd %xmm3,%xmm3
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 112-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ movdqa %xmm10,%xmm6
+ pxor 208-128(%rax),%xmm4
+ paddd %xmm15,%xmm11
+ pslld $5,%xmm8
+ pxor %xmm13,%xmm6
+
+ movdqa %xmm12,%xmm9
+ movdqa %xmm3,64-128(%rax)
+ paddd %xmm3,%xmm11
+ pxor %xmm1,%xmm4
+ psrld $27,%xmm9
+ pxor %xmm14,%xmm6
+ movdqa %xmm13,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm4,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm11
+ paddd %xmm4,%xmm4
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ pxor %xmm2,%xmm0
+ movdqa 128-128(%rax),%xmm2
+
+ movdqa %xmm11,%xmm8
+ movdqa %xmm14,%xmm6
+ pxor 224-128(%rax),%xmm0
+ paddd %xmm15,%xmm10
+ pslld $5,%xmm8
+ pxor %xmm12,%xmm6
+
+ movdqa %xmm11,%xmm9
+ movdqa %xmm4,80-128(%rax)
+ paddd %xmm4,%xmm10
+ pxor %xmm2,%xmm0
+ psrld $27,%xmm9
+ pxor %xmm13,%xmm6
+ movdqa %xmm12,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm0,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm10
+ paddd %xmm0,%xmm0
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm5,%xmm0
+ por %xmm7,%xmm12
+ pxor %xmm3,%xmm1
+ movdqa 144-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ movdqa %xmm13,%xmm6
+ pxor 240-128(%rax),%xmm1
+ paddd %xmm15,%xmm14
+ pslld $5,%xmm8
+ pxor %xmm11,%xmm6
+
+ movdqa %xmm10,%xmm9
+ movdqa %xmm0,96-128(%rax)
+ paddd %xmm0,%xmm14
+ pxor %xmm3,%xmm1
+ psrld $27,%xmm9
+ pxor %xmm12,%xmm6
+ movdqa %xmm11,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm1,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm14
+ paddd %xmm1,%xmm1
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 160-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ movdqa %xmm12,%xmm6
+ pxor 0-128(%rax),%xmm2
+ paddd %xmm15,%xmm13
+ pslld $5,%xmm8
+ pxor %xmm10,%xmm6
+
+ movdqa %xmm14,%xmm9
+ movdqa %xmm1,112-128(%rax)
+ paddd %xmm1,%xmm13
+ pxor %xmm4,%xmm2
+ psrld $27,%xmm9
+ pxor %xmm11,%xmm6
+ movdqa %xmm10,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm2,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm13
+ paddd %xmm2,%xmm2
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 176-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ movdqa %xmm11,%xmm6
+ pxor 16-128(%rax),%xmm3
+ paddd %xmm15,%xmm12
+ pslld $5,%xmm8
+ pxor %xmm14,%xmm6
+
+ movdqa %xmm13,%xmm9
+ paddd %xmm2,%xmm12
+ pxor %xmm0,%xmm3
+ psrld $27,%xmm9
+ pxor %xmm10,%xmm6
+ movdqa %xmm14,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm3,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm12
+ paddd %xmm3,%xmm3
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 192-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ movdqa %xmm10,%xmm6
+ pxor 32-128(%rax),%xmm4
+ paddd %xmm15,%xmm11
+ pslld $5,%xmm8
+ pxor %xmm13,%xmm6
+
+ movdqa %xmm12,%xmm9
+ paddd %xmm3,%xmm11
+ pxor %xmm1,%xmm4
+ psrld $27,%xmm9
+ pxor %xmm14,%xmm6
+ movdqa %xmm13,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm4,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm11
+ paddd %xmm4,%xmm4
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ pxor %xmm2,%xmm0
+ movdqa 208-128(%rax),%xmm2
+
+ movdqa %xmm11,%xmm8
+ movdqa %xmm14,%xmm6
+ pxor 48-128(%rax),%xmm0
+ paddd %xmm15,%xmm10
+ pslld $5,%xmm8
+ pxor %xmm12,%xmm6
+
+ movdqa %xmm11,%xmm9
+ paddd %xmm4,%xmm10
+ pxor %xmm2,%xmm0
+ psrld $27,%xmm9
+ pxor %xmm13,%xmm6
+ movdqa %xmm12,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm0,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm10
+ paddd %xmm0,%xmm0
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm5,%xmm0
+ por %xmm7,%xmm12
+ pxor %xmm3,%xmm1
+ movdqa 224-128(%rax),%xmm3
+
+ movdqa %xmm10,%xmm8
+ movdqa %xmm13,%xmm6
+ pxor 64-128(%rax),%xmm1
+ paddd %xmm15,%xmm14
+ pslld $5,%xmm8
+ pxor %xmm11,%xmm6
+
+ movdqa %xmm10,%xmm9
+ paddd %xmm0,%xmm14
+ pxor %xmm3,%xmm1
+ psrld $27,%xmm9
+ pxor %xmm12,%xmm6
+ movdqa %xmm11,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm1,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm14
+ paddd %xmm1,%xmm1
+
+ psrld $2,%xmm11
+ paddd %xmm8,%xmm14
+ por %xmm5,%xmm1
+ por %xmm7,%xmm11
+ pxor %xmm4,%xmm2
+ movdqa 240-128(%rax),%xmm4
+
+ movdqa %xmm14,%xmm8
+ movdqa %xmm12,%xmm6
+ pxor 80-128(%rax),%xmm2
+ paddd %xmm15,%xmm13
+ pslld $5,%xmm8
+ pxor %xmm10,%xmm6
+
+ movdqa %xmm14,%xmm9
+ paddd %xmm1,%xmm13
+ pxor %xmm4,%xmm2
+ psrld $27,%xmm9
+ pxor %xmm11,%xmm6
+ movdqa %xmm10,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm2,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm13
+ paddd %xmm2,%xmm2
+
+ psrld $2,%xmm10
+ paddd %xmm8,%xmm13
+ por %xmm5,%xmm2
+ por %xmm7,%xmm10
+ pxor %xmm0,%xmm3
+ movdqa 0-128(%rax),%xmm0
+
+ movdqa %xmm13,%xmm8
+ movdqa %xmm11,%xmm6
+ pxor 96-128(%rax),%xmm3
+ paddd %xmm15,%xmm12
+ pslld $5,%xmm8
+ pxor %xmm14,%xmm6
+
+ movdqa %xmm13,%xmm9
+ paddd %xmm2,%xmm12
+ pxor %xmm0,%xmm3
+ psrld $27,%xmm9
+ pxor %xmm10,%xmm6
+ movdqa %xmm14,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm3,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm12
+ paddd %xmm3,%xmm3
+
+ psrld $2,%xmm14
+ paddd %xmm8,%xmm12
+ por %xmm5,%xmm3
+ por %xmm7,%xmm14
+ pxor %xmm1,%xmm4
+ movdqa 16-128(%rax),%xmm1
+
+ movdqa %xmm12,%xmm8
+ movdqa %xmm10,%xmm6
+ pxor 112-128(%rax),%xmm4
+ paddd %xmm15,%xmm11
+ pslld $5,%xmm8
+ pxor %xmm13,%xmm6
+
+ movdqa %xmm12,%xmm9
+ paddd %xmm3,%xmm11
+ pxor %xmm1,%xmm4
+ psrld $27,%xmm9
+ pxor %xmm14,%xmm6
+ movdqa %xmm13,%xmm7
+
+ pslld $30,%xmm7
+ movdqa %xmm4,%xmm5
+ por %xmm9,%xmm8
+ psrld $31,%xmm5
+ paddd %xmm6,%xmm11
+ paddd %xmm4,%xmm4
+
+ psrld $2,%xmm13
+ paddd %xmm8,%xmm11
+ por %xmm5,%xmm4
+ por %xmm7,%xmm13
+ movdqa %xmm11,%xmm8
+ paddd %xmm15,%xmm10
+ movdqa %xmm14,%xmm6
+ pslld $5,%xmm8
+ pxor %xmm12,%xmm6
+
+ movdqa %xmm11,%xmm9
+ paddd %xmm4,%xmm10
+ psrld $27,%xmm9
+ movdqa %xmm12,%xmm7
+ pxor %xmm13,%xmm6
+
+ pslld $30,%xmm7
+ por %xmm9,%xmm8
+ paddd %xmm6,%xmm10
+
+ psrld $2,%xmm12
+ paddd %xmm8,%xmm10
+ por %xmm7,%xmm12
+ movdqa (%rbx),%xmm0
+ movl $1,%ecx
+ cmpl 0(%rbx),%ecx
+ pxor %xmm8,%xmm8
+ cmovgeq %rbp,%r8
+ cmpl 4(%rbx),%ecx
+ movdqa %xmm0,%xmm1
+ cmovgeq %rbp,%r9
+ cmpl 8(%rbx),%ecx
+ pcmpgtd %xmm8,%xmm1
+ cmovgeq %rbp,%r10
+ cmpl 12(%rbx),%ecx
+ paddd %xmm1,%xmm0
+ cmovgeq %rbp,%r11
+
+ movdqu 0(%rdi),%xmm6
+ pand %xmm1,%xmm10
+ movdqu 32(%rdi),%xmm7
+ pand %xmm1,%xmm11
+ paddd %xmm6,%xmm10
+ movdqu 64(%rdi),%xmm8
+ pand %xmm1,%xmm12
+ paddd %xmm7,%xmm11
+ movdqu 96(%rdi),%xmm9
+ pand %xmm1,%xmm13
+ paddd %xmm8,%xmm12
+ movdqu 128(%rdi),%xmm5
+ pand %xmm1,%xmm14
+ movdqu %xmm10,0(%rdi)
+ paddd %xmm9,%xmm13
+ movdqu %xmm11,32(%rdi)
+ paddd %xmm5,%xmm14
+ movdqu %xmm12,64(%rdi)
+ movdqu %xmm13,96(%rdi)
+ movdqu %xmm14,128(%rdi)
+
+ movdqa %xmm0,(%rbx)
+ movdqa 96(%rbp),%xmm5
+ movdqa -32(%rbp),%xmm15
+ decl %edx
+ jnz .Loop
+
+ movl 280(%rsp),%edx
+ leaq 16(%rdi),%rdi
+ leaq 64(%rsi),%rsi
+ decl %edx
+ jnz .Loop_grande
+
+.Ldone:
+ movq 272(%rsp),%rax
+ movq -16(%rax),%rbp
+ movq -8(%rax),%rbx
+ leaq (%rax),%rsp
+.Lepilogue:
+ .byte 0xf3,0xc3
+.size sha1_multi_block,.-sha1_multi_block
+.type sha1_multi_block_shaext,@function
+.align 32
+sha1_multi_block_shaext:
+_shaext_shortcut:
+ movq %rsp,%rax
+ pushq %rbx
+ pushq %rbp
+ subq $288,%rsp
+ shll $1,%edx
+ andq $-256,%rsp
+ leaq 64(%rdi),%rdi
+ movq %rax,272(%rsp)
+.Lbody_shaext:
+ leaq 256(%rsp),%rbx
+ movdqa K_XX_XX+128(%rip),%xmm3
+
+.Loop_grande_shaext:
+ movl %edx,280(%rsp)
+ xorl %edx,%edx
+ movq 0(%rsi),%r8
+ movl 8(%rsi),%ecx
+ cmpl %edx,%ecx
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movl %ecx,0(%rbx)
+ cmovleq %rsp,%r8
+ movq 16(%rsi),%r9
+ movl 24(%rsi),%ecx
+ cmpl %edx,%ecx
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movl %ecx,4(%rbx)
+ cmovleq %rsp,%r9
+ testl %edx,%edx
+ jz .Ldone_shaext
+
+ movq 0-64(%rdi),%xmm0
+ movq 32-64(%rdi),%xmm4
+ movq 64-64(%rdi),%xmm5
+ movq 96-64(%rdi),%xmm6
+ movq 128-64(%rdi),%xmm7
+
+ punpckldq %xmm4,%xmm0
+ punpckldq %xmm6,%xmm5
+
+ movdqa %xmm0,%xmm8
+ punpcklqdq %xmm5,%xmm0
+ punpckhqdq %xmm5,%xmm8
+
+ pshufd $63,%xmm7,%xmm1
+ pshufd $127,%xmm7,%xmm9
+ pshufd $27,%xmm0,%xmm0
+ pshufd $27,%xmm8,%xmm8
+ jmp .Loop_shaext
+
+.align 32
+.Loop_shaext:
+ movdqu 0(%r8),%xmm4
+ movdqu 0(%r9),%xmm11
+ movdqu 16(%r8),%xmm5
+ movdqu 16(%r9),%xmm12
+ movdqu 32(%r8),%xmm6
+.byte 102,15,56,0,227
+ movdqu 32(%r9),%xmm13
+.byte 102,68,15,56,0,219
+ movdqu 48(%r8),%xmm7
+ leaq 64(%r8),%r8
+.byte 102,15,56,0,235
+ movdqu 48(%r9),%xmm14
+ leaq 64(%r9),%r9
+.byte 102,68,15,56,0,227
+
+ movdqa %xmm1,80(%rsp)
+ paddd %xmm4,%xmm1
+ movdqa %xmm9,112(%rsp)
+ paddd %xmm11,%xmm9
+ movdqa %xmm0,64(%rsp)
+ movdqa %xmm0,%xmm2
+ movdqa %xmm8,96(%rsp)
+ movdqa %xmm8,%xmm10
+.byte 15,58,204,193,0
+.byte 15,56,200,213
+.byte 69,15,58,204,193,0
+.byte 69,15,56,200,212
+.byte 102,15,56,0,243
+ prefetcht0 127(%r8)
+.byte 15,56,201,229
+.byte 102,68,15,56,0,235
+ prefetcht0 127(%r9)
+.byte 69,15,56,201,220
+
+.byte 102,15,56,0,251
+ movdqa %xmm0,%xmm1
+.byte 102,68,15,56,0,243
+ movdqa %xmm8,%xmm9
+.byte 15,58,204,194,0
+.byte 15,56,200,206
+.byte 69,15,58,204,194,0
+.byte 69,15,56,200,205
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+ pxor %xmm13,%xmm11
+.byte 69,15,56,201,229
+ movdqa %xmm0,%xmm2
+ movdqa %xmm8,%xmm10
+.byte 15,58,204,193,0
+.byte 15,56,200,215
+.byte 69,15,58,204,193,0
+.byte 69,15,56,200,214
+.byte 15,56,202,231
+.byte 69,15,56,202,222
+ pxor %xmm7,%xmm5
+.byte 15,56,201,247
+ pxor %xmm14,%xmm12
+.byte 69,15,56,201,238
+ movdqa %xmm0,%xmm1
+ movdqa %xmm8,%xmm9
+.byte 15,58,204,194,0
+.byte 15,56,200,204
+.byte 69,15,58,204,194,0
+.byte 69,15,56,200,203
+.byte 15,56,202,236
+.byte 69,15,56,202,227
+ pxor %xmm4,%xmm6
+.byte 15,56,201,252
+ pxor %xmm11,%xmm13
+.byte 69,15,56,201,243
+ movdqa %xmm0,%xmm2
+ movdqa %xmm8,%xmm10
+.byte 15,58,204,193,0
+.byte 15,56,200,213
+.byte 69,15,58,204,193,0
+.byte 69,15,56,200,212
+.byte 15,56,202,245
+.byte 69,15,56,202,236
+ pxor %xmm5,%xmm7
+.byte 15,56,201,229
+ pxor %xmm12,%xmm14
+.byte 69,15,56,201,220
+ movdqa %xmm0,%xmm1
+ movdqa %xmm8,%xmm9
+.byte 15,58,204,194,1
+.byte 15,56,200,206
+.byte 69,15,58,204,194,1
+.byte 69,15,56,200,205
+.byte 15,56,202,254
+.byte 69,15,56,202,245
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+ pxor %xmm13,%xmm11
+.byte 69,15,56,201,229
+ movdqa %xmm0,%xmm2
+ movdqa %xmm8,%xmm10
+.byte 15,58,204,193,1
+.byte 15,56,200,215
+.byte 69,15,58,204,193,1
+.byte 69,15,56,200,214
+.byte 15,56,202,231
+.byte 69,15,56,202,222
+ pxor %xmm7,%xmm5
+.byte 15,56,201,247
+ pxor %xmm14,%xmm12
+.byte 69,15,56,201,238
+ movdqa %xmm0,%xmm1
+ movdqa %xmm8,%xmm9
+.byte 15,58,204,194,1
+.byte 15,56,200,204
+.byte 69,15,58,204,194,1
+.byte 69,15,56,200,203
+.byte 15,56,202,236
+.byte 69,15,56,202,227
+ pxor %xmm4,%xmm6
+.byte 15,56,201,252
+ pxor %xmm11,%xmm13
+.byte 69,15,56,201,243
+ movdqa %xmm0,%xmm2
+ movdqa %xmm8,%xmm10
+.byte 15,58,204,193,1
+.byte 15,56,200,213
+.byte 69,15,58,204,193,1
+.byte 69,15,56,200,212
+.byte 15,56,202,245
+.byte 69,15,56,202,236
+ pxor %xmm5,%xmm7
+.byte 15,56,201,229
+ pxor %xmm12,%xmm14
+.byte 69,15,56,201,220
+ movdqa %xmm0,%xmm1
+ movdqa %xmm8,%xmm9
+.byte 15,58,204,194,1
+.byte 15,56,200,206
+.byte 69,15,58,204,194,1
+.byte 69,15,56,200,205
+.byte 15,56,202,254
+.byte 69,15,56,202,245
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+ pxor %xmm13,%xmm11
+.byte 69,15,56,201,229
+ movdqa %xmm0,%xmm2
+ movdqa %xmm8,%xmm10
+.byte 15,58,204,193,2
+.byte 15,56,200,215
+.byte 69,15,58,204,193,2
+.byte 69,15,56,200,214
+.byte 15,56,202,231
+.byte 69,15,56,202,222
+ pxor %xmm7,%xmm5
+.byte 15,56,201,247
+ pxor %xmm14,%xmm12
+.byte 69,15,56,201,238
+ movdqa %xmm0,%xmm1
+ movdqa %xmm8,%xmm9
+.byte 15,58,204,194,2
+.byte 15,56,200,204
+.byte 69,15,58,204,194,2
+.byte 69,15,56,200,203
+.byte 15,56,202,236
+.byte 69,15,56,202,227
+ pxor %xmm4,%xmm6
+.byte 15,56,201,252
+ pxor %xmm11,%xmm13
+.byte 69,15,56,201,243
+ movdqa %xmm0,%xmm2
+ movdqa %xmm8,%xmm10
+.byte 15,58,204,193,2
+.byte 15,56,200,213
+.byte 69,15,58,204,193,2
+.byte 69,15,56,200,212
+.byte 15,56,202,245
+.byte 69,15,56,202,236
+ pxor %xmm5,%xmm7
+.byte 15,56,201,229
+ pxor %xmm12,%xmm14
+.byte 69,15,56,201,220
+ movdqa %xmm0,%xmm1
+ movdqa %xmm8,%xmm9
+.byte 15,58,204,194,2
+.byte 15,56,200,206
+.byte 69,15,58,204,194,2
+.byte 69,15,56,200,205
+.byte 15,56,202,254
+.byte 69,15,56,202,245
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+ pxor %xmm13,%xmm11
+.byte 69,15,56,201,229
+ movdqa %xmm0,%xmm2
+ movdqa %xmm8,%xmm10
+.byte 15,58,204,193,2
+.byte 15,56,200,215
+.byte 69,15,58,204,193,2
+.byte 69,15,56,200,214
+.byte 15,56,202,231
+.byte 69,15,56,202,222
+ pxor %xmm7,%xmm5
+.byte 15,56,201,247
+ pxor %xmm14,%xmm12
+.byte 69,15,56,201,238
+ movdqa %xmm0,%xmm1
+ movdqa %xmm8,%xmm9
+.byte 15,58,204,194,3
+.byte 15,56,200,204
+.byte 69,15,58,204,194,3
+.byte 69,15,56,200,203
+.byte 15,56,202,236
+.byte 69,15,56,202,227
+ pxor %xmm4,%xmm6
+.byte 15,56,201,252
+ pxor %xmm11,%xmm13
+.byte 69,15,56,201,243
+ movdqa %xmm0,%xmm2
+ movdqa %xmm8,%xmm10
+.byte 15,58,204,193,3
+.byte 15,56,200,213
+.byte 69,15,58,204,193,3
+.byte 69,15,56,200,212
+.byte 15,56,202,245
+.byte 69,15,56,202,236
+ pxor %xmm5,%xmm7
+ pxor %xmm12,%xmm14
+
+ movl $1,%ecx
+ pxor %xmm4,%xmm4
+ cmpl 0(%rbx),%ecx
+ cmovgeq %rsp,%r8
+
+ movdqa %xmm0,%xmm1
+ movdqa %xmm8,%xmm9
+.byte 15,58,204,194,3
+.byte 15,56,200,206
+.byte 69,15,58,204,194,3
+.byte 69,15,56,200,205
+.byte 15,56,202,254
+.byte 69,15,56,202,245
+
+ cmpl 4(%rbx),%ecx
+ cmovgeq %rsp,%r9
+ movq (%rbx),%xmm6
+
+ movdqa %xmm0,%xmm2
+ movdqa %xmm8,%xmm10
+.byte 15,58,204,193,3
+.byte 15,56,200,215
+.byte 69,15,58,204,193,3
+.byte 69,15,56,200,214
+
+ pshufd $0,%xmm6,%xmm11
+ pshufd $85,%xmm6,%xmm12
+ movdqa %xmm6,%xmm7
+ pcmpgtd %xmm4,%xmm11
+ pcmpgtd %xmm4,%xmm12
+
+ movdqa %xmm0,%xmm1
+ movdqa %xmm8,%xmm9
+.byte 15,58,204,194,3
+.byte 15,56,200,204
+.byte 69,15,58,204,194,3
+.byte 68,15,56,200,204
+
+ pcmpgtd %xmm4,%xmm7
+ pand %xmm11,%xmm0
+ pand %xmm11,%xmm1
+ pand %xmm12,%xmm8
+ pand %xmm12,%xmm9
+ paddd %xmm7,%xmm6
+
+ paddd 64(%rsp),%xmm0
+ paddd 80(%rsp),%xmm1
+ paddd 96(%rsp),%xmm8
+ paddd 112(%rsp),%xmm9
+
+ movq %xmm6,(%rbx)
+ decl %edx
+ jnz .Loop_shaext
+
+ movl 280(%rsp),%edx
+
+ pshufd $27,%xmm0,%xmm0
+ pshufd $27,%xmm8,%xmm8
+
+ movdqa %xmm0,%xmm6
+ punpckldq %xmm8,%xmm0
+ punpckhdq %xmm8,%xmm6
+ punpckhdq %xmm9,%xmm1
+ movq %xmm0,0-64(%rdi)
+ psrldq $8,%xmm0
+ movq %xmm6,64-64(%rdi)
+ psrldq $8,%xmm6
+ movq %xmm0,32-64(%rdi)
+ psrldq $8,%xmm1
+ movq %xmm6,96-64(%rdi)
+ movq %xmm1,128-64(%rdi)
+
+ leaq 8(%rdi),%rdi
+ leaq 32(%rsi),%rsi
+ decl %edx
+ jnz .Loop_grande_shaext
+
+.Ldone_shaext:
+
+ movq -16(%rax),%rbp
+ movq -8(%rax),%rbx
+ leaq (%rax),%rsp
+.Lepilogue_shaext:
+ .byte 0xf3,0xc3
+.size sha1_multi_block_shaext,.-sha1_multi_block_shaext
+
+.align 256
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999
+K_XX_XX:
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
+.byte 83,72,65,49,32,109,117,108,116,105,45,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
diff --git a/secure/lib/libcrypto/amd64/sha1-x86_64.S b/secure/lib/libcrypto/amd64/sha1-x86_64.S
index 421423a..25c27e5 100644
--- a/secure/lib/libcrypto/amd64/sha1-x86_64.S
+++ b/secure/lib/libcrypto/amd64/sha1-x86_64.S
@@ -8,23 +8,27 @@
sha1_block_data_order:
movl OPENSSL_ia32cap_P+0(%rip),%r9d
movl OPENSSL_ia32cap_P+4(%rip),%r8d
+ movl OPENSSL_ia32cap_P+8(%rip),%r10d
testl $512,%r8d
jz .Lialu
+ testl $536870912,%r10d
+ jnz _shaext_shortcut
jmp _ssse3_shortcut
.align 16
.Lialu:
+ movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
- movq %rsp,%r11
+ pushq %r14
movq %rdi,%r8
subq $72,%rsp
movq %rsi,%r9
andq $-64,%rsp
movq %rdx,%r10
- movq %r11,64(%rsp)
+ movq %rax,64(%rsp)
.Lprologue:
movl 0(%r8),%esi
@@ -38,1230 +42,1168 @@ sha1_block_data_order:
.Lloop:
movl 0(%r9),%edx
bswapl %edx
- movl %edx,0(%rsp)
- movl %r11d,%eax
movl 4(%r9),%ebp
+ movl %r12d,%eax
+ movl %edx,0(%rsp)
movl %esi,%ecx
- xorl %r12d,%eax
bswapl %ebp
+ xorl %r11d,%eax
roll $5,%ecx
- leal 1518500249(%rdx,%r13,1),%r13d
andl %edi,%eax
- movl %ebp,4(%rsp)
+ leal 1518500249(%rdx,%r13,1),%r13d
addl %ecx,%r13d
xorl %r12d,%eax
roll $30,%edi
addl %eax,%r13d
- movl %edi,%eax
- movl 8(%r9),%edx
+ movl 8(%r9),%r14d
+ movl %r11d,%eax
+ movl %ebp,4(%rsp)
movl %r13d,%ecx
- xorl %r11d,%eax
- bswapl %edx
+ bswapl %r14d
+ xorl %edi,%eax
roll $5,%ecx
- leal 1518500249(%rbp,%r12,1),%r12d
andl %esi,%eax
- movl %edx,8(%rsp)
+ leal 1518500249(%rbp,%r12,1),%r12d
addl %ecx,%r12d
xorl %r11d,%eax
roll $30,%esi
addl %eax,%r12d
- movl %esi,%eax
- movl 12(%r9),%ebp
+ movl 12(%r9),%edx
+ movl %edi,%eax
+ movl %r14d,8(%rsp)
movl %r12d,%ecx
- xorl %edi,%eax
- bswapl %ebp
+ bswapl %edx
+ xorl %esi,%eax
roll $5,%ecx
- leal 1518500249(%rdx,%r11,1),%r11d
andl %r13d,%eax
- movl %ebp,12(%rsp)
+ leal 1518500249(%r14,%r11,1),%r11d
addl %ecx,%r11d
xorl %edi,%eax
roll $30,%r13d
addl %eax,%r11d
- movl %r13d,%eax
- movl 16(%r9),%edx
+ movl 16(%r9),%ebp
+ movl %esi,%eax
+ movl %edx,12(%rsp)
movl %r11d,%ecx
- xorl %esi,%eax
- bswapl %edx
+ bswapl %ebp
+ xorl %r13d,%eax
roll $5,%ecx
- leal 1518500249(%rbp,%rdi,1),%edi
andl %r12d,%eax
- movl %edx,16(%rsp)
+ leal 1518500249(%rdx,%rdi,1),%edi
addl %ecx,%edi
xorl %esi,%eax
roll $30,%r12d
addl %eax,%edi
- movl %r12d,%eax
- movl 20(%r9),%ebp
+ movl 20(%r9),%r14d
+ movl %r13d,%eax
+ movl %ebp,16(%rsp)
movl %edi,%ecx
- xorl %r13d,%eax
- bswapl %ebp
+ bswapl %r14d
+ xorl %r12d,%eax
roll $5,%ecx
- leal 1518500249(%rdx,%rsi,1),%esi
andl %r11d,%eax
- movl %ebp,20(%rsp)
+ leal 1518500249(%rbp,%rsi,1),%esi
addl %ecx,%esi
xorl %r13d,%eax
roll $30,%r11d
addl %eax,%esi
- movl %r11d,%eax
movl 24(%r9),%edx
+ movl %r12d,%eax
+ movl %r14d,20(%rsp)
movl %esi,%ecx
- xorl %r12d,%eax
bswapl %edx
+ xorl %r11d,%eax
roll $5,%ecx
- leal 1518500249(%rbp,%r13,1),%r13d
andl %edi,%eax
- movl %edx,24(%rsp)
+ leal 1518500249(%r14,%r13,1),%r13d
addl %ecx,%r13d
xorl %r12d,%eax
roll $30,%edi
addl %eax,%r13d
- movl %edi,%eax
movl 28(%r9),%ebp
+ movl %r11d,%eax
+ movl %edx,24(%rsp)
movl %r13d,%ecx
- xorl %r11d,%eax
bswapl %ebp
+ xorl %edi,%eax
roll $5,%ecx
- leal 1518500249(%rdx,%r12,1),%r12d
andl %esi,%eax
- movl %ebp,28(%rsp)
+ leal 1518500249(%rdx,%r12,1),%r12d
addl %ecx,%r12d
xorl %r11d,%eax
roll $30,%esi
addl %eax,%r12d
- movl %esi,%eax
- movl 32(%r9),%edx
+ movl 32(%r9),%r14d
+ movl %edi,%eax
+ movl %ebp,28(%rsp)
movl %r12d,%ecx
- xorl %edi,%eax
- bswapl %edx
+ bswapl %r14d
+ xorl %esi,%eax
roll $5,%ecx
- leal 1518500249(%rbp,%r11,1),%r11d
andl %r13d,%eax
- movl %edx,32(%rsp)
+ leal 1518500249(%rbp,%r11,1),%r11d
addl %ecx,%r11d
xorl %edi,%eax
roll $30,%r13d
addl %eax,%r11d
- movl %r13d,%eax
- movl 36(%r9),%ebp
+ movl 36(%r9),%edx
+ movl %esi,%eax
+ movl %r14d,32(%rsp)
movl %r11d,%ecx
- xorl %esi,%eax
- bswapl %ebp
+ bswapl %edx
+ xorl %r13d,%eax
roll $5,%ecx
- leal 1518500249(%rdx,%rdi,1),%edi
andl %r12d,%eax
- movl %ebp,36(%rsp)
+ leal 1518500249(%r14,%rdi,1),%edi
addl %ecx,%edi
xorl %esi,%eax
roll $30,%r12d
addl %eax,%edi
- movl %r12d,%eax
- movl 40(%r9),%edx
+ movl 40(%r9),%ebp
+ movl %r13d,%eax
+ movl %edx,36(%rsp)
movl %edi,%ecx
- xorl %r13d,%eax
- bswapl %edx
+ bswapl %ebp
+ xorl %r12d,%eax
roll $5,%ecx
- leal 1518500249(%rbp,%rsi,1),%esi
andl %r11d,%eax
- movl %edx,40(%rsp)
+ leal 1518500249(%rdx,%rsi,1),%esi
addl %ecx,%esi
xorl %r13d,%eax
roll $30,%r11d
addl %eax,%esi
- movl %r11d,%eax
- movl 44(%r9),%ebp
+ movl 44(%r9),%r14d
+ movl %r12d,%eax
+ movl %ebp,40(%rsp)
movl %esi,%ecx
- xorl %r12d,%eax
- bswapl %ebp
+ bswapl %r14d
+ xorl %r11d,%eax
roll $5,%ecx
- leal 1518500249(%rdx,%r13,1),%r13d
andl %edi,%eax
- movl %ebp,44(%rsp)
+ leal 1518500249(%rbp,%r13,1),%r13d
addl %ecx,%r13d
xorl %r12d,%eax
roll $30,%edi
addl %eax,%r13d
- movl %edi,%eax
movl 48(%r9),%edx
+ movl %r11d,%eax
+ movl %r14d,44(%rsp)
movl %r13d,%ecx
- xorl %r11d,%eax
bswapl %edx
+ xorl %edi,%eax
roll $5,%ecx
- leal 1518500249(%rbp,%r12,1),%r12d
andl %esi,%eax
- movl %edx,48(%rsp)
+ leal 1518500249(%r14,%r12,1),%r12d
addl %ecx,%r12d
xorl %r11d,%eax
roll $30,%esi
addl %eax,%r12d
- movl %esi,%eax
movl 52(%r9),%ebp
+ movl %edi,%eax
+ movl %edx,48(%rsp)
movl %r12d,%ecx
- xorl %edi,%eax
bswapl %ebp
+ xorl %esi,%eax
roll $5,%ecx
- leal 1518500249(%rdx,%r11,1),%r11d
andl %r13d,%eax
- movl %ebp,52(%rsp)
+ leal 1518500249(%rdx,%r11,1),%r11d
addl %ecx,%r11d
xorl %edi,%eax
roll $30,%r13d
addl %eax,%r11d
- movl %r13d,%eax
- movl 56(%r9),%edx
+ movl 56(%r9),%r14d
+ movl %esi,%eax
+ movl %ebp,52(%rsp)
movl %r11d,%ecx
- xorl %esi,%eax
- bswapl %edx
+ bswapl %r14d
+ xorl %r13d,%eax
roll $5,%ecx
- leal 1518500249(%rbp,%rdi,1),%edi
andl %r12d,%eax
- movl %edx,56(%rsp)
+ leal 1518500249(%rbp,%rdi,1),%edi
addl %ecx,%edi
xorl %esi,%eax
roll $30,%r12d
addl %eax,%edi
- movl %r12d,%eax
- movl 60(%r9),%ebp
+ movl 60(%r9),%edx
+ movl %r13d,%eax
+ movl %r14d,56(%rsp)
movl %edi,%ecx
- xorl %r13d,%eax
- bswapl %ebp
+ bswapl %edx
+ xorl %r12d,%eax
roll $5,%ecx
- leal 1518500249(%rdx,%rsi,1),%esi
andl %r11d,%eax
- movl %ebp,60(%rsp)
+ leal 1518500249(%r14,%rsi,1),%esi
addl %ecx,%esi
xorl %r13d,%eax
roll $30,%r11d
addl %eax,%esi
- movl 0(%rsp),%edx
- movl %r11d,%eax
+ xorl 0(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edx,60(%rsp)
movl %esi,%ecx
- xorl 8(%rsp),%edx
- xorl %r12d,%eax
+ xorl 8(%rsp),%ebp
+ xorl %r11d,%eax
roll $5,%ecx
- xorl 32(%rsp),%edx
+ xorl 32(%rsp),%ebp
andl %edi,%eax
- leal 1518500249(%rbp,%r13,1),%r13d
- xorl 52(%rsp),%edx
+ leal 1518500249(%rdx,%r13,1),%r13d
+ roll $30,%edi
xorl %r12d,%eax
- roll $1,%edx
addl %ecx,%r13d
- roll $30,%edi
- movl %edx,0(%rsp)
+ roll $1,%ebp
addl %eax,%r13d
- movl 4(%rsp),%ebp
- movl %edi,%eax
+ xorl 4(%rsp),%r14d
+ movl %r11d,%eax
+ movl %ebp,0(%rsp)
movl %r13d,%ecx
- xorl 12(%rsp),%ebp
- xorl %r11d,%eax
+ xorl 12(%rsp),%r14d
+ xorl %edi,%eax
roll $5,%ecx
- xorl 36(%rsp),%ebp
+ xorl 36(%rsp),%r14d
andl %esi,%eax
- leal 1518500249(%rdx,%r12,1),%r12d
- xorl 56(%rsp),%ebp
+ leal 1518500249(%rbp,%r12,1),%r12d
+ roll $30,%esi
xorl %r11d,%eax
- roll $1,%ebp
addl %ecx,%r12d
- roll $30,%esi
- movl %ebp,4(%rsp)
+ roll $1,%r14d
addl %eax,%r12d
- movl 8(%rsp),%edx
- movl %esi,%eax
+ xorl 8(%rsp),%edx
+ movl %edi,%eax
+ movl %r14d,4(%rsp)
movl %r12d,%ecx
xorl 16(%rsp),%edx
- xorl %edi,%eax
+ xorl %esi,%eax
roll $5,%ecx
xorl 40(%rsp),%edx
andl %r13d,%eax
- leal 1518500249(%rbp,%r11,1),%r11d
- xorl 60(%rsp),%edx
+ leal 1518500249(%r14,%r11,1),%r11d
+ roll $30,%r13d
xorl %edi,%eax
- roll $1,%edx
addl %ecx,%r11d
- roll $30,%r13d
- movl %edx,8(%rsp)
+ roll $1,%edx
addl %eax,%r11d
- movl 12(%rsp),%ebp
- movl %r13d,%eax
+ xorl 12(%rsp),%ebp
+ movl %esi,%eax
+ movl %edx,8(%rsp)
movl %r11d,%ecx
xorl 20(%rsp),%ebp
- xorl %esi,%eax
+ xorl %r13d,%eax
roll $5,%ecx
xorl 44(%rsp),%ebp
andl %r12d,%eax
leal 1518500249(%rdx,%rdi,1),%edi
- xorl 0(%rsp),%ebp
+ roll $30,%r12d
xorl %esi,%eax
- roll $1,%ebp
addl %ecx,%edi
- roll $30,%r12d
- movl %ebp,12(%rsp)
+ roll $1,%ebp
addl %eax,%edi
- movl 16(%rsp),%edx
- movl %r12d,%eax
+ xorl 16(%rsp),%r14d
+ movl %r13d,%eax
+ movl %ebp,12(%rsp)
movl %edi,%ecx
- xorl 24(%rsp),%edx
- xorl %r13d,%eax
+ xorl 24(%rsp),%r14d
+ xorl %r12d,%eax
roll $5,%ecx
- xorl 48(%rsp),%edx
+ xorl 48(%rsp),%r14d
andl %r11d,%eax
leal 1518500249(%rbp,%rsi,1),%esi
- xorl 4(%rsp),%edx
+ roll $30,%r11d
xorl %r13d,%eax
- roll $1,%edx
addl %ecx,%esi
- roll $30,%r11d
- movl %edx,16(%rsp)
+ roll $1,%r14d
addl %eax,%esi
- movl 20(%rsp),%ebp
- movl %r11d,%eax
+ xorl 20(%rsp),%edx
+ movl %edi,%eax
+ movl %r14d,16(%rsp)
movl %esi,%ecx
- xorl 28(%rsp),%ebp
- xorl %edi,%eax
- roll $5,%ecx
- leal 1859775393(%rdx,%r13,1),%r13d
- xorl 52(%rsp),%ebp
+ xorl 28(%rsp),%edx
xorl %r12d,%eax
+ roll $5,%ecx
+ xorl 52(%rsp),%edx
+ leal 1859775393(%r14,%r13,1),%r13d
+ xorl %r11d,%eax
addl %ecx,%r13d
- xorl 8(%rsp),%ebp
roll $30,%edi
addl %eax,%r13d
- roll $1,%ebp
- movl %ebp,20(%rsp)
- movl 24(%rsp),%edx
- movl %edi,%eax
+ roll $1,%edx
+ xorl 24(%rsp),%ebp
+ movl %esi,%eax
+ movl %edx,20(%rsp)
movl %r13d,%ecx
- xorl 32(%rsp),%edx
- xorl %esi,%eax
- roll $5,%ecx
- leal 1859775393(%rbp,%r12,1),%r12d
- xorl 56(%rsp),%edx
+ xorl 32(%rsp),%ebp
xorl %r11d,%eax
+ roll $5,%ecx
+ xorl 56(%rsp),%ebp
+ leal 1859775393(%rdx,%r12,1),%r12d
+ xorl %edi,%eax
addl %ecx,%r12d
- xorl 12(%rsp),%edx
roll $30,%esi
addl %eax,%r12d
- roll $1,%edx
- movl %edx,24(%rsp)
- movl 28(%rsp),%ebp
- movl %esi,%eax
+ roll $1,%ebp
+ xorl 28(%rsp),%r14d
+ movl %r13d,%eax
+ movl %ebp,24(%rsp)
movl %r12d,%ecx
- xorl 36(%rsp),%ebp
- xorl %r13d,%eax
- roll $5,%ecx
- leal 1859775393(%rdx,%r11,1),%r11d
- xorl 60(%rsp),%ebp
+ xorl 36(%rsp),%r14d
xorl %edi,%eax
+ roll $5,%ecx
+ xorl 60(%rsp),%r14d
+ leal 1859775393(%rbp,%r11,1),%r11d
+ xorl %esi,%eax
addl %ecx,%r11d
- xorl 16(%rsp),%ebp
roll $30,%r13d
addl %eax,%r11d
- roll $1,%ebp
- movl %ebp,28(%rsp)
- movl 32(%rsp),%edx
- movl %r13d,%eax
+ roll $1,%r14d
+ xorl 32(%rsp),%edx
+ movl %r12d,%eax
+ movl %r14d,28(%rsp)
movl %r11d,%ecx
xorl 40(%rsp),%edx
- xorl %r12d,%eax
+ xorl %esi,%eax
roll $5,%ecx
- leal 1859775393(%rbp,%rdi,1),%edi
xorl 0(%rsp),%edx
- xorl %esi,%eax
+ leal 1859775393(%r14,%rdi,1),%edi
+ xorl %r13d,%eax
addl %ecx,%edi
- xorl 20(%rsp),%edx
roll $30,%r12d
addl %eax,%edi
roll $1,%edx
+ xorl 36(%rsp),%ebp
+ movl %r11d,%eax
movl %edx,32(%rsp)
- movl 36(%rsp),%ebp
- movl %r12d,%eax
movl %edi,%ecx
xorl 44(%rsp),%ebp
- xorl %r11d,%eax
+ xorl %r13d,%eax
roll $5,%ecx
- leal 1859775393(%rdx,%rsi,1),%esi
xorl 4(%rsp),%ebp
- xorl %r13d,%eax
+ leal 1859775393(%rdx,%rsi,1),%esi
+ xorl %r12d,%eax
addl %ecx,%esi
- xorl 24(%rsp),%ebp
roll $30,%r11d
addl %eax,%esi
roll $1,%ebp
+ xorl 40(%rsp),%r14d
+ movl %edi,%eax
movl %ebp,36(%rsp)
- movl 40(%rsp),%edx
- movl %r11d,%eax
movl %esi,%ecx
- xorl 48(%rsp),%edx
- xorl %edi,%eax
+ xorl 48(%rsp),%r14d
+ xorl %r12d,%eax
roll $5,%ecx
+ xorl 8(%rsp),%r14d
leal 1859775393(%rbp,%r13,1),%r13d
- xorl 8(%rsp),%edx
- xorl %r12d,%eax
+ xorl %r11d,%eax
addl %ecx,%r13d
- xorl 28(%rsp),%edx
roll $30,%edi
addl %eax,%r13d
- roll $1,%edx
- movl %edx,40(%rsp)
- movl 44(%rsp),%ebp
- movl %edi,%eax
+ roll $1,%r14d
+ xorl 44(%rsp),%edx
+ movl %esi,%eax
+ movl %r14d,40(%rsp)
movl %r13d,%ecx
- xorl 52(%rsp),%ebp
- xorl %esi,%eax
- roll $5,%ecx
- leal 1859775393(%rdx,%r12,1),%r12d
- xorl 12(%rsp),%ebp
+ xorl 52(%rsp),%edx
xorl %r11d,%eax
+ roll $5,%ecx
+ xorl 12(%rsp),%edx
+ leal 1859775393(%r14,%r12,1),%r12d
+ xorl %edi,%eax
addl %ecx,%r12d
- xorl 32(%rsp),%ebp
roll $30,%esi
addl %eax,%r12d
- roll $1,%ebp
- movl %ebp,44(%rsp)
- movl 48(%rsp),%edx
- movl %esi,%eax
+ roll $1,%edx
+ xorl 48(%rsp),%ebp
+ movl %r13d,%eax
+ movl %edx,44(%rsp)
movl %r12d,%ecx
- xorl 56(%rsp),%edx
- xorl %r13d,%eax
- roll $5,%ecx
- leal 1859775393(%rbp,%r11,1),%r11d
- xorl 16(%rsp),%edx
+ xorl 56(%rsp),%ebp
xorl %edi,%eax
+ roll $5,%ecx
+ xorl 16(%rsp),%ebp
+ leal 1859775393(%rdx,%r11,1),%r11d
+ xorl %esi,%eax
addl %ecx,%r11d
- xorl 36(%rsp),%edx
roll $30,%r13d
addl %eax,%r11d
- roll $1,%edx
- movl %edx,48(%rsp)
- movl 52(%rsp),%ebp
- movl %r13d,%eax
+ roll $1,%ebp
+ xorl 52(%rsp),%r14d
+ movl %r12d,%eax
+ movl %ebp,48(%rsp)
movl %r11d,%ecx
- xorl 60(%rsp),%ebp
- xorl %r12d,%eax
- roll $5,%ecx
- leal 1859775393(%rdx,%rdi,1),%edi
- xorl 20(%rsp),%ebp
+ xorl 60(%rsp),%r14d
xorl %esi,%eax
+ roll $5,%ecx
+ xorl 20(%rsp),%r14d
+ leal 1859775393(%rbp,%rdi,1),%edi
+ xorl %r13d,%eax
addl %ecx,%edi
- xorl 40(%rsp),%ebp
roll $30,%r12d
addl %eax,%edi
- roll $1,%ebp
- movl %ebp,52(%rsp)
- movl 56(%rsp),%edx
- movl %r12d,%eax
+ roll $1,%r14d
+ xorl 56(%rsp),%edx
+ movl %r11d,%eax
+ movl %r14d,52(%rsp)
movl %edi,%ecx
xorl 0(%rsp),%edx
- xorl %r11d,%eax
+ xorl %r13d,%eax
roll $5,%ecx
- leal 1859775393(%rbp,%rsi,1),%esi
xorl 24(%rsp),%edx
- xorl %r13d,%eax
+ leal 1859775393(%r14,%rsi,1),%esi
+ xorl %r12d,%eax
addl %ecx,%esi
- xorl 44(%rsp),%edx
roll $30,%r11d
addl %eax,%esi
roll $1,%edx
+ xorl 60(%rsp),%ebp
+ movl %edi,%eax
movl %edx,56(%rsp)
- movl 60(%rsp),%ebp
- movl %r11d,%eax
movl %esi,%ecx
xorl 4(%rsp),%ebp
- xorl %edi,%eax
+ xorl %r12d,%eax
roll $5,%ecx
- leal 1859775393(%rdx,%r13,1),%r13d
xorl 28(%rsp),%ebp
- xorl %r12d,%eax
+ leal 1859775393(%rdx,%r13,1),%r13d
+ xorl %r11d,%eax
addl %ecx,%r13d
- xorl 48(%rsp),%ebp
roll $30,%edi
addl %eax,%r13d
roll $1,%ebp
+ xorl 0(%rsp),%r14d
+ movl %esi,%eax
movl %ebp,60(%rsp)
- movl 0(%rsp),%edx
- movl %edi,%eax
movl %r13d,%ecx
- xorl 8(%rsp),%edx
- xorl %esi,%eax
+ xorl 8(%rsp),%r14d
+ xorl %r11d,%eax
roll $5,%ecx
+ xorl 32(%rsp),%r14d
leal 1859775393(%rbp,%r12,1),%r12d
- xorl 32(%rsp),%edx
- xorl %r11d,%eax
+ xorl %edi,%eax
addl %ecx,%r12d
- xorl 52(%rsp),%edx
roll $30,%esi
addl %eax,%r12d
- roll $1,%edx
- movl %edx,0(%rsp)
- movl 4(%rsp),%ebp
- movl %esi,%eax
+ roll $1,%r14d
+ xorl 4(%rsp),%edx
+ movl %r13d,%eax
+ movl %r14d,0(%rsp)
movl %r12d,%ecx
- xorl 12(%rsp),%ebp
- xorl %r13d,%eax
- roll $5,%ecx
- leal 1859775393(%rdx,%r11,1),%r11d
- xorl 36(%rsp),%ebp
+ xorl 12(%rsp),%edx
xorl %edi,%eax
+ roll $5,%ecx
+ xorl 36(%rsp),%edx
+ leal 1859775393(%r14,%r11,1),%r11d
+ xorl %esi,%eax
addl %ecx,%r11d
- xorl 56(%rsp),%ebp
roll $30,%r13d
addl %eax,%r11d
- roll $1,%ebp
- movl %ebp,4(%rsp)
- movl 8(%rsp),%edx
- movl %r13d,%eax
+ roll $1,%edx
+ xorl 8(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edx,4(%rsp)
movl %r11d,%ecx
- xorl 16(%rsp),%edx
- xorl %r12d,%eax
- roll $5,%ecx
- leal 1859775393(%rbp,%rdi,1),%edi
- xorl 40(%rsp),%edx
+ xorl 16(%rsp),%ebp
xorl %esi,%eax
+ roll $5,%ecx
+ xorl 40(%rsp),%ebp
+ leal 1859775393(%rdx,%rdi,1),%edi
+ xorl %r13d,%eax
addl %ecx,%edi
- xorl 60(%rsp),%edx
roll $30,%r12d
addl %eax,%edi
- roll $1,%edx
- movl %edx,8(%rsp)
- movl 12(%rsp),%ebp
- movl %r12d,%eax
+ roll $1,%ebp
+ xorl 12(%rsp),%r14d
+ movl %r11d,%eax
+ movl %ebp,8(%rsp)
movl %edi,%ecx
- xorl 20(%rsp),%ebp
- xorl %r11d,%eax
- roll $5,%ecx
- leal 1859775393(%rdx,%rsi,1),%esi
- xorl 44(%rsp),%ebp
+ xorl 20(%rsp),%r14d
xorl %r13d,%eax
+ roll $5,%ecx
+ xorl 44(%rsp),%r14d
+ leal 1859775393(%rbp,%rsi,1),%esi
+ xorl %r12d,%eax
addl %ecx,%esi
- xorl 0(%rsp),%ebp
roll $30,%r11d
addl %eax,%esi
- roll $1,%ebp
- movl %ebp,12(%rsp)
- movl 16(%rsp),%edx
- movl %r11d,%eax
+ roll $1,%r14d
+ xorl 16(%rsp),%edx
+ movl %edi,%eax
+ movl %r14d,12(%rsp)
movl %esi,%ecx
xorl 24(%rsp),%edx
- xorl %edi,%eax
+ xorl %r12d,%eax
roll $5,%ecx
- leal 1859775393(%rbp,%r13,1),%r13d
xorl 48(%rsp),%edx
- xorl %r12d,%eax
+ leal 1859775393(%r14,%r13,1),%r13d
+ xorl %r11d,%eax
addl %ecx,%r13d
- xorl 4(%rsp),%edx
roll $30,%edi
addl %eax,%r13d
roll $1,%edx
+ xorl 20(%rsp),%ebp
+ movl %esi,%eax
movl %edx,16(%rsp)
- movl 20(%rsp),%ebp
- movl %edi,%eax
movl %r13d,%ecx
xorl 28(%rsp),%ebp
- xorl %esi,%eax
+ xorl %r11d,%eax
roll $5,%ecx
- leal 1859775393(%rdx,%r12,1),%r12d
xorl 52(%rsp),%ebp
- xorl %r11d,%eax
+ leal 1859775393(%rdx,%r12,1),%r12d
+ xorl %edi,%eax
addl %ecx,%r12d
- xorl 8(%rsp),%ebp
roll $30,%esi
addl %eax,%r12d
roll $1,%ebp
+ xorl 24(%rsp),%r14d
+ movl %r13d,%eax
movl %ebp,20(%rsp)
- movl 24(%rsp),%edx
- movl %esi,%eax
movl %r12d,%ecx
- xorl 32(%rsp),%edx
- xorl %r13d,%eax
+ xorl 32(%rsp),%r14d
+ xorl %edi,%eax
roll $5,%ecx
+ xorl 56(%rsp),%r14d
leal 1859775393(%rbp,%r11,1),%r11d
- xorl 56(%rsp),%edx
- xorl %edi,%eax
+ xorl %esi,%eax
addl %ecx,%r11d
- xorl 12(%rsp),%edx
roll $30,%r13d
addl %eax,%r11d
- roll $1,%edx
- movl %edx,24(%rsp)
- movl 28(%rsp),%ebp
- movl %r13d,%eax
+ roll $1,%r14d
+ xorl 28(%rsp),%edx
+ movl %r12d,%eax
+ movl %r14d,24(%rsp)
movl %r11d,%ecx
- xorl 36(%rsp),%ebp
- xorl %r12d,%eax
- roll $5,%ecx
- leal 1859775393(%rdx,%rdi,1),%edi
- xorl 60(%rsp),%ebp
+ xorl 36(%rsp),%edx
xorl %esi,%eax
+ roll $5,%ecx
+ xorl 60(%rsp),%edx
+ leal 1859775393(%r14,%rdi,1),%edi
+ xorl %r13d,%eax
addl %ecx,%edi
- xorl 16(%rsp),%ebp
roll $30,%r12d
addl %eax,%edi
- roll $1,%ebp
- movl %ebp,28(%rsp)
- movl 32(%rsp),%edx
- movl %r12d,%eax
+ roll $1,%edx
+ xorl 32(%rsp),%ebp
+ movl %r11d,%eax
+ movl %edx,28(%rsp)
movl %edi,%ecx
- xorl 40(%rsp),%edx
- xorl %r11d,%eax
- roll $5,%ecx
- leal 1859775393(%rbp,%rsi,1),%esi
- xorl 0(%rsp),%edx
+ xorl 40(%rsp),%ebp
xorl %r13d,%eax
+ roll $5,%ecx
+ xorl 0(%rsp),%ebp
+ leal 1859775393(%rdx,%rsi,1),%esi
+ xorl %r12d,%eax
addl %ecx,%esi
- xorl 20(%rsp),%edx
roll $30,%r11d
addl %eax,%esi
- roll $1,%edx
- movl %edx,32(%rsp)
- movl 36(%rsp),%ebp
- movl %r11d,%eax
- movl %r11d,%ebx
- xorl 44(%rsp),%ebp
- andl %r12d,%eax
+ roll $1,%ebp
+ xorl 36(%rsp),%r14d
+ movl %r12d,%eax
+ movl %ebp,32(%rsp)
+ movl %r12d,%ebx
+ xorl 44(%rsp),%r14d
+ andl %r11d,%eax
movl %esi,%ecx
- xorl 4(%rsp),%ebp
- xorl %r12d,%ebx
- leal -1894007588(%rdx,%r13,1),%r13d
+ xorl 4(%rsp),%r14d
+ leal -1894007588(%rbp,%r13,1),%r13d
+ xorl %r11d,%ebx
roll $5,%ecx
- xorl 24(%rsp),%ebp
addl %eax,%r13d
+ roll $1,%r14d
andl %edi,%ebx
- roll $1,%ebp
- addl %ebx,%r13d
- roll $30,%edi
- movl %ebp,36(%rsp)
addl %ecx,%r13d
- movl 40(%rsp),%edx
- movl %edi,%eax
- movl %edi,%ebx
+ roll $30,%edi
+ addl %ebx,%r13d
+ xorl 40(%rsp),%edx
+ movl %r11d,%eax
+ movl %r14d,36(%rsp)
+ movl %r11d,%ebx
xorl 48(%rsp),%edx
- andl %r11d,%eax
+ andl %edi,%eax
movl %r13d,%ecx
xorl 8(%rsp),%edx
- xorl %r11d,%ebx
- leal -1894007588(%rbp,%r12,1),%r12d
+ leal -1894007588(%r14,%r12,1),%r12d
+ xorl %edi,%ebx
roll $5,%ecx
- xorl 28(%rsp),%edx
addl %eax,%r12d
- andl %esi,%ebx
roll $1,%edx
- addl %ebx,%r12d
+ andl %esi,%ebx
+ addl %ecx,%r12d
roll $30,%esi
+ addl %ebx,%r12d
+ xorl 44(%rsp),%ebp
+ movl %edi,%eax
movl %edx,40(%rsp)
- addl %ecx,%r12d
- movl 44(%rsp),%ebp
- movl %esi,%eax
- movl %esi,%ebx
+ movl %edi,%ebx
xorl 52(%rsp),%ebp
- andl %edi,%eax
+ andl %esi,%eax
movl %r12d,%ecx
xorl 12(%rsp),%ebp
- xorl %edi,%ebx
leal -1894007588(%rdx,%r11,1),%r11d
+ xorl %esi,%ebx
roll $5,%ecx
- xorl 32(%rsp),%ebp
addl %eax,%r11d
- andl %r13d,%ebx
roll $1,%ebp
- addl %ebx,%r11d
+ andl %r13d,%ebx
+ addl %ecx,%r11d
roll $30,%r13d
+ addl %ebx,%r11d
+ xorl 48(%rsp),%r14d
+ movl %esi,%eax
movl %ebp,44(%rsp)
- addl %ecx,%r11d
- movl 48(%rsp),%edx
- movl %r13d,%eax
- movl %r13d,%ebx
- xorl 56(%rsp),%edx
- andl %esi,%eax
+ movl %esi,%ebx
+ xorl 56(%rsp),%r14d
+ andl %r13d,%eax
movl %r11d,%ecx
- xorl 16(%rsp),%edx
- xorl %esi,%ebx
+ xorl 16(%rsp),%r14d
leal -1894007588(%rbp,%rdi,1),%edi
+ xorl %r13d,%ebx
roll $5,%ecx
- xorl 36(%rsp),%edx
addl %eax,%edi
+ roll $1,%r14d
andl %r12d,%ebx
- roll $1,%edx
- addl %ebx,%edi
- roll $30,%r12d
- movl %edx,48(%rsp)
addl %ecx,%edi
- movl 52(%rsp),%ebp
- movl %r12d,%eax
- movl %r12d,%ebx
- xorl 60(%rsp),%ebp
- andl %r13d,%eax
+ roll $30,%r12d
+ addl %ebx,%edi
+ xorl 52(%rsp),%edx
+ movl %r13d,%eax
+ movl %r14d,48(%rsp)
+ movl %r13d,%ebx
+ xorl 60(%rsp),%edx
+ andl %r12d,%eax
movl %edi,%ecx
- xorl 20(%rsp),%ebp
- xorl %r13d,%ebx
- leal -1894007588(%rdx,%rsi,1),%esi
+ xorl 20(%rsp),%edx
+ leal -1894007588(%r14,%rsi,1),%esi
+ xorl %r12d,%ebx
roll $5,%ecx
- xorl 40(%rsp),%ebp
addl %eax,%esi
+ roll $1,%edx
andl %r11d,%ebx
- roll $1,%ebp
- addl %ebx,%esi
- roll $30,%r11d
- movl %ebp,52(%rsp)
addl %ecx,%esi
- movl 56(%rsp),%edx
- movl %r11d,%eax
- movl %r11d,%ebx
- xorl 0(%rsp),%edx
- andl %r12d,%eax
+ roll $30,%r11d
+ addl %ebx,%esi
+ xorl 56(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edx,52(%rsp)
+ movl %r12d,%ebx
+ xorl 0(%rsp),%ebp
+ andl %r11d,%eax
movl %esi,%ecx
- xorl 24(%rsp),%edx
- xorl %r12d,%ebx
- leal -1894007588(%rbp,%r13,1),%r13d
+ xorl 24(%rsp),%ebp
+ leal -1894007588(%rdx,%r13,1),%r13d
+ xorl %r11d,%ebx
roll $5,%ecx
- xorl 44(%rsp),%edx
addl %eax,%r13d
+ roll $1,%ebp
andl %edi,%ebx
- roll $1,%edx
- addl %ebx,%r13d
- roll $30,%edi
- movl %edx,56(%rsp)
addl %ecx,%r13d
- movl 60(%rsp),%ebp
- movl %edi,%eax
- movl %edi,%ebx
- xorl 4(%rsp),%ebp
- andl %r11d,%eax
+ roll $30,%edi
+ addl %ebx,%r13d
+ xorl 60(%rsp),%r14d
+ movl %r11d,%eax
+ movl %ebp,56(%rsp)
+ movl %r11d,%ebx
+ xorl 4(%rsp),%r14d
+ andl %edi,%eax
movl %r13d,%ecx
- xorl 28(%rsp),%ebp
- xorl %r11d,%ebx
- leal -1894007588(%rdx,%r12,1),%r12d
+ xorl 28(%rsp),%r14d
+ leal -1894007588(%rbp,%r12,1),%r12d
+ xorl %edi,%ebx
roll $5,%ecx
- xorl 48(%rsp),%ebp
addl %eax,%r12d
+ roll $1,%r14d
andl %esi,%ebx
- roll $1,%ebp
- addl %ebx,%r12d
- roll $30,%esi
- movl %ebp,60(%rsp)
addl %ecx,%r12d
- movl 0(%rsp),%edx
- movl %esi,%eax
- movl %esi,%ebx
+ roll $30,%esi
+ addl %ebx,%r12d
+ xorl 0(%rsp),%edx
+ movl %edi,%eax
+ movl %r14d,60(%rsp)
+ movl %edi,%ebx
xorl 8(%rsp),%edx
- andl %edi,%eax
+ andl %esi,%eax
movl %r12d,%ecx
xorl 32(%rsp),%edx
- xorl %edi,%ebx
- leal -1894007588(%rbp,%r11,1),%r11d
+ leal -1894007588(%r14,%r11,1),%r11d
+ xorl %esi,%ebx
roll $5,%ecx
- xorl 52(%rsp),%edx
addl %eax,%r11d
- andl %r13d,%ebx
roll $1,%edx
- addl %ebx,%r11d
+ andl %r13d,%ebx
+ addl %ecx,%r11d
roll $30,%r13d
+ addl %ebx,%r11d
+ xorl 4(%rsp),%ebp
+ movl %esi,%eax
movl %edx,0(%rsp)
- addl %ecx,%r11d
- movl 4(%rsp),%ebp
- movl %r13d,%eax
- movl %r13d,%ebx
+ movl %esi,%ebx
xorl 12(%rsp),%ebp
- andl %esi,%eax
+ andl %r13d,%eax
movl %r11d,%ecx
xorl 36(%rsp),%ebp
- xorl %esi,%ebx
leal -1894007588(%rdx,%rdi,1),%edi
+ xorl %r13d,%ebx
roll $5,%ecx
- xorl 56(%rsp),%ebp
addl %eax,%edi
- andl %r12d,%ebx
roll $1,%ebp
- addl %ebx,%edi
+ andl %r12d,%ebx
+ addl %ecx,%edi
roll $30,%r12d
+ addl %ebx,%edi
+ xorl 8(%rsp),%r14d
+ movl %r13d,%eax
movl %ebp,4(%rsp)
- addl %ecx,%edi
- movl 8(%rsp),%edx
- movl %r12d,%eax
- movl %r12d,%ebx
- xorl 16(%rsp),%edx
- andl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 16(%rsp),%r14d
+ andl %r12d,%eax
movl %edi,%ecx
- xorl 40(%rsp),%edx
- xorl %r13d,%ebx
+ xorl 40(%rsp),%r14d
leal -1894007588(%rbp,%rsi,1),%esi
+ xorl %r12d,%ebx
roll $5,%ecx
- xorl 60(%rsp),%edx
addl %eax,%esi
+ roll $1,%r14d
andl %r11d,%ebx
- roll $1,%edx
- addl %ebx,%esi
- roll $30,%r11d
- movl %edx,8(%rsp)
addl %ecx,%esi
- movl 12(%rsp),%ebp
- movl %r11d,%eax
- movl %r11d,%ebx
- xorl 20(%rsp),%ebp
- andl %r12d,%eax
+ roll $30,%r11d
+ addl %ebx,%esi
+ xorl 12(%rsp),%edx
+ movl %r12d,%eax
+ movl %r14d,8(%rsp)
+ movl %r12d,%ebx
+ xorl 20(%rsp),%edx
+ andl %r11d,%eax
movl %esi,%ecx
- xorl 44(%rsp),%ebp
- xorl %r12d,%ebx
- leal -1894007588(%rdx,%r13,1),%r13d
+ xorl 44(%rsp),%edx
+ leal -1894007588(%r14,%r13,1),%r13d
+ xorl %r11d,%ebx
roll $5,%ecx
- xorl 0(%rsp),%ebp
addl %eax,%r13d
+ roll $1,%edx
andl %edi,%ebx
- roll $1,%ebp
- addl %ebx,%r13d
- roll $30,%edi
- movl %ebp,12(%rsp)
addl %ecx,%r13d
- movl 16(%rsp),%edx
- movl %edi,%eax
- movl %edi,%ebx
- xorl 24(%rsp),%edx
- andl %r11d,%eax
+ roll $30,%edi
+ addl %ebx,%r13d
+ xorl 16(%rsp),%ebp
+ movl %r11d,%eax
+ movl %edx,12(%rsp)
+ movl %r11d,%ebx
+ xorl 24(%rsp),%ebp
+ andl %edi,%eax
movl %r13d,%ecx
- xorl 48(%rsp),%edx
- xorl %r11d,%ebx
- leal -1894007588(%rbp,%r12,1),%r12d
+ xorl 48(%rsp),%ebp
+ leal -1894007588(%rdx,%r12,1),%r12d
+ xorl %edi,%ebx
roll $5,%ecx
- xorl 4(%rsp),%edx
addl %eax,%r12d
+ roll $1,%ebp
andl %esi,%ebx
- roll $1,%edx
- addl %ebx,%r12d
- roll $30,%esi
- movl %edx,16(%rsp)
addl %ecx,%r12d
- movl 20(%rsp),%ebp
- movl %esi,%eax
- movl %esi,%ebx
- xorl 28(%rsp),%ebp
- andl %edi,%eax
+ roll $30,%esi
+ addl %ebx,%r12d
+ xorl 20(%rsp),%r14d
+ movl %edi,%eax
+ movl %ebp,16(%rsp)
+ movl %edi,%ebx
+ xorl 28(%rsp),%r14d
+ andl %esi,%eax
movl %r12d,%ecx
- xorl 52(%rsp),%ebp
- xorl %edi,%ebx
- leal -1894007588(%rdx,%r11,1),%r11d
+ xorl 52(%rsp),%r14d
+ leal -1894007588(%rbp,%r11,1),%r11d
+ xorl %esi,%ebx
roll $5,%ecx
- xorl 8(%rsp),%ebp
addl %eax,%r11d
+ roll $1,%r14d
andl %r13d,%ebx
- roll $1,%ebp
- addl %ebx,%r11d
- roll $30,%r13d
- movl %ebp,20(%rsp)
addl %ecx,%r11d
- movl 24(%rsp),%edx
- movl %r13d,%eax
- movl %r13d,%ebx
+ roll $30,%r13d
+ addl %ebx,%r11d
+ xorl 24(%rsp),%edx
+ movl %esi,%eax
+ movl %r14d,20(%rsp)
+ movl %esi,%ebx
xorl 32(%rsp),%edx
- andl %esi,%eax
+ andl %r13d,%eax
movl %r11d,%ecx
xorl 56(%rsp),%edx
- xorl %esi,%ebx
- leal -1894007588(%rbp,%rdi,1),%edi
+ leal -1894007588(%r14,%rdi,1),%edi
+ xorl %r13d,%ebx
roll $5,%ecx
- xorl 12(%rsp),%edx
addl %eax,%edi
- andl %r12d,%ebx
roll $1,%edx
- addl %ebx,%edi
+ andl %r12d,%ebx
+ addl %ecx,%edi
roll $30,%r12d
+ addl %ebx,%edi
+ xorl 28(%rsp),%ebp
+ movl %r13d,%eax
movl %edx,24(%rsp)
- addl %ecx,%edi
- movl 28(%rsp),%ebp
- movl %r12d,%eax
- movl %r12d,%ebx
+ movl %r13d,%ebx
xorl 36(%rsp),%ebp
- andl %r13d,%eax
+ andl %r12d,%eax
movl %edi,%ecx
xorl 60(%rsp),%ebp
- xorl %r13d,%ebx
leal -1894007588(%rdx,%rsi,1),%esi
+ xorl %r12d,%ebx
roll $5,%ecx
- xorl 16(%rsp),%ebp
addl %eax,%esi
- andl %r11d,%ebx
roll $1,%ebp
- addl %ebx,%esi
+ andl %r11d,%ebx
+ addl %ecx,%esi
roll $30,%r11d
+ addl %ebx,%esi
+ xorl 32(%rsp),%r14d
+ movl %r12d,%eax
movl %ebp,28(%rsp)
- addl %ecx,%esi
- movl 32(%rsp),%edx
- movl %r11d,%eax
- movl %r11d,%ebx
- xorl 40(%rsp),%edx
- andl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 40(%rsp),%r14d
+ andl %r11d,%eax
movl %esi,%ecx
- xorl 0(%rsp),%edx
- xorl %r12d,%ebx
+ xorl 0(%rsp),%r14d
leal -1894007588(%rbp,%r13,1),%r13d
+ xorl %r11d,%ebx
roll $5,%ecx
- xorl 20(%rsp),%edx
addl %eax,%r13d
+ roll $1,%r14d
andl %edi,%ebx
- roll $1,%edx
- addl %ebx,%r13d
- roll $30,%edi
- movl %edx,32(%rsp)
addl %ecx,%r13d
- movl 36(%rsp),%ebp
- movl %edi,%eax
- movl %edi,%ebx
- xorl 44(%rsp),%ebp
- andl %r11d,%eax
+ roll $30,%edi
+ addl %ebx,%r13d
+ xorl 36(%rsp),%edx
+ movl %r11d,%eax
+ movl %r14d,32(%rsp)
+ movl %r11d,%ebx
+ xorl 44(%rsp),%edx
+ andl %edi,%eax
movl %r13d,%ecx
- xorl 4(%rsp),%ebp
- xorl %r11d,%ebx
- leal -1894007588(%rdx,%r12,1),%r12d
+ xorl 4(%rsp),%edx
+ leal -1894007588(%r14,%r12,1),%r12d
+ xorl %edi,%ebx
roll $5,%ecx
- xorl 24(%rsp),%ebp
addl %eax,%r12d
+ roll $1,%edx
andl %esi,%ebx
- roll $1,%ebp
- addl %ebx,%r12d
- roll $30,%esi
- movl %ebp,36(%rsp)
addl %ecx,%r12d
- movl 40(%rsp),%edx
- movl %esi,%eax
- movl %esi,%ebx
- xorl 48(%rsp),%edx
- andl %edi,%eax
+ roll $30,%esi
+ addl %ebx,%r12d
+ xorl 40(%rsp),%ebp
+ movl %edi,%eax
+ movl %edx,36(%rsp)
+ movl %edi,%ebx
+ xorl 48(%rsp),%ebp
+ andl %esi,%eax
movl %r12d,%ecx
- xorl 8(%rsp),%edx
- xorl %edi,%ebx
- leal -1894007588(%rbp,%r11,1),%r11d
+ xorl 8(%rsp),%ebp
+ leal -1894007588(%rdx,%r11,1),%r11d
+ xorl %esi,%ebx
roll $5,%ecx
- xorl 28(%rsp),%edx
addl %eax,%r11d
+ roll $1,%ebp
andl %r13d,%ebx
- roll $1,%edx
- addl %ebx,%r11d
- roll $30,%r13d
- movl %edx,40(%rsp)
addl %ecx,%r11d
- movl 44(%rsp),%ebp
- movl %r13d,%eax
- movl %r13d,%ebx
- xorl 52(%rsp),%ebp
- andl %esi,%eax
+ roll $30,%r13d
+ addl %ebx,%r11d
+ xorl 44(%rsp),%r14d
+ movl %esi,%eax
+ movl %ebp,40(%rsp)
+ movl %esi,%ebx
+ xorl 52(%rsp),%r14d
+ andl %r13d,%eax
movl %r11d,%ecx
- xorl 12(%rsp),%ebp
- xorl %esi,%ebx
- leal -1894007588(%rdx,%rdi,1),%edi
+ xorl 12(%rsp),%r14d
+ leal -1894007588(%rbp,%rdi,1),%edi
+ xorl %r13d,%ebx
roll $5,%ecx
- xorl 32(%rsp),%ebp
addl %eax,%edi
+ roll $1,%r14d
andl %r12d,%ebx
- roll $1,%ebp
- addl %ebx,%edi
- roll $30,%r12d
- movl %ebp,44(%rsp)
addl %ecx,%edi
- movl 48(%rsp),%edx
- movl %r12d,%eax
- movl %r12d,%ebx
+ roll $30,%r12d
+ addl %ebx,%edi
+ xorl 48(%rsp),%edx
+ movl %r13d,%eax
+ movl %r14d,44(%rsp)
+ movl %r13d,%ebx
xorl 56(%rsp),%edx
- andl %r13d,%eax
+ andl %r12d,%eax
movl %edi,%ecx
xorl 16(%rsp),%edx
- xorl %r13d,%ebx
- leal -1894007588(%rbp,%rsi,1),%esi
+ leal -1894007588(%r14,%rsi,1),%esi
+ xorl %r12d,%ebx
roll $5,%ecx
- xorl 36(%rsp),%edx
addl %eax,%esi
- andl %r11d,%ebx
roll $1,%edx
- addl %ebx,%esi
+ andl %r11d,%ebx
+ addl %ecx,%esi
roll $30,%r11d
+ addl %ebx,%esi
+ xorl 52(%rsp),%ebp
+ movl %edi,%eax
movl %edx,48(%rsp)
- addl %ecx,%esi
- movl 52(%rsp),%ebp
- movl %r11d,%eax
movl %esi,%ecx
xorl 60(%rsp),%ebp
- xorl %edi,%eax
+ xorl %r12d,%eax
roll $5,%ecx
- leal -899497514(%rdx,%r13,1),%r13d
xorl 20(%rsp),%ebp
- xorl %r12d,%eax
+ leal -899497514(%rdx,%r13,1),%r13d
+ xorl %r11d,%eax
addl %ecx,%r13d
- xorl 40(%rsp),%ebp
roll $30,%edi
addl %eax,%r13d
roll $1,%ebp
+ xorl 56(%rsp),%r14d
+ movl %esi,%eax
movl %ebp,52(%rsp)
- movl 56(%rsp),%edx
- movl %edi,%eax
movl %r13d,%ecx
- xorl 0(%rsp),%edx
- xorl %esi,%eax
+ xorl 0(%rsp),%r14d
+ xorl %r11d,%eax
roll $5,%ecx
+ xorl 24(%rsp),%r14d
leal -899497514(%rbp,%r12,1),%r12d
- xorl 24(%rsp),%edx
- xorl %r11d,%eax
+ xorl %edi,%eax
addl %ecx,%r12d
- xorl 44(%rsp),%edx
roll $30,%esi
addl %eax,%r12d
- roll $1,%edx
- movl %edx,56(%rsp)
- movl 60(%rsp),%ebp
- movl %esi,%eax
+ roll $1,%r14d
+ xorl 60(%rsp),%edx
+ movl %r13d,%eax
+ movl %r14d,56(%rsp)
movl %r12d,%ecx
- xorl 4(%rsp),%ebp
- xorl %r13d,%eax
- roll $5,%ecx
- leal -899497514(%rdx,%r11,1),%r11d
- xorl 28(%rsp),%ebp
+ xorl 4(%rsp),%edx
xorl %edi,%eax
+ roll $5,%ecx
+ xorl 28(%rsp),%edx
+ leal -899497514(%r14,%r11,1),%r11d
+ xorl %esi,%eax
addl %ecx,%r11d
- xorl 48(%rsp),%ebp
roll $30,%r13d
addl %eax,%r11d
- roll $1,%ebp
- movl %ebp,60(%rsp)
- movl 0(%rsp),%edx
- movl %r13d,%eax
+ roll $1,%edx
+ xorl 0(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edx,60(%rsp)
movl %r11d,%ecx
- xorl 8(%rsp),%edx
- xorl %r12d,%eax
- roll $5,%ecx
- leal -899497514(%rbp,%rdi,1),%edi
- xorl 32(%rsp),%edx
+ xorl 8(%rsp),%ebp
xorl %esi,%eax
+ roll $5,%ecx
+ xorl 32(%rsp),%ebp
+ leal -899497514(%rdx,%rdi,1),%edi
+ xorl %r13d,%eax
addl %ecx,%edi
- xorl 52(%rsp),%edx
roll $30,%r12d
addl %eax,%edi
- roll $1,%edx
- movl %edx,0(%rsp)
- movl 4(%rsp),%ebp
- movl %r12d,%eax
+ roll $1,%ebp
+ xorl 4(%rsp),%r14d
+ movl %r11d,%eax
+ movl %ebp,0(%rsp)
movl %edi,%ecx
- xorl 12(%rsp),%ebp
- xorl %r11d,%eax
- roll $5,%ecx
- leal -899497514(%rdx,%rsi,1),%esi
- xorl 36(%rsp),%ebp
+ xorl 12(%rsp),%r14d
xorl %r13d,%eax
+ roll $5,%ecx
+ xorl 36(%rsp),%r14d
+ leal -899497514(%rbp,%rsi,1),%esi
+ xorl %r12d,%eax
addl %ecx,%esi
- xorl 56(%rsp),%ebp
roll $30,%r11d
addl %eax,%esi
- roll $1,%ebp
- movl %ebp,4(%rsp)
- movl 8(%rsp),%edx
- movl %r11d,%eax
+ roll $1,%r14d
+ xorl 8(%rsp),%edx
+ movl %edi,%eax
+ movl %r14d,4(%rsp)
movl %esi,%ecx
xorl 16(%rsp),%edx
- xorl %edi,%eax
+ xorl %r12d,%eax
roll $5,%ecx
- leal -899497514(%rbp,%r13,1),%r13d
xorl 40(%rsp),%edx
- xorl %r12d,%eax
+ leal -899497514(%r14,%r13,1),%r13d
+ xorl %r11d,%eax
addl %ecx,%r13d
- xorl 60(%rsp),%edx
roll $30,%edi
addl %eax,%r13d
roll $1,%edx
+ xorl 12(%rsp),%ebp
+ movl %esi,%eax
movl %edx,8(%rsp)
- movl 12(%rsp),%ebp
- movl %edi,%eax
movl %r13d,%ecx
xorl 20(%rsp),%ebp
- xorl %esi,%eax
+ xorl %r11d,%eax
roll $5,%ecx
- leal -899497514(%rdx,%r12,1),%r12d
xorl 44(%rsp),%ebp
- xorl %r11d,%eax
+ leal -899497514(%rdx,%r12,1),%r12d
+ xorl %edi,%eax
addl %ecx,%r12d
- xorl 0(%rsp),%ebp
roll $30,%esi
addl %eax,%r12d
roll $1,%ebp
+ xorl 16(%rsp),%r14d
+ movl %r13d,%eax
movl %ebp,12(%rsp)
- movl 16(%rsp),%edx
- movl %esi,%eax
movl %r12d,%ecx
- xorl 24(%rsp),%edx
- xorl %r13d,%eax
+ xorl 24(%rsp),%r14d
+ xorl %edi,%eax
roll $5,%ecx
+ xorl 48(%rsp),%r14d
leal -899497514(%rbp,%r11,1),%r11d
- xorl 48(%rsp),%edx
- xorl %edi,%eax
+ xorl %esi,%eax
addl %ecx,%r11d
- xorl 4(%rsp),%edx
roll $30,%r13d
addl %eax,%r11d
- roll $1,%edx
- movl %edx,16(%rsp)
- movl 20(%rsp),%ebp
- movl %r13d,%eax
+ roll $1,%r14d
+ xorl 20(%rsp),%edx
+ movl %r12d,%eax
+ movl %r14d,16(%rsp)
movl %r11d,%ecx
- xorl 28(%rsp),%ebp
- xorl %r12d,%eax
- roll $5,%ecx
- leal -899497514(%rdx,%rdi,1),%edi
- xorl 52(%rsp),%ebp
+ xorl 28(%rsp),%edx
xorl %esi,%eax
+ roll $5,%ecx
+ xorl 52(%rsp),%edx
+ leal -899497514(%r14,%rdi,1),%edi
+ xorl %r13d,%eax
addl %ecx,%edi
- xorl 8(%rsp),%ebp
roll $30,%r12d
addl %eax,%edi
- roll $1,%ebp
- movl %ebp,20(%rsp)
- movl 24(%rsp),%edx
- movl %r12d,%eax
+ roll $1,%edx
+ xorl 24(%rsp),%ebp
+ movl %r11d,%eax
+ movl %edx,20(%rsp)
movl %edi,%ecx
- xorl 32(%rsp),%edx
- xorl %r11d,%eax
- roll $5,%ecx
- leal -899497514(%rbp,%rsi,1),%esi
- xorl 56(%rsp),%edx
+ xorl 32(%rsp),%ebp
xorl %r13d,%eax
+ roll $5,%ecx
+ xorl 56(%rsp),%ebp
+ leal -899497514(%rdx,%rsi,1),%esi
+ xorl %r12d,%eax
addl %ecx,%esi
- xorl 12(%rsp),%edx
roll $30,%r11d
addl %eax,%esi
- roll $1,%edx
- movl %edx,24(%rsp)
- movl 28(%rsp),%ebp
- movl %r11d,%eax
+ roll $1,%ebp
+ xorl 28(%rsp),%r14d
+ movl %edi,%eax
+ movl %ebp,24(%rsp)
movl %esi,%ecx
- xorl 36(%rsp),%ebp
- xorl %edi,%eax
- roll $5,%ecx
- leal -899497514(%rdx,%r13,1),%r13d
- xorl 60(%rsp),%ebp
+ xorl 36(%rsp),%r14d
xorl %r12d,%eax
+ roll $5,%ecx
+ xorl 60(%rsp),%r14d
+ leal -899497514(%rbp,%r13,1),%r13d
+ xorl %r11d,%eax
addl %ecx,%r13d
- xorl 16(%rsp),%ebp
roll $30,%edi
addl %eax,%r13d
- roll $1,%ebp
- movl %ebp,28(%rsp)
- movl 32(%rsp),%edx
- movl %edi,%eax
+ roll $1,%r14d
+ xorl 32(%rsp),%edx
+ movl %esi,%eax
+ movl %r14d,28(%rsp)
movl %r13d,%ecx
xorl 40(%rsp),%edx
- xorl %esi,%eax
+ xorl %r11d,%eax
roll $5,%ecx
- leal -899497514(%rbp,%r12,1),%r12d
xorl 0(%rsp),%edx
- xorl %r11d,%eax
+ leal -899497514(%r14,%r12,1),%r12d
+ xorl %edi,%eax
addl %ecx,%r12d
- xorl 20(%rsp),%edx
roll $30,%esi
addl %eax,%r12d
roll $1,%edx
- movl %edx,32(%rsp)
- movl 36(%rsp),%ebp
- movl %esi,%eax
+ xorl 36(%rsp),%ebp
+ movl %r13d,%eax
+
movl %r12d,%ecx
xorl 44(%rsp),%ebp
- xorl %r13d,%eax
+ xorl %edi,%eax
roll $5,%ecx
- leal -899497514(%rdx,%r11,1),%r11d
xorl 4(%rsp),%ebp
- xorl %edi,%eax
+ leal -899497514(%rdx,%r11,1),%r11d
+ xorl %esi,%eax
addl %ecx,%r11d
- xorl 24(%rsp),%ebp
roll $30,%r13d
addl %eax,%r11d
roll $1,%ebp
- movl %ebp,36(%rsp)
- movl 40(%rsp),%edx
- movl %r13d,%eax
+ xorl 40(%rsp),%r14d
+ movl %r12d,%eax
+
movl %r11d,%ecx
- xorl 48(%rsp),%edx
- xorl %r12d,%eax
+ xorl 48(%rsp),%r14d
+ xorl %esi,%eax
roll $5,%ecx
+ xorl 8(%rsp),%r14d
leal -899497514(%rbp,%rdi,1),%edi
- xorl 8(%rsp),%edx
- xorl %esi,%eax
+ xorl %r13d,%eax
addl %ecx,%edi
- xorl 28(%rsp),%edx
roll $30,%r12d
addl %eax,%edi
- roll $1,%edx
- movl %edx,40(%rsp)
- movl 44(%rsp),%ebp
- movl %r12d,%eax
+ roll $1,%r14d
+ xorl 44(%rsp),%edx
+ movl %r11d,%eax
+
movl %edi,%ecx
- xorl 52(%rsp),%ebp
- xorl %r11d,%eax
- roll $5,%ecx
- leal -899497514(%rdx,%rsi,1),%esi
- xorl 12(%rsp),%ebp
+ xorl 52(%rsp),%edx
xorl %r13d,%eax
+ roll $5,%ecx
+ xorl 12(%rsp),%edx
+ leal -899497514(%r14,%rsi,1),%esi
+ xorl %r12d,%eax
addl %ecx,%esi
- xorl 32(%rsp),%ebp
roll $30,%r11d
addl %eax,%esi
- roll $1,%ebp
- movl %ebp,44(%rsp)
- movl 48(%rsp),%edx
- movl %r11d,%eax
+ roll $1,%edx
+ xorl 48(%rsp),%ebp
+ movl %edi,%eax
+
movl %esi,%ecx
- xorl 56(%rsp),%edx
- xorl %edi,%eax
- roll $5,%ecx
- leal -899497514(%rbp,%r13,1),%r13d
- xorl 16(%rsp),%edx
+ xorl 56(%rsp),%ebp
xorl %r12d,%eax
+ roll $5,%ecx
+ xorl 16(%rsp),%ebp
+ leal -899497514(%rdx,%r13,1),%r13d
+ xorl %r11d,%eax
addl %ecx,%r13d
- xorl 36(%rsp),%edx
roll $30,%edi
addl %eax,%r13d
- roll $1,%edx
- movl %edx,48(%rsp)
- movl 52(%rsp),%ebp
- movl %edi,%eax
+ roll $1,%ebp
+ xorl 52(%rsp),%r14d
+ movl %esi,%eax
+
movl %r13d,%ecx
- xorl 60(%rsp),%ebp
- xorl %esi,%eax
- roll $5,%ecx
- leal -899497514(%rdx,%r12,1),%r12d
- xorl 20(%rsp),%ebp
+ xorl 60(%rsp),%r14d
xorl %r11d,%eax
+ roll $5,%ecx
+ xorl 20(%rsp),%r14d
+ leal -899497514(%rbp,%r12,1),%r12d
+ xorl %edi,%eax
addl %ecx,%r12d
- xorl 40(%rsp),%ebp
roll $30,%esi
addl %eax,%r12d
- roll $1,%ebp
- movl 56(%rsp),%edx
- movl %esi,%eax
+ roll $1,%r14d
+ xorl 56(%rsp),%edx
+ movl %r13d,%eax
+
movl %r12d,%ecx
xorl 0(%rsp),%edx
- xorl %r13d,%eax
+ xorl %edi,%eax
roll $5,%ecx
- leal -899497514(%rbp,%r11,1),%r11d
xorl 24(%rsp),%edx
- xorl %edi,%eax
+ leal -899497514(%r14,%r11,1),%r11d
+ xorl %esi,%eax
addl %ecx,%r11d
- xorl 44(%rsp),%edx
roll $30,%r13d
addl %eax,%r11d
roll $1,%edx
- movl 60(%rsp),%ebp
- movl %r13d,%eax
+ xorl 60(%rsp),%ebp
+ movl %r12d,%eax
+
movl %r11d,%ecx
xorl 4(%rsp),%ebp
- xorl %r12d,%eax
+ xorl %esi,%eax
roll $5,%ecx
- leal -899497514(%rdx,%rdi,1),%edi
xorl 28(%rsp),%ebp
- xorl %esi,%eax
+ leal -899497514(%rdx,%rdi,1),%edi
+ xorl %r13d,%eax
addl %ecx,%edi
- xorl 48(%rsp),%ebp
roll $30,%r12d
addl %eax,%edi
roll $1,%ebp
- movl %r12d,%eax
+ movl %r11d,%eax
movl %edi,%ecx
- xorl %r11d,%eax
+ xorl %r13d,%eax
leal -899497514(%rbp,%rsi,1),%esi
roll $5,%ecx
- xorl %r13d,%eax
+ xorl %r12d,%eax
addl %ecx,%esi
roll $30,%r11d
addl %eax,%esi
@@ -1281,29 +1223,202 @@ sha1_block_data_order:
jnz .Lloop
movq 64(%rsp),%rsi
- movq (%rsi),%r13
- movq 8(%rsi),%r12
- movq 16(%rsi),%rbp
- movq 24(%rsi),%rbx
- leaq 32(%rsi),%rsp
+ movq -40(%rsi),%r14
+ movq -32(%rsi),%r13
+ movq -24(%rsi),%r12
+ movq -16(%rsi),%rbp
+ movq -8(%rsi),%rbx
+ leaq (%rsi),%rsp
.Lepilogue:
.byte 0xf3,0xc3
.size sha1_block_data_order,.-sha1_block_data_order
+.type sha1_block_data_order_shaext,@function
+.align 32
+sha1_block_data_order_shaext:
+_shaext_shortcut:
+ movdqu (%rdi),%xmm0
+ movd 16(%rdi),%xmm1
+ movdqa K_XX_XX+160(%rip),%xmm3
+
+ movdqu (%rsi),%xmm4
+ pshufd $27,%xmm0,%xmm0
+ movdqu 16(%rsi),%xmm5
+ pshufd $27,%xmm1,%xmm1
+ movdqu 32(%rsi),%xmm6
+.byte 102,15,56,0,227
+ movdqu 48(%rsi),%xmm7
+.byte 102,15,56,0,235
+.byte 102,15,56,0,243
+ movdqa %xmm1,%xmm9
+.byte 102,15,56,0,251
+ jmp .Loop_shaext
+
+.align 16
+.Loop_shaext:
+ decq %rdx
+ leaq 64(%rsi),%rax
+ paddd %xmm4,%xmm1
+ cmovneq %rax,%rsi
+ movdqa %xmm0,%xmm8
+.byte 15,56,201,229
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,0
+.byte 15,56,200,213
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+.byte 15,56,202,231
+
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,0
+.byte 15,56,200,206
+ pxor %xmm7,%xmm5
+.byte 15,56,202,236
+.byte 15,56,201,247
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,0
+.byte 15,56,200,215
+ pxor %xmm4,%xmm6
+.byte 15,56,201,252
+.byte 15,56,202,245
+
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,0
+.byte 15,56,200,204
+ pxor %xmm5,%xmm7
+.byte 15,56,202,254
+.byte 15,56,201,229
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,0
+.byte 15,56,200,213
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+.byte 15,56,202,231
+
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,1
+.byte 15,56,200,206
+ pxor %xmm7,%xmm5
+.byte 15,56,202,236
+.byte 15,56,201,247
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,1
+.byte 15,56,200,215
+ pxor %xmm4,%xmm6
+.byte 15,56,201,252
+.byte 15,56,202,245
+
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,1
+.byte 15,56,200,204
+ pxor %xmm5,%xmm7
+.byte 15,56,202,254
+.byte 15,56,201,229
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,1
+.byte 15,56,200,213
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+.byte 15,56,202,231
+
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,1
+.byte 15,56,200,206
+ pxor %xmm7,%xmm5
+.byte 15,56,202,236
+.byte 15,56,201,247
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,2
+.byte 15,56,200,215
+ pxor %xmm4,%xmm6
+.byte 15,56,201,252
+.byte 15,56,202,245
+
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,2
+.byte 15,56,200,204
+ pxor %xmm5,%xmm7
+.byte 15,56,202,254
+.byte 15,56,201,229
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,2
+.byte 15,56,200,213
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+.byte 15,56,202,231
+
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,2
+.byte 15,56,200,206
+ pxor %xmm7,%xmm5
+.byte 15,56,202,236
+.byte 15,56,201,247
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,2
+.byte 15,56,200,215
+ pxor %xmm4,%xmm6
+.byte 15,56,201,252
+.byte 15,56,202,245
+
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,3
+.byte 15,56,200,204
+ pxor %xmm5,%xmm7
+.byte 15,56,202,254
+ movdqu (%rsi),%xmm4
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,3
+.byte 15,56,200,213
+ movdqu 16(%rsi),%xmm5
+.byte 102,15,56,0,227
+
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,3
+.byte 15,56,200,206
+ movdqu 32(%rsi),%xmm6
+.byte 102,15,56,0,235
+
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,3
+.byte 15,56,200,215
+ movdqu 48(%rsi),%xmm7
+.byte 102,15,56,0,243
+
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,3
+.byte 65,15,56,200,201
+.byte 102,15,56,0,251
+
+ paddd %xmm8,%xmm0
+ movdqa %xmm1,%xmm9
+
+ jnz .Loop_shaext
+
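+# All input consumed: undo the pshufd lane reversal applied at entry and
+# store A-E back into the hash context at (%rdi).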
+ pshufd $27,%xmm0,%xmm0
+ pshufd $27,%xmm1,%xmm1
+ movdqu %xmm0,(%rdi)
+ movd %xmm1,16(%rdi)
+ .byte 0xf3,0xc3
+.size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext
.type sha1_block_data_order_ssse3,@function
.align 16
sha1_block_data_order_ssse3:
_ssse3_shortcut:
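+# The reworked prologue keeps the entry %rsp in %rax, saves %rbx, %rbp and
+# %r12-%r14, and carves out a 64-byte-aligned scratch frame; %r14 retains
+# the entry stack pointer for the epilogue.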
+ movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
+ pushq %r13
+ pushq %r14
leaq -64(%rsp),%rsp
+ movq %rax,%r14
+ andq $-64,%rsp
movq %rdi,%r8
movq %rsi,%r9
movq %rdx,%r10
shlq $6,%r10
addq %r9,%r10
- leaq K_XX_XX(%rip),%r11
+ leaq K_XX_XX+64(%rip),%r11
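+# %r11 now points 64 bytes into K_XX_XX, so every round-constant row and
+# the byte-swap mask are reachable with short signed 8-bit displacements.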
movl 0(%r8),%eax
movl 4(%r8),%ebx
@@ -1311,19 +1426,22 @@ _ssse3_shortcut:
movl 12(%r8),%edx
movl %ebx,%esi
movl 16(%r8),%ebp
+ movl %ecx,%edi
+ xorl %edx,%edi
+ andl %edi,%esi
movdqa 64(%r11),%xmm6
- movdqa 0(%r11),%xmm9
+ movdqa -64(%r11),%xmm9
movdqu 0(%r9),%xmm0
movdqu 16(%r9),%xmm1
movdqu 32(%r9),%xmm2
movdqu 48(%r9),%xmm3
.byte 102,15,56,0,198
- addq $64,%r9
.byte 102,15,56,0,206
.byte 102,15,56,0,214
-.byte 102,15,56,0,222
+ addq $64,%r9
paddd %xmm9,%xmm0
+.byte 102,15,56,0,222
paddd %xmm9,%xmm1
paddd %xmm9,%xmm2
movdqa %xmm0,0(%rsp)
@@ -1335,904 +1453,882 @@ _ssse3_shortcut:
jmp .Loop_ssse3
.align 16
.Loop_ssse3:
- movdqa %xmm1,%xmm4
- addl 0(%rsp),%ebp
- xorl %edx,%ecx
+ rorl $2,%ebx
+ pshufd $238,%xmm0,%xmm4
+ xorl %edx,%esi
movdqa %xmm3,%xmm8
-.byte 102,15,58,15,224,8
+ paddd %xmm3,%xmm9
movl %eax,%edi
+ addl 0(%rsp),%ebp
+ punpcklqdq %xmm1,%xmm4
+ xorl %ecx,%ebx
roll $5,%eax
- paddd %xmm3,%xmm9
- andl %ecx,%esi
- xorl %edx,%ecx
+ addl %esi,%ebp
psrldq $4,%xmm8
- xorl %edx,%esi
- addl %eax,%ebp
+ andl %ebx,%edi
+ xorl %ecx,%ebx
pxor %xmm0,%xmm4
- rorl $2,%ebx
- addl %esi,%ebp
+ addl %eax,%ebp
+ rorl $7,%eax
pxor %xmm2,%xmm8
- addl 4(%rsp),%edx
- xorl %ecx,%ebx
+ xorl %ecx,%edi
movl %ebp,%esi
- roll $5,%ebp
+ addl 4(%rsp),%edx
pxor %xmm8,%xmm4
- andl %ebx,%edi
- xorl %ecx,%ebx
+ xorl %ebx,%eax
+ roll $5,%ebp
movdqa %xmm9,48(%rsp)
- xorl %ecx,%edi
- addl %ebp,%edx
- movdqa %xmm4,%xmm10
- movdqa %xmm4,%xmm8
- rorl $7,%eax
addl %edi,%edx
- addl 8(%rsp),%ecx
+ andl %eax,%esi
+ movdqa %xmm4,%xmm10
xorl %ebx,%eax
+ addl %ebp,%edx
+ rorl $7,%ebp
+ movdqa %xmm4,%xmm8
+ xorl %ebx,%esi
pslldq $12,%xmm10
paddd %xmm4,%xmm4
movl %edx,%edi
- roll $5,%edx
- andl %eax,%esi
- xorl %ebx,%eax
+ addl 8(%rsp),%ecx
psrld $31,%xmm8
- xorl %ebx,%esi
- addl %edx,%ecx
- movdqa %xmm10,%xmm9
- rorl $7,%ebp
+ xorl %eax,%ebp
+ roll $5,%edx
addl %esi,%ecx
+ movdqa %xmm10,%xmm9
+ andl %ebp,%edi
+ xorl %eax,%ebp
psrld $30,%xmm10
+ addl %edx,%ecx
+ rorl $7,%edx
por %xmm8,%xmm4
- addl 12(%rsp),%ebx
- xorl %eax,%ebp
+ xorl %eax,%edi
movl %ecx,%esi
- roll $5,%ecx
+ addl 12(%rsp),%ebx
pslld $2,%xmm9
pxor %xmm10,%xmm4
- andl %ebp,%edi
- xorl %eax,%ebp
- movdqa 0(%r11),%xmm10
- xorl %eax,%edi
- addl %ecx,%ebx
- pxor %xmm9,%xmm4
- rorl $7,%edx
+ xorl %ebp,%edx
+ movdqa -64(%r11),%xmm10
+ roll $5,%ecx
addl %edi,%ebx
- movdqa %xmm2,%xmm5
- addl 16(%rsp),%eax
+ andl %edx,%esi
+ pxor %xmm9,%xmm4
xorl %ebp,%edx
+ addl %ecx,%ebx
+ rorl $7,%ecx
+ pshufd $238,%xmm1,%xmm5
+ xorl %ebp,%esi
movdqa %xmm4,%xmm9
-.byte 102,15,58,15,233,8
+ paddd %xmm4,%xmm10
movl %ebx,%edi
+ addl 16(%rsp),%eax
+ punpcklqdq %xmm2,%xmm5
+ xorl %edx,%ecx
roll $5,%ebx
- paddd %xmm4,%xmm10
- andl %edx,%esi
- xorl %ebp,%edx
+ addl %esi,%eax
psrldq $4,%xmm9
- xorl %ebp,%esi
- addl %ebx,%eax
+ andl %ecx,%edi
+ xorl %edx,%ecx
pxor %xmm1,%xmm5
- rorl $7,%ecx
- addl %esi,%eax
+ addl %ebx,%eax
+ rorl $7,%ebx
pxor %xmm3,%xmm9
- addl 20(%rsp),%ebp
- xorl %edx,%ecx
+ xorl %edx,%edi
movl %eax,%esi
- roll $5,%eax
+ addl 20(%rsp),%ebp
pxor %xmm9,%xmm5
- andl %ecx,%edi
- xorl %edx,%ecx
+ xorl %ecx,%ebx
+ roll $5,%eax
movdqa %xmm10,0(%rsp)
- xorl %edx,%edi
- addl %eax,%ebp
- movdqa %xmm5,%xmm8
- movdqa %xmm5,%xmm9
- rorl $7,%ebx
addl %edi,%ebp
- addl 24(%rsp),%edx
+ andl %ebx,%esi
+ movdqa %xmm5,%xmm8
xorl %ecx,%ebx
+ addl %eax,%ebp
+ rorl $7,%eax
+ movdqa %xmm5,%xmm9
+ xorl %ecx,%esi
pslldq $12,%xmm8
paddd %xmm5,%xmm5
movl %ebp,%edi
- roll $5,%ebp
- andl %ebx,%esi
- xorl %ecx,%ebx
+ addl 24(%rsp),%edx
psrld $31,%xmm9
- xorl %ecx,%esi
- addl %ebp,%edx
- movdqa %xmm8,%xmm10
- rorl $7,%eax
+ xorl %ebx,%eax
+ roll $5,%ebp
addl %esi,%edx
+ movdqa %xmm8,%xmm10
+ andl %eax,%edi
+ xorl %ebx,%eax
psrld $30,%xmm8
+ addl %ebp,%edx
+ rorl $7,%ebp
por %xmm9,%xmm5
- addl 28(%rsp),%ecx
- xorl %ebx,%eax
+ xorl %ebx,%edi
movl %edx,%esi
- roll $5,%edx
+ addl 28(%rsp),%ecx
pslld $2,%xmm10
pxor %xmm8,%xmm5
- andl %eax,%edi
- xorl %ebx,%eax
- movdqa 16(%r11),%xmm8
- xorl %ebx,%edi
- addl %edx,%ecx
- pxor %xmm10,%xmm5
- rorl $7,%ebp
+ xorl %eax,%ebp
+ movdqa -32(%r11),%xmm8
+ roll $5,%edx
addl %edi,%ecx
- movdqa %xmm3,%xmm6
- addl 32(%rsp),%ebx
+ andl %ebp,%esi
+ pxor %xmm10,%xmm5
xorl %eax,%ebp
+ addl %edx,%ecx
+ rorl $7,%edx
+ pshufd $238,%xmm2,%xmm6
+ xorl %eax,%esi
movdqa %xmm5,%xmm10
-.byte 102,15,58,15,242,8
+ paddd %xmm5,%xmm8
movl %ecx,%edi
+ addl 32(%rsp),%ebx
+ punpcklqdq %xmm3,%xmm6
+ xorl %ebp,%edx
roll $5,%ecx
- paddd %xmm5,%xmm8
- andl %ebp,%esi
- xorl %eax,%ebp
+ addl %esi,%ebx
psrldq $4,%xmm10
- xorl %eax,%esi
- addl %ecx,%ebx
+ andl %edx,%edi
+ xorl %ebp,%edx
pxor %xmm2,%xmm6
- rorl $7,%edx
- addl %esi,%ebx
+ addl %ecx,%ebx
+ rorl $7,%ecx
pxor %xmm4,%xmm10
- addl 36(%rsp),%eax
- xorl %ebp,%edx
+ xorl %ebp,%edi
movl %ebx,%esi
- roll $5,%ebx
+ addl 36(%rsp),%eax
pxor %xmm10,%xmm6
- andl %edx,%edi
- xorl %ebp,%edx
+ xorl %edx,%ecx
+ roll $5,%ebx
movdqa %xmm8,16(%rsp)
- xorl %ebp,%edi
- addl %ebx,%eax
- movdqa %xmm6,%xmm9
- movdqa %xmm6,%xmm10
- rorl $7,%ecx
addl %edi,%eax
- addl 40(%rsp),%ebp
+ andl %ecx,%esi
+ movdqa %xmm6,%xmm9
xorl %edx,%ecx
+ addl %ebx,%eax
+ rorl $7,%ebx
+ movdqa %xmm6,%xmm10
+ xorl %edx,%esi
pslldq $12,%xmm9
paddd %xmm6,%xmm6
movl %eax,%edi
- roll $5,%eax
- andl %ecx,%esi
- xorl %edx,%ecx
+ addl 40(%rsp),%ebp
psrld $31,%xmm10
- xorl %edx,%esi
- addl %eax,%ebp
- movdqa %xmm9,%xmm8
- rorl $7,%ebx
+ xorl %ecx,%ebx
+ roll $5,%eax
addl %esi,%ebp
+ movdqa %xmm9,%xmm8
+ andl %ebx,%edi
+ xorl %ecx,%ebx
psrld $30,%xmm9
+ addl %eax,%ebp
+ rorl $7,%eax
por %xmm10,%xmm6
- addl 44(%rsp),%edx
- xorl %ecx,%ebx
+ xorl %ecx,%edi
movl %ebp,%esi
- roll $5,%ebp
+ addl 44(%rsp),%edx
pslld $2,%xmm8
pxor %xmm9,%xmm6
- andl %ebx,%edi
- xorl %ecx,%ebx
- movdqa 16(%r11),%xmm9
- xorl %ecx,%edi
- addl %ebp,%edx
- pxor %xmm8,%xmm6
- rorl $7,%eax
+ xorl %ebx,%eax
+ movdqa -32(%r11),%xmm9
+ roll $5,%ebp
addl %edi,%edx
- movdqa %xmm4,%xmm7
- addl 48(%rsp),%ecx
+ andl %eax,%esi
+ pxor %xmm8,%xmm6
xorl %ebx,%eax
+ addl %ebp,%edx
+ rorl $7,%ebp
+ pshufd $238,%xmm3,%xmm7
+ xorl %ebx,%esi
movdqa %xmm6,%xmm8
-.byte 102,15,58,15,251,8
+ paddd %xmm6,%xmm9
movl %edx,%edi
+ addl 48(%rsp),%ecx
+ punpcklqdq %xmm4,%xmm7
+ xorl %eax,%ebp
roll $5,%edx
- paddd %xmm6,%xmm9
- andl %eax,%esi
- xorl %ebx,%eax
+ addl %esi,%ecx
psrldq $4,%xmm8
- xorl %ebx,%esi
- addl %edx,%ecx
+ andl %ebp,%edi
+ xorl %eax,%ebp
pxor %xmm3,%xmm7
- rorl $7,%ebp
- addl %esi,%ecx
+ addl %edx,%ecx
+ rorl $7,%edx
pxor %xmm5,%xmm8
- addl 52(%rsp),%ebx
- xorl %eax,%ebp
+ xorl %eax,%edi
movl %ecx,%esi
- roll $5,%ecx
+ addl 52(%rsp),%ebx
pxor %xmm8,%xmm7
- andl %ebp,%edi
- xorl %eax,%ebp
+ xorl %ebp,%edx
+ roll $5,%ecx
movdqa %xmm9,32(%rsp)
- xorl %eax,%edi
- addl %ecx,%ebx
- movdqa %xmm7,%xmm10
- movdqa %xmm7,%xmm8
- rorl $7,%edx
addl %edi,%ebx
- addl 56(%rsp),%eax
+ andl %edx,%esi
+ movdqa %xmm7,%xmm10
xorl %ebp,%edx
+ addl %ecx,%ebx
+ rorl $7,%ecx
+ movdqa %xmm7,%xmm8
+ xorl %ebp,%esi
pslldq $12,%xmm10
paddd %xmm7,%xmm7
movl %ebx,%edi
- roll $5,%ebx
- andl %edx,%esi
- xorl %ebp,%edx
+ addl 56(%rsp),%eax
psrld $31,%xmm8
- xorl %ebp,%esi
- addl %ebx,%eax
- movdqa %xmm10,%xmm9
- rorl $7,%ecx
+ xorl %edx,%ecx
+ roll $5,%ebx
addl %esi,%eax
+ movdqa %xmm10,%xmm9
+ andl %ecx,%edi
+ xorl %edx,%ecx
psrld $30,%xmm10
+ addl %ebx,%eax
+ rorl $7,%ebx
por %xmm8,%xmm7
- addl 60(%rsp),%ebp
- xorl %edx,%ecx
+ xorl %edx,%edi
movl %eax,%esi
- roll $5,%eax
+ addl 60(%rsp),%ebp
pslld $2,%xmm9
pxor %xmm10,%xmm7
- andl %ecx,%edi
- xorl %edx,%ecx
- movdqa 16(%r11),%xmm10
- xorl %edx,%edi
- addl %eax,%ebp
- pxor %xmm9,%xmm7
- rorl $7,%ebx
+ xorl %ecx,%ebx
+ movdqa -32(%r11),%xmm10
+ roll $5,%eax
addl %edi,%ebp
- movdqa %xmm7,%xmm9
- addl 0(%rsp),%edx
- pxor %xmm4,%xmm0
-.byte 102,68,15,58,15,206,8
+ andl %ebx,%esi
+ pxor %xmm9,%xmm7
+ pshufd $238,%xmm6,%xmm9
xorl %ecx,%ebx
+ addl %eax,%ebp
+ rorl $7,%eax
+ pxor %xmm4,%xmm0
+ xorl %ecx,%esi
movl %ebp,%edi
+ addl 0(%rsp),%edx
+ punpcklqdq %xmm7,%xmm9
+ xorl %ebx,%eax
roll $5,%ebp
pxor %xmm1,%xmm0
- andl %ebx,%esi
- xorl %ecx,%ebx
+ addl %esi,%edx
+ andl %eax,%edi
movdqa %xmm10,%xmm8
+ xorl %ebx,%eax
paddd %xmm7,%xmm10
- xorl %ecx,%esi
addl %ebp,%edx
pxor %xmm9,%xmm0
- rorl $7,%eax
- addl %esi,%edx
+ rorl $7,%ebp
+ xorl %ebx,%edi
+ movl %edx,%esi
addl 4(%rsp),%ecx
- xorl %ebx,%eax
movdqa %xmm0,%xmm9
- movdqa %xmm10,48(%rsp)
- movl %edx,%esi
+ xorl %eax,%ebp
roll $5,%edx
- andl %eax,%edi
- xorl %ebx,%eax
+ movdqa %xmm10,48(%rsp)
+ addl %edi,%ecx
+ andl %ebp,%esi
+ xorl %eax,%ebp
pslld $2,%xmm0
- xorl %ebx,%edi
addl %edx,%ecx
+ rorl $7,%edx
psrld $30,%xmm9
- rorl $7,%ebp
- addl %edi,%ecx
- addl 8(%rsp),%ebx
- xorl %eax,%ebp
+ xorl %eax,%esi
movl %ecx,%edi
- roll $5,%ecx
+ addl 8(%rsp),%ebx
por %xmm9,%xmm0
- andl %ebp,%esi
- xorl %eax,%ebp
- movdqa %xmm0,%xmm10
- xorl %eax,%esi
- addl %ecx,%ebx
- rorl $7,%edx
- addl %esi,%ebx
- addl 12(%rsp),%eax
xorl %ebp,%edx
- movl %ebx,%esi
- roll $5,%ebx
+ roll $5,%ecx
+ pshufd $238,%xmm7,%xmm10
+ addl %esi,%ebx
andl %edx,%edi
xorl %ebp,%edx
+ addl %ecx,%ebx
+ addl 12(%rsp),%eax
xorl %ebp,%edi
- addl %ebx,%eax
- rorl $7,%ecx
+ movl %ebx,%esi
+ roll $5,%ebx
addl %edi,%eax
- addl 16(%rsp),%ebp
- pxor %xmm5,%xmm1
-.byte 102,68,15,58,15,215,8
xorl %edx,%esi
+ rorl $7,%ecx
+ addl %ebx,%eax
+ pxor %xmm5,%xmm1
+ addl 16(%rsp),%ebp
+ xorl %ecx,%esi
+ punpcklqdq %xmm0,%xmm10
movl %eax,%edi
roll $5,%eax
pxor %xmm2,%xmm1
- xorl %ecx,%esi
- addl %eax,%ebp
+ addl %esi,%ebp
+ xorl %ecx,%edi
movdqa %xmm8,%xmm9
- paddd %xmm0,%xmm8
rorl $7,%ebx
- addl %esi,%ebp
+ paddd %xmm0,%xmm8
+ addl %eax,%ebp
pxor %xmm10,%xmm1
addl 20(%rsp),%edx
- xorl %ecx,%edi
+ xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
movdqa %xmm1,%xmm10
+ addl %edi,%edx
+ xorl %ebx,%esi
movdqa %xmm8,0(%rsp)
- xorl %ebx,%edi
- addl %ebp,%edx
rorl $7,%eax
- addl %edi,%edx
- pslld $2,%xmm1
+ addl %ebp,%edx
addl 24(%rsp),%ecx
- xorl %ebx,%esi
- psrld $30,%xmm10
+ pslld $2,%xmm1
+ xorl %eax,%esi
movl %edx,%edi
+ psrld $30,%xmm10
roll $5,%edx
- xorl %eax,%esi
- addl %edx,%ecx
- rorl $7,%ebp
addl %esi,%ecx
+ xorl %eax,%edi
+ rorl $7,%ebp
por %xmm10,%xmm1
+ addl %edx,%ecx
addl 28(%rsp),%ebx
- xorl %eax,%edi
- movdqa %xmm1,%xmm8
+ pshufd $238,%xmm0,%xmm8
+ xorl %ebp,%edi
movl %ecx,%esi
roll $5,%ecx
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
addl %edi,%ebx
- addl 32(%rsp),%eax
- pxor %xmm6,%xmm2
-.byte 102,68,15,58,15,192,8
xorl %ebp,%esi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ pxor %xmm6,%xmm2
+ addl 32(%rsp),%eax
+ xorl %edx,%esi
+ punpcklqdq %xmm1,%xmm8
movl %ebx,%edi
roll $5,%ebx
pxor %xmm3,%xmm2
- xorl %edx,%esi
- addl %ebx,%eax
- movdqa 32(%r11),%xmm10
- paddd %xmm1,%xmm9
- rorl $7,%ecx
addl %esi,%eax
+ xorl %edx,%edi
+ movdqa 0(%r11),%xmm10
+ rorl $7,%ecx
+ paddd %xmm1,%xmm9
+ addl %ebx,%eax
pxor %xmm8,%xmm2
addl 36(%rsp),%ebp
- xorl %edx,%edi
+ xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
movdqa %xmm2,%xmm8
+ addl %edi,%ebp
+ xorl %ecx,%esi
movdqa %xmm9,16(%rsp)
- xorl %ecx,%edi
- addl %eax,%ebp
rorl $7,%ebx
- addl %edi,%ebp
- pslld $2,%xmm2
+ addl %eax,%ebp
addl 40(%rsp),%edx
- xorl %ecx,%esi
- psrld $30,%xmm8
+ pslld $2,%xmm2
+ xorl %ebx,%esi
movl %ebp,%edi
+ psrld $30,%xmm8
roll $5,%ebp
- xorl %ebx,%esi
- addl %ebp,%edx
- rorl $7,%eax
addl %esi,%edx
+ xorl %ebx,%edi
+ rorl $7,%eax
por %xmm8,%xmm2
+ addl %ebp,%edx
addl 44(%rsp),%ecx
- xorl %ebx,%edi
- movdqa %xmm2,%xmm9
+ pshufd $238,%xmm1,%xmm9
+ xorl %eax,%edi
movl %edx,%esi
roll $5,%edx
- xorl %eax,%edi
- addl %edx,%ecx
- rorl $7,%ebp
addl %edi,%ecx
- addl 48(%rsp),%ebx
- pxor %xmm7,%xmm3
-.byte 102,68,15,58,15,201,8
xorl %eax,%esi
+ rorl $7,%ebp
+ addl %edx,%ecx
+ pxor %xmm7,%xmm3
+ addl 48(%rsp),%ebx
+ xorl %ebp,%esi
+ punpcklqdq %xmm2,%xmm9
movl %ecx,%edi
roll $5,%ecx
pxor %xmm4,%xmm3
- xorl %ebp,%esi
- addl %ecx,%ebx
+ addl %esi,%ebx
+ xorl %ebp,%edi
movdqa %xmm10,%xmm8
- paddd %xmm2,%xmm10
rorl $7,%edx
- addl %esi,%ebx
+ paddd %xmm2,%xmm10
+ addl %ecx,%ebx
pxor %xmm9,%xmm3
addl 52(%rsp),%eax
- xorl %ebp,%edi
+ xorl %edx,%edi
movl %ebx,%esi
roll $5,%ebx
movdqa %xmm3,%xmm9
+ addl %edi,%eax
+ xorl %edx,%esi
movdqa %xmm10,32(%rsp)
- xorl %edx,%edi
- addl %ebx,%eax
rorl $7,%ecx
- addl %edi,%eax
- pslld $2,%xmm3
+ addl %ebx,%eax
addl 56(%rsp),%ebp
- xorl %edx,%esi
- psrld $30,%xmm9
+ pslld $2,%xmm3
+ xorl %ecx,%esi
movl %eax,%edi
+ psrld $30,%xmm9
roll $5,%eax
- xorl %ecx,%esi
- addl %eax,%ebp
- rorl $7,%ebx
addl %esi,%ebp
+ xorl %ecx,%edi
+ rorl $7,%ebx
por %xmm9,%xmm3
+ addl %eax,%ebp
addl 60(%rsp),%edx
- xorl %ecx,%edi
- movdqa %xmm3,%xmm10
+ pshufd $238,%xmm2,%xmm10
+ xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
addl %edi,%edx
- addl 0(%rsp),%ecx
- pxor %xmm0,%xmm4
-.byte 102,68,15,58,15,210,8
xorl %ebx,%esi
+ rorl $7,%eax
+ addl %ebp,%edx
+ pxor %xmm0,%xmm4
+ addl 0(%rsp),%ecx
+ xorl %eax,%esi
+ punpcklqdq %xmm3,%xmm10
movl %edx,%edi
roll $5,%edx
pxor %xmm5,%xmm4
- xorl %eax,%esi
- addl %edx,%ecx
+ addl %esi,%ecx
+ xorl %eax,%edi
movdqa %xmm8,%xmm9
- paddd %xmm3,%xmm8
rorl $7,%ebp
- addl %esi,%ecx
+ paddd %xmm3,%xmm8
+ addl %edx,%ecx
pxor %xmm10,%xmm4
addl 4(%rsp),%ebx
- xorl %eax,%edi
+ xorl %ebp,%edi
movl %ecx,%esi
roll $5,%ecx
movdqa %xmm4,%xmm10
+ addl %edi,%ebx
+ xorl %ebp,%esi
movdqa %xmm8,48(%rsp)
- xorl %ebp,%edi
- addl %ecx,%ebx
rorl $7,%edx
- addl %edi,%ebx
- pslld $2,%xmm4
+ addl %ecx,%ebx
addl 8(%rsp),%eax
- xorl %ebp,%esi
- psrld $30,%xmm10
+ pslld $2,%xmm4
+ xorl %edx,%esi
movl %ebx,%edi
+ psrld $30,%xmm10
roll $5,%ebx
- xorl %edx,%esi
- addl %ebx,%eax
- rorl $7,%ecx
addl %esi,%eax
+ xorl %edx,%edi
+ rorl $7,%ecx
por %xmm10,%xmm4
+ addl %ebx,%eax
addl 12(%rsp),%ebp
- xorl %edx,%edi
- movdqa %xmm4,%xmm8
+ pshufd $238,%xmm3,%xmm8
+ xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
addl %edi,%ebp
- addl 16(%rsp),%edx
- pxor %xmm1,%xmm5
-.byte 102,68,15,58,15,195,8
xorl %ecx,%esi
+ rorl $7,%ebx
+ addl %eax,%ebp
+ pxor %xmm1,%xmm5
+ addl 16(%rsp),%edx
+ xorl %ebx,%esi
+ punpcklqdq %xmm4,%xmm8
movl %ebp,%edi
roll $5,%ebp
pxor %xmm6,%xmm5
- xorl %ebx,%esi
- addl %ebp,%edx
+ addl %esi,%edx
+ xorl %ebx,%edi
movdqa %xmm9,%xmm10
- paddd %xmm4,%xmm9
rorl $7,%eax
- addl %esi,%edx
+ paddd %xmm4,%xmm9
+ addl %ebp,%edx
pxor %xmm8,%xmm5
addl 20(%rsp),%ecx
- xorl %ebx,%edi
+ xorl %eax,%edi
movl %edx,%esi
roll $5,%edx
movdqa %xmm5,%xmm8
+ addl %edi,%ecx
+ xorl %eax,%esi
movdqa %xmm9,0(%rsp)
- xorl %eax,%edi
- addl %edx,%ecx
rorl $7,%ebp
- addl %edi,%ecx
- pslld $2,%xmm5
+ addl %edx,%ecx
addl 24(%rsp),%ebx
- xorl %eax,%esi
- psrld $30,%xmm8
+ pslld $2,%xmm5
+ xorl %ebp,%esi
movl %ecx,%edi
+ psrld $30,%xmm8
roll $5,%ecx
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
+ xorl %ebp,%edi
+ rorl $7,%edx
por %xmm8,%xmm5
+ addl %ecx,%ebx
addl 28(%rsp),%eax
- xorl %ebp,%edi
- movdqa %xmm5,%xmm9
+ pshufd $238,%xmm4,%xmm9
+ rorl $7,%ecx
movl %ebx,%esi
- roll $5,%ebx
xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
+ roll $5,%ebx
addl %edi,%eax
- movl %ecx,%edi
- pxor %xmm2,%xmm6
-.byte 102,68,15,58,15,204,8
+ xorl %ecx,%esi
xorl %edx,%ecx
+ addl %ebx,%eax
+ pxor %xmm2,%xmm6
addl 32(%rsp),%ebp
- andl %edx,%edi
- pxor %xmm7,%xmm6
andl %ecx,%esi
+ xorl %edx,%ecx
rorl $7,%ebx
- movdqa %xmm10,%xmm8
- paddd %xmm5,%xmm10
- addl %edi,%ebp
+ punpcklqdq %xmm5,%xmm9
movl %eax,%edi
- pxor %xmm9,%xmm6
+ xorl %ecx,%esi
+ pxor %xmm7,%xmm6
roll $5,%eax
addl %esi,%ebp
- xorl %edx,%ecx
- addl %eax,%ebp
- movdqa %xmm6,%xmm9
- movdqa %xmm10,16(%rsp)
- movl %ebx,%esi
+ movdqa %xmm10,%xmm8
+ xorl %ebx,%edi
+ paddd %xmm5,%xmm10
xorl %ecx,%ebx
+ pxor %xmm9,%xmm6
+ addl %eax,%ebp
addl 36(%rsp),%edx
- andl %ecx,%esi
- pslld $2,%xmm6
andl %ebx,%edi
+ xorl %ecx,%ebx
rorl $7,%eax
- psrld $30,%xmm9
- addl %esi,%edx
+ movdqa %xmm6,%xmm9
movl %ebp,%esi
+ xorl %ebx,%edi
+ movdqa %xmm10,16(%rsp)
roll $5,%ebp
addl %edi,%edx
- xorl %ecx,%ebx
- addl %ebp,%edx
- por %xmm9,%xmm6
- movl %eax,%edi
+ xorl %eax,%esi
+ pslld $2,%xmm6
xorl %ebx,%eax
- movdqa %xmm6,%xmm10
+ addl %ebp,%edx
+ psrld $30,%xmm9
addl 40(%rsp),%ecx
- andl %ebx,%edi
andl %eax,%esi
+ xorl %ebx,%eax
+ por %xmm9,%xmm6
rorl $7,%ebp
- addl %edi,%ecx
movl %edx,%edi
+ xorl %eax,%esi
roll $5,%edx
+ pshufd $238,%xmm5,%xmm10
addl %esi,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- movl %ebp,%esi
+ xorl %ebp,%edi
xorl %eax,%ebp
+ addl %edx,%ecx
addl 44(%rsp),%ebx
- andl %eax,%esi
andl %ebp,%edi
+ xorl %eax,%ebp
rorl $7,%edx
- addl %esi,%ebx
movl %ecx,%esi
+ xorl %ebp,%edi
roll $5,%ecx
addl %edi,%ebx
- xorl %eax,%ebp
+ xorl %edx,%esi
+ xorl %ebp,%edx
addl %ecx,%ebx
- movl %edx,%edi
pxor %xmm3,%xmm7
-.byte 102,68,15,58,15,213,8
- xorl %ebp,%edx
addl 48(%rsp),%eax
- andl %ebp,%edi
- pxor %xmm0,%xmm7
andl %edx,%esi
+ xorl %ebp,%edx
rorl $7,%ecx
- movdqa 48(%r11),%xmm9
- paddd %xmm6,%xmm8
- addl %edi,%eax
+ punpcklqdq %xmm6,%xmm10
movl %ebx,%edi
- pxor %xmm10,%xmm7
+ xorl %edx,%esi
+ pxor %xmm0,%xmm7
roll $5,%ebx
addl %esi,%eax
- xorl %ebp,%edx
- addl %ebx,%eax
- movdqa %xmm7,%xmm10
- movdqa %xmm8,32(%rsp)
- movl %ecx,%esi
+ movdqa 32(%r11),%xmm9
+ xorl %ecx,%edi
+ paddd %xmm6,%xmm8
xorl %edx,%ecx
+ pxor %xmm10,%xmm7
+ addl %ebx,%eax
addl 52(%rsp),%ebp
- andl %edx,%esi
- pslld $2,%xmm7
andl %ecx,%edi
+ xorl %edx,%ecx
rorl $7,%ebx
- psrld $30,%xmm10
- addl %esi,%ebp
+ movdqa %xmm7,%xmm10
movl %eax,%esi
+ xorl %ecx,%edi
+ movdqa %xmm8,32(%rsp)
roll $5,%eax
addl %edi,%ebp
- xorl %edx,%ecx
- addl %eax,%ebp
- por %xmm10,%xmm7
- movl %ebx,%edi
+ xorl %ebx,%esi
+ pslld $2,%xmm7
xorl %ecx,%ebx
- movdqa %xmm7,%xmm8
+ addl %eax,%ebp
+ psrld $30,%xmm10
addl 56(%rsp),%edx
- andl %ecx,%edi
andl %ebx,%esi
+ xorl %ecx,%ebx
+ por %xmm10,%xmm7
rorl $7,%eax
- addl %edi,%edx
movl %ebp,%edi
+ xorl %ebx,%esi
roll $5,%ebp
+ pshufd $238,%xmm6,%xmm8
addl %esi,%edx
- xorl %ecx,%ebx
- addl %ebp,%edx
- movl %eax,%esi
+ xorl %eax,%edi
xorl %ebx,%eax
+ addl %ebp,%edx
addl 60(%rsp),%ecx
- andl %ebx,%esi
andl %eax,%edi
+ xorl %ebx,%eax
rorl $7,%ebp
- addl %esi,%ecx
movl %edx,%esi
+ xorl %eax,%edi
roll $5,%edx
addl %edi,%ecx
- xorl %ebx,%eax
+ xorl %ebp,%esi
+ xorl %eax,%ebp
addl %edx,%ecx
- movl %ebp,%edi
pxor %xmm4,%xmm0
-.byte 102,68,15,58,15,198,8
- xorl %eax,%ebp
addl 0(%rsp),%ebx
- andl %eax,%edi
- pxor %xmm1,%xmm0
andl %ebp,%esi
+ xorl %eax,%ebp
rorl $7,%edx
- movdqa %xmm9,%xmm10
- paddd %xmm7,%xmm9
- addl %edi,%ebx
+ punpcklqdq %xmm7,%xmm8
movl %ecx,%edi
- pxor %xmm8,%xmm0
+ xorl %ebp,%esi
+ pxor %xmm1,%xmm0
roll $5,%ecx
addl %esi,%ebx
- xorl %eax,%ebp
- addl %ecx,%ebx
- movdqa %xmm0,%xmm8
- movdqa %xmm9,48(%rsp)
- movl %edx,%esi
+ movdqa %xmm9,%xmm10
+ xorl %edx,%edi
+ paddd %xmm7,%xmm9
xorl %ebp,%edx
+ pxor %xmm8,%xmm0
+ addl %ecx,%ebx
addl 4(%rsp),%eax
- andl %ebp,%esi
- pslld $2,%xmm0
andl %edx,%edi
+ xorl %ebp,%edx
rorl $7,%ecx
- psrld $30,%xmm8
- addl %esi,%eax
+ movdqa %xmm0,%xmm8
movl %ebx,%esi
+ xorl %edx,%edi
+ movdqa %xmm9,48(%rsp)
roll $5,%ebx
addl %edi,%eax
- xorl %ebp,%edx
- addl %ebx,%eax
- por %xmm8,%xmm0
- movl %ecx,%edi
+ xorl %ecx,%esi
+ pslld $2,%xmm0
xorl %edx,%ecx
- movdqa %xmm0,%xmm9
+ addl %ebx,%eax
+ psrld $30,%xmm8
addl 8(%rsp),%ebp
- andl %edx,%edi
andl %ecx,%esi
+ xorl %edx,%ecx
+ por %xmm8,%xmm0
rorl $7,%ebx
- addl %edi,%ebp
movl %eax,%edi
+ xorl %ecx,%esi
roll $5,%eax
+ pshufd $238,%xmm7,%xmm9
addl %esi,%ebp
- xorl %edx,%ecx
- addl %eax,%ebp
- movl %ebx,%esi
+ xorl %ebx,%edi
xorl %ecx,%ebx
+ addl %eax,%ebp
addl 12(%rsp),%edx
- andl %ecx,%esi
andl %ebx,%edi
+ xorl %ecx,%ebx
rorl $7,%eax
- addl %esi,%edx
movl %ebp,%esi
+ xorl %ebx,%edi
roll $5,%ebp
addl %edi,%edx
- xorl %ecx,%ebx
+ xorl %eax,%esi
+ xorl %ebx,%eax
addl %ebp,%edx
- movl %eax,%edi
pxor %xmm5,%xmm1
-.byte 102,68,15,58,15,207,8
- xorl %ebx,%eax
addl 16(%rsp),%ecx
- andl %ebx,%edi
- pxor %xmm2,%xmm1
andl %eax,%esi
+ xorl %ebx,%eax
rorl $7,%ebp
- movdqa %xmm10,%xmm8
- paddd %xmm0,%xmm10
- addl %edi,%ecx
+ punpcklqdq %xmm0,%xmm9
movl %edx,%edi
- pxor %xmm9,%xmm1
+ xorl %eax,%esi
+ pxor %xmm2,%xmm1
roll $5,%edx
addl %esi,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- movdqa %xmm1,%xmm9
- movdqa %xmm10,0(%rsp)
- movl %ebp,%esi
+ movdqa %xmm10,%xmm8
+ xorl %ebp,%edi
+ paddd %xmm0,%xmm10
xorl %eax,%ebp
+ pxor %xmm9,%xmm1
+ addl %edx,%ecx
addl 20(%rsp),%ebx
- andl %eax,%esi
- pslld $2,%xmm1
andl %ebp,%edi
+ xorl %eax,%ebp
rorl $7,%edx
- psrld $30,%xmm9
- addl %esi,%ebx
+ movdqa %xmm1,%xmm9
movl %ecx,%esi
+ xorl %ebp,%edi
+ movdqa %xmm10,0(%rsp)
roll $5,%ecx
addl %edi,%ebx
- xorl %eax,%ebp
- addl %ecx,%ebx
- por %xmm9,%xmm1
- movl %edx,%edi
+ xorl %edx,%esi
+ pslld $2,%xmm1
xorl %ebp,%edx
- movdqa %xmm1,%xmm10
+ addl %ecx,%ebx
+ psrld $30,%xmm9
addl 24(%rsp),%eax
- andl %ebp,%edi
andl %edx,%esi
+ xorl %ebp,%edx
+ por %xmm9,%xmm1
rorl $7,%ecx
- addl %edi,%eax
movl %ebx,%edi
+ xorl %edx,%esi
roll $5,%ebx
+ pshufd $238,%xmm0,%xmm10
addl %esi,%eax
- xorl %ebp,%edx
- addl %ebx,%eax
- movl %ecx,%esi
+ xorl %ecx,%edi
xorl %edx,%ecx
+ addl %ebx,%eax
addl 28(%rsp),%ebp
- andl %edx,%esi
andl %ecx,%edi
+ xorl %edx,%ecx
rorl $7,%ebx
- addl %esi,%ebp
movl %eax,%esi
+ xorl %ecx,%edi
roll $5,%eax
addl %edi,%ebp
- xorl %edx,%ecx
+ xorl %ebx,%esi
+ xorl %ecx,%ebx
addl %eax,%ebp
- movl %ebx,%edi
pxor %xmm6,%xmm2
-.byte 102,68,15,58,15,208,8
- xorl %ecx,%ebx
addl 32(%rsp),%edx
- andl %ecx,%edi
- pxor %xmm3,%xmm2
andl %ebx,%esi
+ xorl %ecx,%ebx
rorl $7,%eax
- movdqa %xmm8,%xmm9
- paddd %xmm1,%xmm8
- addl %edi,%edx
+ punpcklqdq %xmm1,%xmm10
movl %ebp,%edi
- pxor %xmm10,%xmm2
+ xorl %ebx,%esi
+ pxor %xmm3,%xmm2
roll $5,%ebp
addl %esi,%edx
- xorl %ecx,%ebx
- addl %ebp,%edx
- movdqa %xmm2,%xmm10
- movdqa %xmm8,16(%rsp)
- movl %eax,%esi
+ movdqa %xmm8,%xmm9
+ xorl %eax,%edi
+ paddd %xmm1,%xmm8
xorl %ebx,%eax
+ pxor %xmm10,%xmm2
+ addl %ebp,%edx
addl 36(%rsp),%ecx
- andl %ebx,%esi
- pslld $2,%xmm2
andl %eax,%edi
+ xorl %ebx,%eax
rorl $7,%ebp
- psrld $30,%xmm10
- addl %esi,%ecx
+ movdqa %xmm2,%xmm10
movl %edx,%esi
+ xorl %eax,%edi
+ movdqa %xmm8,16(%rsp)
roll $5,%edx
addl %edi,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- por %xmm10,%xmm2
- movl %ebp,%edi
+ xorl %ebp,%esi
+ pslld $2,%xmm2
xorl %eax,%ebp
- movdqa %xmm2,%xmm8
+ addl %edx,%ecx
+ psrld $30,%xmm10
addl 40(%rsp),%ebx
- andl %eax,%edi
andl %ebp,%esi
+ xorl %eax,%ebp
+ por %xmm10,%xmm2
rorl $7,%edx
- addl %edi,%ebx
movl %ecx,%edi
+ xorl %ebp,%esi
roll $5,%ecx
+ pshufd $238,%xmm1,%xmm8
addl %esi,%ebx
- xorl %eax,%ebp
- addl %ecx,%ebx
- movl %edx,%esi
+ xorl %edx,%edi
xorl %ebp,%edx
+ addl %ecx,%ebx
addl 44(%rsp),%eax
- andl %ebp,%esi
andl %edx,%edi
+ xorl %ebp,%edx
rorl $7,%ecx
- addl %esi,%eax
movl %ebx,%esi
+ xorl %edx,%edi
roll $5,%ebx
addl %edi,%eax
- xorl %ebp,%edx
+ xorl %edx,%esi
addl %ebx,%eax
- addl 48(%rsp),%ebp
pxor %xmm7,%xmm3
-.byte 102,68,15,58,15,193,8
- xorl %edx,%esi
+ addl 48(%rsp),%ebp
+ xorl %ecx,%esi
+ punpcklqdq %xmm2,%xmm8
movl %eax,%edi
roll $5,%eax
pxor %xmm4,%xmm3
- xorl %ecx,%esi
- addl %eax,%ebp
+ addl %esi,%ebp
+ xorl %ecx,%edi
movdqa %xmm9,%xmm10
- paddd %xmm2,%xmm9
rorl $7,%ebx
- addl %esi,%ebp
+ paddd %xmm2,%xmm9
+ addl %eax,%ebp
pxor %xmm8,%xmm3
addl 52(%rsp),%edx
- xorl %ecx,%edi
+ xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
movdqa %xmm3,%xmm8
+ addl %edi,%edx
+ xorl %ebx,%esi
movdqa %xmm9,32(%rsp)
- xorl %ebx,%edi
- addl %ebp,%edx
rorl $7,%eax
- addl %edi,%edx
- pslld $2,%xmm3
+ addl %ebp,%edx
addl 56(%rsp),%ecx
- xorl %ebx,%esi
- psrld $30,%xmm8
+ pslld $2,%xmm3
+ xorl %eax,%esi
movl %edx,%edi
+ psrld $30,%xmm8
roll $5,%edx
- xorl %eax,%esi
- addl %edx,%ecx
- rorl $7,%ebp
addl %esi,%ecx
+ xorl %eax,%edi
+ rorl $7,%ebp
por %xmm8,%xmm3
+ addl %edx,%ecx
addl 60(%rsp),%ebx
- xorl %eax,%edi
+ xorl %ebp,%edi
movl %ecx,%esi
roll $5,%ecx
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
addl %edi,%ebx
- addl 0(%rsp),%eax
- paddd %xmm3,%xmm10
xorl %ebp,%esi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 0(%rsp),%eax
+ xorl %edx,%esi
movl %ebx,%edi
roll $5,%ebx
- xorl %edx,%esi
+ paddd %xmm3,%xmm10
+ addl %esi,%eax
+ xorl %edx,%edi
movdqa %xmm10,48(%rsp)
- addl %ebx,%eax
rorl $7,%ecx
- addl %esi,%eax
+ addl %ebx,%eax
addl 4(%rsp),%ebp
- xorl %edx,%edi
+ xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
addl %edi,%ebp
- addl 8(%rsp),%edx
xorl %ecx,%esi
+ rorl $7,%ebx
+ addl %eax,%ebp
+ addl 8(%rsp),%edx
+ xorl %ebx,%esi
movl %ebp,%edi
roll $5,%ebp
- xorl %ebx,%esi
- addl %ebp,%edx
- rorl $7,%eax
addl %esi,%edx
- addl 12(%rsp),%ecx
xorl %ebx,%edi
+ rorl $7,%eax
+ addl %ebp,%edx
+ addl 12(%rsp),%ecx
+ xorl %eax,%edi
movl %edx,%esi
roll $5,%edx
- xorl %eax,%edi
- addl %edx,%ecx
- rorl $7,%ebp
addl %edi,%ecx
+ xorl %eax,%esi
+ rorl $7,%ebp
+ addl %edx,%ecx
cmpq %r10,%r9
je .Ldone_ssse3
movdqa 64(%r11),%xmm6
- movdqa 0(%r11),%xmm9
+ movdqa -64(%r11),%xmm9
movdqu 0(%r9),%xmm0
movdqu 16(%r9),%xmm1
movdqu 32(%r9),%xmm2
@@ -2240,113 +2336,112 @@ _ssse3_shortcut:
.byte 102,15,56,0,198
addq $64,%r9
addl 16(%rsp),%ebx
- xorl %eax,%esi
-.byte 102,15,56,0,206
+ xorl %ebp,%esi
movl %ecx,%edi
+.byte 102,15,56,0,206
roll $5,%ecx
+ addl %esi,%ebx
+ xorl %ebp,%edi
+ rorl $7,%edx
paddd %xmm9,%xmm0
- xorl %ebp,%esi
addl %ecx,%ebx
- rorl $7,%edx
- addl %esi,%ebx
- movdqa %xmm0,0(%rsp)
addl 20(%rsp),%eax
- xorl %ebp,%edi
- psubd %xmm9,%xmm0
+ xorl %edx,%edi
movl %ebx,%esi
+ movdqa %xmm0,0(%rsp)
roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
addl %edi,%eax
- addl 24(%rsp),%ebp
xorl %edx,%esi
+ rorl $7,%ecx
+ psubd %xmm9,%xmm0
+ addl %ebx,%eax
+ addl 24(%rsp),%ebp
+ xorl %ecx,%esi
movl %eax,%edi
roll $5,%eax
- xorl %ecx,%esi
- addl %eax,%ebp
- rorl $7,%ebx
addl %esi,%ebp
- addl 28(%rsp),%edx
xorl %ecx,%edi
+ rorl $7,%ebx
+ addl %eax,%ebp
+ addl 28(%rsp),%edx
+ xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
addl %edi,%edx
- addl 32(%rsp),%ecx
xorl %ebx,%esi
-.byte 102,15,56,0,214
+ rorl $7,%eax
+ addl %ebp,%edx
+ addl 32(%rsp),%ecx
+ xorl %eax,%esi
movl %edx,%edi
+.byte 102,15,56,0,214
roll $5,%edx
+ addl %esi,%ecx
+ xorl %eax,%edi
+ rorl $7,%ebp
paddd %xmm9,%xmm1
- xorl %eax,%esi
addl %edx,%ecx
- rorl $7,%ebp
- addl %esi,%ecx
- movdqa %xmm1,16(%rsp)
addl 36(%rsp),%ebx
- xorl %eax,%edi
- psubd %xmm9,%xmm1
+ xorl %ebp,%edi
movl %ecx,%esi
+ movdqa %xmm1,16(%rsp)
roll $5,%ecx
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
addl %edi,%ebx
- addl 40(%rsp),%eax
xorl %ebp,%esi
+ rorl $7,%edx
+ psubd %xmm9,%xmm1
+ addl %ecx,%ebx
+ addl 40(%rsp),%eax
+ xorl %edx,%esi
movl %ebx,%edi
roll $5,%ebx
- xorl %edx,%esi
- addl %ebx,%eax
- rorl $7,%ecx
addl %esi,%eax
- addl 44(%rsp),%ebp
xorl %edx,%edi
+ rorl $7,%ecx
+ addl %ebx,%eax
+ addl 44(%rsp),%ebp
+ xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
addl %edi,%ebp
- addl 48(%rsp),%edx
xorl %ecx,%esi
-.byte 102,15,56,0,222
+ rorl $7,%ebx
+ addl %eax,%ebp
+ addl 48(%rsp),%edx
+ xorl %ebx,%esi
movl %ebp,%edi
+.byte 102,15,56,0,222
roll $5,%ebp
+ addl %esi,%edx
+ xorl %ebx,%edi
+ rorl $7,%eax
paddd %xmm9,%xmm2
- xorl %ebx,%esi
addl %ebp,%edx
- rorl $7,%eax
- addl %esi,%edx
- movdqa %xmm2,32(%rsp)
addl 52(%rsp),%ecx
- xorl %ebx,%edi
- psubd %xmm9,%xmm2
+ xorl %eax,%edi
movl %edx,%esi
+ movdqa %xmm2,32(%rsp)
roll $5,%edx
- xorl %eax,%edi
- addl %edx,%ecx
- rorl $7,%ebp
addl %edi,%ecx
- addl 56(%rsp),%ebx
xorl %eax,%esi
+ rorl $7,%ebp
+ psubd %xmm9,%xmm2
+ addl %edx,%ecx
+ addl 56(%rsp),%ebx
+ xorl %ebp,%esi
movl %ecx,%edi
roll $5,%ecx
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
- addl 60(%rsp),%eax
xorl %ebp,%edi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 60(%rsp),%eax
+ xorl %edx,%edi
movl %ebx,%esi
roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
addl %edi,%eax
+ rorl $7,%ecx
+ addl %ebx,%eax
addl 0(%r8),%eax
addl 4(%r8),%esi
addl 8(%r8),%ecx
@@ -2356,108 +2451,110 @@ _ssse3_shortcut:
movl %esi,4(%r8)
movl %esi,%ebx
movl %ecx,8(%r8)
+ movl %ecx,%edi
movl %edx,12(%r8)
+ xorl %edx,%edi
movl %ebp,16(%r8)
+ andl %edi,%esi
jmp .Loop_ssse3
.align 16
.Ldone_ssse3:
addl 16(%rsp),%ebx
- xorl %eax,%esi
+ xorl %ebp,%esi
movl %ecx,%edi
roll $5,%ecx
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
- addl 20(%rsp),%eax
xorl %ebp,%edi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 20(%rsp),%eax
+ xorl %edx,%edi
movl %ebx,%esi
roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
addl %edi,%eax
- addl 24(%rsp),%ebp
xorl %edx,%esi
+ rorl $7,%ecx
+ addl %ebx,%eax
+ addl 24(%rsp),%ebp
+ xorl %ecx,%esi
movl %eax,%edi
roll $5,%eax
- xorl %ecx,%esi
- addl %eax,%ebp
- rorl $7,%ebx
addl %esi,%ebp
- addl 28(%rsp),%edx
xorl %ecx,%edi
+ rorl $7,%ebx
+ addl %eax,%ebp
+ addl 28(%rsp),%edx
+ xorl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
addl %edi,%edx
- addl 32(%rsp),%ecx
xorl %ebx,%esi
+ rorl $7,%eax
+ addl %ebp,%edx
+ addl 32(%rsp),%ecx
+ xorl %eax,%esi
movl %edx,%edi
roll $5,%edx
- xorl %eax,%esi
- addl %edx,%ecx
- rorl $7,%ebp
addl %esi,%ecx
- addl 36(%rsp),%ebx
xorl %eax,%edi
+ rorl $7,%ebp
+ addl %edx,%ecx
+ addl 36(%rsp),%ebx
+ xorl %ebp,%edi
movl %ecx,%esi
roll $5,%ecx
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
addl %edi,%ebx
- addl 40(%rsp),%eax
xorl %ebp,%esi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 40(%rsp),%eax
+ xorl %edx,%esi
movl %ebx,%edi
roll $5,%ebx
- xorl %edx,%esi
- addl %ebx,%eax
- rorl $7,%ecx
addl %esi,%eax
- addl 44(%rsp),%ebp
xorl %edx,%edi
+ rorl $7,%ecx
+ addl %ebx,%eax
+ addl 44(%rsp),%ebp
+ xorl %ecx,%edi
movl %eax,%esi
roll $5,%eax
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
addl %edi,%ebp
- addl 48(%rsp),%edx
xorl %ecx,%esi
+ rorl $7,%ebx
+ addl %eax,%ebp
+ addl 48(%rsp),%edx
+ xorl %ebx,%esi
movl %ebp,%edi
roll $5,%ebp
- xorl %ebx,%esi
- addl %ebp,%edx
- rorl $7,%eax
addl %esi,%edx
- addl 52(%rsp),%ecx
xorl %ebx,%edi
+ rorl $7,%eax
+ addl %ebp,%edx
+ addl 52(%rsp),%ecx
+ xorl %eax,%edi
movl %edx,%esi
roll $5,%edx
- xorl %eax,%edi
- addl %edx,%ecx
- rorl $7,%ebp
addl %edi,%ecx
- addl 56(%rsp),%ebx
xorl %eax,%esi
+ rorl $7,%ebp
+ addl %edx,%ecx
+ addl 56(%rsp),%ebx
+ xorl %ebp,%esi
movl %ecx,%edi
roll $5,%ecx
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
- addl 60(%rsp),%eax
xorl %ebp,%edi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 60(%rsp),%eax
+ xorl %edx,%edi
movl %ebx,%esi
roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
addl %edi,%eax
+ rorl $7,%ecx
+ addl %ebx,%eax
addl 0(%r8),%eax
addl 4(%r8),%esi
addl 8(%r8),%ecx
@@ -2468,20 +2565,28 @@ _ssse3_shortcut:
movl %ecx,8(%r8)
movl %edx,12(%r8)
movl %ebp,16(%r8)
- leaq 64(%rsp),%rsi
- movq 0(%rsi),%r12
- movq 8(%rsi),%rbp
- movq 16(%rsi),%rbx
- leaq 24(%rsi),%rsp
+ leaq (%r14),%rsi
+ movq -40(%rsi),%r14
+ movq -32(%rsi),%r13
+ movq -24(%rsi),%r12
+ movq -16(%rsi),%rbp
+ movq -8(%rsi),%rbx
+ leaq (%rsi),%rsp
.Lepilogue_ssse3:
.byte 0xf3,0xc3
.size sha1_block_data_order_ssse3,.-sha1_block_data_order_ssse3
.align 64
K_XX_XX:
-.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999
-.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1
-.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc
-.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6
-.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999	# K_00_19
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999	# K_00_19
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	# K_20_39
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	# K_20_39
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	# K_40_59
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	# K_40_59
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	# K_60_79
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	# K_60_79
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap mask
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap mask
+.byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0	# shaext reversal mask
.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 64
diff --git a/secure/lib/libcrypto/amd64/sha256-mb-x86_64.S b/secure/lib/libcrypto/amd64/sha256-mb-x86_64.S
new file mode 100644
index 0000000..893d42a
--- /dev/null
+++ b/secure/lib/libcrypto/amd64/sha256-mb-x86_64.S
@@ -0,0 +1,3259 @@
+ # $FreeBSD$
+.text
+
+
+
+.globl sha256_multi_block
+.type sha256_multi_block,@function
+.align 32
+sha256_multi_block:
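+# Dispatch: if OPENSSL_ia32cap_P advertises the SHA extensions, branch to
+# the SHA-NI code path; otherwise fall through to the 4-way SSE2
+# multi-buffer implementation below.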
+ movq OPENSSL_ia32cap_P+4(%rip),%rcx
+ btq $61,%rcx
+ jc _shaext_shortcut
+ movq %rsp,%rax
+ pushq %rbx
+ pushq %rbp
+ subq $288,%rsp
+ andq $-256,%rsp
+ movq %rax,272(%rsp)
+.Lbody:
+ leaq K256+128(%rip),%rbp
+ leaq 256(%rsp),%rbx
+ leaq 128(%rdi),%rdi
+
+.Loop_grande:
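+# Per-lane setup: %rsi holds up to four {input pointer, block count}
+# descriptors.  %edx accumulates the largest block count, per-lane counts
+# are parked at (%rbx), and idle lanes (count <= 0) get their pointer
+# redirected at the K256 table so the shared loads below stay harmless.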
+ movl %edx,280(%rsp)
+ xorl %edx,%edx
+ movq 0(%rsi),%r8
+ movl 8(%rsi),%ecx
+ cmpl %edx,%ecx
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movl %ecx,0(%rbx)
+ cmovleq %rbp,%r8
+ movq 16(%rsi),%r9
+ movl 24(%rsi),%ecx
+ cmpl %edx,%ecx
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movl %ecx,4(%rbx)
+ cmovleq %rbp,%r9
+ movq 32(%rsi),%r10
+ movl 40(%rsi),%ecx
+ cmpl %edx,%ecx
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movl %ecx,8(%rbx)
+ cmovleq %rbp,%r10
+ movq 48(%rsi),%r11
+ movl 56(%rsi),%ecx
+ cmpl %edx,%ecx
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movl %ecx,12(%rbx)
+ cmovleq %rbp,%r11
+ testl %edx,%edx
+ jz .Ldone
+
+ movdqu 0-128(%rdi),%xmm8
+ leaq 128(%rsp),%rax
+ movdqu 32-128(%rdi),%xmm9
+ movdqu 64-128(%rdi),%xmm10
+ movdqu 96-128(%rdi),%xmm11
+ movdqu 128-128(%rdi),%xmm12
+ movdqu 160-128(%rdi),%xmm13
+ movdqu 192-128(%rdi),%xmm14
+ movdqu 224-128(%rdi),%xmm15
+ movdqu .Lpbswap(%rip),%xmm6
+ jmp .Loop
+
+.align 32
+.Loop:
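+# Four-way transposed rounds: movd/punpckldq gather the same message word
+# from each of the four input streams into one 128-bit vector, while
+# %xmm8-%xmm15 carry the working variables a-h, one lane per 32-bit element.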
+ movdqa %xmm10,%xmm4
+ pxor %xmm9,%xmm4
+ movd 0(%r8),%xmm5
+ movd 0(%r9),%xmm0
+ movd 0(%r10),%xmm1
+ movd 0(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm12,%xmm7
+.byte 102,15,56,0,238
+ movdqa %xmm12,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm12,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,0-128(%rax)
+ paddd %xmm15,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -128(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm12,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm12,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm14,%xmm0
+ pand %xmm13,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm8,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm8,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm9,%xmm3
+ movdqa %xmm8,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm8,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm9,%xmm15
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm15
+ paddd %xmm5,%xmm11
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm15
+ paddd %xmm7,%xmm15
+ movd 4(%r8),%xmm5
+ movd 4(%r9),%xmm0
+ movd 4(%r10),%xmm1
+ movd 4(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm11,%xmm7
+
+ movdqa %xmm11,%xmm2
+.byte 102,15,56,0,238
+ psrld $6,%xmm7
+ movdqa %xmm11,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,16-128(%rax)
+ paddd %xmm14,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -96(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm11,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm11,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm13,%xmm0
+ pand %xmm12,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm15,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm15,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm4,%xmm0
+ movdqa %xmm8,%xmm4
+ movdqa %xmm15,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm15,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm8,%xmm14
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm14
+ paddd %xmm5,%xmm10
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm14
+ paddd %xmm7,%xmm14
+ movd 8(%r8),%xmm5
+ movd 8(%r9),%xmm0
+ movd 8(%r10),%xmm1
+ movd 8(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm10,%xmm7
+.byte 102,15,56,0,238
+ movdqa %xmm10,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm10,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,32-128(%rax)
+ paddd %xmm13,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -64(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm10,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm10,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm12,%xmm0
+ pand %xmm11,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm14,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm14,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm15,%xmm3
+ movdqa %xmm14,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm14,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm15,%xmm13
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm13
+ paddd %xmm5,%xmm9
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm13
+ paddd %xmm7,%xmm13
+ movd 12(%r8),%xmm5
+ movd 12(%r9),%xmm0
+ movd 12(%r10),%xmm1
+ movd 12(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm9,%xmm7
+
+ movdqa %xmm9,%xmm2
+.byte 102,15,56,0,238
+ psrld $6,%xmm7
+ movdqa %xmm9,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,48-128(%rax)
+ paddd %xmm12,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -32(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm9,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm9,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm11,%xmm0
+ pand %xmm10,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm13,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm13,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm4,%xmm0
+ movdqa %xmm14,%xmm4
+ movdqa %xmm13,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm13,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm14,%xmm12
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm12
+ paddd %xmm5,%xmm8
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm12
+ paddd %xmm7,%xmm12
+ movd 16(%r8),%xmm5
+ movd 16(%r9),%xmm0
+ movd 16(%r10),%xmm1
+ movd 16(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm8,%xmm7
+.byte 102,15,56,0,238
+ movdqa %xmm8,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm8,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,64-128(%rax)
+ paddd %xmm11,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 0(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm8,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm8,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm10,%xmm0
+ pand %xmm9,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm12,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm12,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm13,%xmm3
+ movdqa %xmm12,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm12,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm13,%xmm11
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm11
+ paddd %xmm5,%xmm15
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm11
+ paddd %xmm7,%xmm11
+ movd 20(%r8),%xmm5
+ movd 20(%r9),%xmm0
+ movd 20(%r10),%xmm1
+ movd 20(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm15,%xmm7
+
+ movdqa %xmm15,%xmm2
+.byte 102,15,56,0,238
+ psrld $6,%xmm7
+ movdqa %xmm15,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,80-128(%rax)
+ paddd %xmm10,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 32(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm15,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm15,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm9,%xmm0
+ pand %xmm8,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm11,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm11,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm4,%xmm0
+ movdqa %xmm12,%xmm4
+ movdqa %xmm11,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm11,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm12,%xmm10
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm10
+ paddd %xmm5,%xmm14
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm10
+ paddd %xmm7,%xmm10
+ movd 24(%r8),%xmm5
+ movd 24(%r9),%xmm0
+ movd 24(%r10),%xmm1
+ movd 24(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm14,%xmm7
+.byte 102,15,56,0,238
+ movdqa %xmm14,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm14,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,96-128(%rax)
+ paddd %xmm9,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 64(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm14,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm14,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm8,%xmm0
+ pand %xmm15,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm10,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm10,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm11,%xmm3
+ movdqa %xmm10,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm10,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm11,%xmm9
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm9
+ paddd %xmm5,%xmm13
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm9
+ paddd %xmm7,%xmm9
+ movd 28(%r8),%xmm5
+ movd 28(%r9),%xmm0
+ movd 28(%r10),%xmm1
+ movd 28(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm13,%xmm7
+
+ movdqa %xmm13,%xmm2
+.byte 102,15,56,0,238
+ psrld $6,%xmm7
+ movdqa %xmm13,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,112-128(%rax)
+ paddd %xmm8,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 96(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm13,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm13,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm15,%xmm0
+ pand %xmm14,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm9,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm9,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm4,%xmm0
+ movdqa %xmm10,%xmm4
+ movdqa %xmm9,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm9,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm10,%xmm8
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm8
+ paddd %xmm5,%xmm12
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm8
+ paddd %xmm7,%xmm8
+ leaq 256(%rbp),%rbp
+ movd 32(%r8),%xmm5
+ movd 32(%r9),%xmm0
+ movd 32(%r10),%xmm1
+ movd 32(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm12,%xmm7
+.byte 102,15,56,0,238
+ movdqa %xmm12,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm12,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,128-128(%rax)
+ paddd %xmm15,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -128(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm12,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm12,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm14,%xmm0
+ pand %xmm13,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm8,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm8,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm9,%xmm3
+ movdqa %xmm8,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm8,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm9,%xmm15
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm15
+ paddd %xmm5,%xmm11
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm15
+ paddd %xmm7,%xmm15
+ movd 36(%r8),%xmm5
+ movd 36(%r9),%xmm0
+ movd 36(%r10),%xmm1
+ movd 36(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm11,%xmm7
+
+ movdqa %xmm11,%xmm2
+.byte 102,15,56,0,238
+ psrld $6,%xmm7
+ movdqa %xmm11,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,144-128(%rax)
+ paddd %xmm14,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -96(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm11,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm11,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm13,%xmm0
+ pand %xmm12,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm15,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm15,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm4,%xmm0
+ movdqa %xmm8,%xmm4
+ movdqa %xmm15,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm15,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm8,%xmm14
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm14
+ paddd %xmm5,%xmm10
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm14
+ paddd %xmm7,%xmm14
+ movd 40(%r8),%xmm5
+ movd 40(%r9),%xmm0
+ movd 40(%r10),%xmm1
+ movd 40(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm10,%xmm7
+.byte 102,15,56,0,238
+ movdqa %xmm10,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm10,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,160-128(%rax)
+ paddd %xmm13,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -64(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm10,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm10,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm12,%xmm0
+ pand %xmm11,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm14,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm14,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm15,%xmm3
+ movdqa %xmm14,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm14,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm15,%xmm13
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm13
+ paddd %xmm5,%xmm9
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm13
+ paddd %xmm7,%xmm13
+ movd 44(%r8),%xmm5
+ movd 44(%r9),%xmm0
+ movd 44(%r10),%xmm1
+ movd 44(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm9,%xmm7
+
+ movdqa %xmm9,%xmm2
+.byte 102,15,56,0,238
+ psrld $6,%xmm7
+ movdqa %xmm9,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,176-128(%rax)
+ paddd %xmm12,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -32(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm9,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm9,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm11,%xmm0
+ pand %xmm10,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm13,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm13,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm4,%xmm0
+ movdqa %xmm14,%xmm4
+ movdqa %xmm13,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm13,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm14,%xmm12
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm12
+ paddd %xmm5,%xmm8
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm12
+ paddd %xmm7,%xmm12
+ movd 48(%r8),%xmm5
+ movd 48(%r9),%xmm0
+ movd 48(%r10),%xmm1
+ movd 48(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm8,%xmm7
+.byte 102,15,56,0,238
+ movdqa %xmm8,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm8,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,192-128(%rax)
+ paddd %xmm11,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 0(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm8,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm8,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm10,%xmm0
+ pand %xmm9,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm12,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm12,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm13,%xmm3
+ movdqa %xmm12,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm12,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm13,%xmm11
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm11
+ paddd %xmm5,%xmm15
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm11
+ paddd %xmm7,%xmm11
+ movd 52(%r8),%xmm5
+ movd 52(%r9),%xmm0
+ movd 52(%r10),%xmm1
+ movd 52(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm15,%xmm7
+
+ movdqa %xmm15,%xmm2
+.byte 102,15,56,0,238
+ psrld $6,%xmm7
+ movdqa %xmm15,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,208-128(%rax)
+ paddd %xmm10,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 32(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm15,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm15,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm9,%xmm0
+ pand %xmm8,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm11,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm11,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm4,%xmm0
+ movdqa %xmm12,%xmm4
+ movdqa %xmm11,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm11,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm12,%xmm10
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm10
+ paddd %xmm5,%xmm14
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm10
+ paddd %xmm7,%xmm10
+ movd 56(%r8),%xmm5
+ movd 56(%r9),%xmm0
+ movd 56(%r10),%xmm1
+ movd 56(%r11),%xmm2
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm14,%xmm7
+.byte 102,15,56,0,238
+ movdqa %xmm14,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm14,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,224-128(%rax)
+ paddd %xmm9,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 64(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm14,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm14,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm8,%xmm0
+ pand %xmm15,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm10,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm10,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm11,%xmm3
+ movdqa %xmm10,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm10,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm11,%xmm9
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm9
+ paddd %xmm5,%xmm13
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm9
+ paddd %xmm7,%xmm9
+ movd 60(%r8),%xmm5
+ leaq 64(%r8),%r8
+ movd 60(%r9),%xmm0
+ leaq 64(%r9),%r9
+ movd 60(%r10),%xmm1
+ leaq 64(%r10),%r10
+ movd 60(%r11),%xmm2
+ leaq 64(%r11),%r11
+ punpckldq %xmm1,%xmm5
+ punpckldq %xmm2,%xmm0
+ punpckldq %xmm0,%xmm5
+ movdqa %xmm13,%xmm7
+
+ movdqa %xmm13,%xmm2
+.byte 102,15,56,0,238
+ psrld $6,%xmm7
+ movdqa %xmm13,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,240-128(%rax)
+ paddd %xmm8,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 96(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm13,%xmm0
+ prefetcht0 63(%r8)
+ pxor %xmm2,%xmm7
+ movdqa %xmm13,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm15,%xmm0
+ pand %xmm14,%xmm4
+ pxor %xmm1,%xmm7
+
+ prefetcht0 63(%r9)
+ movdqa %xmm9,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm9,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm4,%xmm0
+ movdqa %xmm10,%xmm4
+ movdqa %xmm9,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm9,%xmm4
+
+ prefetcht0 63(%r10)
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+ prefetcht0 63(%r11)
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm10,%xmm8
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm8
+ paddd %xmm5,%xmm12
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm8
+ paddd %xmm7,%xmm8
+ leaq 256(%rbp),%rbp
+ movdqu 0-128(%rax),%xmm5
+ movl $3,%ecx
+ jmp .Loop_16_xx
+.align 32
+.Loop_16_xx:
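+# Rounds 16-63, three passes of sixteen: the shift/xor chains below compute
+# the SHA-256 message-schedule functions sigma0 (rotr 7/18, shr 3) and
+# sigma1 (rotr 17/19, shr 10) for all four lanes at once.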
+ movdqa 16-128(%rax),%xmm6
+ paddd 144-128(%rax),%xmm5
+
+ movdqa %xmm6,%xmm7
+ movdqa %xmm6,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm6,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 224-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm3,%xmm1
+
+ psrld $17,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ psrld $19-17,%xmm3
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm3,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm5
+ movdqa %xmm12,%xmm7
+
+ movdqa %xmm12,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm12,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,0-128(%rax)
+ paddd %xmm15,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -128(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm12,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm12,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm14,%xmm0
+ pand %xmm13,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm8,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm8,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm9,%xmm3
+ movdqa %xmm8,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm8,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm9,%xmm15
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm15
+ paddd %xmm5,%xmm11
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm15
+ paddd %xmm7,%xmm15
+ movdqa 32-128(%rax),%xmm5
+ paddd 160-128(%rax),%xmm6
+
+ movdqa %xmm5,%xmm7
+ movdqa %xmm5,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm5,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 240-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm4,%xmm1
+
+ psrld $17,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ psrld $19-17,%xmm4
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm6
+ movdqa %xmm11,%xmm7
+
+ movdqa %xmm11,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm11,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm6,16-128(%rax)
+ paddd %xmm14,%xmm6
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -96(%rbp),%xmm6
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm11,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm11,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm13,%xmm0
+ pand %xmm12,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm15,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm15,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ movdqa %xmm8,%xmm4
+ movdqa %xmm15,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm15,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm6
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm8,%xmm14
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm14
+ paddd %xmm6,%xmm10
+ pxor %xmm2,%xmm7
+
+ paddd %xmm6,%xmm14
+ paddd %xmm7,%xmm14
+ movdqa 48-128(%rax),%xmm6
+ paddd 176-128(%rax),%xmm5
+
+ movdqa %xmm6,%xmm7
+ movdqa %xmm6,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm6,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 0-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm3,%xmm1
+
+ psrld $17,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ psrld $19-17,%xmm3
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm3,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm5
+ movdqa %xmm10,%xmm7
+
+ movdqa %xmm10,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm10,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,32-128(%rax)
+ paddd %xmm13,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -64(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm10,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm10,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm12,%xmm0
+ pand %xmm11,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm14,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm14,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm15,%xmm3
+ movdqa %xmm14,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm14,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm15,%xmm13
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm13
+ paddd %xmm5,%xmm9
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm13
+ paddd %xmm7,%xmm13
+ movdqa 64-128(%rax),%xmm5
+ paddd 192-128(%rax),%xmm6
+
+ movdqa %xmm5,%xmm7
+ movdqa %xmm5,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm5,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 16-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm4,%xmm1
+
+ psrld $17,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ psrld $19-17,%xmm4
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm6
+ movdqa %xmm9,%xmm7
+
+ movdqa %xmm9,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm9,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm6,48-128(%rax)
+ paddd %xmm12,%xmm6
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -32(%rbp),%xmm6
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm9,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm9,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm11,%xmm0
+ pand %xmm10,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm13,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm13,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ movdqa %xmm14,%xmm4
+ movdqa %xmm13,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm13,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm6
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm14,%xmm12
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm12
+ paddd %xmm6,%xmm8
+ pxor %xmm2,%xmm7
+
+ paddd %xmm6,%xmm12
+ paddd %xmm7,%xmm12
+ movdqa 80-128(%rax),%xmm6
+ paddd 208-128(%rax),%xmm5
+
+ movdqa %xmm6,%xmm7
+ movdqa %xmm6,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm6,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 32-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm3,%xmm1
+
+ psrld $17,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ psrld $19-17,%xmm3
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm3,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm5
+ movdqa %xmm8,%xmm7
+
+ movdqa %xmm8,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm8,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,64-128(%rax)
+ paddd %xmm11,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 0(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm8,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm8,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm10,%xmm0
+ pand %xmm9,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm12,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm12,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm13,%xmm3
+ movdqa %xmm12,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm12,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm13,%xmm11
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm11
+ paddd %xmm5,%xmm15
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm11
+ paddd %xmm7,%xmm11
+ movdqa 96-128(%rax),%xmm5
+ paddd 224-128(%rax),%xmm6
+
+ movdqa %xmm5,%xmm7
+ movdqa %xmm5,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm5,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 48-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm4,%xmm1
+
+ psrld $17,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ psrld $19-17,%xmm4
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm6
+ movdqa %xmm15,%xmm7
+
+ movdqa %xmm15,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm15,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm6,80-128(%rax)
+ paddd %xmm10,%xmm6
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 32(%rbp),%xmm6
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm15,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm15,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm9,%xmm0
+ pand %xmm8,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm11,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm11,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ movdqa %xmm12,%xmm4
+ movdqa %xmm11,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm11,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm6
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm12,%xmm10
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm10
+ paddd %xmm6,%xmm14
+ pxor %xmm2,%xmm7
+
+ paddd %xmm6,%xmm10
+ paddd %xmm7,%xmm10
+ movdqa 112-128(%rax),%xmm6
+ paddd 240-128(%rax),%xmm5
+
+ movdqa %xmm6,%xmm7
+ movdqa %xmm6,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm6,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 64-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm3,%xmm1
+
+ psrld $17,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ psrld $19-17,%xmm3
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm3,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm5
+ movdqa %xmm14,%xmm7
+
+ movdqa %xmm14,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm14,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,96-128(%rax)
+ paddd %xmm9,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 64(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm14,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm14,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm8,%xmm0
+ pand %xmm15,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm10,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm10,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm11,%xmm3
+ movdqa %xmm10,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm10,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm11,%xmm9
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm9
+ paddd %xmm5,%xmm13
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm9
+ paddd %xmm7,%xmm9
+ movdqa 128-128(%rax),%xmm5
+ paddd 0-128(%rax),%xmm6
+
+ movdqa %xmm5,%xmm7
+ movdqa %xmm5,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm5,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 80-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm4,%xmm1
+
+ psrld $17,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ psrld $19-17,%xmm4
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm6
+ movdqa %xmm13,%xmm7
+
+ movdqa %xmm13,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm13,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm6,112-128(%rax)
+ paddd %xmm8,%xmm6
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 96(%rbp),%xmm6
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm13,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm13,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm15,%xmm0
+ pand %xmm14,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm9,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm9,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ movdqa %xmm10,%xmm4
+ movdqa %xmm9,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm9,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm6
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm10,%xmm8
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm8
+ paddd %xmm6,%xmm12
+ pxor %xmm2,%xmm7
+
+ paddd %xmm6,%xmm8
+ paddd %xmm7,%xmm8
+ leaq 256(%rbp),%rbp
+ movdqa 144-128(%rax),%xmm6
+ paddd 16-128(%rax),%xmm5
+
+ movdqa %xmm6,%xmm7
+ movdqa %xmm6,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm6,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 96-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm3,%xmm1
+
+ psrld $17,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ psrld $19-17,%xmm3
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm3,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm5
+ movdqa %xmm12,%xmm7
+
+ movdqa %xmm12,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm12,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,128-128(%rax)
+ paddd %xmm15,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -128(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm12,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm12,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm14,%xmm0
+ pand %xmm13,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm8,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm8,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm9,%xmm3
+ movdqa %xmm8,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm8,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm9,%xmm15
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm15
+ paddd %xmm5,%xmm11
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm15
+ paddd %xmm7,%xmm15
+ movdqa 160-128(%rax),%xmm5
+ paddd 32-128(%rax),%xmm6
+
+ movdqa %xmm5,%xmm7
+ movdqa %xmm5,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm5,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 112-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm4,%xmm1
+
+ psrld $17,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ psrld $19-17,%xmm4
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm6
+ movdqa %xmm11,%xmm7
+
+ movdqa %xmm11,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm11,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm6,144-128(%rax)
+ paddd %xmm14,%xmm6
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -96(%rbp),%xmm6
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm11,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm11,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm13,%xmm0
+ pand %xmm12,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm15,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm15,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ movdqa %xmm8,%xmm4
+ movdqa %xmm15,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm15,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm6
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm8,%xmm14
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm14
+ paddd %xmm6,%xmm10
+ pxor %xmm2,%xmm7
+
+ paddd %xmm6,%xmm14
+ paddd %xmm7,%xmm14
+ movdqa 176-128(%rax),%xmm6
+ paddd 48-128(%rax),%xmm5
+
+ movdqa %xmm6,%xmm7
+ movdqa %xmm6,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm6,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 128-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm3,%xmm1
+
+ psrld $17,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ psrld $19-17,%xmm3
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm3,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm5
+ movdqa %xmm10,%xmm7
+
+ movdqa %xmm10,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm10,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,160-128(%rax)
+ paddd %xmm13,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -64(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm10,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm10,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm12,%xmm0
+ pand %xmm11,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm14,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm14,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm15,%xmm3
+ movdqa %xmm14,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm14,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm15,%xmm13
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm13
+ paddd %xmm5,%xmm9
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm13
+ paddd %xmm7,%xmm13
+ movdqa 192-128(%rax),%xmm5
+ paddd 64-128(%rax),%xmm6
+
+ movdqa %xmm5,%xmm7
+ movdqa %xmm5,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm5,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 144-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm4,%xmm1
+
+ psrld $17,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ psrld $19-17,%xmm4
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm6
+ movdqa %xmm9,%xmm7
+
+ movdqa %xmm9,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm9,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm6,176-128(%rax)
+ paddd %xmm12,%xmm6
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd -32(%rbp),%xmm6
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm9,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm9,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm11,%xmm0
+ pand %xmm10,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm13,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm13,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ movdqa %xmm14,%xmm4
+ movdqa %xmm13,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm13,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm6
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm14,%xmm12
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm12
+ paddd %xmm6,%xmm8
+ pxor %xmm2,%xmm7
+
+ paddd %xmm6,%xmm12
+ paddd %xmm7,%xmm12
+ movdqa 208-128(%rax),%xmm6
+ paddd 80-128(%rax),%xmm5
+
+ movdqa %xmm6,%xmm7
+ movdqa %xmm6,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm6,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 160-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm3,%xmm1
+
+ psrld $17,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ psrld $19-17,%xmm3
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm3,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm5
+ movdqa %xmm8,%xmm7
+
+ movdqa %xmm8,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm8,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,192-128(%rax)
+ paddd %xmm11,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 0(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm8,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm8,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm10,%xmm0
+ pand %xmm9,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm12,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm12,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm13,%xmm3
+ movdqa %xmm12,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm12,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm13,%xmm11
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm11
+ paddd %xmm5,%xmm15
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm11
+ paddd %xmm7,%xmm11
+ movdqa 224-128(%rax),%xmm5
+ paddd 96-128(%rax),%xmm6
+
+ movdqa %xmm5,%xmm7
+ movdqa %xmm5,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm5,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 176-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm4,%xmm1
+
+ psrld $17,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ psrld $19-17,%xmm4
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm6
+ movdqa %xmm15,%xmm7
+
+ movdqa %xmm15,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm15,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm6,208-128(%rax)
+ paddd %xmm10,%xmm6
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 32(%rbp),%xmm6
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm15,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm15,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm9,%xmm0
+ pand %xmm8,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm11,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm11,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ movdqa %xmm12,%xmm4
+ movdqa %xmm11,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm11,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm6
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm12,%xmm10
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm10
+ paddd %xmm6,%xmm14
+ pxor %xmm2,%xmm7
+
+ paddd %xmm6,%xmm10
+ paddd %xmm7,%xmm10
+ movdqa 240-128(%rax),%xmm6
+ paddd 112-128(%rax),%xmm5
+
+ movdqa %xmm6,%xmm7
+ movdqa %xmm6,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm6,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 192-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm3,%xmm1
+
+ psrld $17,%xmm3
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ psrld $19-17,%xmm3
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm3,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm5
+ movdqa %xmm14,%xmm7
+
+ movdqa %xmm14,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm14,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm5,224-128(%rax)
+ paddd %xmm9,%xmm5
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 64(%rbp),%xmm5
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm14,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm14,%xmm3
+ pslld $26-21,%xmm2
+ pandn %xmm8,%xmm0
+ pand %xmm15,%xmm3
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm10,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm10,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm5
+ pxor %xmm3,%xmm0
+ movdqa %xmm11,%xmm3
+ movdqa %xmm10,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm10,%xmm3
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm5
+ pslld $19-10,%xmm2
+ pand %xmm3,%xmm4
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm11,%xmm9
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm4,%xmm9
+ paddd %xmm5,%xmm13
+ pxor %xmm2,%xmm7
+
+ paddd %xmm5,%xmm9
+ paddd %xmm7,%xmm9
+ movdqa 0-128(%rax),%xmm5
+ paddd 128-128(%rax),%xmm6
+
+ movdqa %xmm5,%xmm7
+ movdqa %xmm5,%xmm1
+ psrld $3,%xmm7
+ movdqa %xmm5,%xmm2
+
+ psrld $7,%xmm1
+ movdqa 208-128(%rax),%xmm0
+ pslld $14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $18-7,%xmm1
+ movdqa %xmm0,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $25-14,%xmm2
+ pxor %xmm1,%xmm7
+ psrld $10,%xmm0
+ movdqa %xmm4,%xmm1
+
+ psrld $17,%xmm4
+ pxor %xmm2,%xmm7
+ pslld $13,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ psrld $19-17,%xmm4
+ pxor %xmm1,%xmm0
+ pslld $15-13,%xmm1
+ pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
+ paddd %xmm0,%xmm6
+ movdqa %xmm13,%xmm7
+
+ movdqa %xmm13,%xmm2
+
+ psrld $6,%xmm7
+ movdqa %xmm13,%xmm1
+ pslld $7,%xmm2
+ movdqa %xmm6,240-128(%rax)
+ paddd %xmm8,%xmm6
+
+ psrld $11,%xmm1
+ pxor %xmm2,%xmm7
+ pslld $21-7,%xmm2
+ paddd 96(%rbp),%xmm6
+ pxor %xmm1,%xmm7
+
+ psrld $25-11,%xmm1
+ movdqa %xmm13,%xmm0
+
+ pxor %xmm2,%xmm7
+ movdqa %xmm13,%xmm4
+ pslld $26-21,%xmm2
+ pandn %xmm15,%xmm0
+ pand %xmm14,%xmm4
+ pxor %xmm1,%xmm7
+
+
+ movdqa %xmm9,%xmm1
+ pxor %xmm2,%xmm7
+ movdqa %xmm9,%xmm2
+ psrld $2,%xmm1
+ paddd %xmm7,%xmm6
+ pxor %xmm4,%xmm0
+ movdqa %xmm10,%xmm4
+ movdqa %xmm9,%xmm7
+ pslld $10,%xmm2
+ pxor %xmm9,%xmm4
+
+
+ psrld $13,%xmm7
+ pxor %xmm2,%xmm1
+ paddd %xmm0,%xmm6
+ pslld $19-10,%xmm2
+ pand %xmm4,%xmm3
+ pxor %xmm7,%xmm1
+
+
+ psrld $22-13,%xmm7
+ pxor %xmm2,%xmm1
+ movdqa %xmm10,%xmm8
+ pslld $30-19,%xmm2
+ pxor %xmm1,%xmm7
+ pxor %xmm3,%xmm8
+ paddd %xmm6,%xmm12
+ pxor %xmm2,%xmm7
+
+ paddd %xmm6,%xmm8
+ paddd %xmm7,%xmm8
+ leaq 256(%rbp),%rbp
+ decl %ecx
+ jnz .Loop_16_xx
+
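+# Annotation (a reading of the generated code, not upstream commentary):
+# the 64 rounds for this block are done.  The per-lane block counters at
+# (%rbx) drive the tail: lanes that are nearly finished get their input
+# pointer parked on the K256 table (harmless dummy reads next pass),
+# pcmpgtd builds an all-ones mask for still-active lanes (which paddd
+# also uses to decrement their counters), and the pand/paddd sequence
+# below folds the working variables into the saved hash state while
+# leaving finished lanes untouched.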
+ movl $1,%ecx
+ leaq K256+128(%rip),%rbp
+
+ movdqa (%rbx),%xmm7
+ cmpl 0(%rbx),%ecx
+ pxor %xmm0,%xmm0
+ cmovgeq %rbp,%r8
+ cmpl 4(%rbx),%ecx
+ movdqa %xmm7,%xmm6
+ cmovgeq %rbp,%r9
+ cmpl 8(%rbx),%ecx
+ pcmpgtd %xmm0,%xmm6
+ cmovgeq %rbp,%r10
+ cmpl 12(%rbx),%ecx
+ paddd %xmm6,%xmm7
+ cmovgeq %rbp,%r11
+
+ movdqu 0-128(%rdi),%xmm0
+ pand %xmm6,%xmm8
+ movdqu 32-128(%rdi),%xmm1
+ pand %xmm6,%xmm9
+ movdqu 64-128(%rdi),%xmm2
+ pand %xmm6,%xmm10
+ movdqu 96-128(%rdi),%xmm5
+ pand %xmm6,%xmm11
+ paddd %xmm0,%xmm8
+ movdqu 128-128(%rdi),%xmm0
+ pand %xmm6,%xmm12
+ paddd %xmm1,%xmm9
+ movdqu 160-128(%rdi),%xmm1
+ pand %xmm6,%xmm13
+ paddd %xmm2,%xmm10
+ movdqu 192-128(%rdi),%xmm2
+ pand %xmm6,%xmm14
+ paddd %xmm5,%xmm11
+ movdqu 224-128(%rdi),%xmm5
+ pand %xmm6,%xmm15
+ paddd %xmm0,%xmm12
+ paddd %xmm1,%xmm13
+ movdqu %xmm8,0-128(%rdi)
+ paddd %xmm2,%xmm14
+ movdqu %xmm9,32-128(%rdi)
+ paddd %xmm5,%xmm15
+ movdqu %xmm10,64-128(%rdi)
+ movdqu %xmm11,96-128(%rdi)
+ movdqu %xmm12,128-128(%rdi)
+ movdqu %xmm13,160-128(%rdi)
+ movdqu %xmm14,192-128(%rdi)
+ movdqu %xmm15,224-128(%rdi)
+
+ movdqa %xmm7,(%rbx)
+ movdqa .Lpbswap(%rip),%xmm6
+ decl %edx
+ jnz .Loop
+
+ movl 280(%rsp),%edx
+ leaq 16(%rdi),%rdi
+ leaq 64(%rsi),%rsi
+ decl %edx
+ jnz .Loop_grande
+
+.Ldone:
+ movq 272(%rsp),%rax
+ movq -16(%rax),%rbp
+ movq -8(%rax),%rbx
+ leaq (%rax),%rsp
+.Lepilogue:
+ .byte 0xf3,0xc3
+.size sha256_multi_block,.-sha256_multi_block
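+
+# Annotation: sha256_multi_block_shaext is the two-lane variant built on
+# the SHA extensions (SHA-NI).  The .byte runs below are hand-encoded
+# instructions, kept as raw bytes so the file assembles on toolchains
+# that predate SHA-NI: 15,56,203 = sha256rnds2, 15,56,204 = sha256msg1,
+# 15,56,205 = sha256msg2, 102,15,56,0 = pshufb (REX prefixes such as the
+# leading 69/65 select the high xmm registers).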
+.type sha256_multi_block_shaext,@function
+.align 32
+sha256_multi_block_shaext:
+_shaext_shortcut:
+ movq %rsp,%rax
+ pushq %rbx
+ pushq %rbp
+ subq $288,%rsp
+ shll $1,%edx
+ andq $-256,%rsp
+ leaq 128(%rdi),%rdi
+ movq %rax,272(%rsp)
+.Lbody_shaext:
+ leaq 256(%rsp),%rbx
+ leaq K256_shaext+128(%rip),%rbp
+
+.Loop_grande_shaext:
+ movl %edx,280(%rsp)
+ xorl %edx,%edx
+ movq 0(%rsi),%r8
+ movl 8(%rsi),%ecx
+ cmpl %edx,%ecx
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movl %ecx,0(%rbx)
+ cmovleq %rsp,%r8
+ movq 16(%rsi),%r9
+ movl 24(%rsi),%ecx
+ cmpl %edx,%ecx
+ cmovgl %ecx,%edx
+ testl %ecx,%ecx
+ movl %ecx,4(%rbx)
+ cmovleq %rsp,%r9
+ testl %edx,%edx
+ jz .Ldone_shaext
+
+ movq 0-128(%rdi),%xmm12
+ movq 32-128(%rdi),%xmm4
+ movq 64-128(%rdi),%xmm13
+ movq 96-128(%rdi),%xmm5
+ movq 128-128(%rdi),%xmm8
+ movq 160-128(%rdi),%xmm9
+ movq 192-128(%rdi),%xmm10
+ movq 224-128(%rdi),%xmm11
+
+ punpckldq %xmm4,%xmm12
+ punpckldq %xmm5,%xmm13
+ punpckldq %xmm9,%xmm8
+ punpckldq %xmm11,%xmm10
+ movdqa K256_shaext-16(%rip),%xmm3
+
+ movdqa %xmm12,%xmm14
+ movdqa %xmm13,%xmm15
+ punpcklqdq %xmm8,%xmm12
+ punpcklqdq %xmm10,%xmm13
+ punpckhqdq %xmm8,%xmm14
+ punpckhqdq %xmm10,%xmm15
+
+ pshufd $27,%xmm12,%xmm12
+ pshufd $27,%xmm13,%xmm13
+ pshufd $27,%xmm14,%xmm14
+ pshufd $27,%xmm15,%xmm15
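+# Annotation: pshufd $27 (0b00011011) reverses the dword order of each
+# packed register, putting the interleaved state into the ABEF/CDGH
+# word layout that sha256rnds2 consumes; %xmm3, loaded just above from
+# K256_shaext-16 (the tail of .Lpbswap), is the byte-swap mask used by
+# the pshufb encodings inside the loop.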
+ jmp .Loop_shaext
+
+.align 32
+.Loop_shaext:
+ movdqu 0(%r8),%xmm4
+ movdqu 0(%r9),%xmm8
+ movdqu 16(%r8),%xmm5
+ movdqu 16(%r9),%xmm9
+ movdqu 32(%r8),%xmm6
+.byte 102,15,56,0,227
+ movdqu 32(%r9),%xmm10
+.byte 102,68,15,56,0,195
+ movdqu 48(%r8),%xmm7
+ leaq 64(%r8),%r8
+ movdqu 48(%r9),%xmm11
+ leaq 64(%r9),%r9
+
+ movdqa 0-128(%rbp),%xmm0
+.byte 102,15,56,0,235
+ paddd %xmm4,%xmm0
+ pxor %xmm12,%xmm4
+ movdqa %xmm0,%xmm1
+ movdqa 0-128(%rbp),%xmm2
+.byte 102,68,15,56,0,203
+ paddd %xmm8,%xmm2
+ movdqa %xmm13,80(%rsp)
+.byte 69,15,56,203,236
+ pxor %xmm14,%xmm8
+ movdqa %xmm2,%xmm0
+ movdqa %xmm15,112(%rsp)
+.byte 69,15,56,203,254
+ pshufd $14,%xmm1,%xmm0
+ pxor %xmm12,%xmm4
+ movdqa %xmm12,64(%rsp)
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ pxor %xmm14,%xmm8
+ movdqa %xmm14,96(%rsp)
+ movdqa 16-128(%rbp),%xmm1
+ paddd %xmm5,%xmm1
+.byte 102,15,56,0,243
+.byte 69,15,56,203,247
+
+ movdqa %xmm1,%xmm0
+ movdqa 16-128(%rbp),%xmm2
+ paddd %xmm9,%xmm2
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ prefetcht0 127(%r8)
+.byte 102,15,56,0,251
+.byte 102,68,15,56,0,211
+ prefetcht0 127(%r9)
+.byte 69,15,56,203,254
+ pshufd $14,%xmm1,%xmm0
+.byte 102,68,15,56,0,219
+.byte 15,56,204,229
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 32-128(%rbp),%xmm1
+ paddd %xmm6,%xmm1
+.byte 69,15,56,203,247
+
+ movdqa %xmm1,%xmm0
+ movdqa 32-128(%rbp),%xmm2
+ paddd %xmm10,%xmm2
+.byte 69,15,56,203,236
+.byte 69,15,56,204,193
+ movdqa %xmm2,%xmm0
+ movdqa %xmm7,%xmm3
+.byte 69,15,56,203,254
+ pshufd $14,%xmm1,%xmm0
+.byte 102,15,58,15,222,4
+ paddd %xmm3,%xmm4
+ movdqa %xmm11,%xmm3
+.byte 102,65,15,58,15,218,4
+.byte 15,56,204,238
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 48-128(%rbp),%xmm1
+ paddd %xmm7,%xmm1
+.byte 69,15,56,203,247
+.byte 69,15,56,204,202
+
+ movdqa %xmm1,%xmm0
+ movdqa 48-128(%rbp),%xmm2
+ paddd %xmm3,%xmm8
+ paddd %xmm11,%xmm2
+.byte 15,56,205,231
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ movdqa %xmm4,%xmm3
+.byte 102,15,58,15,223,4
+.byte 69,15,56,203,254
+.byte 69,15,56,205,195
+ pshufd $14,%xmm1,%xmm0
+ paddd %xmm3,%xmm5
+ movdqa %xmm8,%xmm3
+.byte 102,65,15,58,15,219,4
+.byte 15,56,204,247
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 64-128(%rbp),%xmm1
+ paddd %xmm4,%xmm1
+.byte 69,15,56,203,247
+.byte 69,15,56,204,211
+ movdqa %xmm1,%xmm0
+ movdqa 64-128(%rbp),%xmm2
+ paddd %xmm3,%xmm9
+ paddd %xmm8,%xmm2
+.byte 15,56,205,236
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ movdqa %xmm5,%xmm3
+.byte 102,15,58,15,220,4
+.byte 69,15,56,203,254
+.byte 69,15,56,205,200
+ pshufd $14,%xmm1,%xmm0
+ paddd %xmm3,%xmm6
+ movdqa %xmm9,%xmm3
+.byte 102,65,15,58,15,216,4
+.byte 15,56,204,252
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 80-128(%rbp),%xmm1
+ paddd %xmm5,%xmm1
+.byte 69,15,56,203,247
+.byte 69,15,56,204,216
+ movdqa %xmm1,%xmm0
+ movdqa 80-128(%rbp),%xmm2
+ paddd %xmm3,%xmm10
+ paddd %xmm9,%xmm2
+.byte 15,56,205,245
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ movdqa %xmm6,%xmm3
+.byte 102,15,58,15,221,4
+.byte 69,15,56,203,254
+.byte 69,15,56,205,209
+ pshufd $14,%xmm1,%xmm0
+ paddd %xmm3,%xmm7
+ movdqa %xmm10,%xmm3
+.byte 102,65,15,58,15,217,4
+.byte 15,56,204,229
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 96-128(%rbp),%xmm1
+ paddd %xmm6,%xmm1
+.byte 69,15,56,203,247
+.byte 69,15,56,204,193
+ movdqa %xmm1,%xmm0
+ movdqa 96-128(%rbp),%xmm2
+ paddd %xmm3,%xmm11
+ paddd %xmm10,%xmm2
+.byte 15,56,205,254
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ movdqa %xmm7,%xmm3
+.byte 102,15,58,15,222,4
+.byte 69,15,56,203,254
+.byte 69,15,56,205,218
+ pshufd $14,%xmm1,%xmm0
+ paddd %xmm3,%xmm4
+ movdqa %xmm11,%xmm3
+.byte 102,65,15,58,15,218,4
+.byte 15,56,204,238
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 112-128(%rbp),%xmm1
+ paddd %xmm7,%xmm1
+.byte 69,15,56,203,247
+.byte 69,15,56,204,202
+ movdqa %xmm1,%xmm0
+ movdqa 112-128(%rbp),%xmm2
+ paddd %xmm3,%xmm8
+ paddd %xmm11,%xmm2
+.byte 15,56,205,231
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ movdqa %xmm4,%xmm3
+.byte 102,15,58,15,223,4
+.byte 69,15,56,203,254
+.byte 69,15,56,205,195
+ pshufd $14,%xmm1,%xmm0
+ paddd %xmm3,%xmm5
+ movdqa %xmm8,%xmm3
+.byte 102,65,15,58,15,219,4
+.byte 15,56,204,247
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 128-128(%rbp),%xmm1
+ paddd %xmm4,%xmm1
+.byte 69,15,56,203,247
+.byte 69,15,56,204,211
+ movdqa %xmm1,%xmm0
+ movdqa 128-128(%rbp),%xmm2
+ paddd %xmm3,%xmm9
+ paddd %xmm8,%xmm2
+.byte 15,56,205,236
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ movdqa %xmm5,%xmm3
+.byte 102,15,58,15,220,4
+.byte 69,15,56,203,254
+.byte 69,15,56,205,200
+ pshufd $14,%xmm1,%xmm0
+ paddd %xmm3,%xmm6
+ movdqa %xmm9,%xmm3
+.byte 102,65,15,58,15,216,4
+.byte 15,56,204,252
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 144-128(%rbp),%xmm1
+ paddd %xmm5,%xmm1
+.byte 69,15,56,203,247
+.byte 69,15,56,204,216
+ movdqa %xmm1,%xmm0
+ movdqa 144-128(%rbp),%xmm2
+ paddd %xmm3,%xmm10
+ paddd %xmm9,%xmm2
+.byte 15,56,205,245
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ movdqa %xmm6,%xmm3
+.byte 102,15,58,15,221,4
+.byte 69,15,56,203,254
+.byte 69,15,56,205,209
+ pshufd $14,%xmm1,%xmm0
+ paddd %xmm3,%xmm7
+ movdqa %xmm10,%xmm3
+.byte 102,65,15,58,15,217,4
+.byte 15,56,204,229
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 160-128(%rbp),%xmm1
+ paddd %xmm6,%xmm1
+.byte 69,15,56,203,247
+.byte 69,15,56,204,193
+ movdqa %xmm1,%xmm0
+ movdqa 160-128(%rbp),%xmm2
+ paddd %xmm3,%xmm11
+ paddd %xmm10,%xmm2
+.byte 15,56,205,254
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ movdqa %xmm7,%xmm3
+.byte 102,15,58,15,222,4
+.byte 69,15,56,203,254
+.byte 69,15,56,205,218
+ pshufd $14,%xmm1,%xmm0
+ paddd %xmm3,%xmm4
+ movdqa %xmm11,%xmm3
+.byte 102,65,15,58,15,218,4
+.byte 15,56,204,238
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 176-128(%rbp),%xmm1
+ paddd %xmm7,%xmm1
+.byte 69,15,56,203,247
+.byte 69,15,56,204,202
+ movdqa %xmm1,%xmm0
+ movdqa 176-128(%rbp),%xmm2
+ paddd %xmm3,%xmm8
+ paddd %xmm11,%xmm2
+.byte 15,56,205,231
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ movdqa %xmm4,%xmm3
+.byte 102,15,58,15,223,4
+.byte 69,15,56,203,254
+.byte 69,15,56,205,195
+ pshufd $14,%xmm1,%xmm0
+ paddd %xmm3,%xmm5
+ movdqa %xmm8,%xmm3
+.byte 102,65,15,58,15,219,4
+.byte 15,56,204,247
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 192-128(%rbp),%xmm1
+ paddd %xmm4,%xmm1
+.byte 69,15,56,203,247
+.byte 69,15,56,204,211
+ movdqa %xmm1,%xmm0
+ movdqa 192-128(%rbp),%xmm2
+ paddd %xmm3,%xmm9
+ paddd %xmm8,%xmm2
+.byte 15,56,205,236
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ movdqa %xmm5,%xmm3
+.byte 102,15,58,15,220,4
+.byte 69,15,56,203,254
+.byte 69,15,56,205,200
+ pshufd $14,%xmm1,%xmm0
+ paddd %xmm3,%xmm6
+ movdqa %xmm9,%xmm3
+.byte 102,65,15,58,15,216,4
+.byte 15,56,204,252
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 208-128(%rbp),%xmm1
+ paddd %xmm5,%xmm1
+.byte 69,15,56,203,247
+.byte 69,15,56,204,216
+ movdqa %xmm1,%xmm0
+ movdqa 208-128(%rbp),%xmm2
+ paddd %xmm3,%xmm10
+ paddd %xmm9,%xmm2
+.byte 15,56,205,245
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ movdqa %xmm6,%xmm3
+.byte 102,15,58,15,221,4
+.byte 69,15,56,203,254
+.byte 69,15,56,205,209
+ pshufd $14,%xmm1,%xmm0
+ paddd %xmm3,%xmm7
+ movdqa %xmm10,%xmm3
+.byte 102,65,15,58,15,217,4
+ nop
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 224-128(%rbp),%xmm1
+ paddd %xmm6,%xmm1
+.byte 69,15,56,203,247
+
+ movdqa %xmm1,%xmm0
+ movdqa 224-128(%rbp),%xmm2
+ paddd %xmm3,%xmm11
+ paddd %xmm10,%xmm2
+.byte 15,56,205,254
+ nop
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ movl $1,%ecx
+ pxor %xmm6,%xmm6
+.byte 69,15,56,203,254
+.byte 69,15,56,205,218
+ pshufd $14,%xmm1,%xmm0
+ movdqa 240-128(%rbp),%xmm1
+ paddd %xmm7,%xmm1
+ movq (%rbx),%xmm7
+ nop
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ movdqa 240-128(%rbp),%xmm2
+ paddd %xmm11,%xmm2
+.byte 69,15,56,203,247
+
+ movdqa %xmm1,%xmm0
+ cmpl 0(%rbx),%ecx
+ cmovgeq %rsp,%r8
+ cmpl 4(%rbx),%ecx
+ cmovgeq %rsp,%r9
+ pshufd $0,%xmm7,%xmm9
+.byte 69,15,56,203,236
+ movdqa %xmm2,%xmm0
+ pshufd $85,%xmm7,%xmm10
+ movdqa %xmm7,%xmm11
+.byte 69,15,56,203,254
+ pshufd $14,%xmm1,%xmm0
+ pcmpgtd %xmm6,%xmm9
+ pcmpgtd %xmm6,%xmm10
+.byte 69,15,56,203,229
+ pshufd $14,%xmm2,%xmm0
+ pcmpgtd %xmm6,%xmm11
+ movdqa K256_shaext-16(%rip),%xmm3
+.byte 69,15,56,203,247
+
+ pand %xmm9,%xmm13
+ pand %xmm10,%xmm15
+ pand %xmm9,%xmm12
+ pand %xmm10,%xmm14
+ paddd %xmm7,%xmm11
+
+ paddd 80(%rsp),%xmm13
+ paddd 112(%rsp),%xmm15
+ paddd 64(%rsp),%xmm12
+ paddd 96(%rsp),%xmm14
+
+ movq %xmm11,(%rbx)
+ decl %edx
+ jnz .Loop_shaext
+
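+# Annotation: both lanes are finished.  Undo the dword reversal, then
+# de-interleave the packed state registers with punpckldq/punpckhdq and
+# store each hash word back two lanes at a time into the interleaved
+# context layout at (%rdi) (stride 32 between words).
+#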
+ movl 280(%rsp),%edx
+
+ pshufd $27,%xmm12,%xmm12
+ pshufd $27,%xmm13,%xmm13
+ pshufd $27,%xmm14,%xmm14
+ pshufd $27,%xmm15,%xmm15
+
+ movdqa %xmm12,%xmm5
+ movdqa %xmm13,%xmm6
+ punpckldq %xmm14,%xmm12
+ punpckhdq %xmm14,%xmm5
+ punpckldq %xmm15,%xmm13
+ punpckhdq %xmm15,%xmm6
+
+ movq %xmm12,0-128(%rdi)
+ psrldq $8,%xmm12
+ movq %xmm5,128-128(%rdi)
+ psrldq $8,%xmm5
+ movq %xmm12,32-128(%rdi)
+ movq %xmm5,160-128(%rdi)
+
+ movq %xmm13,64-128(%rdi)
+ psrldq $8,%xmm13
+ movq %xmm6,192-128(%rdi)
+ psrldq $8,%xmm6
+ movq %xmm13,96-128(%rdi)
+ movq %xmm6,224-128(%rdi)
+
+ leaq 8(%rdi),%rdi
+ leaq 32(%rsi),%rsi
+ decl %edx
+ jnz .Loop_grande_shaext
+
+.Ldone_shaext:
+
+ movq -16(%rax),%rbp
+ movq -8(%rax),%rbx
+ leaq (%rax),%rsp
+.Lepilogue_shaext:
+ .byte 0xf3,0xc3
+.size sha256_multi_block_shaext,.-sha256_multi_block_shaext
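+
+# Annotation: constant tables.  K256 holds the 64 SHA-256 round
+# constants, each repeated eight times (two .long rows of four) for
+# broadcast loads by the SIMD multi-block paths; .Lpbswap is a pshufb
+# mask that byte-swaps each 32-bit word to big-endian; K256_shaext
+# lists the same constants once each, in order, for the SHA-NI path.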
+.align 256
+K256:
+.long 1116352408,1116352408,1116352408,1116352408
+.long 1116352408,1116352408,1116352408,1116352408
+.long 1899447441,1899447441,1899447441,1899447441
+.long 1899447441,1899447441,1899447441,1899447441
+.long 3049323471,3049323471,3049323471,3049323471
+.long 3049323471,3049323471,3049323471,3049323471
+.long 3921009573,3921009573,3921009573,3921009573
+.long 3921009573,3921009573,3921009573,3921009573
+.long 961987163,961987163,961987163,961987163
+.long 961987163,961987163,961987163,961987163
+.long 1508970993,1508970993,1508970993,1508970993
+.long 1508970993,1508970993,1508970993,1508970993
+.long 2453635748,2453635748,2453635748,2453635748
+.long 2453635748,2453635748,2453635748,2453635748
+.long 2870763221,2870763221,2870763221,2870763221
+.long 2870763221,2870763221,2870763221,2870763221
+.long 3624381080,3624381080,3624381080,3624381080
+.long 3624381080,3624381080,3624381080,3624381080
+.long 310598401,310598401,310598401,310598401
+.long 310598401,310598401,310598401,310598401
+.long 607225278,607225278,607225278,607225278
+.long 607225278,607225278,607225278,607225278
+.long 1426881987,1426881987,1426881987,1426881987
+.long 1426881987,1426881987,1426881987,1426881987
+.long 1925078388,1925078388,1925078388,1925078388
+.long 1925078388,1925078388,1925078388,1925078388
+.long 2162078206,2162078206,2162078206,2162078206
+.long 2162078206,2162078206,2162078206,2162078206
+.long 2614888103,2614888103,2614888103,2614888103
+.long 2614888103,2614888103,2614888103,2614888103
+.long 3248222580,3248222580,3248222580,3248222580
+.long 3248222580,3248222580,3248222580,3248222580
+.long 3835390401,3835390401,3835390401,3835390401
+.long 3835390401,3835390401,3835390401,3835390401
+.long 4022224774,4022224774,4022224774,4022224774
+.long 4022224774,4022224774,4022224774,4022224774
+.long 264347078,264347078,264347078,264347078
+.long 264347078,264347078,264347078,264347078
+.long 604807628,604807628,604807628,604807628
+.long 604807628,604807628,604807628,604807628
+.long 770255983,770255983,770255983,770255983
+.long 770255983,770255983,770255983,770255983
+.long 1249150122,1249150122,1249150122,1249150122
+.long 1249150122,1249150122,1249150122,1249150122
+.long 1555081692,1555081692,1555081692,1555081692
+.long 1555081692,1555081692,1555081692,1555081692
+.long 1996064986,1996064986,1996064986,1996064986
+.long 1996064986,1996064986,1996064986,1996064986
+.long 2554220882,2554220882,2554220882,2554220882
+.long 2554220882,2554220882,2554220882,2554220882
+.long 2821834349,2821834349,2821834349,2821834349
+.long 2821834349,2821834349,2821834349,2821834349
+.long 2952996808,2952996808,2952996808,2952996808
+.long 2952996808,2952996808,2952996808,2952996808
+.long 3210313671,3210313671,3210313671,3210313671
+.long 3210313671,3210313671,3210313671,3210313671
+.long 3336571891,3336571891,3336571891,3336571891
+.long 3336571891,3336571891,3336571891,3336571891
+.long 3584528711,3584528711,3584528711,3584528711
+.long 3584528711,3584528711,3584528711,3584528711
+.long 113926993,113926993,113926993,113926993
+.long 113926993,113926993,113926993,113926993
+.long 338241895,338241895,338241895,338241895
+.long 338241895,338241895,338241895,338241895
+.long 666307205,666307205,666307205,666307205
+.long 666307205,666307205,666307205,666307205
+.long 773529912,773529912,773529912,773529912
+.long 773529912,773529912,773529912,773529912
+.long 1294757372,1294757372,1294757372,1294757372
+.long 1294757372,1294757372,1294757372,1294757372
+.long 1396182291,1396182291,1396182291,1396182291
+.long 1396182291,1396182291,1396182291,1396182291
+.long 1695183700,1695183700,1695183700,1695183700
+.long 1695183700,1695183700,1695183700,1695183700
+.long 1986661051,1986661051,1986661051,1986661051
+.long 1986661051,1986661051,1986661051,1986661051
+.long 2177026350,2177026350,2177026350,2177026350
+.long 2177026350,2177026350,2177026350,2177026350
+.long 2456956037,2456956037,2456956037,2456956037
+.long 2456956037,2456956037,2456956037,2456956037
+.long 2730485921,2730485921,2730485921,2730485921
+.long 2730485921,2730485921,2730485921,2730485921
+.long 2820302411,2820302411,2820302411,2820302411
+.long 2820302411,2820302411,2820302411,2820302411
+.long 3259730800,3259730800,3259730800,3259730800
+.long 3259730800,3259730800,3259730800,3259730800
+.long 3345764771,3345764771,3345764771,3345764771
+.long 3345764771,3345764771,3345764771,3345764771
+.long 3516065817,3516065817,3516065817,3516065817
+.long 3516065817,3516065817,3516065817,3516065817
+.long 3600352804,3600352804,3600352804,3600352804
+.long 3600352804,3600352804,3600352804,3600352804
+.long 4094571909,4094571909,4094571909,4094571909
+.long 4094571909,4094571909,4094571909,4094571909
+.long 275423344,275423344,275423344,275423344
+.long 275423344,275423344,275423344,275423344
+.long 430227734,430227734,430227734,430227734
+.long 430227734,430227734,430227734,430227734
+.long 506948616,506948616,506948616,506948616
+.long 506948616,506948616,506948616,506948616
+.long 659060556,659060556,659060556,659060556
+.long 659060556,659060556,659060556,659060556
+.long 883997877,883997877,883997877,883997877
+.long 883997877,883997877,883997877,883997877
+.long 958139571,958139571,958139571,958139571
+.long 958139571,958139571,958139571,958139571
+.long 1322822218,1322822218,1322822218,1322822218
+.long 1322822218,1322822218,1322822218,1322822218
+.long 1537002063,1537002063,1537002063,1537002063
+.long 1537002063,1537002063,1537002063,1537002063
+.long 1747873779,1747873779,1747873779,1747873779
+.long 1747873779,1747873779,1747873779,1747873779
+.long 1955562222,1955562222,1955562222,1955562222
+.long 1955562222,1955562222,1955562222,1955562222
+.long 2024104815,2024104815,2024104815,2024104815
+.long 2024104815,2024104815,2024104815,2024104815
+.long 2227730452,2227730452,2227730452,2227730452
+.long 2227730452,2227730452,2227730452,2227730452
+.long 2361852424,2361852424,2361852424,2361852424
+.long 2361852424,2361852424,2361852424,2361852424
+.long 2428436474,2428436474,2428436474,2428436474
+.long 2428436474,2428436474,2428436474,2428436474
+.long 2756734187,2756734187,2756734187,2756734187
+.long 2756734187,2756734187,2756734187,2756734187
+.long 3204031479,3204031479,3204031479,3204031479
+.long 3204031479,3204031479,3204031479,3204031479
+.long 3329325298,3329325298,3329325298,3329325298
+.long 3329325298,3329325298,3329325298,3329325298
+.Lpbswap:
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+K256_shaext:
+.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.byte 83,72,65,50,53,54,32,109,117,108,116,105,45,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
diff --git a/secure/lib/libcrypto/amd64/sha256-x86_64.S b/secure/lib/libcrypto/amd64/sha256-x86_64.S
index 79e06b4..a43a668 100644
--- a/secure/lib/libcrypto/amd64/sha256-x86_64.S
+++ b/secure/lib/libcrypto/amd64/sha256-x86_64.S
@@ -1,10 +1,19 @@
# $FreeBSD$
.text
+
.globl sha256_block_data_order
.type sha256_block_data_order,@function
.align 16
sha256_block_data_order:
+ leaq OPENSSL_ia32cap_P(%rip),%r11
+ movl 0(%r11),%r9d
+ movl 4(%r11),%r10d
+ movl 8(%r11),%r11d
+ testl $536870912,%r11d
+ jnz _shaext_shortcut
+ testl $512,%r10d
+ jnz .Lssse3_shortcut
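+	# Annotation: the tests above dispatch on OPENSSL_ia32cap_P.
+	# Bit 29 of the third dword (0x20000000, CPUID leaf-7 EBX)
+	# advertises the SHA extensions; bit 9 of the second dword
+	# (0x200, leaf-1 ECX) advertises SSSE3; otherwise fall through
+	# to the integer implementation below.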
pushq %rbx
pushq %rbp
pushq %r12
@@ -22,8 +31,6 @@ sha256_block_data_order:
movq %r11,64+24(%rsp)
.Lprologue:
- leaq K256(%rip),%rbp
-
movl 0(%rdi),%eax
movl 4(%rdi),%ebx
movl 8(%rdi),%ecx
@@ -36,1694 +43,1632 @@ sha256_block_data_order:
.align 16
.Lloop:
- xorq %rdi,%rdi
+ movl %ebx,%edi
+ leaq K256(%rip),%rbp
+ xorl %ecx,%edi
movl 0(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
- movl %r12d,0(%rsp)
- rorl $9,%r14d
xorl %r8d,%r13d
+ rorl $9,%r14d
xorl %r10d,%r15d
- rorl $5,%r13d
- addl %r11d,%r12d
+ movl %r12d,0(%rsp)
xorl %eax,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %r8d,%r15d
- movl %ebx,%r11d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
- xorl %r10d,%r15d
+ addl %r15d,%r12d
- xorl %ecx,%r11d
+ movl %eax,%r15d
+ addl (%rbp),%r12d
xorl %eax,%r14d
- addl %r15d,%r12d
- movl %ebx,%r15d
+ xorl %ebx,%r15d
rorl $6,%r13d
- andl %eax,%r11d
- andl %ecx,%r15d
+ movl %ebx,%r11d
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r11d
+ xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
- leaq 1(%rdi),%rdi
- addl %r14d,%r11d
+ leaq 4(%rbp),%rbp
+ addl %r14d,%r11d
movl 4(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
- movl %r8d,%r15d
- movl %r12d,4(%rsp)
+ movl %r8d,%edi
- rorl $9,%r14d
xorl %edx,%r13d
- xorl %r9d,%r15d
+ rorl $9,%r14d
+ xorl %r9d,%edi
- rorl $5,%r13d
- addl %r10d,%r12d
+ movl %r12d,4(%rsp)
xorl %r11d,%r14d
+ andl %edx,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %edx,%r15d
- movl %eax,%r10d
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
- xorl %r9d,%r15d
+ addl %edi,%r12d
- xorl %ebx,%r10d
+ movl %r11d,%edi
+ addl (%rbp),%r12d
xorl %r11d,%r14d
- addl %r15d,%r12d
- movl %eax,%r15d
+ xorl %eax,%edi
rorl $6,%r13d
- andl %r11d,%r10d
- andl %ebx,%r15d
+ movl %eax,%r10d
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r10d
+ xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
- leaq 1(%rdi),%rdi
- addl %r14d,%r10d
+ leaq 4(%rbp),%rbp
+ addl %r14d,%r10d
movl 8(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
- movl %r12d,8(%rsp)
- rorl $9,%r14d
xorl %ecx,%r13d
+ rorl $9,%r14d
xorl %r8d,%r15d
- rorl $5,%r13d
- addl %r9d,%r12d
+ movl %r12d,8(%rsp)
xorl %r10d,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %ecx,%r15d
- movl %r11d,%r9d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
- xorl %r8d,%r15d
+ addl %r15d,%r12d
- xorl %eax,%r9d
+ movl %r10d,%r15d
+ addl (%rbp),%r12d
xorl %r10d,%r14d
- addl %r15d,%r12d
- movl %r11d,%r15d
+ xorl %r11d,%r15d
rorl $6,%r13d
- andl %r10d,%r9d
- andl %eax,%r15d
+ movl %r11d,%r9d
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r9d
+ xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
- leaq 1(%rdi),%rdi
- addl %r14d,%r9d
+ leaq 4(%rbp),%rbp
+ addl %r14d,%r9d
movl 12(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
- movl %ecx,%r15d
- movl %r12d,12(%rsp)
+ movl %ecx,%edi
- rorl $9,%r14d
xorl %ebx,%r13d
- xorl %edx,%r15d
+ rorl $9,%r14d
+ xorl %edx,%edi
- rorl $5,%r13d
- addl %r8d,%r12d
+ movl %r12d,12(%rsp)
xorl %r9d,%r14d
+ andl %ebx,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %ebx,%r15d
- movl %r10d,%r8d
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
- xorl %edx,%r15d
+ addl %edi,%r12d
- xorl %r11d,%r8d
+ movl %r9d,%edi
+ addl (%rbp),%r12d
xorl %r9d,%r14d
- addl %r15d,%r12d
- movl %r10d,%r15d
+ xorl %r10d,%edi
rorl $6,%r13d
- andl %r9d,%r8d
- andl %r11d,%r15d
+ movl %r10d,%r8d
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r8d
+ xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
- leaq 1(%rdi),%rdi
- addl %r14d,%r8d
+ leaq 20(%rbp),%rbp
+ addl %r14d,%r8d
movl 16(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
- movl %r12d,16(%rsp)
- rorl $9,%r14d
xorl %eax,%r13d
+ rorl $9,%r14d
xorl %ecx,%r15d
- rorl $5,%r13d
- addl %edx,%r12d
+ movl %r12d,16(%rsp)
xorl %r8d,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %eax,%r15d
- movl %r9d,%edx
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
- xorl %ecx,%r15d
+ addl %r15d,%r12d
- xorl %r10d,%edx
+ movl %r8d,%r15d
+ addl (%rbp),%r12d
xorl %r8d,%r14d
- addl %r15d,%r12d
- movl %r9d,%r15d
+ xorl %r9d,%r15d
rorl $6,%r13d
- andl %r8d,%edx
- andl %r10d,%r15d
+ movl %r9d,%edx
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%edx
+ xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
- leaq 1(%rdi),%rdi
- addl %r14d,%edx
+ leaq 4(%rbp),%rbp
+ addl %r14d,%edx
movl 20(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
- movl %eax,%r15d
- movl %r12d,20(%rsp)
+ movl %eax,%edi
- rorl $9,%r14d
xorl %r11d,%r13d
- xorl %ebx,%r15d
+ rorl $9,%r14d
+ xorl %ebx,%edi
- rorl $5,%r13d
- addl %ecx,%r12d
+ movl %r12d,20(%rsp)
xorl %edx,%r14d
+ andl %r11d,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %r11d,%r15d
- movl %r8d,%ecx
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
- xorl %ebx,%r15d
+ addl %edi,%r12d
- xorl %r9d,%ecx
+ movl %edx,%edi
+ addl (%rbp),%r12d
xorl %edx,%r14d
- addl %r15d,%r12d
- movl %r8d,%r15d
+ xorl %r8d,%edi
rorl $6,%r13d
- andl %edx,%ecx
- andl %r9d,%r15d
+ movl %r8d,%ecx
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%ecx
+ xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
- leaq 1(%rdi),%rdi
- addl %r14d,%ecx
+ leaq 4(%rbp),%rbp
+ addl %r14d,%ecx
movl 24(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
- movl %r12d,24(%rsp)
- rorl $9,%r14d
xorl %r10d,%r13d
+ rorl $9,%r14d
xorl %eax,%r15d
- rorl $5,%r13d
- addl %ebx,%r12d
+ movl %r12d,24(%rsp)
xorl %ecx,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %r10d,%r15d
- movl %edx,%ebx
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
- xorl %eax,%r15d
+ addl %r15d,%r12d
- xorl %r8d,%ebx
+ movl %ecx,%r15d
+ addl (%rbp),%r12d
xorl %ecx,%r14d
- addl %r15d,%r12d
- movl %edx,%r15d
+ xorl %edx,%r15d
rorl $6,%r13d
- andl %ecx,%ebx
- andl %r8d,%r15d
+ movl %edx,%ebx
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%ebx
+ xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
- leaq 1(%rdi),%rdi
- addl %r14d,%ebx
+ leaq 4(%rbp),%rbp
+ addl %r14d,%ebx
movl 28(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
- movl %r10d,%r15d
- movl %r12d,28(%rsp)
+ movl %r10d,%edi
- rorl $9,%r14d
xorl %r9d,%r13d
- xorl %r11d,%r15d
+ rorl $9,%r14d
+ xorl %r11d,%edi
- rorl $5,%r13d
- addl %eax,%r12d
+ movl %r12d,28(%rsp)
xorl %ebx,%r14d
+ andl %r9d,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %r9d,%r15d
- movl %ecx,%eax
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
- xorl %r11d,%r15d
+ addl %edi,%r12d
- xorl %edx,%eax
+ movl %ebx,%edi
+ addl (%rbp),%r12d
xorl %ebx,%r14d
- addl %r15d,%r12d
- movl %ecx,%r15d
+ xorl %ecx,%edi
rorl $6,%r13d
- andl %ebx,%eax
- andl %edx,%r15d
+ movl %ecx,%eax
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%eax
+ xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
- leaq 1(%rdi),%rdi
- addl %r14d,%eax
+ leaq 20(%rbp),%rbp
+ addl %r14d,%eax
movl 32(%rsi),%r12d
movl %r8d,%r13d
movl %eax,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r9d,%r15d
- movl %r12d,32(%rsp)
- rorl $9,%r14d
xorl %r8d,%r13d
+ rorl $9,%r14d
xorl %r10d,%r15d
- rorl $5,%r13d
- addl %r11d,%r12d
+ movl %r12d,32(%rsp)
xorl %eax,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %r8d,%r15d
- movl %ebx,%r11d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
- xorl %r10d,%r15d
+ addl %r15d,%r12d
- xorl %ecx,%r11d
+ movl %eax,%r15d
+ addl (%rbp),%r12d
xorl %eax,%r14d
- addl %r15d,%r12d
- movl %ebx,%r15d
+ xorl %ebx,%r15d
rorl $6,%r13d
- andl %eax,%r11d
- andl %ecx,%r15d
+ movl %ebx,%r11d
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r11d
+ xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
- leaq 1(%rdi),%rdi
- addl %r14d,%r11d
+ leaq 4(%rbp),%rbp
+ addl %r14d,%r11d
movl 36(%rsi),%r12d
movl %edx,%r13d
movl %r11d,%r14d
bswapl %r12d
rorl $14,%r13d
- movl %r8d,%r15d
- movl %r12d,36(%rsp)
+ movl %r8d,%edi
- rorl $9,%r14d
xorl %edx,%r13d
- xorl %r9d,%r15d
+ rorl $9,%r14d
+ xorl %r9d,%edi
- rorl $5,%r13d
- addl %r10d,%r12d
+ movl %r12d,36(%rsp)
xorl %r11d,%r14d
+ andl %edx,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %edx,%r15d
- movl %eax,%r10d
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
- xorl %r9d,%r15d
+ addl %edi,%r12d
- xorl %ebx,%r10d
+ movl %r11d,%edi
+ addl (%rbp),%r12d
xorl %r11d,%r14d
- addl %r15d,%r12d
- movl %eax,%r15d
+ xorl %eax,%edi
rorl $6,%r13d
- andl %r11d,%r10d
- andl %ebx,%r15d
+ movl %eax,%r10d
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r10d
+ xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
- leaq 1(%rdi),%rdi
- addl %r14d,%r10d
+ leaq 4(%rbp),%rbp
+ addl %r14d,%r10d
movl 40(%rsi),%r12d
movl %ecx,%r13d
movl %r10d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %edx,%r15d
- movl %r12d,40(%rsp)
- rorl $9,%r14d
xorl %ecx,%r13d
+ rorl $9,%r14d
xorl %r8d,%r15d
- rorl $5,%r13d
- addl %r9d,%r12d
+ movl %r12d,40(%rsp)
xorl %r10d,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %ecx,%r15d
- movl %r11d,%r9d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
- xorl %r8d,%r15d
+ addl %r15d,%r12d
- xorl %eax,%r9d
+ movl %r10d,%r15d
+ addl (%rbp),%r12d
xorl %r10d,%r14d
- addl %r15d,%r12d
- movl %r11d,%r15d
+ xorl %r11d,%r15d
rorl $6,%r13d
- andl %r10d,%r9d
- andl %eax,%r15d
+ movl %r11d,%r9d
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r9d
+ xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
- leaq 1(%rdi),%rdi
- addl %r14d,%r9d
+ leaq 4(%rbp),%rbp
+ addl %r14d,%r9d
movl 44(%rsi),%r12d
movl %ebx,%r13d
movl %r9d,%r14d
bswapl %r12d
rorl $14,%r13d
- movl %ecx,%r15d
- movl %r12d,44(%rsp)
+ movl %ecx,%edi
- rorl $9,%r14d
xorl %ebx,%r13d
- xorl %edx,%r15d
+ rorl $9,%r14d
+ xorl %edx,%edi
- rorl $5,%r13d
- addl %r8d,%r12d
+ movl %r12d,44(%rsp)
xorl %r9d,%r14d
+ andl %ebx,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %ebx,%r15d
- movl %r10d,%r8d
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
- xorl %edx,%r15d
+ addl %edi,%r12d
- xorl %r11d,%r8d
+ movl %r9d,%edi
+ addl (%rbp),%r12d
xorl %r9d,%r14d
- addl %r15d,%r12d
- movl %r10d,%r15d
+ xorl %r10d,%edi
rorl $6,%r13d
- andl %r9d,%r8d
- andl %r11d,%r15d
+ movl %r10d,%r8d
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r8d
+ xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
- leaq 1(%rdi),%rdi
- addl %r14d,%r8d
+ leaq 20(%rbp),%rbp
+ addl %r14d,%r8d
movl 48(%rsi),%r12d
movl %eax,%r13d
movl %r8d,%r14d
bswapl %r12d
rorl $14,%r13d
movl %ebx,%r15d
- movl %r12d,48(%rsp)
- rorl $9,%r14d
xorl %eax,%r13d
+ rorl $9,%r14d
xorl %ecx,%r15d
- rorl $5,%r13d
- addl %edx,%r12d
+ movl %r12d,48(%rsp)
xorl %r8d,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %eax,%r15d
- movl %r9d,%edx
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
- xorl %ecx,%r15d
+ addl %r15d,%r12d
- xorl %r10d,%edx
+ movl %r8d,%r15d
+ addl (%rbp),%r12d
xorl %r8d,%r14d
- addl %r15d,%r12d
- movl %r9d,%r15d
+ xorl %r9d,%r15d
rorl $6,%r13d
- andl %r8d,%edx
- andl %r10d,%r15d
+ movl %r9d,%edx
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%edx
+ xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
- leaq 1(%rdi),%rdi
- addl %r14d,%edx
+ leaq 4(%rbp),%rbp
+ addl %r14d,%edx
movl 52(%rsi),%r12d
movl %r11d,%r13d
movl %edx,%r14d
bswapl %r12d
rorl $14,%r13d
- movl %eax,%r15d
- movl %r12d,52(%rsp)
+ movl %eax,%edi
- rorl $9,%r14d
xorl %r11d,%r13d
- xorl %ebx,%r15d
+ rorl $9,%r14d
+ xorl %ebx,%edi
- rorl $5,%r13d
- addl %ecx,%r12d
+ movl %r12d,52(%rsp)
xorl %edx,%r14d
+ andl %r11d,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %r11d,%r15d
- movl %r8d,%ecx
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
- xorl %ebx,%r15d
+ addl %edi,%r12d
- xorl %r9d,%ecx
+ movl %edx,%edi
+ addl (%rbp),%r12d
xorl %edx,%r14d
- addl %r15d,%r12d
- movl %r8d,%r15d
+ xorl %r8d,%edi
rorl $6,%r13d
- andl %edx,%ecx
- andl %r9d,%r15d
+ movl %r8d,%ecx
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%ecx
+ xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
- leaq 1(%rdi),%rdi
- addl %r14d,%ecx
+ leaq 4(%rbp),%rbp
+ addl %r14d,%ecx
movl 56(%rsi),%r12d
movl %r10d,%r13d
movl %ecx,%r14d
bswapl %r12d
rorl $14,%r13d
movl %r11d,%r15d
- movl %r12d,56(%rsp)
- rorl $9,%r14d
xorl %r10d,%r13d
+ rorl $9,%r14d
xorl %eax,%r15d
- rorl $5,%r13d
- addl %ebx,%r12d
+ movl %r12d,56(%rsp)
xorl %ecx,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %r10d,%r15d
- movl %edx,%ebx
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
- xorl %eax,%r15d
+ addl %r15d,%r12d
- xorl %r8d,%ebx
+ movl %ecx,%r15d
+ addl (%rbp),%r12d
xorl %ecx,%r14d
- addl %r15d,%r12d
- movl %edx,%r15d
+ xorl %edx,%r15d
rorl $6,%r13d
- andl %ecx,%ebx
- andl %r8d,%r15d
+ movl %edx,%ebx
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%ebx
+ xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
- leaq 1(%rdi),%rdi
- addl %r14d,%ebx
+ leaq 4(%rbp),%rbp
+ addl %r14d,%ebx
movl 60(%rsi),%r12d
movl %r9d,%r13d
movl %ebx,%r14d
bswapl %r12d
rorl $14,%r13d
- movl %r10d,%r15d
- movl %r12d,60(%rsp)
+ movl %r10d,%edi
- rorl $9,%r14d
xorl %r9d,%r13d
- xorl %r11d,%r15d
+ rorl $9,%r14d
+ xorl %r11d,%edi
- rorl $5,%r13d
- addl %eax,%r12d
+ movl %r12d,60(%rsp)
xorl %ebx,%r14d
+ andl %r9d,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %r9d,%r15d
- movl %ecx,%eax
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
- xorl %r11d,%r15d
+ addl %edi,%r12d
- xorl %edx,%eax
+ movl %ebx,%edi
+ addl (%rbp),%r12d
xorl %ebx,%r14d
- addl %r15d,%r12d
- movl %ecx,%r15d
+ xorl %ecx,%edi
rorl $6,%r13d
- andl %ebx,%eax
- andl %edx,%r15d
+ movl %ecx,%eax
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%eax
+ xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
- leaq 1(%rdi),%rdi
- addl %r14d,%eax
+ leaq 20(%rbp),%rbp
jmp .Lrounds_16_xx
.align 16
.Lrounds_16_xx:
movl 4(%rsp),%r13d
- movl 56(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 56(%rsp),%r15d
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%eax
+ movl %r15d,%r14d
+ rorl $2,%r15d
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 36(%rsp),%r12d
-
- rorl $2,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ xorl %r13d,%r12d
+ xorl %r14d,%r15d
+ addl 36(%rsp),%r12d
addl 0(%rsp),%r12d
movl %r8d,%r13d
- addl %r14d,%r12d
+ addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
- movl %r12d,0(%rsp)
- rorl $9,%r14d
xorl %r8d,%r13d
+ rorl $9,%r14d
xorl %r10d,%r15d
- rorl $5,%r13d
- addl %r11d,%r12d
+ movl %r12d,0(%rsp)
xorl %eax,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %r8d,%r15d
- movl %ebx,%r11d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
- xorl %r10d,%r15d
+ addl %r15d,%r12d
- xorl %ecx,%r11d
+ movl %eax,%r15d
+ addl (%rbp),%r12d
xorl %eax,%r14d
- addl %r15d,%r12d
- movl %ebx,%r15d
+ xorl %ebx,%r15d
rorl $6,%r13d
- andl %eax,%r11d
- andl %ecx,%r15d
+ movl %ebx,%r11d
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r11d
+ xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
- leaq 1(%rdi),%rdi
- addl %r14d,%r11d
+ leaq 4(%rbp),%rbp
movl 8(%rsp),%r13d
- movl 60(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 60(%rsp),%edi
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%r11d
+ movl %edi,%r14d
+ rorl $2,%edi
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 40(%rsp),%r12d
-
- rorl $2,%r15d
- xorl %r14d,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
+ xorl %r14d,%edi
shrl $10,%r14d
- rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ rorl $17,%edi
+ xorl %r13d,%r12d
+ xorl %r14d,%edi
+ addl 40(%rsp),%r12d
addl 4(%rsp),%r12d
movl %edx,%r13d
- addl %r14d,%r12d
+ addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
- movl %r8d,%r15d
- movl %r12d,4(%rsp)
+ movl %r8d,%edi
- rorl $9,%r14d
xorl %edx,%r13d
- xorl %r9d,%r15d
+ rorl $9,%r14d
+ xorl %r9d,%edi
- rorl $5,%r13d
- addl %r10d,%r12d
+ movl %r12d,4(%rsp)
xorl %r11d,%r14d
+ andl %edx,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %edx,%r15d
- movl %eax,%r10d
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
- xorl %r9d,%r15d
+ addl %edi,%r12d
- xorl %ebx,%r10d
+ movl %r11d,%edi
+ addl (%rbp),%r12d
xorl %r11d,%r14d
- addl %r15d,%r12d
- movl %eax,%r15d
+ xorl %eax,%edi
rorl $6,%r13d
- andl %r11d,%r10d
- andl %ebx,%r15d
+ movl %eax,%r10d
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r10d
+ xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
- leaq 1(%rdi),%rdi
- addl %r14d,%r10d
+ leaq 4(%rbp),%rbp
movl 12(%rsp),%r13d
- movl 0(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 0(%rsp),%r15d
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%r10d
+ movl %r15d,%r14d
+ rorl $2,%r15d
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 44(%rsp),%r12d
-
- rorl $2,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ xorl %r13d,%r12d
+ xorl %r14d,%r15d
+ addl 44(%rsp),%r12d
addl 8(%rsp),%r12d
movl %ecx,%r13d
- addl %r14d,%r12d
+ addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
- movl %r12d,8(%rsp)
- rorl $9,%r14d
xorl %ecx,%r13d
+ rorl $9,%r14d
xorl %r8d,%r15d
- rorl $5,%r13d
- addl %r9d,%r12d
+ movl %r12d,8(%rsp)
xorl %r10d,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %ecx,%r15d
- movl %r11d,%r9d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
- xorl %r8d,%r15d
+ addl %r15d,%r12d
- xorl %eax,%r9d
+ movl %r10d,%r15d
+ addl (%rbp),%r12d
xorl %r10d,%r14d
- addl %r15d,%r12d
- movl %r11d,%r15d
+ xorl %r11d,%r15d
rorl $6,%r13d
- andl %r10d,%r9d
- andl %eax,%r15d
+ movl %r11d,%r9d
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r9d
+ xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
- leaq 1(%rdi),%rdi
- addl %r14d,%r9d
+ leaq 4(%rbp),%rbp
movl 16(%rsp),%r13d
- movl 4(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 4(%rsp),%edi
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%r9d
+ movl %edi,%r14d
+ rorl $2,%edi
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 48(%rsp),%r12d
-
- rorl $2,%r15d
- xorl %r14d,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
+ xorl %r14d,%edi
shrl $10,%r14d
- rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ rorl $17,%edi
+ xorl %r13d,%r12d
+ xorl %r14d,%edi
+ addl 48(%rsp),%r12d
addl 12(%rsp),%r12d
movl %ebx,%r13d
- addl %r14d,%r12d
+ addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
- movl %ecx,%r15d
- movl %r12d,12(%rsp)
+ movl %ecx,%edi
- rorl $9,%r14d
xorl %ebx,%r13d
- xorl %edx,%r15d
+ rorl $9,%r14d
+ xorl %edx,%edi
- rorl $5,%r13d
- addl %r8d,%r12d
+ movl %r12d,12(%rsp)
xorl %r9d,%r14d
+ andl %ebx,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %ebx,%r15d
- movl %r10d,%r8d
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
- xorl %edx,%r15d
+ addl %edi,%r12d
- xorl %r11d,%r8d
+ movl %r9d,%edi
+ addl (%rbp),%r12d
xorl %r9d,%r14d
- addl %r15d,%r12d
- movl %r10d,%r15d
+ xorl %r10d,%edi
rorl $6,%r13d
- andl %r9d,%r8d
- andl %r11d,%r15d
+ movl %r10d,%r8d
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r8d
+ xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
- leaq 1(%rdi),%rdi
- addl %r14d,%r8d
+ leaq 20(%rbp),%rbp
movl 20(%rsp),%r13d
- movl 8(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 8(%rsp),%r15d
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%r8d
+ movl %r15d,%r14d
+ rorl $2,%r15d
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 52(%rsp),%r12d
-
- rorl $2,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ xorl %r13d,%r12d
+ xorl %r14d,%r15d
+ addl 52(%rsp),%r12d
addl 16(%rsp),%r12d
movl %eax,%r13d
- addl %r14d,%r12d
+ addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
- movl %r12d,16(%rsp)
- rorl $9,%r14d
xorl %eax,%r13d
+ rorl $9,%r14d
xorl %ecx,%r15d
- rorl $5,%r13d
- addl %edx,%r12d
+ movl %r12d,16(%rsp)
xorl %r8d,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %eax,%r15d
- movl %r9d,%edx
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
- xorl %ecx,%r15d
+ addl %r15d,%r12d
- xorl %r10d,%edx
+ movl %r8d,%r15d
+ addl (%rbp),%r12d
xorl %r8d,%r14d
- addl %r15d,%r12d
- movl %r9d,%r15d
+ xorl %r9d,%r15d
rorl $6,%r13d
- andl %r8d,%edx
- andl %r10d,%r15d
+ movl %r9d,%edx
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%edx
+ xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
- leaq 1(%rdi),%rdi
- addl %r14d,%edx
+ leaq 4(%rbp),%rbp
movl 24(%rsp),%r13d
- movl 12(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 12(%rsp),%edi
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%edx
+ movl %edi,%r14d
+ rorl $2,%edi
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 56(%rsp),%r12d
-
- rorl $2,%r15d
- xorl %r14d,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
+ xorl %r14d,%edi
shrl $10,%r14d
- rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ rorl $17,%edi
+ xorl %r13d,%r12d
+ xorl %r14d,%edi
+ addl 56(%rsp),%r12d
addl 20(%rsp),%r12d
movl %r11d,%r13d
- addl %r14d,%r12d
+ addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
- movl %eax,%r15d
- movl %r12d,20(%rsp)
+ movl %eax,%edi
- rorl $9,%r14d
xorl %r11d,%r13d
- xorl %ebx,%r15d
+ rorl $9,%r14d
+ xorl %ebx,%edi
- rorl $5,%r13d
- addl %ecx,%r12d
+ movl %r12d,20(%rsp)
xorl %edx,%r14d
+ andl %r11d,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %r11d,%r15d
- movl %r8d,%ecx
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
- xorl %ebx,%r15d
+ addl %edi,%r12d
- xorl %r9d,%ecx
+ movl %edx,%edi
+ addl (%rbp),%r12d
xorl %edx,%r14d
- addl %r15d,%r12d
- movl %r8d,%r15d
+ xorl %r8d,%edi
rorl $6,%r13d
- andl %edx,%ecx
- andl %r9d,%r15d
+ movl %r8d,%ecx
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%ecx
+ xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
- leaq 1(%rdi),%rdi
- addl %r14d,%ecx
+ leaq 4(%rbp),%rbp
movl 28(%rsp),%r13d
- movl 16(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 16(%rsp),%r15d
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%ecx
+ movl %r15d,%r14d
+ rorl $2,%r15d
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 60(%rsp),%r12d
-
- rorl $2,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ xorl %r13d,%r12d
+ xorl %r14d,%r15d
+ addl 60(%rsp),%r12d
addl 24(%rsp),%r12d
movl %r10d,%r13d
- addl %r14d,%r12d
+ addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
- movl %r12d,24(%rsp)
- rorl $9,%r14d
xorl %r10d,%r13d
+ rorl $9,%r14d
xorl %eax,%r15d
- rorl $5,%r13d
- addl %ebx,%r12d
+ movl %r12d,24(%rsp)
xorl %ecx,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %r10d,%r15d
- movl %edx,%ebx
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
- xorl %eax,%r15d
+ addl %r15d,%r12d
- xorl %r8d,%ebx
+ movl %ecx,%r15d
+ addl (%rbp),%r12d
xorl %ecx,%r14d
- addl %r15d,%r12d
- movl %edx,%r15d
+ xorl %edx,%r15d
rorl $6,%r13d
- andl %ecx,%ebx
- andl %r8d,%r15d
+ movl %edx,%ebx
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%ebx
+ xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
- leaq 1(%rdi),%rdi
- addl %r14d,%ebx
+ leaq 4(%rbp),%rbp
movl 32(%rsp),%r13d
- movl 20(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 20(%rsp),%edi
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%ebx
+ movl %edi,%r14d
+ rorl $2,%edi
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 0(%rsp),%r12d
-
- rorl $2,%r15d
- xorl %r14d,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
+ xorl %r14d,%edi
shrl $10,%r14d
- rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ rorl $17,%edi
+ xorl %r13d,%r12d
+ xorl %r14d,%edi
+ addl 0(%rsp),%r12d
addl 28(%rsp),%r12d
movl %r9d,%r13d
- addl %r14d,%r12d
+ addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
- movl %r10d,%r15d
- movl %r12d,28(%rsp)
+ movl %r10d,%edi
- rorl $9,%r14d
xorl %r9d,%r13d
- xorl %r11d,%r15d
+ rorl $9,%r14d
+ xorl %r11d,%edi
- rorl $5,%r13d
- addl %eax,%r12d
+ movl %r12d,28(%rsp)
xorl %ebx,%r14d
+ andl %r9d,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %r9d,%r15d
- movl %ecx,%eax
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
- xorl %r11d,%r15d
+ addl %edi,%r12d
- xorl %edx,%eax
+ movl %ebx,%edi
+ addl (%rbp),%r12d
xorl %ebx,%r14d
- addl %r15d,%r12d
- movl %ecx,%r15d
+ xorl %ecx,%edi
rorl $6,%r13d
- andl %ebx,%eax
- andl %edx,%r15d
+ movl %ecx,%eax
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%eax
+ xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
- leaq 1(%rdi),%rdi
- addl %r14d,%eax
+ leaq 20(%rbp),%rbp
movl 36(%rsp),%r13d
- movl 24(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 24(%rsp),%r15d
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%eax
+ movl %r15d,%r14d
+ rorl $2,%r15d
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 4(%rsp),%r12d
-
- rorl $2,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ xorl %r13d,%r12d
+ xorl %r14d,%r15d
+ addl 4(%rsp),%r12d
addl 32(%rsp),%r12d
movl %r8d,%r13d
- addl %r14d,%r12d
+ addl %r15d,%r12d
movl %eax,%r14d
rorl $14,%r13d
movl %r9d,%r15d
- movl %r12d,32(%rsp)
- rorl $9,%r14d
xorl %r8d,%r13d
+ rorl $9,%r14d
xorl %r10d,%r15d
- rorl $5,%r13d
- addl %r11d,%r12d
+ movl %r12d,32(%rsp)
xorl %eax,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %r8d,%r15d
- movl %ebx,%r11d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %r10d,%r15d
rorl $11,%r14d
xorl %r8d,%r13d
- xorl %r10d,%r15d
+ addl %r15d,%r12d
- xorl %ecx,%r11d
+ movl %eax,%r15d
+ addl (%rbp),%r12d
xorl %eax,%r14d
- addl %r15d,%r12d
- movl %ebx,%r15d
+ xorl %ebx,%r15d
rorl $6,%r13d
- andl %eax,%r11d
- andl %ecx,%r15d
+ movl %ebx,%r11d
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r11d
+ xorl %edi,%r11d
addl %r12d,%edx
addl %r12d,%r11d
- leaq 1(%rdi),%rdi
- addl %r14d,%r11d
+ leaq 4(%rbp),%rbp
movl 40(%rsp),%r13d
- movl 28(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 28(%rsp),%edi
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%r11d
+ movl %edi,%r14d
+ rorl $2,%edi
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 8(%rsp),%r12d
-
- rorl $2,%r15d
- xorl %r14d,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
+ xorl %r14d,%edi
shrl $10,%r14d
- rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ rorl $17,%edi
+ xorl %r13d,%r12d
+ xorl %r14d,%edi
+ addl 8(%rsp),%r12d
addl 36(%rsp),%r12d
movl %edx,%r13d
- addl %r14d,%r12d
+ addl %edi,%r12d
movl %r11d,%r14d
rorl $14,%r13d
- movl %r8d,%r15d
- movl %r12d,36(%rsp)
+ movl %r8d,%edi
- rorl $9,%r14d
xorl %edx,%r13d
- xorl %r9d,%r15d
+ rorl $9,%r14d
+ xorl %r9d,%edi
- rorl $5,%r13d
- addl %r10d,%r12d
+ movl %r12d,36(%rsp)
xorl %r11d,%r14d
+ andl %edx,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %edx,%r15d
- movl %eax,%r10d
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r9d,%edi
rorl $11,%r14d
xorl %edx,%r13d
- xorl %r9d,%r15d
+ addl %edi,%r12d
- xorl %ebx,%r10d
+ movl %r11d,%edi
+ addl (%rbp),%r12d
xorl %r11d,%r14d
- addl %r15d,%r12d
- movl %eax,%r15d
+ xorl %eax,%edi
rorl $6,%r13d
- andl %r11d,%r10d
- andl %ebx,%r15d
+ movl %eax,%r10d
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r10d
+ xorl %r15d,%r10d
addl %r12d,%ecx
addl %r12d,%r10d
- leaq 1(%rdi),%rdi
- addl %r14d,%r10d
+ leaq 4(%rbp),%rbp
movl 44(%rsp),%r13d
- movl 32(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 32(%rsp),%r15d
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%r10d
+ movl %r15d,%r14d
+ rorl $2,%r15d
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 12(%rsp),%r12d
-
- rorl $2,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ xorl %r13d,%r12d
+ xorl %r14d,%r15d
+ addl 12(%rsp),%r12d
addl 40(%rsp),%r12d
movl %ecx,%r13d
- addl %r14d,%r12d
+ addl %r15d,%r12d
movl %r10d,%r14d
rorl $14,%r13d
movl %edx,%r15d
- movl %r12d,40(%rsp)
- rorl $9,%r14d
xorl %ecx,%r13d
+ rorl $9,%r14d
xorl %r8d,%r15d
- rorl $5,%r13d
- addl %r9d,%r12d
+ movl %r12d,40(%rsp)
xorl %r10d,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %ecx,%r15d
- movl %r11d,%r9d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r8d,%r15d
rorl $11,%r14d
xorl %ecx,%r13d
- xorl %r8d,%r15d
+ addl %r15d,%r12d
- xorl %eax,%r9d
+ movl %r10d,%r15d
+ addl (%rbp),%r12d
xorl %r10d,%r14d
- addl %r15d,%r12d
- movl %r11d,%r15d
+ xorl %r11d,%r15d
rorl $6,%r13d
- andl %r10d,%r9d
- andl %eax,%r15d
+ movl %r11d,%r9d
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r9d
+ xorl %edi,%r9d
addl %r12d,%ebx
addl %r12d,%r9d
- leaq 1(%rdi),%rdi
- addl %r14d,%r9d
+ leaq 4(%rbp),%rbp
movl 48(%rsp),%r13d
- movl 36(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 36(%rsp),%edi
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%r9d
+ movl %edi,%r14d
+ rorl $2,%edi
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 16(%rsp),%r12d
-
- rorl $2,%r15d
- xorl %r14d,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
+ xorl %r14d,%edi
shrl $10,%r14d
- rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ rorl $17,%edi
+ xorl %r13d,%r12d
+ xorl %r14d,%edi
+ addl 16(%rsp),%r12d
addl 44(%rsp),%r12d
movl %ebx,%r13d
- addl %r14d,%r12d
+ addl %edi,%r12d
movl %r9d,%r14d
rorl $14,%r13d
- movl %ecx,%r15d
- movl %r12d,44(%rsp)
+ movl %ecx,%edi
- rorl $9,%r14d
xorl %ebx,%r13d
- xorl %edx,%r15d
+ rorl $9,%r14d
+ xorl %edx,%edi
- rorl $5,%r13d
- addl %r8d,%r12d
+ movl %r12d,44(%rsp)
xorl %r9d,%r14d
+ andl %ebx,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %ebx,%r15d
- movl %r10d,%r8d
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %edx,%edi
rorl $11,%r14d
xorl %ebx,%r13d
- xorl %edx,%r15d
+ addl %edi,%r12d
- xorl %r11d,%r8d
+ movl %r9d,%edi
+ addl (%rbp),%r12d
xorl %r9d,%r14d
- addl %r15d,%r12d
- movl %r10d,%r15d
+ xorl %r10d,%edi
rorl $6,%r13d
- andl %r9d,%r8d
- andl %r11d,%r15d
+ movl %r10d,%r8d
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%r8d
+ xorl %r15d,%r8d
addl %r12d,%eax
addl %r12d,%r8d
- leaq 1(%rdi),%rdi
- addl %r14d,%r8d
+ leaq 20(%rbp),%rbp
movl 52(%rsp),%r13d
- movl 40(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 40(%rsp),%r15d
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%r8d
+ movl %r15d,%r14d
+ rorl $2,%r15d
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 20(%rsp),%r12d
-
- rorl $2,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ xorl %r13d,%r12d
+ xorl %r14d,%r15d
+ addl 20(%rsp),%r12d
addl 48(%rsp),%r12d
movl %eax,%r13d
- addl %r14d,%r12d
+ addl %r15d,%r12d
movl %r8d,%r14d
rorl $14,%r13d
movl %ebx,%r15d
- movl %r12d,48(%rsp)
- rorl $9,%r14d
xorl %eax,%r13d
+ rorl $9,%r14d
xorl %ecx,%r15d
- rorl $5,%r13d
- addl %edx,%r12d
+ movl %r12d,48(%rsp)
xorl %r8d,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %eax,%r15d
- movl %r9d,%edx
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %ecx,%r15d
rorl $11,%r14d
xorl %eax,%r13d
- xorl %ecx,%r15d
+ addl %r15d,%r12d
- xorl %r10d,%edx
+ movl %r8d,%r15d
+ addl (%rbp),%r12d
xorl %r8d,%r14d
- addl %r15d,%r12d
- movl %r9d,%r15d
+ xorl %r9d,%r15d
rorl $6,%r13d
- andl %r8d,%edx
- andl %r10d,%r15d
+ movl %r9d,%edx
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%edx
+ xorl %edi,%edx
addl %r12d,%r11d
addl %r12d,%edx
- leaq 1(%rdi),%rdi
- addl %r14d,%edx
+ leaq 4(%rbp),%rbp
movl 56(%rsp),%r13d
- movl 44(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 44(%rsp),%edi
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%edx
+ movl %edi,%r14d
+ rorl $2,%edi
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 24(%rsp),%r12d
-
- rorl $2,%r15d
- xorl %r14d,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
+ xorl %r14d,%edi
shrl $10,%r14d
- rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ rorl $17,%edi
+ xorl %r13d,%r12d
+ xorl %r14d,%edi
+ addl 24(%rsp),%r12d
addl 52(%rsp),%r12d
movl %r11d,%r13d
- addl %r14d,%r12d
+ addl %edi,%r12d
movl %edx,%r14d
rorl $14,%r13d
- movl %eax,%r15d
- movl %r12d,52(%rsp)
+ movl %eax,%edi
- rorl $9,%r14d
xorl %r11d,%r13d
- xorl %ebx,%r15d
+ rorl $9,%r14d
+ xorl %ebx,%edi
- rorl $5,%r13d
- addl %ecx,%r12d
+ movl %r12d,52(%rsp)
xorl %edx,%r14d
+ andl %r11d,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %r11d,%r15d
- movl %r8d,%ecx
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %ebx,%edi
rorl $11,%r14d
xorl %r11d,%r13d
- xorl %ebx,%r15d
+ addl %edi,%r12d
- xorl %r9d,%ecx
+ movl %edx,%edi
+ addl (%rbp),%r12d
xorl %edx,%r14d
- addl %r15d,%r12d
- movl %r8d,%r15d
+ xorl %r8d,%edi
rorl $6,%r13d
- andl %edx,%ecx
- andl %r9d,%r15d
+ movl %r8d,%ecx
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%ecx
+ xorl %r15d,%ecx
addl %r12d,%r10d
addl %r12d,%ecx
- leaq 1(%rdi),%rdi
- addl %r14d,%ecx
+ leaq 4(%rbp),%rbp
movl 60(%rsp),%r13d
- movl 48(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 48(%rsp),%r15d
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%ecx
+ movl %r15d,%r14d
+ rorl $2,%r15d
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 28(%rsp),%r12d
-
- rorl $2,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
xorl %r14d,%r15d
shrl $10,%r14d
rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ xorl %r13d,%r12d
+ xorl %r14d,%r15d
+ addl 28(%rsp),%r12d
addl 56(%rsp),%r12d
movl %r10d,%r13d
- addl %r14d,%r12d
+ addl %r15d,%r12d
movl %ecx,%r14d
rorl $14,%r13d
movl %r11d,%r15d
- movl %r12d,56(%rsp)
- rorl $9,%r14d
xorl %r10d,%r13d
+ rorl $9,%r14d
xorl %eax,%r15d
- rorl $5,%r13d
- addl %ebx,%r12d
+ movl %r12d,56(%rsp)
xorl %ecx,%r14d
-
- addl (%rbp,%rdi,4),%r12d
andl %r10d,%r15d
- movl %edx,%ebx
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %eax,%r15d
rorl $11,%r14d
xorl %r10d,%r13d
- xorl %eax,%r15d
+ addl %r15d,%r12d
- xorl %r8d,%ebx
+ movl %ecx,%r15d
+ addl (%rbp),%r12d
xorl %ecx,%r14d
- addl %r15d,%r12d
- movl %edx,%r15d
+ xorl %edx,%r15d
rorl $6,%r13d
- andl %ecx,%ebx
- andl %r8d,%r15d
+ movl %edx,%ebx
+ andl %r15d,%edi
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%ebx
+ xorl %edi,%ebx
addl %r12d,%r9d
addl %r12d,%ebx
- leaq 1(%rdi),%rdi
- addl %r14d,%ebx
+ leaq 4(%rbp),%rbp
movl 0(%rsp),%r13d
- movl 52(%rsp),%r14d
- movl %r13d,%r12d
- movl %r14d,%r15d
+ movl 52(%rsp),%edi
- rorl $11,%r12d
- xorl %r13d,%r12d
- shrl $3,%r13d
+ movl %r13d,%r12d
+ rorl $11,%r13d
+ addl %r14d,%ebx
+ movl %edi,%r14d
+ rorl $2,%edi
- rorl $7,%r12d
xorl %r12d,%r13d
- movl 32(%rsp),%r12d
-
- rorl $2,%r15d
- xorl %r14d,%r15d
+ shrl $3,%r12d
+ rorl $7,%r13d
+ xorl %r14d,%edi
shrl $10,%r14d
- rorl $17,%r15d
- addl %r13d,%r12d
- xorl %r15d,%r14d
+ rorl $17,%edi
+ xorl %r13d,%r12d
+ xorl %r14d,%edi
+ addl 32(%rsp),%r12d
addl 60(%rsp),%r12d
movl %r9d,%r13d
- addl %r14d,%r12d
+ addl %edi,%r12d
movl %ebx,%r14d
rorl $14,%r13d
- movl %r10d,%r15d
- movl %r12d,60(%rsp)
+ movl %r10d,%edi
- rorl $9,%r14d
xorl %r9d,%r13d
- xorl %r11d,%r15d
+ rorl $9,%r14d
+ xorl %r11d,%edi
- rorl $5,%r13d
- addl %eax,%r12d
+ movl %r12d,60(%rsp)
xorl %ebx,%r14d
+ andl %r9d,%edi
- addl (%rbp,%rdi,4),%r12d
- andl %r9d,%r15d
- movl %ecx,%eax
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %r11d,%edi
rorl $11,%r14d
xorl %r9d,%r13d
- xorl %r11d,%r15d
+ addl %edi,%r12d
- xorl %edx,%eax
+ movl %ebx,%edi
+ addl (%rbp),%r12d
xorl %ebx,%r14d
- addl %r15d,%r12d
- movl %ecx,%r15d
+ xorl %ecx,%edi
rorl $6,%r13d
- andl %ebx,%eax
- andl %edx,%r15d
+ movl %ecx,%eax
+ andl %edi,%r15d
rorl $2,%r14d
addl %r13d,%r12d
- addl %r15d,%eax
+ xorl %r15d,%eax
addl %r12d,%r8d
addl %r12d,%eax
- leaq 1(%rdi),%rdi
- addl %r14d,%eax
- cmpq $64,%rdi
- jb .Lrounds_16_xx
+ leaq 20(%rbp),%rbp
+ cmpb $0,3(%rbp)
+ jnz .Lrounds_16_xx
movq 64+0(%rsp),%rdi
+ addl %r14d,%eax
leaq 64(%rsi),%rsi
addl 0(%rdi),%eax
@@ -1762,18 +1707,1344 @@ sha256_block_data_order:
.type K256,@object
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
+.long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
+.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
+.long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
+.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
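
The rewritten scalar rounds above amount to three changes. First, the round
counter in %rdi and the indexed table loads (addl (%rbp,%rdi,4),%r12d) are
gone: %rbp now walks the K256 table directly, 4 bytes per round with a
20-byte hop every fourth round, and the loop ends when cmpb $0,3(%rbp) sees
the zero high byte of the shuffle-mask data that follows the constants
(every K256 word has a non-zero top byte). Second, freeing %rdi lets the
code carry a Maj() partial term across rounds in %edi/%r15d. Third, Maj
itself moves from two ANDs plus an ADD to the xor form
Maj(a,b,c) = b ^ ((a ^ b) & (b ^ c)), whose (a ^ b) term one round can hand
to the next, since the working variables rotate. A minimal C sketch of one
round in this shape (illustrative only, not OpenSSL source; kw stands for
K[i]+W[i], which the assembly adds in separate steps):

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

static void sha256_round(uint32_t s[8], uint32_t kw)
{
    uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
    uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

    /* Sigma1/Sigma0 as the nested rotate-xor chains used above
     * (rorl $14,$5,$6 and rorl $9,$11,$2). */
    uint32_t S1  = rotr32(rotr32(rotr32(e, 14) ^ e, 5) ^ e, 6);  /* ROTR6^ROTR11^ROTR25 */
    uint32_t ch  = g ^ (e & (f ^ g));                            /* Ch(e,f,g) */
    uint32_t t1  = h + S1 + ch + kw;
    uint32_t S0  = rotr32(rotr32(rotr32(a, 9) ^ a, 11) ^ a, 2);  /* ROTR2^ROTR13^ROTR22 */
    uint32_t maj = b ^ ((a ^ b) & (b ^ c));                      /* == (a&b)^(a&c)^(b&c) */

    s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
    s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + S0 + maj;
}

The duplicated .long rows above (each group of four constants emitted twice)
appear to exist for wider SIMD loads; the scalar walk simply strides over
the copies, which is where the 4/20 leaq pattern comes from.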
+.type sha256_block_data_order_shaext,@function
+.align 64
+sha256_block_data_order_shaext:
+_shaext_shortcut:
+ leaq K256+128(%rip),%rcx
+ movdqu (%rdi),%xmm1
+ movdqu 16(%rdi),%xmm2
+ movdqa 512-128(%rcx),%xmm7
+
+ pshufd $27,%xmm1,%xmm0
+ pshufd $177,%xmm1,%xmm1
+ pshufd $27,%xmm2,%xmm2
+ movdqa %xmm7,%xmm8
+.byte 102,15,58,15,202,8
+ punpcklqdq %xmm0,%xmm2
+ jmp .Loop_shaext
+
+.align 16
+.Loop_shaext:
+ movdqu (%rsi),%xmm3
+ movdqu 16(%rsi),%xmm4
+ movdqu 32(%rsi),%xmm5
+.byte 102,15,56,0,223
+ movdqu 48(%rsi),%xmm6
+
+ movdqa 0-128(%rcx),%xmm0
+ paddd %xmm3,%xmm0
+.byte 102,15,56,0,231
+ movdqa %xmm2,%xmm10
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ nop
+ movdqa %xmm1,%xmm9
+.byte 15,56,203,202
+
+ movdqa 32-128(%rcx),%xmm0
+ paddd %xmm4,%xmm0
+.byte 102,15,56,0,239
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ leaq 64(%rsi),%rsi
+.byte 15,56,204,220
+.byte 15,56,203,202
+
+ movdqa 64-128(%rcx),%xmm0
+ paddd %xmm5,%xmm0
+.byte 102,15,56,0,247
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm6,%xmm7
+.byte 102,15,58,15,253,4
+ nop
+ paddd %xmm7,%xmm3
+.byte 15,56,204,229
+.byte 15,56,203,202
+
+ movdqa 96-128(%rcx),%xmm0
+ paddd %xmm6,%xmm0
+.byte 15,56,205,222
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm3,%xmm7
+.byte 102,15,58,15,254,4
+ nop
+ paddd %xmm7,%xmm4
+.byte 15,56,204,238
+.byte 15,56,203,202
+ movdqa 128-128(%rcx),%xmm0
+ paddd %xmm3,%xmm0
+.byte 15,56,205,227
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm4,%xmm7
+.byte 102,15,58,15,251,4
+ nop
+ paddd %xmm7,%xmm5
+.byte 15,56,204,243
+.byte 15,56,203,202
+ movdqa 160-128(%rcx),%xmm0
+ paddd %xmm4,%xmm0
+.byte 15,56,205,236
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm5,%xmm7
+.byte 102,15,58,15,252,4
+ nop
+ paddd %xmm7,%xmm6
+.byte 15,56,204,220
+.byte 15,56,203,202
+ movdqa 192-128(%rcx),%xmm0
+ paddd %xmm5,%xmm0
+.byte 15,56,205,245
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm6,%xmm7
+.byte 102,15,58,15,253,4
+ nop
+ paddd %xmm7,%xmm3
+.byte 15,56,204,229
+.byte 15,56,203,202
+ movdqa 224-128(%rcx),%xmm0
+ paddd %xmm6,%xmm0
+.byte 15,56,205,222
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm3,%xmm7
+.byte 102,15,58,15,254,4
+ nop
+ paddd %xmm7,%xmm4
+.byte 15,56,204,238
+.byte 15,56,203,202
+ movdqa 256-128(%rcx),%xmm0
+ paddd %xmm3,%xmm0
+.byte 15,56,205,227
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm4,%xmm7
+.byte 102,15,58,15,251,4
+ nop
+ paddd %xmm7,%xmm5
+.byte 15,56,204,243
+.byte 15,56,203,202
+ movdqa 288-128(%rcx),%xmm0
+ paddd %xmm4,%xmm0
+.byte 15,56,205,236
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm5,%xmm7
+.byte 102,15,58,15,252,4
+ nop
+ paddd %xmm7,%xmm6
+.byte 15,56,204,220
+.byte 15,56,203,202
+ movdqa 320-128(%rcx),%xmm0
+ paddd %xmm5,%xmm0
+.byte 15,56,205,245
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm6,%xmm7
+.byte 102,15,58,15,253,4
+ nop
+ paddd %xmm7,%xmm3
+.byte 15,56,204,229
+.byte 15,56,203,202
+ movdqa 352-128(%rcx),%xmm0
+ paddd %xmm6,%xmm0
+.byte 15,56,205,222
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm3,%xmm7
+.byte 102,15,58,15,254,4
+ nop
+ paddd %xmm7,%xmm4
+.byte 15,56,204,238
+.byte 15,56,203,202
+ movdqa 384-128(%rcx),%xmm0
+ paddd %xmm3,%xmm0
+.byte 15,56,205,227
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm4,%xmm7
+.byte 102,15,58,15,251,4
+ nop
+ paddd %xmm7,%xmm5
+.byte 15,56,204,243
+.byte 15,56,203,202
+ movdqa 416-128(%rcx),%xmm0
+ paddd %xmm4,%xmm0
+.byte 15,56,205,236
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm5,%xmm7
+.byte 102,15,58,15,252,4
+.byte 15,56,203,202
+ paddd %xmm7,%xmm6
+
+ movdqa 448-128(%rcx),%xmm0
+ paddd %xmm5,%xmm0
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+.byte 15,56,205,245
+ movdqa %xmm8,%xmm7
+.byte 15,56,203,202
+
+ movdqa 480-128(%rcx),%xmm0
+ paddd %xmm6,%xmm0
+ nop
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ decq %rdx
+ nop
+.byte 15,56,203,202
+
+ paddd %xmm10,%xmm2
+ paddd %xmm9,%xmm1
+ jnz .Loop_shaext
+
+ pshufd $177,%xmm2,%xmm2
+ pshufd $27,%xmm1,%xmm7
+ pshufd $177,%xmm1,%xmm1
+ punpckhqdq %xmm2,%xmm1
+.byte 102,15,58,15,215,8
+
+ movdqu %xmm1,(%rdi)
+ movdqu %xmm2,16(%rdi)
+ .byte 0xf3,0xc3
+.size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext
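
sha256_block_data_order_shaext above is the new Intel SHA-extensions path.
The raw .byte sequences encode instructions that older assemblers did not
accept: 15,56,203 is sha256rnds2, 15,56,204 is sha256msg1, 15,56,205 is
sha256msg2, and 102,15,58,15 is palignr. A hedged intrinsics sketch of the
pattern those opcodes implement (an illustration, not the generated code;
build with -msha):

#include <immintrin.h>

/* Four rounds: sha256rnds2 consumes the low qword of wk for rounds i,i+1;
 * pshufd $14 (0x0e) moves the high qword down for rounds i+2,i+3 -- the
 * same .byte 15,56,203 / pshufd pairing as above.  st0/st1 hold the eight
 * chaining words in the shuffled ABEF/CDGH order the instructions expect. */
static inline void sha256ni_4rounds(__m128i *st0, __m128i *st1, __m128i wk)
{
    *st1 = _mm_sha256rnds2_epu32(*st1, *st0, wk);
    wk   = _mm_shuffle_epi32(wk, 0x0e);
    *st0 = _mm_sha256rnds2_epu32(*st0, *st1, wk);
}

/* Schedule step: the next four W words from the previous sixteen
 * (the sha256msg1 / palignr+paddd / sha256msg2 triplets above). */
static inline __m128i sha256ni_sched(__m128i w0, __m128i w1, __m128i w2, __m128i w3)
{
    __m128i t = _mm_sha256msg1_epu32(w0, w1);          /* sigma0 contribution */
    t = _mm_add_epi32(t, _mm_alignr_epi8(w3, w2, 4));  /* + W[i-7] terms */
    return _mm_sha256msg2_epu32(t, w3);                /* sigma1 contribution */
}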
+.type sha256_block_data_order_ssse3,@function
+.align 64
+sha256_block_data_order_ssse3:
+.Lssse3_shortcut:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ movq %rsp,%r11
+ shlq $4,%rdx
+ subq $96,%rsp
+ leaq (%rsi,%rdx,4),%rdx
+ andq $-64,%rsp
+ movq %rdi,64+0(%rsp)
+ movq %rsi,64+8(%rsp)
+ movq %rdx,64+16(%rsp)
+ movq %r11,64+24(%rsp)
+.Lprologue_ssse3:
+
+ movl 0(%rdi),%eax
+ movl 4(%rdi),%ebx
+ movl 8(%rdi),%ecx
+ movl 12(%rdi),%edx
+ movl 16(%rdi),%r8d
+ movl 20(%rdi),%r9d
+ movl 24(%rdi),%r10d
+ movl 28(%rdi),%r11d
+
+
+ jmp .Lloop_ssse3
+.align 16
+.Lloop_ssse3:
+ movdqa K256+512(%rip),%xmm7
+ movdqu 0(%rsi),%xmm0
+ movdqu 16(%rsi),%xmm1
+ movdqu 32(%rsi),%xmm2
+.byte 102,15,56,0,199
+ movdqu 48(%rsi),%xmm3
+ leaq K256(%rip),%rbp
+.byte 102,15,56,0,207
+ movdqa 0(%rbp),%xmm4
+ movdqa 32(%rbp),%xmm5
+.byte 102,15,56,0,215
+ paddd %xmm0,%xmm4
+ movdqa 64(%rbp),%xmm6
+.byte 102,15,56,0,223
+ movdqa 96(%rbp),%xmm7
+ paddd %xmm1,%xmm5
+ paddd %xmm2,%xmm6
+ paddd %xmm3,%xmm7
+ movdqa %xmm4,0(%rsp)
+ movl %eax,%r14d
+ movdqa %xmm5,16(%rsp)
+ movl %ebx,%edi
+ movdqa %xmm6,32(%rsp)
+ xorl %ecx,%edi
+ movdqa %xmm7,48(%rsp)
+ movl %r8d,%r13d
+ jmp .Lssse3_00_47
+
+.align 16
+.Lssse3_00_47:
+ subq $-128,%rbp
+ rorl $14,%r13d
+ movdqa %xmm1,%xmm4
+ movl %r14d,%eax
+ movl %r9d,%r12d
+ movdqa %xmm3,%xmm7
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r12d
+ rorl $5,%r13d
+ xorl %eax,%r14d
+.byte 102,15,58,15,224,4
+ andl %r8d,%r12d
+ xorl %r8d,%r13d
+.byte 102,15,58,15,250,4
+ addl 0(%rsp),%r11d
+ movl %eax,%r15d
+ xorl %r10d,%r12d
+ rorl $11,%r14d
+ movdqa %xmm4,%xmm5
+ xorl %ebx,%r15d
+ addl %r12d,%r11d
+ movdqa %xmm4,%xmm6
+ rorl $6,%r13d
+ andl %r15d,%edi
+ psrld $3,%xmm4
+ xorl %eax,%r14d
+ addl %r13d,%r11d
+ xorl %ebx,%edi
+ paddd %xmm7,%xmm0
+ rorl $2,%r14d
+ addl %r11d,%edx
+ psrld $7,%xmm6
+ addl %edi,%r11d
+ movl %edx,%r13d
+ pshufd $250,%xmm3,%xmm7
+ addl %r11d,%r14d
+ rorl $14,%r13d
+ pslld $14,%xmm5
+ movl %r14d,%r11d
+ movl %r8d,%r12d
+ pxor %xmm6,%xmm4
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r12d
+ rorl $5,%r13d
+ psrld $11,%xmm6
+ xorl %r11d,%r14d
+ pxor %xmm5,%xmm4
+ andl %edx,%r12d
+ xorl %edx,%r13d
+ pslld $11,%xmm5
+ addl 4(%rsp),%r10d
+ movl %r11d,%edi
+ pxor %xmm6,%xmm4
+ xorl %r9d,%r12d
+ rorl $11,%r14d
+ movdqa %xmm7,%xmm6
+ xorl %eax,%edi
+ addl %r12d,%r10d
+ pxor %xmm5,%xmm4
+ rorl $6,%r13d
+ andl %edi,%r15d
+ xorl %r11d,%r14d
+ psrld $10,%xmm7
+ addl %r13d,%r10d
+ xorl %eax,%r15d
+ paddd %xmm4,%xmm0
+ rorl $2,%r14d
+ addl %r10d,%ecx
+ psrlq $17,%xmm6
+ addl %r15d,%r10d
+ movl %ecx,%r13d
+ addl %r10d,%r14d
+ pxor %xmm6,%xmm7
+ rorl $14,%r13d
+ movl %r14d,%r10d
+ movl %edx,%r12d
+ rorl $9,%r14d
+ psrlq $2,%xmm6
+ xorl %ecx,%r13d
+ xorl %r8d,%r12d
+ pxor %xmm6,%xmm7
+ rorl $5,%r13d
+ xorl %r10d,%r14d
+ andl %ecx,%r12d
+ pshufd $128,%xmm7,%xmm7
+ xorl %ecx,%r13d
+ addl 8(%rsp),%r9d
+ movl %r10d,%r15d
+ psrldq $8,%xmm7
+ xorl %r8d,%r12d
+ rorl $11,%r14d
+ xorl %r11d,%r15d
+ addl %r12d,%r9d
+ rorl $6,%r13d
+ paddd %xmm7,%xmm0
+ andl %r15d,%edi
+ xorl %r10d,%r14d
+ addl %r13d,%r9d
+ pshufd $80,%xmm0,%xmm7
+ xorl %r11d,%edi
+ rorl $2,%r14d
+ addl %r9d,%ebx
+ movdqa %xmm7,%xmm6
+ addl %edi,%r9d
+ movl %ebx,%r13d
+ psrld $10,%xmm7
+ addl %r9d,%r14d
+ rorl $14,%r13d
+ psrlq $17,%xmm6
+ movl %r14d,%r9d
+ movl %ecx,%r12d
+ pxor %xmm6,%xmm7
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r12d
+ rorl $5,%r13d
+ xorl %r9d,%r14d
+ psrlq $2,%xmm6
+ andl %ebx,%r12d
+ xorl %ebx,%r13d
+ addl 12(%rsp),%r8d
+ pxor %xmm6,%xmm7
+ movl %r9d,%edi
+ xorl %edx,%r12d
+ rorl $11,%r14d
+ pshufd $8,%xmm7,%xmm7
+ xorl %r10d,%edi
+ addl %r12d,%r8d
+ movdqa 0(%rbp),%xmm6
+ rorl $6,%r13d
+ andl %edi,%r15d
+ pslldq $8,%xmm7
+ xorl %r9d,%r14d
+ addl %r13d,%r8d
+ xorl %r10d,%r15d
+ paddd %xmm7,%xmm0
+ rorl $2,%r14d
+ addl %r8d,%eax
+ addl %r15d,%r8d
+ paddd %xmm0,%xmm6
+ movl %eax,%r13d
+ addl %r8d,%r14d
+ movdqa %xmm6,0(%rsp)
+ rorl $14,%r13d
+ movdqa %xmm2,%xmm4
+ movl %r14d,%r8d
+ movl %ebx,%r12d
+ movdqa %xmm0,%xmm7
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r12d
+ rorl $5,%r13d
+ xorl %r8d,%r14d
+.byte 102,15,58,15,225,4
+ andl %eax,%r12d
+ xorl %eax,%r13d
+.byte 102,15,58,15,251,4
+ addl 16(%rsp),%edx
+ movl %r8d,%r15d
+ xorl %ecx,%r12d
+ rorl $11,%r14d
+ movdqa %xmm4,%xmm5
+ xorl %r9d,%r15d
+ addl %r12d,%edx
+ movdqa %xmm4,%xmm6
+ rorl $6,%r13d
+ andl %r15d,%edi
+ psrld $3,%xmm4
+ xorl %r8d,%r14d
+ addl %r13d,%edx
+ xorl %r9d,%edi
+ paddd %xmm7,%xmm1
+ rorl $2,%r14d
+ addl %edx,%r11d
+ psrld $7,%xmm6
+ addl %edi,%edx
+ movl %r11d,%r13d
+ pshufd $250,%xmm0,%xmm7
+ addl %edx,%r14d
+ rorl $14,%r13d
+ pslld $14,%xmm5
+ movl %r14d,%edx
+ movl %eax,%r12d
+ pxor %xmm6,%xmm4
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r12d
+ rorl $5,%r13d
+ psrld $11,%xmm6
+ xorl %edx,%r14d
+ pxor %xmm5,%xmm4
+ andl %r11d,%r12d
+ xorl %r11d,%r13d
+ pslld $11,%xmm5
+ addl 20(%rsp),%ecx
+ movl %edx,%edi
+ pxor %xmm6,%xmm4
+ xorl %ebx,%r12d
+ rorl $11,%r14d
+ movdqa %xmm7,%xmm6
+ xorl %r8d,%edi
+ addl %r12d,%ecx
+ pxor %xmm5,%xmm4
+ rorl $6,%r13d
+ andl %edi,%r15d
+ xorl %edx,%r14d
+ psrld $10,%xmm7
+ addl %r13d,%ecx
+ xorl %r8d,%r15d
+ paddd %xmm4,%xmm1
+ rorl $2,%r14d
+ addl %ecx,%r10d
+ psrlq $17,%xmm6
+ addl %r15d,%ecx
+ movl %r10d,%r13d
+ addl %ecx,%r14d
+ pxor %xmm6,%xmm7
+ rorl $14,%r13d
+ movl %r14d,%ecx
+ movl %r11d,%r12d
+ rorl $9,%r14d
+ psrlq $2,%xmm6
+ xorl %r10d,%r13d
+ xorl %eax,%r12d
+ pxor %xmm6,%xmm7
+ rorl $5,%r13d
+ xorl %ecx,%r14d
+ andl %r10d,%r12d
+ pshufd $128,%xmm7,%xmm7
+ xorl %r10d,%r13d
+ addl 24(%rsp),%ebx
+ movl %ecx,%r15d
+ psrldq $8,%xmm7
+ xorl %eax,%r12d
+ rorl $11,%r14d
+ xorl %edx,%r15d
+ addl %r12d,%ebx
+ rorl $6,%r13d
+ paddd %xmm7,%xmm1
+ andl %r15d,%edi
+ xorl %ecx,%r14d
+ addl %r13d,%ebx
+ pshufd $80,%xmm1,%xmm7
+ xorl %edx,%edi
+ rorl $2,%r14d
+ addl %ebx,%r9d
+ movdqa %xmm7,%xmm6
+ addl %edi,%ebx
+ movl %r9d,%r13d
+ psrld $10,%xmm7
+ addl %ebx,%r14d
+ rorl $14,%r13d
+ psrlq $17,%xmm6
+ movl %r14d,%ebx
+ movl %r10d,%r12d
+ pxor %xmm6,%xmm7
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r12d
+ rorl $5,%r13d
+ xorl %ebx,%r14d
+ psrlq $2,%xmm6
+ andl %r9d,%r12d
+ xorl %r9d,%r13d
+ addl 28(%rsp),%eax
+ pxor %xmm6,%xmm7
+ movl %ebx,%edi
+ xorl %r11d,%r12d
+ rorl $11,%r14d
+ pshufd $8,%xmm7,%xmm7
+ xorl %ecx,%edi
+ addl %r12d,%eax
+ movdqa 32(%rbp),%xmm6
+ rorl $6,%r13d
+ andl %edi,%r15d
+ pslldq $8,%xmm7
+ xorl %ebx,%r14d
+ addl %r13d,%eax
+ xorl %ecx,%r15d
+ paddd %xmm7,%xmm1
+ rorl $2,%r14d
+ addl %eax,%r8d
+ addl %r15d,%eax
+ paddd %xmm1,%xmm6
+ movl %r8d,%r13d
+ addl %eax,%r14d
+ movdqa %xmm6,16(%rsp)
+ rorl $14,%r13d
+ movdqa %xmm3,%xmm4
+ movl %r14d,%eax
+ movl %r9d,%r12d
+ movdqa %xmm1,%xmm7
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r12d
+ rorl $5,%r13d
+ xorl %eax,%r14d
+.byte 102,15,58,15,226,4
+ andl %r8d,%r12d
+ xorl %r8d,%r13d
+.byte 102,15,58,15,248,4
+ addl 32(%rsp),%r11d
+ movl %eax,%r15d
+ xorl %r10d,%r12d
+ rorl $11,%r14d
+ movdqa %xmm4,%xmm5
+ xorl %ebx,%r15d
+ addl %r12d,%r11d
+ movdqa %xmm4,%xmm6
+ rorl $6,%r13d
+ andl %r15d,%edi
+ psrld $3,%xmm4
+ xorl %eax,%r14d
+ addl %r13d,%r11d
+ xorl %ebx,%edi
+ paddd %xmm7,%xmm2
+ rorl $2,%r14d
+ addl %r11d,%edx
+ psrld $7,%xmm6
+ addl %edi,%r11d
+ movl %edx,%r13d
+ pshufd $250,%xmm1,%xmm7
+ addl %r11d,%r14d
+ rorl $14,%r13d
+ pslld $14,%xmm5
+ movl %r14d,%r11d
+ movl %r8d,%r12d
+ pxor %xmm6,%xmm4
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r12d
+ rorl $5,%r13d
+ psrld $11,%xmm6
+ xorl %r11d,%r14d
+ pxor %xmm5,%xmm4
+ andl %edx,%r12d
+ xorl %edx,%r13d
+ pslld $11,%xmm5
+ addl 36(%rsp),%r10d
+ movl %r11d,%edi
+ pxor %xmm6,%xmm4
+ xorl %r9d,%r12d
+ rorl $11,%r14d
+ movdqa %xmm7,%xmm6
+ xorl %eax,%edi
+ addl %r12d,%r10d
+ pxor %xmm5,%xmm4
+ rorl $6,%r13d
+ andl %edi,%r15d
+ xorl %r11d,%r14d
+ psrld $10,%xmm7
+ addl %r13d,%r10d
+ xorl %eax,%r15d
+ paddd %xmm4,%xmm2
+ rorl $2,%r14d
+ addl %r10d,%ecx
+ psrlq $17,%xmm6
+ addl %r15d,%r10d
+ movl %ecx,%r13d
+ addl %r10d,%r14d
+ pxor %xmm6,%xmm7
+ rorl $14,%r13d
+ movl %r14d,%r10d
+ movl %edx,%r12d
+ rorl $9,%r14d
+ psrlq $2,%xmm6
+ xorl %ecx,%r13d
+ xorl %r8d,%r12d
+ pxor %xmm6,%xmm7
+ rorl $5,%r13d
+ xorl %r10d,%r14d
+ andl %ecx,%r12d
+ pshufd $128,%xmm7,%xmm7
+ xorl %ecx,%r13d
+ addl 40(%rsp),%r9d
+ movl %r10d,%r15d
+ psrldq $8,%xmm7
+ xorl %r8d,%r12d
+ rorl $11,%r14d
+ xorl %r11d,%r15d
+ addl %r12d,%r9d
+ rorl $6,%r13d
+ paddd %xmm7,%xmm2
+ andl %r15d,%edi
+ xorl %r10d,%r14d
+ addl %r13d,%r9d
+ pshufd $80,%xmm2,%xmm7
+ xorl %r11d,%edi
+ rorl $2,%r14d
+ addl %r9d,%ebx
+ movdqa %xmm7,%xmm6
+ addl %edi,%r9d
+ movl %ebx,%r13d
+ psrld $10,%xmm7
+ addl %r9d,%r14d
+ rorl $14,%r13d
+ psrlq $17,%xmm6
+ movl %r14d,%r9d
+ movl %ecx,%r12d
+ pxor %xmm6,%xmm7
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r12d
+ rorl $5,%r13d
+ xorl %r9d,%r14d
+ psrlq $2,%xmm6
+ andl %ebx,%r12d
+ xorl %ebx,%r13d
+ addl 44(%rsp),%r8d
+ pxor %xmm6,%xmm7
+ movl %r9d,%edi
+ xorl %edx,%r12d
+ rorl $11,%r14d
+ pshufd $8,%xmm7,%xmm7
+ xorl %r10d,%edi
+ addl %r12d,%r8d
+ movdqa 64(%rbp),%xmm6
+ rorl $6,%r13d
+ andl %edi,%r15d
+ pslldq $8,%xmm7
+ xorl %r9d,%r14d
+ addl %r13d,%r8d
+ xorl %r10d,%r15d
+ paddd %xmm7,%xmm2
+ rorl $2,%r14d
+ addl %r8d,%eax
+ addl %r15d,%r8d
+ paddd %xmm2,%xmm6
+ movl %eax,%r13d
+ addl %r8d,%r14d
+ movdqa %xmm6,32(%rsp)
+ rorl $14,%r13d
+ movdqa %xmm0,%xmm4
+ movl %r14d,%r8d
+ movl %ebx,%r12d
+ movdqa %xmm2,%xmm7
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r12d
+ rorl $5,%r13d
+ xorl %r8d,%r14d
+.byte 102,15,58,15,227,4
+ andl %eax,%r12d
+ xorl %eax,%r13d
+.byte 102,15,58,15,249,4
+ addl 48(%rsp),%edx
+ movl %r8d,%r15d
+ xorl %ecx,%r12d
+ rorl $11,%r14d
+ movdqa %xmm4,%xmm5
+ xorl %r9d,%r15d
+ addl %r12d,%edx
+ movdqa %xmm4,%xmm6
+ rorl $6,%r13d
+ andl %r15d,%edi
+ psrld $3,%xmm4
+ xorl %r8d,%r14d
+ addl %r13d,%edx
+ xorl %r9d,%edi
+ paddd %xmm7,%xmm3
+ rorl $2,%r14d
+ addl %edx,%r11d
+ psrld $7,%xmm6
+ addl %edi,%edx
+ movl %r11d,%r13d
+ pshufd $250,%xmm2,%xmm7
+ addl %edx,%r14d
+ rorl $14,%r13d
+ pslld $14,%xmm5
+ movl %r14d,%edx
+ movl %eax,%r12d
+ pxor %xmm6,%xmm4
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r12d
+ rorl $5,%r13d
+ psrld $11,%xmm6
+ xorl %edx,%r14d
+ pxor %xmm5,%xmm4
+ andl %r11d,%r12d
+ xorl %r11d,%r13d
+ pslld $11,%xmm5
+ addl 52(%rsp),%ecx
+ movl %edx,%edi
+ pxor %xmm6,%xmm4
+ xorl %ebx,%r12d
+ rorl $11,%r14d
+ movdqa %xmm7,%xmm6
+ xorl %r8d,%edi
+ addl %r12d,%ecx
+ pxor %xmm5,%xmm4
+ rorl $6,%r13d
+ andl %edi,%r15d
+ xorl %edx,%r14d
+ psrld $10,%xmm7
+ addl %r13d,%ecx
+ xorl %r8d,%r15d
+ paddd %xmm4,%xmm3
+ rorl $2,%r14d
+ addl %ecx,%r10d
+ psrlq $17,%xmm6
+ addl %r15d,%ecx
+ movl %r10d,%r13d
+ addl %ecx,%r14d
+ pxor %xmm6,%xmm7
+ rorl $14,%r13d
+ movl %r14d,%ecx
+ movl %r11d,%r12d
+ rorl $9,%r14d
+ psrlq $2,%xmm6
+ xorl %r10d,%r13d
+ xorl %eax,%r12d
+ pxor %xmm6,%xmm7
+ rorl $5,%r13d
+ xorl %ecx,%r14d
+ andl %r10d,%r12d
+ pshufd $128,%xmm7,%xmm7
+ xorl %r10d,%r13d
+ addl 56(%rsp),%ebx
+ movl %ecx,%r15d
+ psrldq $8,%xmm7
+ xorl %eax,%r12d
+ rorl $11,%r14d
+ xorl %edx,%r15d
+ addl %r12d,%ebx
+ rorl $6,%r13d
+ paddd %xmm7,%xmm3
+ andl %r15d,%edi
+ xorl %ecx,%r14d
+ addl %r13d,%ebx
+ pshufd $80,%xmm3,%xmm7
+ xorl %edx,%edi
+ rorl $2,%r14d
+ addl %ebx,%r9d
+ movdqa %xmm7,%xmm6
+ addl %edi,%ebx
+ movl %r9d,%r13d
+ psrld $10,%xmm7
+ addl %ebx,%r14d
+ rorl $14,%r13d
+ psrlq $17,%xmm6
+ movl %r14d,%ebx
+ movl %r10d,%r12d
+ pxor %xmm6,%xmm7
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r12d
+ rorl $5,%r13d
+ xorl %ebx,%r14d
+ psrlq $2,%xmm6
+ andl %r9d,%r12d
+ xorl %r9d,%r13d
+ addl 60(%rsp),%eax
+ pxor %xmm6,%xmm7
+ movl %ebx,%edi
+ xorl %r11d,%r12d
+ rorl $11,%r14d
+ pshufd $8,%xmm7,%xmm7
+ xorl %ecx,%edi
+ addl %r12d,%eax
+ movdqa 96(%rbp),%xmm6
+ rorl $6,%r13d
+ andl %edi,%r15d
+ pslldq $8,%xmm7
+ xorl %ebx,%r14d
+ addl %r13d,%eax
+ xorl %ecx,%r15d
+ paddd %xmm7,%xmm3
+ rorl $2,%r14d
+ addl %eax,%r8d
+ addl %r15d,%eax
+ paddd %xmm3,%xmm6
+ movl %r8d,%r13d
+ addl %eax,%r14d
+ movdqa %xmm6,48(%rsp)
+ cmpb $0,131(%rbp)
+ jne .Lssse3_00_47
+ rorl $14,%r13d
+ movl %r14d,%eax
+ movl %r9d,%r12d
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r12d
+ rorl $5,%r13d
+ xorl %eax,%r14d
+ andl %r8d,%r12d
+ xorl %r8d,%r13d
+ addl 0(%rsp),%r11d
+ movl %eax,%r15d
+ xorl %r10d,%r12d
+ rorl $11,%r14d
+ xorl %ebx,%r15d
+ addl %r12d,%r11d
+ rorl $6,%r13d
+ andl %r15d,%edi
+ xorl %eax,%r14d
+ addl %r13d,%r11d
+ xorl %ebx,%edi
+ rorl $2,%r14d
+ addl %r11d,%edx
+ addl %edi,%r11d
+ movl %edx,%r13d
+ addl %r11d,%r14d
+ rorl $14,%r13d
+ movl %r14d,%r11d
+ movl %r8d,%r12d
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r12d
+ rorl $5,%r13d
+ xorl %r11d,%r14d
+ andl %edx,%r12d
+ xorl %edx,%r13d
+ addl 4(%rsp),%r10d
+ movl %r11d,%edi
+ xorl %r9d,%r12d
+ rorl $11,%r14d
+ xorl %eax,%edi
+ addl %r12d,%r10d
+ rorl $6,%r13d
+ andl %edi,%r15d
+ xorl %r11d,%r14d
+ addl %r13d,%r10d
+ xorl %eax,%r15d
+ rorl $2,%r14d
+ addl %r10d,%ecx
+ addl %r15d,%r10d
+ movl %ecx,%r13d
+ addl %r10d,%r14d
+ rorl $14,%r13d
+ movl %r14d,%r10d
+ movl %edx,%r12d
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r12d
+ rorl $5,%r13d
+ xorl %r10d,%r14d
+ andl %ecx,%r12d
+ xorl %ecx,%r13d
+ addl 8(%rsp),%r9d
+ movl %r10d,%r15d
+ xorl %r8d,%r12d
+ rorl $11,%r14d
+ xorl %r11d,%r15d
+ addl %r12d,%r9d
+ rorl $6,%r13d
+ andl %r15d,%edi
+ xorl %r10d,%r14d
+ addl %r13d,%r9d
+ xorl %r11d,%edi
+ rorl $2,%r14d
+ addl %r9d,%ebx
+ addl %edi,%r9d
+ movl %ebx,%r13d
+ addl %r9d,%r14d
+ rorl $14,%r13d
+ movl %r14d,%r9d
+ movl %ecx,%r12d
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r12d
+ rorl $5,%r13d
+ xorl %r9d,%r14d
+ andl %ebx,%r12d
+ xorl %ebx,%r13d
+ addl 12(%rsp),%r8d
+ movl %r9d,%edi
+ xorl %edx,%r12d
+ rorl $11,%r14d
+ xorl %r10d,%edi
+ addl %r12d,%r8d
+ rorl $6,%r13d
+ andl %edi,%r15d
+ xorl %r9d,%r14d
+ addl %r13d,%r8d
+ xorl %r10d,%r15d
+ rorl $2,%r14d
+ addl %r8d,%eax
+ addl %r15d,%r8d
+ movl %eax,%r13d
+ addl %r8d,%r14d
+ rorl $14,%r13d
+ movl %r14d,%r8d
+ movl %ebx,%r12d
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r12d
+ rorl $5,%r13d
+ xorl %r8d,%r14d
+ andl %eax,%r12d
+ xorl %eax,%r13d
+ addl 16(%rsp),%edx
+ movl %r8d,%r15d
+ xorl %ecx,%r12d
+ rorl $11,%r14d
+ xorl %r9d,%r15d
+ addl %r12d,%edx
+ rorl $6,%r13d
+ andl %r15d,%edi
+ xorl %r8d,%r14d
+ addl %r13d,%edx
+ xorl %r9d,%edi
+ rorl $2,%r14d
+ addl %edx,%r11d
+ addl %edi,%edx
+ movl %r11d,%r13d
+ addl %edx,%r14d
+ rorl $14,%r13d
+ movl %r14d,%edx
+ movl %eax,%r12d
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r12d
+ rorl $5,%r13d
+ xorl %edx,%r14d
+ andl %r11d,%r12d
+ xorl %r11d,%r13d
+ addl 20(%rsp),%ecx
+ movl %edx,%edi
+ xorl %ebx,%r12d
+ rorl $11,%r14d
+ xorl %r8d,%edi
+ addl %r12d,%ecx
+ rorl $6,%r13d
+ andl %edi,%r15d
+ xorl %edx,%r14d
+ addl %r13d,%ecx
+ xorl %r8d,%r15d
+ rorl $2,%r14d
+ addl %ecx,%r10d
+ addl %r15d,%ecx
+ movl %r10d,%r13d
+ addl %ecx,%r14d
+ rorl $14,%r13d
+ movl %r14d,%ecx
+ movl %r11d,%r12d
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r12d
+ rorl $5,%r13d
+ xorl %ecx,%r14d
+ andl %r10d,%r12d
+ xorl %r10d,%r13d
+ addl 24(%rsp),%ebx
+ movl %ecx,%r15d
+ xorl %eax,%r12d
+ rorl $11,%r14d
+ xorl %edx,%r15d
+ addl %r12d,%ebx
+ rorl $6,%r13d
+ andl %r15d,%edi
+ xorl %ecx,%r14d
+ addl %r13d,%ebx
+ xorl %edx,%edi
+ rorl $2,%r14d
+ addl %ebx,%r9d
+ addl %edi,%ebx
+ movl %r9d,%r13d
+ addl %ebx,%r14d
+ rorl $14,%r13d
+ movl %r14d,%ebx
+ movl %r10d,%r12d
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r12d
+ rorl $5,%r13d
+ xorl %ebx,%r14d
+ andl %r9d,%r12d
+ xorl %r9d,%r13d
+ addl 28(%rsp),%eax
+ movl %ebx,%edi
+ xorl %r11d,%r12d
+ rorl $11,%r14d
+ xorl %ecx,%edi
+ addl %r12d,%eax
+ rorl $6,%r13d
+ andl %edi,%r15d
+ xorl %ebx,%r14d
+ addl %r13d,%eax
+ xorl %ecx,%r15d
+ rorl $2,%r14d
+ addl %eax,%r8d
+ addl %r15d,%eax
+ movl %r8d,%r13d
+ addl %eax,%r14d
+ rorl $14,%r13d
+ movl %r14d,%eax
+ movl %r9d,%r12d
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r12d
+ rorl $5,%r13d
+ xorl %eax,%r14d
+ andl %r8d,%r12d
+ xorl %r8d,%r13d
+ addl 32(%rsp),%r11d
+ movl %eax,%r15d
+ xorl %r10d,%r12d
+ rorl $11,%r14d
+ xorl %ebx,%r15d
+ addl %r12d,%r11d
+ rorl $6,%r13d
+ andl %r15d,%edi
+ xorl %eax,%r14d
+ addl %r13d,%r11d
+ xorl %ebx,%edi
+ rorl $2,%r14d
+ addl %r11d,%edx
+ addl %edi,%r11d
+ movl %edx,%r13d
+ addl %r11d,%r14d
+ rorl $14,%r13d
+ movl %r14d,%r11d
+ movl %r8d,%r12d
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r12d
+ rorl $5,%r13d
+ xorl %r11d,%r14d
+ andl %edx,%r12d
+ xorl %edx,%r13d
+ addl 36(%rsp),%r10d
+ movl %r11d,%edi
+ xorl %r9d,%r12d
+ rorl $11,%r14d
+ xorl %eax,%edi
+ addl %r12d,%r10d
+ rorl $6,%r13d
+ andl %edi,%r15d
+ xorl %r11d,%r14d
+ addl %r13d,%r10d
+ xorl %eax,%r15d
+ rorl $2,%r14d
+ addl %r10d,%ecx
+ addl %r15d,%r10d
+ movl %ecx,%r13d
+ addl %r10d,%r14d
+ rorl $14,%r13d
+ movl %r14d,%r10d
+ movl %edx,%r12d
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r12d
+ rorl $5,%r13d
+ xorl %r10d,%r14d
+ andl %ecx,%r12d
+ xorl %ecx,%r13d
+ addl 40(%rsp),%r9d
+ movl %r10d,%r15d
+ xorl %r8d,%r12d
+ rorl $11,%r14d
+ xorl %r11d,%r15d
+ addl %r12d,%r9d
+ rorl $6,%r13d
+ andl %r15d,%edi
+ xorl %r10d,%r14d
+ addl %r13d,%r9d
+ xorl %r11d,%edi
+ rorl $2,%r14d
+ addl %r9d,%ebx
+ addl %edi,%r9d
+ movl %ebx,%r13d
+ addl %r9d,%r14d
+ rorl $14,%r13d
+ movl %r14d,%r9d
+ movl %ecx,%r12d
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r12d
+ rorl $5,%r13d
+ xorl %r9d,%r14d
+ andl %ebx,%r12d
+ xorl %ebx,%r13d
+ addl 44(%rsp),%r8d
+ movl %r9d,%edi
+ xorl %edx,%r12d
+ rorl $11,%r14d
+ xorl %r10d,%edi
+ addl %r12d,%r8d
+ rorl $6,%r13d
+ andl %edi,%r15d
+ xorl %r9d,%r14d
+ addl %r13d,%r8d
+ xorl %r10d,%r15d
+ rorl $2,%r14d
+ addl %r8d,%eax
+ addl %r15d,%r8d
+ movl %eax,%r13d
+ addl %r8d,%r14d
+ rorl $14,%r13d
+ movl %r14d,%r8d
+ movl %ebx,%r12d
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r12d
+ rorl $5,%r13d
+ xorl %r8d,%r14d
+ andl %eax,%r12d
+ xorl %eax,%r13d
+ addl 48(%rsp),%edx
+ movl %r8d,%r15d
+ xorl %ecx,%r12d
+ rorl $11,%r14d
+ xorl %r9d,%r15d
+ addl %r12d,%edx
+ rorl $6,%r13d
+ andl %r15d,%edi
+ xorl %r8d,%r14d
+ addl %r13d,%edx
+ xorl %r9d,%edi
+ rorl $2,%r14d
+ addl %edx,%r11d
+ addl %edi,%edx
+ movl %r11d,%r13d
+ addl %edx,%r14d
+ rorl $14,%r13d
+ movl %r14d,%edx
+ movl %eax,%r12d
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r12d
+ rorl $5,%r13d
+ xorl %edx,%r14d
+ andl %r11d,%r12d
+ xorl %r11d,%r13d
+ addl 52(%rsp),%ecx
+ movl %edx,%edi
+ xorl %ebx,%r12d
+ rorl $11,%r14d
+ xorl %r8d,%edi
+ addl %r12d,%ecx
+ rorl $6,%r13d
+ andl %edi,%r15d
+ xorl %edx,%r14d
+ addl %r13d,%ecx
+ xorl %r8d,%r15d
+ rorl $2,%r14d
+ addl %ecx,%r10d
+ addl %r15d,%ecx
+ movl %r10d,%r13d
+ addl %ecx,%r14d
+ rorl $14,%r13d
+ movl %r14d,%ecx
+ movl %r11d,%r12d
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r12d
+ rorl $5,%r13d
+ xorl %ecx,%r14d
+ andl %r10d,%r12d
+ xorl %r10d,%r13d
+ addl 56(%rsp),%ebx
+ movl %ecx,%r15d
+ xorl %eax,%r12d
+ rorl $11,%r14d
+ xorl %edx,%r15d
+ addl %r12d,%ebx
+ rorl $6,%r13d
+ andl %r15d,%edi
+ xorl %ecx,%r14d
+ addl %r13d,%ebx
+ xorl %edx,%edi
+ rorl $2,%r14d
+ addl %ebx,%r9d
+ addl %edi,%ebx
+ movl %r9d,%r13d
+ addl %ebx,%r14d
+ rorl $14,%r13d
+ movl %r14d,%ebx
+ movl %r10d,%r12d
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r12d
+ rorl $5,%r13d
+ xorl %ebx,%r14d
+ andl %r9d,%r12d
+ xorl %r9d,%r13d
+ addl 60(%rsp),%eax
+ movl %ebx,%edi
+ xorl %r11d,%r12d
+ rorl $11,%r14d
+ xorl %ecx,%edi
+ addl %r12d,%eax
+ rorl $6,%r13d
+ andl %edi,%r15d
+ xorl %ebx,%r14d
+ addl %r13d,%eax
+ xorl %ecx,%r15d
+ rorl $2,%r14d
+ addl %eax,%r8d
+ addl %r15d,%eax
+ movl %r8d,%r13d
+ addl %eax,%r14d
+ movq 64+0(%rsp),%rdi
+ movl %r14d,%eax
+
+ addl 0(%rdi),%eax
+ leaq 64(%rsi),%rsi
+ addl 4(%rdi),%ebx
+ addl 8(%rdi),%ecx
+ addl 12(%rdi),%edx
+ addl 16(%rdi),%r8d
+ addl 20(%rdi),%r9d
+ addl 24(%rdi),%r10d
+ addl 28(%rdi),%r11d
+
+ cmpq 64+16(%rsp),%rsi
+
+ movl %eax,0(%rdi)
+ movl %ebx,4(%rdi)
+ movl %ecx,8(%rdi)
+ movl %edx,12(%rdi)
+ movl %r8d,16(%rdi)
+ movl %r9d,20(%rdi)
+ movl %r10d,24(%rdi)
+ movl %r11d,28(%rdi)
+ jb .Lloop_ssse3
+
+ movq 64+24(%rsp),%rsi
+ movq (%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbp
+ movq 40(%rsi),%rbx
+ leaq 48(%rsi),%rsp
+.Lepilogue_ssse3:
+ .byte 0xf3,0xc3
+.size sha256_block_data_order_ssse3,.-sha256_block_data_order_ssse3
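
sha256_block_data_order_ssse3 vectorizes only the message schedule: sigma0
and sigma1 are computed four W words at a time with psrld/pslld/psrlq/pxor
and pshufd, paddd'd with the round constants into the stack slots, while the
rounds themselves stay scalar, interleaved instruction-by-instruction with
the vector work to hide latency. An unscheduled C sketch of the four-lane
sigma functions (illustrative only; the code above builds the rotates
incrementally and emulates sigma1's rotates with 64-bit shifts on
duplicated lanes):

#include <emmintrin.h>

/* sigma0(w) = ROTR7(w) ^ ROTR18(w) ^ (w >> 3), four lanes at once. */
static inline __m128i sigma0_x4(__m128i w)
{
    __m128i r7  = _mm_or_si128(_mm_srli_epi32(w, 7),  _mm_slli_epi32(w, 25));
    __m128i r18 = _mm_or_si128(_mm_srli_epi32(w, 18), _mm_slli_epi32(w, 14));
    return _mm_xor_si128(_mm_xor_si128(r7, r18), _mm_srli_epi32(w, 3));
}

/* sigma1(w) = ROTR17(w) ^ ROTR19(w) ^ (w >> 10). */
static inline __m128i sigma1_x4(__m128i w)
{
    __m128i r17 = _mm_or_si128(_mm_srli_epi32(w, 17), _mm_slli_epi32(w, 15));
    __m128i r19 = _mm_or_si128(_mm_srli_epi32(w, 19), _mm_slli_epi32(w, 13));
    return _mm_xor_si128(_mm_xor_si128(r17, r19), _mm_srli_epi32(w, 10));
}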
diff --git a/secure/lib/libcrypto/amd64/sha512-x86_64.S b/secure/lib/libcrypto/amd64/sha512-x86_64.S
index 74fc3d0..60518d4 100644
--- a/secure/lib/libcrypto/amd64/sha512-x86_64.S
+++ b/secure/lib/libcrypto/amd64/sha512-x86_64.S
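
The sha512-x86_64.S diff below applies the same restructuring with 64-bit
operations: %rdi carries the shared Maj() term, the K512 pointer walks in
leaq 8/24 steps over a likewise-duplicated table, and the Sigma rotations
keep their nested rorq form. For reference, a sketch of what those rorq
chains compute (illustrative, rotation counts per FIPS 180-4):

#include <stdint.h>

static inline uint64_t rotr64(uint64_t x, int n) { return (x >> n) | (x << (64 - n)); }

/* rorq $23; xorq e; rorq $4; xorq e; rorq $14  ==  ROTR14^ROTR18^ROTR41 */
static inline uint64_t Sigma1(uint64_t e)
{
    return rotr64(rotr64(rotr64(e, 23) ^ e, 4) ^ e, 14);
}

/* rorq $5; xorq a; rorq $6; xorq a; rorq $28  ==  ROTR28^ROTR34^ROTR39 */
static inline uint64_t Sigma0(uint64_t a)
{
    return rotr64(rotr64(rotr64(a, 5) ^ a, 6) ^ a, 28);
}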
@@ -1,6 +1,7 @@
# $FreeBSD$
.text
+
.globl sha512_block_data_order
.type sha512_block_data_order,@function
.align 16
@@ -22,8 +23,6 @@ sha512_block_data_order:
movq %r11,128+24(%rsp)
.Lprologue:
- leaq K512(%rip),%rbp
-
movq 0(%rdi),%rax
movq 8(%rdi),%rbx
movq 16(%rdi),%rcx
@@ -36,1694 +35,1632 @@ sha512_block_data_order:
.align 16
.Lloop:
- xorq %rdi,%rdi
+ movq %rbx,%rdi
+ leaq K512(%rip),%rbp
+ xorq %rcx,%rdi
movq 0(%rsi),%r12
movq %r8,%r13
movq %rax,%r14
bswapq %r12
rorq $23,%r13
movq %r9,%r15
- movq %r12,0(%rsp)
- rorq $5,%r14
xorq %r8,%r13
+ rorq $5,%r14
xorq %r10,%r15
- rorq $4,%r13
- addq %r11,%r12
+ movq %r12,0(%rsp)
xorq %rax,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %r8,%r15
- movq %rbx,%r11
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
- xorq %r10,%r15
+ addq %r15,%r12
- xorq %rcx,%r11
+ movq %rax,%r15
+ addq (%rbp),%r12
xorq %rax,%r14
- addq %r15,%r12
- movq %rbx,%r15
+ xorq %rbx,%r15
rorq $14,%r13
- andq %rax,%r11
- andq %rcx,%r15
+ movq %rbx,%r11
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r11
+ xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
- leaq 1(%rdi),%rdi
- addq %r14,%r11
+ leaq 8(%rbp),%rbp
+ addq %r14,%r11
movq 8(%rsi),%r12
movq %rdx,%r13
movq %r11,%r14
bswapq %r12
rorq $23,%r13
- movq %r8,%r15
- movq %r12,8(%rsp)
+ movq %r8,%rdi
- rorq $5,%r14
xorq %rdx,%r13
- xorq %r9,%r15
+ rorq $5,%r14
+ xorq %r9,%rdi
- rorq $4,%r13
- addq %r10,%r12
+ movq %r12,8(%rsp)
xorq %r11,%r14
+ andq %rdx,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %rdx,%r15
- movq %rax,%r10
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
- xorq %r9,%r15
+ addq %rdi,%r12
- xorq %rbx,%r10
+ movq %r11,%rdi
+ addq (%rbp),%r12
xorq %r11,%r14
- addq %r15,%r12
- movq %rax,%r15
+ xorq %rax,%rdi
rorq $14,%r13
- andq %r11,%r10
- andq %rbx,%r15
+ movq %rax,%r10
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r10
+ xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
- leaq 1(%rdi),%rdi
- addq %r14,%r10
+ leaq 24(%rbp),%rbp
+ addq %r14,%r10
movq 16(%rsi),%r12
movq %rcx,%r13
movq %r10,%r14
bswapq %r12
rorq $23,%r13
movq %rdx,%r15
- movq %r12,16(%rsp)
- rorq $5,%r14
xorq %rcx,%r13
+ rorq $5,%r14
xorq %r8,%r15
- rorq $4,%r13
- addq %r9,%r12
+ movq %r12,16(%rsp)
xorq %r10,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %rcx,%r15
- movq %r11,%r9
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
- xorq %r8,%r15
+ addq %r15,%r12
- xorq %rax,%r9
+ movq %r10,%r15
+ addq (%rbp),%r12
xorq %r10,%r14
- addq %r15,%r12
- movq %r11,%r15
+ xorq %r11,%r15
rorq $14,%r13
- andq %r10,%r9
- andq %rax,%r15
+ movq %r11,%r9
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r9
+ xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
- leaq 1(%rdi),%rdi
- addq %r14,%r9
+ leaq 8(%rbp),%rbp
+ addq %r14,%r9
movq 24(%rsi),%r12
movq %rbx,%r13
movq %r9,%r14
bswapq %r12
rorq $23,%r13
- movq %rcx,%r15
- movq %r12,24(%rsp)
+ movq %rcx,%rdi
- rorq $5,%r14
xorq %rbx,%r13
- xorq %rdx,%r15
+ rorq $5,%r14
+ xorq %rdx,%rdi
- rorq $4,%r13
- addq %r8,%r12
+ movq %r12,24(%rsp)
xorq %r9,%r14
+ andq %rbx,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %rbx,%r15
- movq %r10,%r8
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
- xorq %rdx,%r15
+ addq %rdi,%r12
- xorq %r11,%r8
+ movq %r9,%rdi
+ addq (%rbp),%r12
xorq %r9,%r14
- addq %r15,%r12
- movq %r10,%r15
+ xorq %r10,%rdi
rorq $14,%r13
- andq %r9,%r8
- andq %r11,%r15
+ movq %r10,%r8
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r8
+ xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
- leaq 1(%rdi),%rdi
- addq %r14,%r8
+ leaq 24(%rbp),%rbp
+ addq %r14,%r8
movq 32(%rsi),%r12
movq %rax,%r13
movq %r8,%r14
bswapq %r12
rorq $23,%r13
movq %rbx,%r15
- movq %r12,32(%rsp)
- rorq $5,%r14
xorq %rax,%r13
+ rorq $5,%r14
xorq %rcx,%r15
- rorq $4,%r13
- addq %rdx,%r12
+ movq %r12,32(%rsp)
xorq %r8,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %rax,%r15
- movq %r9,%rdx
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
- xorq %rcx,%r15
+ addq %r15,%r12
- xorq %r10,%rdx
+ movq %r8,%r15
+ addq (%rbp),%r12
xorq %r8,%r14
- addq %r15,%r12
- movq %r9,%r15
+ xorq %r9,%r15
rorq $14,%r13
- andq %r8,%rdx
- andq %r10,%r15
+ movq %r9,%rdx
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rdx
+ xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
- leaq 1(%rdi),%rdi
- addq %r14,%rdx
+ leaq 8(%rbp),%rbp
+ addq %r14,%rdx
movq 40(%rsi),%r12
movq %r11,%r13
movq %rdx,%r14
bswapq %r12
rorq $23,%r13
- movq %rax,%r15
- movq %r12,40(%rsp)
+ movq %rax,%rdi
- rorq $5,%r14
xorq %r11,%r13
- xorq %rbx,%r15
+ rorq $5,%r14
+ xorq %rbx,%rdi
- rorq $4,%r13
- addq %rcx,%r12
+ movq %r12,40(%rsp)
xorq %rdx,%r14
+ andq %r11,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %r11,%r15
- movq %r8,%rcx
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
- xorq %rbx,%r15
+ addq %rdi,%r12
- xorq %r9,%rcx
+ movq %rdx,%rdi
+ addq (%rbp),%r12
xorq %rdx,%r14
- addq %r15,%r12
- movq %r8,%r15
+ xorq %r8,%rdi
rorq $14,%r13
- andq %rdx,%rcx
- andq %r9,%r15
+ movq %r8,%rcx
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rcx
+ xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
- leaq 1(%rdi),%rdi
- addq %r14,%rcx
+ leaq 24(%rbp),%rbp
+ addq %r14,%rcx
movq 48(%rsi),%r12
movq %r10,%r13
movq %rcx,%r14
bswapq %r12
rorq $23,%r13
movq %r11,%r15
- movq %r12,48(%rsp)
- rorq $5,%r14
xorq %r10,%r13
+ rorq $5,%r14
xorq %rax,%r15
- rorq $4,%r13
- addq %rbx,%r12
+ movq %r12,48(%rsp)
xorq %rcx,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %r10,%r15
- movq %rdx,%rbx
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
- xorq %rax,%r15
+ addq %r15,%r12
- xorq %r8,%rbx
+ movq %rcx,%r15
+ addq (%rbp),%r12
xorq %rcx,%r14
- addq %r15,%r12
- movq %rdx,%r15
+ xorq %rdx,%r15
rorq $14,%r13
- andq %rcx,%rbx
- andq %r8,%r15
+ movq %rdx,%rbx
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rbx
+ xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
- leaq 1(%rdi),%rdi
- addq %r14,%rbx
+ leaq 8(%rbp),%rbp
+ addq %r14,%rbx
movq 56(%rsi),%r12
movq %r9,%r13
movq %rbx,%r14
bswapq %r12
rorq $23,%r13
- movq %r10,%r15
- movq %r12,56(%rsp)
+ movq %r10,%rdi
- rorq $5,%r14
xorq %r9,%r13
- xorq %r11,%r15
+ rorq $5,%r14
+ xorq %r11,%rdi
- rorq $4,%r13
- addq %rax,%r12
+ movq %r12,56(%rsp)
xorq %rbx,%r14
+ andq %r9,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %r9,%r15
- movq %rcx,%rax
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
- xorq %r11,%r15
+ addq %rdi,%r12
- xorq %rdx,%rax
+ movq %rbx,%rdi
+ addq (%rbp),%r12
xorq %rbx,%r14
- addq %r15,%r12
- movq %rcx,%r15
+ xorq %rcx,%rdi
rorq $14,%r13
- andq %rbx,%rax
- andq %rdx,%r15
+ movq %rcx,%rax
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rax
+ xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
- leaq 1(%rdi),%rdi
- addq %r14,%rax
+ leaq 24(%rbp),%rbp
+ addq %r14,%rax
movq 64(%rsi),%r12
movq %r8,%r13
movq %rax,%r14
bswapq %r12
rorq $23,%r13
movq %r9,%r15
- movq %r12,64(%rsp)
- rorq $5,%r14
xorq %r8,%r13
+ rorq $5,%r14
xorq %r10,%r15
- rorq $4,%r13
- addq %r11,%r12
+ movq %r12,64(%rsp)
xorq %rax,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %r8,%r15
- movq %rbx,%r11
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
- xorq %r10,%r15
+ addq %r15,%r12
- xorq %rcx,%r11
+ movq %rax,%r15
+ addq (%rbp),%r12
xorq %rax,%r14
- addq %r15,%r12
- movq %rbx,%r15
+ xorq %rbx,%r15
rorq $14,%r13
- andq %rax,%r11
- andq %rcx,%r15
+ movq %rbx,%r11
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r11
+ xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
- leaq 1(%rdi),%rdi
- addq %r14,%r11
+ leaq 8(%rbp),%rbp
+ addq %r14,%r11
movq 72(%rsi),%r12
movq %rdx,%r13
movq %r11,%r14
bswapq %r12
rorq $23,%r13
- movq %r8,%r15
- movq %r12,72(%rsp)
+ movq %r8,%rdi
- rorq $5,%r14
xorq %rdx,%r13
- xorq %r9,%r15
+ rorq $5,%r14
+ xorq %r9,%rdi
- rorq $4,%r13
- addq %r10,%r12
+ movq %r12,72(%rsp)
xorq %r11,%r14
+ andq %rdx,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %rdx,%r15
- movq %rax,%r10
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
- xorq %r9,%r15
+ addq %rdi,%r12
- xorq %rbx,%r10
+ movq %r11,%rdi
+ addq (%rbp),%r12
xorq %r11,%r14
- addq %r15,%r12
- movq %rax,%r15
+ xorq %rax,%rdi
rorq $14,%r13
- andq %r11,%r10
- andq %rbx,%r15
+ movq %rax,%r10
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r10
+ xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
- leaq 1(%rdi),%rdi
- addq %r14,%r10
+ leaq 24(%rbp),%rbp
+ addq %r14,%r10
movq 80(%rsi),%r12
movq %rcx,%r13
movq %r10,%r14
bswapq %r12
rorq $23,%r13
movq %rdx,%r15
- movq %r12,80(%rsp)
- rorq $5,%r14
xorq %rcx,%r13
+ rorq $5,%r14
xorq %r8,%r15
- rorq $4,%r13
- addq %r9,%r12
+ movq %r12,80(%rsp)
xorq %r10,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %rcx,%r15
- movq %r11,%r9
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
- xorq %r8,%r15
+ addq %r15,%r12
- xorq %rax,%r9
+ movq %r10,%r15
+ addq (%rbp),%r12
xorq %r10,%r14
- addq %r15,%r12
- movq %r11,%r15
+ xorq %r11,%r15
rorq $14,%r13
- andq %r10,%r9
- andq %rax,%r15
+ movq %r11,%r9
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r9
+ xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
- leaq 1(%rdi),%rdi
- addq %r14,%r9
+ leaq 8(%rbp),%rbp
+ addq %r14,%r9
movq 88(%rsi),%r12
movq %rbx,%r13
movq %r9,%r14
bswapq %r12
rorq $23,%r13
- movq %rcx,%r15
- movq %r12,88(%rsp)
+ movq %rcx,%rdi
- rorq $5,%r14
xorq %rbx,%r13
- xorq %rdx,%r15
+ rorq $5,%r14
+ xorq %rdx,%rdi
- rorq $4,%r13
- addq %r8,%r12
+ movq %r12,88(%rsp)
xorq %r9,%r14
+ andq %rbx,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %rbx,%r15
- movq %r10,%r8
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
- xorq %rdx,%r15
+ addq %rdi,%r12
- xorq %r11,%r8
+ movq %r9,%rdi
+ addq (%rbp),%r12
xorq %r9,%r14
- addq %r15,%r12
- movq %r10,%r15
+ xorq %r10,%rdi
rorq $14,%r13
- andq %r9,%r8
- andq %r11,%r15
+ movq %r10,%r8
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r8
+ xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
- leaq 1(%rdi),%rdi
- addq %r14,%r8
+ leaq 24(%rbp),%rbp
+ addq %r14,%r8
movq 96(%rsi),%r12
movq %rax,%r13
movq %r8,%r14
bswapq %r12
rorq $23,%r13
movq %rbx,%r15
- movq %r12,96(%rsp)
- rorq $5,%r14
xorq %rax,%r13
+ rorq $5,%r14
xorq %rcx,%r15
- rorq $4,%r13
- addq %rdx,%r12
+ movq %r12,96(%rsp)
xorq %r8,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %rax,%r15
- movq %r9,%rdx
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
- xorq %rcx,%r15
+ addq %r15,%r12
- xorq %r10,%rdx
+ movq %r8,%r15
+ addq (%rbp),%r12
xorq %r8,%r14
- addq %r15,%r12
- movq %r9,%r15
+ xorq %r9,%r15
rorq $14,%r13
- andq %r8,%rdx
- andq %r10,%r15
+ movq %r9,%rdx
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rdx
+ xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
- leaq 1(%rdi),%rdi
- addq %r14,%rdx
+ leaq 8(%rbp),%rbp
+ addq %r14,%rdx
movq 104(%rsi),%r12
movq %r11,%r13
movq %rdx,%r14
bswapq %r12
rorq $23,%r13
- movq %rax,%r15
- movq %r12,104(%rsp)
+ movq %rax,%rdi
- rorq $5,%r14
xorq %r11,%r13
- xorq %rbx,%r15
+ rorq $5,%r14
+ xorq %rbx,%rdi
- rorq $4,%r13
- addq %rcx,%r12
+ movq %r12,104(%rsp)
xorq %rdx,%r14
+ andq %r11,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %r11,%r15
- movq %r8,%rcx
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
- xorq %rbx,%r15
+ addq %rdi,%r12
- xorq %r9,%rcx
+ movq %rdx,%rdi
+ addq (%rbp),%r12
xorq %rdx,%r14
- addq %r15,%r12
- movq %r8,%r15
+ xorq %r8,%rdi
rorq $14,%r13
- andq %rdx,%rcx
- andq %r9,%r15
+ movq %r8,%rcx
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rcx
+ xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
- leaq 1(%rdi),%rdi
- addq %r14,%rcx
+ leaq 24(%rbp),%rbp
+ addq %r14,%rcx
movq 112(%rsi),%r12
movq %r10,%r13
movq %rcx,%r14
bswapq %r12
rorq $23,%r13
movq %r11,%r15
- movq %r12,112(%rsp)
- rorq $5,%r14
xorq %r10,%r13
+ rorq $5,%r14
xorq %rax,%r15
- rorq $4,%r13
- addq %rbx,%r12
+ movq %r12,112(%rsp)
xorq %rcx,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %r10,%r15
- movq %rdx,%rbx
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
- xorq %rax,%r15
+ addq %r15,%r12
- xorq %r8,%rbx
+ movq %rcx,%r15
+ addq (%rbp),%r12
xorq %rcx,%r14
- addq %r15,%r12
- movq %rdx,%r15
+ xorq %rdx,%r15
rorq $14,%r13
- andq %rcx,%rbx
- andq %r8,%r15
+ movq %rdx,%rbx
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rbx
+ xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
- leaq 1(%rdi),%rdi
- addq %r14,%rbx
+ leaq 8(%rbp),%rbp
+ addq %r14,%rbx
movq 120(%rsi),%r12
movq %r9,%r13
movq %rbx,%r14
bswapq %r12
rorq $23,%r13
- movq %r10,%r15
- movq %r12,120(%rsp)
+ movq %r10,%rdi
- rorq $5,%r14
xorq %r9,%r13
- xorq %r11,%r15
+ rorq $5,%r14
+ xorq %r11,%rdi
- rorq $4,%r13
- addq %rax,%r12
+ movq %r12,120(%rsp)
xorq %rbx,%r14
+ andq %r9,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %r9,%r15
- movq %rcx,%rax
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
- xorq %r11,%r15
+ addq %rdi,%r12
- xorq %rdx,%rax
+ movq %rbx,%rdi
+ addq (%rbp),%r12
xorq %rbx,%r14
- addq %r15,%r12
- movq %rcx,%r15
+ xorq %rcx,%rdi
rorq $14,%r13
- andq %rbx,%rax
- andq %rdx,%r15
+ movq %rcx,%rax
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rax
+ xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
- leaq 1(%rdi),%rdi
- addq %r14,%rax
+ leaq 24(%rbp),%rbp
jmp .Lrounds_16_xx
.align 16
.Lrounds_16_xx:
movq 8(%rsp),%r13
- movq 112(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 112(%rsp),%r15
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%rax
+ movq %r15,%r14
+ rorq $42,%r15
- rorq $1,%r12
xorq %r12,%r13
- movq 72(%rsp),%r12
-
- rorq $42,%r15
+ shrq $7,%r12
+ rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ xorq %r13,%r12
+ xorq %r14,%r15
+ addq 72(%rsp),%r12
addq 0(%rsp),%r12
movq %r8,%r13
- addq %r14,%r12
+ addq %r15,%r12
movq %rax,%r14
rorq $23,%r13
movq %r9,%r15
- movq %r12,0(%rsp)
- rorq $5,%r14
xorq %r8,%r13
+ rorq $5,%r14
xorq %r10,%r15
- rorq $4,%r13
- addq %r11,%r12
+ movq %r12,0(%rsp)
xorq %rax,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %r8,%r15
- movq %rbx,%r11
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
- xorq %r10,%r15
+ addq %r15,%r12
- xorq %rcx,%r11
+ movq %rax,%r15
+ addq (%rbp),%r12
xorq %rax,%r14
- addq %r15,%r12
- movq %rbx,%r15
+ xorq %rbx,%r15
rorq $14,%r13
- andq %rax,%r11
- andq %rcx,%r15
+ movq %rbx,%r11
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r11
+ xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
- leaq 1(%rdi),%rdi
- addq %r14,%r11
+ leaq 8(%rbp),%rbp
movq 16(%rsp),%r13
- movq 120(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 120(%rsp),%rdi
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%r11
+ movq %rdi,%r14
+ rorq $42,%rdi
- rorq $1,%r12
xorq %r12,%r13
- movq 80(%rsp),%r12
-
- rorq $42,%r15
- xorq %r14,%r15
+ shrq $7,%r12
+ rorq $1,%r13
+ xorq %r14,%rdi
shrq $6,%r14
- rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ rorq $19,%rdi
+ xorq %r13,%r12
+ xorq %r14,%rdi
+ addq 80(%rsp),%r12
addq 8(%rsp),%r12
movq %rdx,%r13
- addq %r14,%r12
+ addq %rdi,%r12
movq %r11,%r14
rorq $23,%r13
- movq %r8,%r15
- movq %r12,8(%rsp)
+ movq %r8,%rdi
- rorq $5,%r14
xorq %rdx,%r13
- xorq %r9,%r15
+ rorq $5,%r14
+ xorq %r9,%rdi
- rorq $4,%r13
- addq %r10,%r12
+ movq %r12,8(%rsp)
xorq %r11,%r14
+ andq %rdx,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %rdx,%r15
- movq %rax,%r10
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
- xorq %r9,%r15
+ addq %rdi,%r12
- xorq %rbx,%r10
+ movq %r11,%rdi
+ addq (%rbp),%r12
xorq %r11,%r14
- addq %r15,%r12
- movq %rax,%r15
+ xorq %rax,%rdi
rorq $14,%r13
- andq %r11,%r10
- andq %rbx,%r15
+ movq %rax,%r10
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r10
+ xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
- leaq 1(%rdi),%rdi
- addq %r14,%r10
+ leaq 24(%rbp),%rbp
movq 24(%rsp),%r13
- movq 0(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 0(%rsp),%r15
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%r10
+ movq %r15,%r14
+ rorq $42,%r15
- rorq $1,%r12
xorq %r12,%r13
- movq 88(%rsp),%r12
-
- rorq $42,%r15
+ shrq $7,%r12
+ rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ xorq %r13,%r12
+ xorq %r14,%r15
+ addq 88(%rsp),%r12
addq 16(%rsp),%r12
movq %rcx,%r13
- addq %r14,%r12
+ addq %r15,%r12
movq %r10,%r14
rorq $23,%r13
movq %rdx,%r15
- movq %r12,16(%rsp)
- rorq $5,%r14
xorq %rcx,%r13
+ rorq $5,%r14
xorq %r8,%r15
- rorq $4,%r13
- addq %r9,%r12
+ movq %r12,16(%rsp)
xorq %r10,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %rcx,%r15
- movq %r11,%r9
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
- xorq %r8,%r15
+ addq %r15,%r12
- xorq %rax,%r9
+ movq %r10,%r15
+ addq (%rbp),%r12
xorq %r10,%r14
- addq %r15,%r12
- movq %r11,%r15
+ xorq %r11,%r15
rorq $14,%r13
- andq %r10,%r9
- andq %rax,%r15
+ movq %r11,%r9
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r9
+ xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
- leaq 1(%rdi),%rdi
- addq %r14,%r9
+ leaq 8(%rbp),%rbp
movq 32(%rsp),%r13
- movq 8(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 8(%rsp),%rdi
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%r9
+ movq %rdi,%r14
+ rorq $42,%rdi
- rorq $1,%r12
xorq %r12,%r13
- movq 96(%rsp),%r12
-
- rorq $42,%r15
- xorq %r14,%r15
+ shrq $7,%r12
+ rorq $1,%r13
+ xorq %r14,%rdi
shrq $6,%r14
- rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ rorq $19,%rdi
+ xorq %r13,%r12
+ xorq %r14,%rdi
+ addq 96(%rsp),%r12
addq 24(%rsp),%r12
movq %rbx,%r13
- addq %r14,%r12
+ addq %rdi,%r12
movq %r9,%r14
rorq $23,%r13
- movq %rcx,%r15
- movq %r12,24(%rsp)
+ movq %rcx,%rdi
- rorq $5,%r14
xorq %rbx,%r13
- xorq %rdx,%r15
+ rorq $5,%r14
+ xorq %rdx,%rdi
- rorq $4,%r13
- addq %r8,%r12
+ movq %r12,24(%rsp)
xorq %r9,%r14
+ andq %rbx,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %rbx,%r15
- movq %r10,%r8
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
- xorq %rdx,%r15
+ addq %rdi,%r12
- xorq %r11,%r8
+ movq %r9,%rdi
+ addq (%rbp),%r12
xorq %r9,%r14
- addq %r15,%r12
- movq %r10,%r15
+ xorq %r10,%rdi
rorq $14,%r13
- andq %r9,%r8
- andq %r11,%r15
+ movq %r10,%r8
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r8
+ xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
- leaq 1(%rdi),%rdi
- addq %r14,%r8
+ leaq 24(%rbp),%rbp
movq 40(%rsp),%r13
- movq 16(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 16(%rsp),%r15
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%r8
+ movq %r15,%r14
+ rorq $42,%r15
- rorq $1,%r12
xorq %r12,%r13
- movq 104(%rsp),%r12
-
- rorq $42,%r15
+ shrq $7,%r12
+ rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ xorq %r13,%r12
+ xorq %r14,%r15
+ addq 104(%rsp),%r12
addq 32(%rsp),%r12
movq %rax,%r13
- addq %r14,%r12
+ addq %r15,%r12
movq %r8,%r14
rorq $23,%r13
movq %rbx,%r15
- movq %r12,32(%rsp)
- rorq $5,%r14
xorq %rax,%r13
+ rorq $5,%r14
xorq %rcx,%r15
- rorq $4,%r13
- addq %rdx,%r12
+ movq %r12,32(%rsp)
xorq %r8,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %rax,%r15
- movq %r9,%rdx
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
- xorq %rcx,%r15
+ addq %r15,%r12
- xorq %r10,%rdx
+ movq %r8,%r15
+ addq (%rbp),%r12
xorq %r8,%r14
- addq %r15,%r12
- movq %r9,%r15
+ xorq %r9,%r15
rorq $14,%r13
- andq %r8,%rdx
- andq %r10,%r15
+ movq %r9,%rdx
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rdx
+ xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
- leaq 1(%rdi),%rdi
- addq %r14,%rdx
+ leaq 8(%rbp),%rbp
movq 48(%rsp),%r13
- movq 24(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 24(%rsp),%rdi
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%rdx
+ movq %rdi,%r14
+ rorq $42,%rdi
- rorq $1,%r12
xorq %r12,%r13
- movq 112(%rsp),%r12
-
- rorq $42,%r15
- xorq %r14,%r15
+ shrq $7,%r12
+ rorq $1,%r13
+ xorq %r14,%rdi
shrq $6,%r14
- rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ rorq $19,%rdi
+ xorq %r13,%r12
+ xorq %r14,%rdi
+ addq 112(%rsp),%r12
addq 40(%rsp),%r12
movq %r11,%r13
- addq %r14,%r12
+ addq %rdi,%r12
movq %rdx,%r14
rorq $23,%r13
- movq %rax,%r15
- movq %r12,40(%rsp)
+ movq %rax,%rdi
- rorq $5,%r14
xorq %r11,%r13
- xorq %rbx,%r15
+ rorq $5,%r14
+ xorq %rbx,%rdi
- rorq $4,%r13
- addq %rcx,%r12
+ movq %r12,40(%rsp)
xorq %rdx,%r14
+ andq %r11,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %r11,%r15
- movq %r8,%rcx
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
- xorq %rbx,%r15
+ addq %rdi,%r12
- xorq %r9,%rcx
+ movq %rdx,%rdi
+ addq (%rbp),%r12
xorq %rdx,%r14
- addq %r15,%r12
- movq %r8,%r15
+ xorq %r8,%rdi
rorq $14,%r13
- andq %rdx,%rcx
- andq %r9,%r15
+ movq %r8,%rcx
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rcx
+ xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
- leaq 1(%rdi),%rdi
- addq %r14,%rcx
+ leaq 24(%rbp),%rbp
movq 56(%rsp),%r13
- movq 32(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 32(%rsp),%r15
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%rcx
+ movq %r15,%r14
+ rorq $42,%r15
- rorq $1,%r12
xorq %r12,%r13
- movq 120(%rsp),%r12
-
- rorq $42,%r15
+ shrq $7,%r12
+ rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ xorq %r13,%r12
+ xorq %r14,%r15
+ addq 120(%rsp),%r12
addq 48(%rsp),%r12
movq %r10,%r13
- addq %r14,%r12
+ addq %r15,%r12
movq %rcx,%r14
rorq $23,%r13
movq %r11,%r15
- movq %r12,48(%rsp)
- rorq $5,%r14
xorq %r10,%r13
+ rorq $5,%r14
xorq %rax,%r15
- rorq $4,%r13
- addq %rbx,%r12
+ movq %r12,48(%rsp)
xorq %rcx,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %r10,%r15
- movq %rdx,%rbx
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
- xorq %rax,%r15
+ addq %r15,%r12
- xorq %r8,%rbx
+ movq %rcx,%r15
+ addq (%rbp),%r12
xorq %rcx,%r14
- addq %r15,%r12
- movq %rdx,%r15
+ xorq %rdx,%r15
rorq $14,%r13
- andq %rcx,%rbx
- andq %r8,%r15
+ movq %rdx,%rbx
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rbx
+ xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
- leaq 1(%rdi),%rdi
- addq %r14,%rbx
+ leaq 8(%rbp),%rbp
movq 64(%rsp),%r13
- movq 40(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 40(%rsp),%rdi
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%rbx
+ movq %rdi,%r14
+ rorq $42,%rdi
- rorq $1,%r12
xorq %r12,%r13
- movq 0(%rsp),%r12
-
- rorq $42,%r15
- xorq %r14,%r15
+ shrq $7,%r12
+ rorq $1,%r13
+ xorq %r14,%rdi
shrq $6,%r14
- rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ rorq $19,%rdi
+ xorq %r13,%r12
+ xorq %r14,%rdi
+ addq 0(%rsp),%r12
addq 56(%rsp),%r12
movq %r9,%r13
- addq %r14,%r12
+ addq %rdi,%r12
movq %rbx,%r14
rorq $23,%r13
- movq %r10,%r15
- movq %r12,56(%rsp)
+ movq %r10,%rdi
- rorq $5,%r14
xorq %r9,%r13
- xorq %r11,%r15
+ rorq $5,%r14
+ xorq %r11,%rdi
- rorq $4,%r13
- addq %rax,%r12
+ movq %r12,56(%rsp)
xorq %rbx,%r14
+ andq %r9,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %r9,%r15
- movq %rcx,%rax
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
- xorq %r11,%r15
+ addq %rdi,%r12
- xorq %rdx,%rax
+ movq %rbx,%rdi
+ addq (%rbp),%r12
xorq %rbx,%r14
- addq %r15,%r12
- movq %rcx,%r15
+ xorq %rcx,%rdi
rorq $14,%r13
- andq %rbx,%rax
- andq %rdx,%r15
+ movq %rcx,%rax
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rax
+ xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
- leaq 1(%rdi),%rdi
- addq %r14,%rax
+ leaq 24(%rbp),%rbp
movq 72(%rsp),%r13
- movq 48(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 48(%rsp),%r15
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%rax
+ movq %r15,%r14
+ rorq $42,%r15
- rorq $1,%r12
xorq %r12,%r13
- movq 8(%rsp),%r12
-
- rorq $42,%r15
+ shrq $7,%r12
+ rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ xorq %r13,%r12
+ xorq %r14,%r15
+ addq 8(%rsp),%r12
addq 64(%rsp),%r12
movq %r8,%r13
- addq %r14,%r12
+ addq %r15,%r12
movq %rax,%r14
rorq $23,%r13
movq %r9,%r15
- movq %r12,64(%rsp)
- rorq $5,%r14
xorq %r8,%r13
+ rorq $5,%r14
xorq %r10,%r15
- rorq $4,%r13
- addq %r11,%r12
+ movq %r12,64(%rsp)
xorq %rax,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %r8,%r15
- movq %rbx,%r11
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %r10,%r15
rorq $6,%r14
xorq %r8,%r13
- xorq %r10,%r15
+ addq %r15,%r12
- xorq %rcx,%r11
+ movq %rax,%r15
+ addq (%rbp),%r12
xorq %rax,%r14
- addq %r15,%r12
- movq %rbx,%r15
+ xorq %rbx,%r15
rorq $14,%r13
- andq %rax,%r11
- andq %rcx,%r15
+ movq %rbx,%r11
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r11
+ xorq %rdi,%r11
addq %r12,%rdx
addq %r12,%r11
- leaq 1(%rdi),%rdi
- addq %r14,%r11
+ leaq 8(%rbp),%rbp
movq 80(%rsp),%r13
- movq 56(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 56(%rsp),%rdi
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%r11
+ movq %rdi,%r14
+ rorq $42,%rdi
- rorq $1,%r12
xorq %r12,%r13
- movq 16(%rsp),%r12
-
- rorq $42,%r15
- xorq %r14,%r15
+ shrq $7,%r12
+ rorq $1,%r13
+ xorq %r14,%rdi
shrq $6,%r14
- rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ rorq $19,%rdi
+ xorq %r13,%r12
+ xorq %r14,%rdi
+ addq 16(%rsp),%r12
addq 72(%rsp),%r12
movq %rdx,%r13
- addq %r14,%r12
+ addq %rdi,%r12
movq %r11,%r14
rorq $23,%r13
- movq %r8,%r15
- movq %r12,72(%rsp)
+ movq %r8,%rdi
- rorq $5,%r14
xorq %rdx,%r13
- xorq %r9,%r15
+ rorq $5,%r14
+ xorq %r9,%rdi
- rorq $4,%r13
- addq %r10,%r12
+ movq %r12,72(%rsp)
xorq %r11,%r14
+ andq %rdx,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %rdx,%r15
- movq %rax,%r10
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r9,%rdi
rorq $6,%r14
xorq %rdx,%r13
- xorq %r9,%r15
+ addq %rdi,%r12
- xorq %rbx,%r10
+ movq %r11,%rdi
+ addq (%rbp),%r12
xorq %r11,%r14
- addq %r15,%r12
- movq %rax,%r15
+ xorq %rax,%rdi
rorq $14,%r13
- andq %r11,%r10
- andq %rbx,%r15
+ movq %rax,%r10
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r10
+ xorq %r15,%r10
addq %r12,%rcx
addq %r12,%r10
- leaq 1(%rdi),%rdi
- addq %r14,%r10
+ leaq 24(%rbp),%rbp
movq 88(%rsp),%r13
- movq 64(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 64(%rsp),%r15
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%r10
+ movq %r15,%r14
+ rorq $42,%r15
- rorq $1,%r12
xorq %r12,%r13
- movq 24(%rsp),%r12
-
- rorq $42,%r15
+ shrq $7,%r12
+ rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ xorq %r13,%r12
+ xorq %r14,%r15
+ addq 24(%rsp),%r12
addq 80(%rsp),%r12
movq %rcx,%r13
- addq %r14,%r12
+ addq %r15,%r12
movq %r10,%r14
rorq $23,%r13
movq %rdx,%r15
- movq %r12,80(%rsp)
- rorq $5,%r14
xorq %rcx,%r13
+ rorq $5,%r14
xorq %r8,%r15
- rorq $4,%r13
- addq %r9,%r12
+ movq %r12,80(%rsp)
xorq %r10,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %rcx,%r15
- movq %r11,%r9
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r8,%r15
rorq $6,%r14
xorq %rcx,%r13
- xorq %r8,%r15
+ addq %r15,%r12
- xorq %rax,%r9
+ movq %r10,%r15
+ addq (%rbp),%r12
xorq %r10,%r14
- addq %r15,%r12
- movq %r11,%r15
+ xorq %r11,%r15
rorq $14,%r13
- andq %r10,%r9
- andq %rax,%r15
+ movq %r11,%r9
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r9
+ xorq %rdi,%r9
addq %r12,%rbx
addq %r12,%r9
- leaq 1(%rdi),%rdi
- addq %r14,%r9
+ leaq 8(%rbp),%rbp
movq 96(%rsp),%r13
- movq 72(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 72(%rsp),%rdi
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%r9
+ movq %rdi,%r14
+ rorq $42,%rdi
- rorq $1,%r12
xorq %r12,%r13
- movq 32(%rsp),%r12
-
- rorq $42,%r15
- xorq %r14,%r15
+ shrq $7,%r12
+ rorq $1,%r13
+ xorq %r14,%rdi
shrq $6,%r14
- rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ rorq $19,%rdi
+ xorq %r13,%r12
+ xorq %r14,%rdi
+ addq 32(%rsp),%r12
addq 88(%rsp),%r12
movq %rbx,%r13
- addq %r14,%r12
+ addq %rdi,%r12
movq %r9,%r14
rorq $23,%r13
- movq %rcx,%r15
- movq %r12,88(%rsp)
+ movq %rcx,%rdi
- rorq $5,%r14
xorq %rbx,%r13
- xorq %rdx,%r15
+ rorq $5,%r14
+ xorq %rdx,%rdi
- rorq $4,%r13
- addq %r8,%r12
+ movq %r12,88(%rsp)
xorq %r9,%r14
+ andq %rbx,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %rbx,%r15
- movq %r10,%r8
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %rdx,%rdi
rorq $6,%r14
xorq %rbx,%r13
- xorq %rdx,%r15
+ addq %rdi,%r12
- xorq %r11,%r8
+ movq %r9,%rdi
+ addq (%rbp),%r12
xorq %r9,%r14
- addq %r15,%r12
- movq %r10,%r15
+ xorq %r10,%rdi
rorq $14,%r13
- andq %r9,%r8
- andq %r11,%r15
+ movq %r10,%r8
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%r8
+ xorq %r15,%r8
addq %r12,%rax
addq %r12,%r8
- leaq 1(%rdi),%rdi
- addq %r14,%r8
+ leaq 24(%rbp),%rbp
movq 104(%rsp),%r13
- movq 80(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 80(%rsp),%r15
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%r8
+ movq %r15,%r14
+ rorq $42,%r15
- rorq $1,%r12
xorq %r12,%r13
- movq 40(%rsp),%r12
-
- rorq $42,%r15
+ shrq $7,%r12
+ rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ xorq %r13,%r12
+ xorq %r14,%r15
+ addq 40(%rsp),%r12
addq 96(%rsp),%r12
movq %rax,%r13
- addq %r14,%r12
+ addq %r15,%r12
movq %r8,%r14
rorq $23,%r13
movq %rbx,%r15
- movq %r12,96(%rsp)
- rorq $5,%r14
xorq %rax,%r13
+ rorq $5,%r14
xorq %rcx,%r15
- rorq $4,%r13
- addq %rdx,%r12
+ movq %r12,96(%rsp)
xorq %r8,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %rax,%r15
- movq %r9,%rdx
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %rcx,%r15
rorq $6,%r14
xorq %rax,%r13
- xorq %rcx,%r15
+ addq %r15,%r12
- xorq %r10,%rdx
+ movq %r8,%r15
+ addq (%rbp),%r12
xorq %r8,%r14
- addq %r15,%r12
- movq %r9,%r15
+ xorq %r9,%r15
rorq $14,%r13
- andq %r8,%rdx
- andq %r10,%r15
+ movq %r9,%rdx
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rdx
+ xorq %rdi,%rdx
addq %r12,%r11
addq %r12,%rdx
- leaq 1(%rdi),%rdi
- addq %r14,%rdx
+ leaq 8(%rbp),%rbp
movq 112(%rsp),%r13
- movq 88(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 88(%rsp),%rdi
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%rdx
+ movq %rdi,%r14
+ rorq $42,%rdi
- rorq $1,%r12
xorq %r12,%r13
- movq 48(%rsp),%r12
-
- rorq $42,%r15
- xorq %r14,%r15
+ shrq $7,%r12
+ rorq $1,%r13
+ xorq %r14,%rdi
shrq $6,%r14
- rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ rorq $19,%rdi
+ xorq %r13,%r12
+ xorq %r14,%rdi
+ addq 48(%rsp),%r12
addq 104(%rsp),%r12
movq %r11,%r13
- addq %r14,%r12
+ addq %rdi,%r12
movq %rdx,%r14
rorq $23,%r13
- movq %rax,%r15
- movq %r12,104(%rsp)
+ movq %rax,%rdi
- rorq $5,%r14
xorq %r11,%r13
- xorq %rbx,%r15
+ rorq $5,%r14
+ xorq %rbx,%rdi
- rorq $4,%r13
- addq %rcx,%r12
+ movq %r12,104(%rsp)
xorq %rdx,%r14
+ andq %r11,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %r11,%r15
- movq %r8,%rcx
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rbx,%rdi
rorq $6,%r14
xorq %r11,%r13
- xorq %rbx,%r15
+ addq %rdi,%r12
- xorq %r9,%rcx
+ movq %rdx,%rdi
+ addq (%rbp),%r12
xorq %rdx,%r14
- addq %r15,%r12
- movq %r8,%r15
+ xorq %r8,%rdi
rorq $14,%r13
- andq %rdx,%rcx
- andq %r9,%r15
+ movq %r8,%rcx
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rcx
+ xorq %r15,%rcx
addq %r12,%r10
addq %r12,%rcx
- leaq 1(%rdi),%rdi
- addq %r14,%rcx
+ leaq 24(%rbp),%rbp
movq 120(%rsp),%r13
- movq 96(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 96(%rsp),%r15
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%rcx
+ movq %r15,%r14
+ rorq $42,%r15
- rorq $1,%r12
xorq %r12,%r13
- movq 56(%rsp),%r12
-
- rorq $42,%r15
+ shrq $7,%r12
+ rorq $1,%r13
xorq %r14,%r15
shrq $6,%r14
rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ xorq %r13,%r12
+ xorq %r14,%r15
+ addq 56(%rsp),%r12
addq 112(%rsp),%r12
movq %r10,%r13
- addq %r14,%r12
+ addq %r15,%r12
movq %rcx,%r14
rorq $23,%r13
movq %r11,%r15
- movq %r12,112(%rsp)
- rorq $5,%r14
xorq %r10,%r13
+ rorq $5,%r14
xorq %rax,%r15
- rorq $4,%r13
- addq %rbx,%r12
+ movq %r12,112(%rsp)
xorq %rcx,%r14
-
- addq (%rbp,%rdi,8),%r12
andq %r10,%r15
- movq %rdx,%rbx
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rax,%r15
rorq $6,%r14
xorq %r10,%r13
- xorq %rax,%r15
+ addq %r15,%r12
- xorq %r8,%rbx
+ movq %rcx,%r15
+ addq (%rbp),%r12
xorq %rcx,%r14
- addq %r15,%r12
- movq %rdx,%r15
+ xorq %rdx,%r15
rorq $14,%r13
- andq %rcx,%rbx
- andq %r8,%r15
+ movq %rdx,%rbx
+ andq %r15,%rdi
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rbx
+ xorq %rdi,%rbx
addq %r12,%r9
addq %r12,%rbx
- leaq 1(%rdi),%rdi
- addq %r14,%rbx
+ leaq 8(%rbp),%rbp
movq 0(%rsp),%r13
- movq 104(%rsp),%r14
- movq %r13,%r12
- movq %r14,%r15
+ movq 104(%rsp),%rdi
- rorq $7,%r12
- xorq %r13,%r12
- shrq $7,%r13
+ movq %r13,%r12
+ rorq $7,%r13
+ addq %r14,%rbx
+ movq %rdi,%r14
+ rorq $42,%rdi
- rorq $1,%r12
xorq %r12,%r13
- movq 64(%rsp),%r12
-
- rorq $42,%r15
- xorq %r14,%r15
+ shrq $7,%r12
+ rorq $1,%r13
+ xorq %r14,%rdi
shrq $6,%r14
- rorq $19,%r15
- addq %r13,%r12
- xorq %r15,%r14
+ rorq $19,%rdi
+ xorq %r13,%r12
+ xorq %r14,%rdi
+ addq 64(%rsp),%r12
addq 120(%rsp),%r12
movq %r9,%r13
- addq %r14,%r12
+ addq %rdi,%r12
movq %rbx,%r14
rorq $23,%r13
- movq %r10,%r15
- movq %r12,120(%rsp)
+ movq %r10,%rdi
- rorq $5,%r14
xorq %r9,%r13
- xorq %r11,%r15
+ rorq $5,%r14
+ xorq %r11,%rdi
- rorq $4,%r13
- addq %rax,%r12
+ movq %r12,120(%rsp)
xorq %rbx,%r14
+ andq %r9,%rdi
- addq (%rbp,%rdi,8),%r12
- andq %r9,%r15
- movq %rcx,%rax
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %r11,%rdi
rorq $6,%r14
xorq %r9,%r13
- xorq %r11,%r15
+ addq %rdi,%r12
- xorq %rdx,%rax
+ movq %rbx,%rdi
+ addq (%rbp),%r12
xorq %rbx,%r14
- addq %r15,%r12
- movq %rcx,%r15
+ xorq %rcx,%rdi
rorq $14,%r13
- andq %rbx,%rax
- andq %rdx,%r15
+ movq %rcx,%rax
+ andq %rdi,%r15
rorq $28,%r14
addq %r13,%r12
- addq %r15,%rax
+ xorq %r15,%rax
addq %r12,%r8
addq %r12,%rax
- leaq 1(%rdi),%rdi
- addq %r14,%rax
- cmpq $80,%rdi
- jb .Lrounds_16_xx
+ leaq 24(%rbp),%rbp
+ cmpb $0,7(%rbp)
+ jnz .Lrounds_16_xx
movq 128+0(%rsp),%rdi
+ addq %r14,%rax
leaq 128(%rsi),%rsi
addq 0(%rdi),%rax
@@ -1762,42 +1699,86 @@ sha512_block_data_order:
.type K512,@object
K512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
+.quad 0x428a2f98d728ae22,0x7137449123ef65cd
+.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
+.quad 0x3956c25bf348b538,0x59f111f1b605d019
+.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
+.quad 0xd807aa98a3030242,0x12835b0145706fbe
+.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
+.quad 0x983e5152ee66dfab,0xa831c66d2db43210
+.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
+.quad 0x06ca6351e003826f,0x142929670a0e6e70
+.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
+.quad 0x81c2c92e47edaee6,0x92722c851482353b
+.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
+.quad 0xd192e819d6ef5218,0xd69906245565a910
+.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
+.quad 0x90befffa23631e28,0xa4506cebde82bde9
+.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
+.quad 0xca273eceea26619c,0xd186b8c721c0c207
+.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
+.quad 0x113f9804bef90dae,0x1b710b35131c471b
+.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+
+.quad 0x0001020304050607,0x08090a0b0c0d0e0f
+.quad 0x0001020304050607,0x08090a0b0c0d0e0f
+.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
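
The sha512_block_data_order changes above drop the round counter in %rdi and the indexed constant load addq (%rbp,%rdi,8),%r12 in favour of walking %rbp through K512 directly (leaq 8(%rbp) / leaq 24(%rbp) per pair of rounds); the old cmpq $80,%rdi exit test becomes cmpb $0,7(%rbp), which hits zero only in the top byte of the byte-swap mask now appended after the table, and the .quad entries are doubled so SIMD code paths can load two copies of each constant. The round logic itself is unchanged SHA-512. As a cross-check of the rotate counts, a minimal C sketch of the standard FIPS 180-4 round functions (names are illustrative, not part of this patch):

	#include <stdint.h>

	/* Chained-rotate identity used by the assembly, e.g.
	 *   Sigma1(x) = ROTR14(ROTR4(ROTR23(x) ^ x) ^ x)
	 *             = ROTR14(x) ^ ROTR18(x) ^ ROTR41(x)
	 * matching the rorq $23 / rorq $4 / rorq $14 sequence above. */
	static inline uint64_t rotr64(uint64_t x, int n) { return (x >> n) | (x << (64 - n)); }

	static inline uint64_t Sigma0(uint64_t x) { return rotr64(x,28) ^ rotr64(x,34) ^ rotr64(x,39); }
	static inline uint64_t Sigma1(uint64_t x) { return rotr64(x,14) ^ rotr64(x,18) ^ rotr64(x,41); }
	/* message schedule, cf. the rorq $7/$1 and rorq $42/$19 chains */
	static inline uint64_t sigma0(uint64_t x) { return rotr64(x,1)  ^ rotr64(x,8)  ^ (x >> 7); }
	static inline uint64_t sigma1(uint64_t x) { return rotr64(x,19) ^ rotr64(x,61) ^ (x >> 6); }

	/* Ch as the diff computes it: ((f ^ g) & e) ^ g. */
	static inline uint64_t Ch(uint64_t e, uint64_t f, uint64_t g)  { return ((f ^ g) & e) ^ g; }
	/* Maj in the two-gate form the new code uses, carrying one
	 * temporary from round to round instead of two ANDs per round:
	 * ((a ^ b) & (a ^ c)) ^ a == (a & b) ^ (a & c) ^ (b & c). */
	static inline uint64_t Maj(uint64_t a, uint64_t b, uint64_t c) { return ((a ^ b) & (a ^ c)) ^ a; }
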
diff --git a/secure/lib/libcrypto/amd64/vpaes-x86_64.S b/secure/lib/libcrypto/amd64/vpaes-x86_64.S
index 8cb9644..8ec5c40 100644
--- a/secure/lib/libcrypto/amd64/vpaes-x86_64.S
+++ b/secure/lib/libcrypto/amd64/vpaes-x86_64.S
@@ -32,8 +32,8 @@ _vpaes_encrypt_core:
movdqa .Lk_ipt+16(%rip),%xmm0
.byte 102,15,56,0,193
pxor %xmm5,%xmm2
- pxor %xmm2,%xmm0
addq $16,%r9
+ pxor %xmm2,%xmm0
leaq .Lk_mc_backward(%rip),%r10
jmp .Lenc_entry
@@ -41,19 +41,19 @@ _vpaes_encrypt_core:
.Lenc_loop:
movdqa %xmm13,%xmm4
-.byte 102,15,56,0,226
- pxor %xmm5,%xmm4
movdqa %xmm12,%xmm0
+.byte 102,15,56,0,226
.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
+ pxor %xmm5,%xmm4
movdqa %xmm15,%xmm5
-.byte 102,15,56,0,234
+ pxor %xmm4,%xmm0
movdqa -64(%r11,%r10,1),%xmm1
+.byte 102,15,56,0,234
+ movdqa (%r11,%r10,1),%xmm4
movdqa %xmm14,%xmm2
.byte 102,15,56,0,211
- pxor %xmm5,%xmm2
- movdqa (%r11,%r10,1),%xmm4
movdqa %xmm0,%xmm3
+ pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addq $16,%r9
pxor %xmm2,%xmm0
@@ -62,30 +62,30 @@ _vpaes_encrypt_core:
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andq $48,%r11
- pxor %xmm3,%xmm0
subq $1,%rax
+ pxor %xmm3,%xmm0
.Lenc_entry:
movdqa %xmm9,%xmm1
+ movdqa %xmm11,%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm9,%xmm0
- movdqa %xmm11,%xmm5
.byte 102,15,56,0,232
- pxor %xmm1,%xmm0
movdqa %xmm10,%xmm3
+ pxor %xmm1,%xmm0
.byte 102,15,56,0,217
- pxor %xmm5,%xmm3
movdqa %xmm10,%xmm4
+ pxor %xmm5,%xmm3
.byte 102,15,56,0,224
- pxor %xmm5,%xmm4
movdqa %xmm10,%xmm2
+ pxor %xmm5,%xmm4
.byte 102,15,56,0,211
- pxor %xmm0,%xmm2
movdqa %xmm10,%xmm3
- movdqu (%r9),%xmm5
+ pxor %xmm0,%xmm2
.byte 102,15,56,0,220
+ movdqu (%r9),%xmm5
pxor %xmm1,%xmm3
jnz .Lenc_loop
@@ -138,62 +138,61 @@ _vpaes_decrypt_core:
movdqa -32(%r10),%xmm4
+ movdqa -16(%r10),%xmm1
.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa -16(%r10),%xmm0
-.byte 102,15,56,0,195
+.byte 102,15,56,0,203
pxor %xmm4,%xmm0
- addq $16,%r9
-
-.byte 102,15,56,0,197
movdqa 0(%r10),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 16(%r10),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
- subq $1,%rax
+ pxor %xmm1,%xmm0
+ movdqa 16(%r10),%xmm1
-.byte 102,15,56,0,197
- movdqa 32(%r10),%xmm4
.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 48(%r10),%xmm0
-.byte 102,15,56,0,195
+.byte 102,15,56,0,197
+.byte 102,15,56,0,203
pxor %xmm4,%xmm0
+ movdqa 32(%r10),%xmm4
+ pxor %xmm1,%xmm0
+ movdqa 48(%r10),%xmm1
+.byte 102,15,56,0,226
.byte 102,15,56,0,197
+.byte 102,15,56,0,203
+ pxor %xmm4,%xmm0
movdqa 64(%r10),%xmm4
+ pxor %xmm1,%xmm0
+ movdqa 80(%r10),%xmm1
+
.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 80(%r10),%xmm0
-.byte 102,15,56,0,195
+.byte 102,15,56,0,197
+.byte 102,15,56,0,203
pxor %xmm4,%xmm0
-
+ addq $16,%r9
.byte 102,15,58,15,237,12
+ pxor %xmm1,%xmm0
+ subq $1,%rax
.Ldec_entry:
movdqa %xmm9,%xmm1
pandn %xmm0,%xmm1
+ movdqa %xmm11,%xmm2
psrld $4,%xmm1
pand %xmm9,%xmm0
- movdqa %xmm11,%xmm2
.byte 102,15,56,0,208
- pxor %xmm1,%xmm0
movdqa %xmm10,%xmm3
+ pxor %xmm1,%xmm0
.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
movdqa %xmm10,%xmm4
+ pxor %xmm2,%xmm3
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm10,%xmm2
.byte 102,15,56,0,211
- pxor %xmm0,%xmm2
movdqa %xmm10,%xmm3
+ pxor %xmm0,%xmm2
.byte 102,15,56,0,220
- pxor %xmm1,%xmm3
movdqu (%r9),%xmm0
+ pxor %xmm1,%xmm3
jnz .Ldec_loop
@@ -221,7 +220,7 @@ _vpaes_schedule_core:
- call _vpaes_preheat
+ call _vpaes_preheat
movdqa .Lk_rcon(%rip),%xmm8
movdqu (%rdi),%xmm0
@@ -267,7 +266,7 @@ _vpaes_schedule_core:
call _vpaes_schedule_round
decq %rsi
jz .Lschedule_mangle_last
- call _vpaes_schedule_mangle
+ call _vpaes_schedule_mangle
jmp .Loop_schedule_128
@@ -288,7 +287,7 @@ _vpaes_schedule_core:
.align 16
.Lschedule_192:
movdqu 8(%rdi),%xmm0
- call _vpaes_schedule_transform
+ call _vpaes_schedule_transform
movdqa %xmm0,%xmm6
pxor %xmm4,%xmm4
movhlps %xmm4,%xmm6
@@ -297,13 +296,13 @@ _vpaes_schedule_core:
.Loop_schedule_192:
call _vpaes_schedule_round
.byte 102,15,58,15,198,8
- call _vpaes_schedule_mangle
+ call _vpaes_schedule_mangle
call _vpaes_schedule_192_smear
- call _vpaes_schedule_mangle
+ call _vpaes_schedule_mangle
call _vpaes_schedule_round
decq %rsi
jz .Lschedule_mangle_last
- call _vpaes_schedule_mangle
+ call _vpaes_schedule_mangle
call _vpaes_schedule_192_smear
jmp .Loop_schedule_192
@@ -320,18 +319,18 @@ _vpaes_schedule_core:
.align 16
.Lschedule_256:
movdqu 16(%rdi),%xmm0
- call _vpaes_schedule_transform
+ call _vpaes_schedule_transform
movl $7,%esi
.Loop_schedule_256:
- call _vpaes_schedule_mangle
+ call _vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _vpaes_schedule_round
decq %rsi
jz .Lschedule_mangle_last
- call _vpaes_schedule_mangle
+ call _vpaes_schedule_mangle
pshufd $255,%xmm0,%xmm0
@@ -369,7 +368,7 @@ _vpaes_schedule_core:
.Lschedule_mangle_last_dec:
addq $-16,%rdx
pxor .Lk_s63(%rip),%xmm0
- call _vpaes_schedule_transform
+ call _vpaes_schedule_transform
movdqu %xmm0,(%rdx)
@@ -401,12 +400,12 @@ _vpaes_schedule_core:
.type _vpaes_schedule_192_smear,@function
.align 16
_vpaes_schedule_192_smear:
- pshufd $128,%xmm6,%xmm0
- pxor %xmm0,%xmm6
+ pshufd $128,%xmm6,%xmm1
pshufd $254,%xmm7,%xmm0
+ pxor %xmm1,%xmm6
+ pxor %xmm1,%xmm1
pxor %xmm0,%xmm6
movdqa %xmm6,%xmm0
- pxor %xmm1,%xmm1
movhlps %xmm1,%xmm6
.byte 0xf3,0xc3
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
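
The vpaes-x86_64.S hunks above change no computation; they only reschedule instructions, hoisting loads (movdqa/movdqu) and register copies away from the pshufb instructions that consume them (the .byte 102,15,56,0,NN sequences are hand-encoded SSSE3 pshufb opcodes) so that fewer results are consumed by the very next instruction. The underlying idiom is the nibble-split table lookup; a minimal intrinsics sketch, with placeholder tables rather than the real vpaes constants:

	#include <tmmintrin.h>

	/* One 256-entry S-box step done as two 16-entry pshufb lookups,
	 * the core trick of vector-permute AES.  tbl_lo/tbl_hi are
	 * illustrative placeholders, not the .Lk_* constants. */
	static __m128i nibble_lookup(__m128i x, __m128i tbl_lo, __m128i tbl_hi)
	{
	    const __m128i mask = _mm_set1_epi8(0x0f);
	    __m128i lo = _mm_and_si128(x, mask);                    /* pand        */
	    __m128i hi = _mm_and_si128(_mm_srli_epi32(x, 4), mask); /* psrld, pand */
	    return _mm_xor_si128(_mm_shuffle_epi8(tbl_lo, lo),      /* pshufb      */
	                         _mm_shuffle_epi8(tbl_hi, hi));     /* pshufb, pxor */
	}
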
diff --git a/secure/lib/libcrypto/amd64/wp-x86_64.S b/secure/lib/libcrypto/amd64/wp-x86_64.S
index f6cf81c..36f5bc0 100644
--- a/secure/lib/libcrypto/amd64/wp-x86_64.S
+++ b/secure/lib/libcrypto/amd64/wp-x86_64.S
@@ -63,233 +63,236 @@ whirlpool_block:
movq %r15,64+56(%rsp)
xorq %rsi,%rsi
movq %rsi,24(%rbx)
+ jmp .Lround
.align 16
.Lround:
movq 4096(%rbp,%rsi,8),%r8
movl 0(%rsp),%eax
movl 4(%rsp),%ebx
- movb %al,%cl
- movb %ah,%dl
+ movzbl %al,%ecx
+ movzbl %ah,%edx
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r8
movq 7(%rbp,%rdi,8),%r9
- movb %al,%cl
- movb %ah,%dl
movl 0+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
movq 6(%rbp,%rsi,8),%r10
movq 5(%rbp,%rdi,8),%r11
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
movq 4(%rbp,%rsi,8),%r12
movq 3(%rbp,%rdi,8),%r13
- movb %bl,%cl
- movb %bh,%dl
movl 0+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
movq 2(%rbp,%rsi,8),%r14
movq 1(%rbp,%rdi,8),%r15
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r9
xorq 7(%rbp,%rdi,8),%r10
- movb %al,%cl
- movb %ah,%dl
movl 8+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r11
xorq 5(%rbp,%rdi,8),%r12
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r13
xorq 3(%rbp,%rdi,8),%r14
- movb %bl,%cl
- movb %bh,%dl
movl 8+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r15
xorq 1(%rbp,%rdi,8),%r8
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r10
xorq 7(%rbp,%rdi,8),%r11
- movb %al,%cl
- movb %ah,%dl
movl 16+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r12
xorq 5(%rbp,%rdi,8),%r13
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r14
xorq 3(%rbp,%rdi,8),%r15
- movb %bl,%cl
- movb %bh,%dl
movl 16+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r8
xorq 1(%rbp,%rdi,8),%r9
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r11
xorq 7(%rbp,%rdi,8),%r12
- movb %al,%cl
- movb %ah,%dl
movl 24+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r13
xorq 5(%rbp,%rdi,8),%r14
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r15
xorq 3(%rbp,%rdi,8),%r8
- movb %bl,%cl
- movb %bh,%dl
movl 24+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r9
xorq 1(%rbp,%rdi,8),%r10
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r12
xorq 7(%rbp,%rdi,8),%r13
- movb %al,%cl
- movb %ah,%dl
movl 32+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r14
xorq 5(%rbp,%rdi,8),%r15
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r8
xorq 3(%rbp,%rdi,8),%r9
- movb %bl,%cl
- movb %bh,%dl
movl 32+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r10
xorq 1(%rbp,%rdi,8),%r11
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r13
xorq 7(%rbp,%rdi,8),%r14
- movb %al,%cl
- movb %ah,%dl
movl 40+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r15
xorq 5(%rbp,%rdi,8),%r8
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r9
xorq 3(%rbp,%rdi,8),%r10
- movb %bl,%cl
- movb %bh,%dl
movl 40+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r11
xorq 1(%rbp,%rdi,8),%r12
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r14
xorq 7(%rbp,%rdi,8),%r15
- movb %al,%cl
- movb %ah,%dl
movl 48+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r8
xorq 5(%rbp,%rdi,8),%r9
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r10
xorq 3(%rbp,%rdi,8),%r11
- movb %bl,%cl
- movb %bh,%dl
movl 48+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r12
xorq 1(%rbp,%rdi,8),%r13
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r15
xorq 7(%rbp,%rdi,8),%r8
- movb %al,%cl
- movb %ah,%dl
movl 56+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r9
xorq 5(%rbp,%rdi,8),%r10
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r11
xorq 3(%rbp,%rdi,8),%r12
- movb %bl,%cl
- movb %bh,%dl
movl 56+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r13
xorq 1(%rbp,%rdi,8),%r14
movq %r8,0(%rsp)
@@ -300,228 +303,228 @@ whirlpool_block:
movq %r13,40(%rsp)
movq %r14,48(%rsp)
movq %r15,56(%rsp)
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r8
xorq 7(%rbp,%rdi,8),%r9
- movb %al,%cl
- movb %ah,%dl
movl 64+0+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r10
xorq 5(%rbp,%rdi,8),%r11
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r12
xorq 3(%rbp,%rdi,8),%r13
- movb %bl,%cl
- movb %bh,%dl
movl 64+0+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r14
xorq 1(%rbp,%rdi,8),%r15
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r9
xorq 7(%rbp,%rdi,8),%r10
- movb %al,%cl
- movb %ah,%dl
movl 64+8+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r11
xorq 5(%rbp,%rdi,8),%r12
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r13
xorq 3(%rbp,%rdi,8),%r14
- movb %bl,%cl
- movb %bh,%dl
movl 64+8+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r15
xorq 1(%rbp,%rdi,8),%r8
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r10
xorq 7(%rbp,%rdi,8),%r11
- movb %al,%cl
- movb %ah,%dl
movl 64+16+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r12
xorq 5(%rbp,%rdi,8),%r13
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r14
xorq 3(%rbp,%rdi,8),%r15
- movb %bl,%cl
- movb %bh,%dl
movl 64+16+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r8
xorq 1(%rbp,%rdi,8),%r9
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r11
xorq 7(%rbp,%rdi,8),%r12
- movb %al,%cl
- movb %ah,%dl
movl 64+24+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r13
xorq 5(%rbp,%rdi,8),%r14
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r15
xorq 3(%rbp,%rdi,8),%r8
- movb %bl,%cl
- movb %bh,%dl
movl 64+24+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r9
xorq 1(%rbp,%rdi,8),%r10
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r12
xorq 7(%rbp,%rdi,8),%r13
- movb %al,%cl
- movb %ah,%dl
movl 64+32+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r14
xorq 5(%rbp,%rdi,8),%r15
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r8
xorq 3(%rbp,%rdi,8),%r9
- movb %bl,%cl
- movb %bh,%dl
movl 64+32+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r10
xorq 1(%rbp,%rdi,8),%r11
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r13
xorq 7(%rbp,%rdi,8),%r14
- movb %al,%cl
- movb %ah,%dl
movl 64+40+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r15
xorq 5(%rbp,%rdi,8),%r8
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r9
xorq 3(%rbp,%rdi,8),%r10
- movb %bl,%cl
- movb %bh,%dl
movl 64+40+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r11
xorq 1(%rbp,%rdi,8),%r12
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r14
xorq 7(%rbp,%rdi,8),%r15
- movb %al,%cl
- movb %ah,%dl
movl 64+48+8(%rsp),%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r8
xorq 5(%rbp,%rdi,8),%r9
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r10
xorq 3(%rbp,%rdi,8),%r11
- movb %bl,%cl
- movb %bh,%dl
movl 64+48+8+4(%rsp),%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r12
xorq 1(%rbp,%rdi,8),%r13
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%eax
+ movzbl %ah,%edx
xorq 0(%rbp,%rsi,8),%r15
xorq 7(%rbp,%rdi,8),%r8
- movb %al,%cl
- movb %ah,%dl
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %bh,%edx
xorq 6(%rbp,%rsi,8),%r9
xorq 5(%rbp,%rdi,8),%r10
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leaq (%rcx,%rcx,1),%rsi
+ movzbl %bl,%ecx
leaq (%rdx,%rdx,1),%rdi
- shrl $16,%ebx
+ movzbl %bh,%edx
xorq 4(%rbp,%rsi,8),%r11
xorq 3(%rbp,%rdi,8),%r12
- movb %bl,%cl
- movb %bh,%dl
leaq (%rcx,%rcx,1),%rsi
+ movzbl %al,%ecx
leaq (%rdx,%rdx,1),%rdi
+ movzbl %ah,%edx
xorq 2(%rbp,%rsi,8),%r13
xorq 1(%rbp,%rdi,8),%r14
leaq 128(%rsp),%rbx
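
In whirlpool_block the movb %al,%cl / movb %ah,%dl pairs become movzbl %al,%ecx / movzbl %ah,%edx, with the shrl $16 steps pulled ahead of them: a byte write such as movb %al,%cl merges into the old %rcx contents and creates a false dependency (a partial-register stall on some microarchitectures), whereas movzbl writes the whole register. The doubled index (leaq (%rcx,%rcx,1),%rsi) and the 0..7(%rbp,...,8) displacements address the rotated views of the 4KB lookup table. In C terms the loop body is byte-indexed table lookups of roughly this shape (state/table names are illustrative):

	#include <stdint.h>

	/* One column of a Whirlpool-style table round: each byte of the
	 * state picks a 64-bit entry from a rotated table copy.  The
	 * "(x >> 8*i) & 0xff" extraction is what movzbl materializes
	 * without a partial-register write. */
	static uint64_t table_column(const uint64_t T[8][256],
	                             const uint64_t s[8], unsigned col)
	{
	    uint64_t r = 0;
	    for (unsigned i = 0; i < 8; i++) {
	        unsigned b = (unsigned)(s[(col + 8 - i) % 8] >> (8 * i)) & 0xff;
	        r ^= T[i][b];
	    }
	    return r;
	}
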
diff --git a/secure/lib/libcrypto/amd64/x86_64-gf2m.S b/secure/lib/libcrypto/amd64/x86_64-gf2m.S
index 7279c75..f86c253 100644
--- a/secure/lib/libcrypto/amd64/x86_64-gf2m.S
+++ b/secure/lib/libcrypto/amd64/x86_64-gf2m.S
@@ -246,13 +246,13 @@ bn_GF2m_mul_2x2:
movq $15,%r8
movq %rsi,%rax
movq %rcx,%rbp
- call _mul_1x1
+ call _mul_1x1
movq %rax,16(%rsp)
movq %rdx,24(%rsp)
movq 48(%rsp),%rax
movq 64(%rsp),%rbp
- call _mul_1x1
+ call _mul_1x1
movq %rax,0(%rsp)
movq %rdx,8(%rsp)
@@ -260,7 +260,7 @@ bn_GF2m_mul_2x2:
movq 56(%rsp),%rbp
xorq 48(%rsp),%rax
xorq 64(%rsp),%rbp
- call _mul_1x1
+ call _mul_1x1
movq 0(%rsp),%rbx
movq 8(%rsp),%rcx
movq 16(%rsp),%rdi
diff --git a/secure/lib/libcrypto/amd64/x86_64-mont.S b/secure/lib/libcrypto/amd64/x86_64-mont.S
index 5997f3c..bff0fb9 100644
--- a/secure/lib/libcrypto/amd64/x86_64-mont.S
+++ b/secure/lib/libcrypto/amd64/x86_64-mont.S
@@ -1,6 +1,8 @@
# $FreeBSD$
.text
+
+
.globl bn_mul_mont
.type bn_mul_mont,@function
.align 16
@@ -11,7 +13,9 @@ bn_mul_mont:
jb .Lmul_enter
cmpq %rsi,%rdx
jne .Lmul4x_enter
- jmp .Lsqr4x_enter
+ testl $7,%r9d
+ jz .Lsqr8x_enter
+ jmp .Lmul4x_enter
.align 16
.Lmul_enter:
@@ -164,7 +168,7 @@ bn_mul_mont:
leaq 1(%r14),%r14
cmpq %r9,%r14
- jl .Louter
+ jb .Louter
xorq %r14,%r14
movq (%rsp),%rax
@@ -330,7 +334,7 @@ bn_mul4x_mont:
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
- jl .L1st4x
+ jb .L1st4x
mulq %rbx
addq %rax,%r10
@@ -478,7 +482,7 @@ bn_mul4x_mont:
movq %rdi,-32(%rsp,%r15,8)
movq %rdx,%r13
cmpq %r9,%r15
- jl .Linner4x
+ jb .Linner4x
mulq %rbx
addq %rax,%r10
@@ -524,7 +528,7 @@ bn_mul4x_mont:
movq %rdi,(%rsp,%r15,8)
cmpq %r9,%r14
- jl .Louter4x
+ jb .Louter4x
movq 16(%rsp,%r9,8),%rdi
movq 0(%rsp),%rax
pxor %xmm0,%xmm0
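
The jl -> jb changes in bn_mul_mont/bn_mul4x_mont above switch the loop-bottom comparisons from signed ("less") to unsigned ("below"), matching the unsigned word count in %r9; the hunks that follow then retire the 4-way squaring path bn_sqr4x_mont in favour of bn_sqr8x_mont, which sizes its own stack frame (including a 4KB-page aliasing check against the input pointer) and hands the squaring itself to bn_sqr8x_internal. A one-line C analogue of the counter shape, assuming a size_t index:

	#include <stddef.h>
	#include <stdint.h>

	/* With an unsigned (size_t) counter the compiler emits exactly
	 * the cmpq/jb pattern the diff introduces, never jl. */
	static void accumulate(uint64_t *t, const uint64_t *n, size_t num)
	{
	    for (size_t i = 0; i < num; i++)   /* i < num  =>  jb */
	        t[i] += n[i];
	}
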
@@ -606,10 +610,13 @@ bn_mul4x_mont:
.Lmul4x_epilogue:
.byte 0xf3,0xc3
.size bn_mul4x_mont,.-bn_mul4x_mont
-.type bn_sqr4x_mont,@function
-.align 16
-bn_sqr4x_mont:
-.Lsqr4x_enter:
+
+
+.type bn_sqr8x_mont,@function
+.align 32
+bn_sqr8x_mont:
+.Lsqr8x_enter:
+ movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
@@ -617,759 +624,102 @@ bn_sqr4x_mont:
pushq %r14
pushq %r15
+ movl %r9d,%r10d
shll $3,%r9d
- xorq %r10,%r10
- movq %rsp,%r11
- subq %r9,%r10
- movq (%r8),%r8
- leaq -72(%rsp,%r10,2),%rsp
- andq $-1024,%rsp
-
-
-
-
-
-
-
-
-
-
-
- movq %rdi,32(%rsp)
- movq %rcx,40(%rsp)
- movq %r8,48(%rsp)
- movq %r11,56(%rsp)
-.Lsqr4x_body:
-
-
-
-
-
-
-
- leaq 32(%r10),%rbp
- leaq (%rsi,%r9,1),%rsi
-
- movq %r9,%rcx
-
-
- movq -32(%rsi,%rbp,1),%r14
- leaq 64(%rsp,%r9,2),%rdi
- movq -24(%rsi,%rbp,1),%rax
- leaq -32(%rdi,%rbp,1),%rdi
- movq -16(%rsi,%rbp,1),%rbx
- movq %rax,%r15
-
- mulq %r14
- movq %rax,%r10
- movq %rbx,%rax
- movq %rdx,%r11
- movq %r10,-24(%rdi,%rbp,1)
-
- xorq %r10,%r10
- mulq %r14
- addq %rax,%r11
- movq %rbx,%rax
- adcq %rdx,%r10
- movq %r11,-16(%rdi,%rbp,1)
-
- leaq -16(%rbp),%rcx
-
-
- movq 8(%rsi,%rcx,1),%rbx
- mulq %r15
- movq %rax,%r12
- movq %rbx,%rax
- movq %rdx,%r13
-
- xorq %r11,%r11
- addq %r12,%r10
- leaq 16(%rcx),%rcx
- adcq $0,%r11
- mulq %r14
- addq %rax,%r10
- movq %rbx,%rax
- adcq %rdx,%r11
- movq %r10,-8(%rdi,%rcx,1)
- jmp .Lsqr4x_1st
-
-.align 16
-.Lsqr4x_1st:
- movq (%rsi,%rcx,1),%rbx
- xorq %r12,%r12
- mulq %r15
- addq %rax,%r13
- movq %rbx,%rax
- adcq %rdx,%r12
-
- xorq %r10,%r10
- addq %r13,%r11
- adcq $0,%r10
- mulq %r14
- addq %rax,%r11
- movq %rbx,%rax
- adcq %rdx,%r10
- movq %r11,(%rdi,%rcx,1)
-
-
- movq 8(%rsi,%rcx,1),%rbx
- xorq %r13,%r13
- mulq %r15
- addq %rax,%r12
- movq %rbx,%rax
- adcq %rdx,%r13
-
- xorq %r11,%r11
- addq %r12,%r10
- adcq $0,%r11
- mulq %r14
- addq %rax,%r10
- movq %rbx,%rax
- adcq %rdx,%r11
- movq %r10,8(%rdi,%rcx,1)
-
- movq 16(%rsi,%rcx,1),%rbx
- xorq %r12,%r12
- mulq %r15
- addq %rax,%r13
- movq %rbx,%rax
- adcq %rdx,%r12
-
- xorq %r10,%r10
- addq %r13,%r11
- adcq $0,%r10
- mulq %r14
- addq %rax,%r11
- movq %rbx,%rax
- adcq %rdx,%r10
- movq %r11,16(%rdi,%rcx,1)
-
-
- movq 24(%rsi,%rcx,1),%rbx
- xorq %r13,%r13
- mulq %r15
- addq %rax,%r12
- movq %rbx,%rax
- adcq %rdx,%r13
-
- xorq %r11,%r11
- addq %r12,%r10
- leaq 32(%rcx),%rcx
- adcq $0,%r11
- mulq %r14
- addq %rax,%r10
- movq %rbx,%rax
- adcq %rdx,%r11
- movq %r10,-8(%rdi,%rcx,1)
-
- cmpq $0,%rcx
- jne .Lsqr4x_1st
-
- xorq %r12,%r12
- addq %r11,%r13
- adcq $0,%r12
- mulq %r15
- addq %rax,%r13
- adcq %rdx,%r12
-
- movq %r13,(%rdi)
- leaq 16(%rbp),%rbp
- movq %r12,8(%rdi)
- jmp .Lsqr4x_outer
-
-.align 16
-.Lsqr4x_outer:
- movq -32(%rsi,%rbp,1),%r14
- leaq 64(%rsp,%r9,2),%rdi
- movq -24(%rsi,%rbp,1),%rax
- leaq -32(%rdi,%rbp,1),%rdi
- movq -16(%rsi,%rbp,1),%rbx
- movq %rax,%r15
-
- movq -24(%rdi,%rbp,1),%r10
- xorq %r11,%r11
- mulq %r14
- addq %rax,%r10
- movq %rbx,%rax
- adcq %rdx,%r11
- movq %r10,-24(%rdi,%rbp,1)
-
- xorq %r10,%r10
- addq -16(%rdi,%rbp,1),%r11
- adcq $0,%r10
- mulq %r14
- addq %rax,%r11
- movq %rbx,%rax
- adcq %rdx,%r10
- movq %r11,-16(%rdi,%rbp,1)
-
- leaq -16(%rbp),%rcx
- xorq %r12,%r12
-
-
- movq 8(%rsi,%rcx,1),%rbx
- xorq %r13,%r13
- addq 8(%rdi,%rcx,1),%r12
- adcq $0,%r13
- mulq %r15
- addq %rax,%r12
- movq %rbx,%rax
- adcq %rdx,%r13
-
- xorq %r11,%r11
- addq %r12,%r10
- adcq $0,%r11
- mulq %r14
- addq %rax,%r10
- movq %rbx,%rax
- adcq %rdx,%r11
- movq %r10,8(%rdi,%rcx,1)
-
- leaq 16(%rcx),%rcx
- jmp .Lsqr4x_inner
-
-.align 16
-.Lsqr4x_inner:
- movq (%rsi,%rcx,1),%rbx
- xorq %r12,%r12
- addq (%rdi,%rcx,1),%r13
- adcq $0,%r12
- mulq %r15
- addq %rax,%r13
- movq %rbx,%rax
- adcq %rdx,%r12
-
- xorq %r10,%r10
- addq %r13,%r11
- adcq $0,%r10
- mulq %r14
- addq %rax,%r11
- movq %rbx,%rax
- adcq %rdx,%r10
- movq %r11,(%rdi,%rcx,1)
-
- movq 8(%rsi,%rcx,1),%rbx
- xorq %r13,%r13
- addq 8(%rdi,%rcx,1),%r12
- adcq $0,%r13
- mulq %r15
- addq %rax,%r12
- movq %rbx,%rax
- adcq %rdx,%r13
-
- xorq %r11,%r11
- addq %r12,%r10
- leaq 16(%rcx),%rcx
- adcq $0,%r11
- mulq %r14
- addq %rax,%r10
- movq %rbx,%rax
- adcq %rdx,%r11
- movq %r10,-8(%rdi,%rcx,1)
-
- cmpq $0,%rcx
- jne .Lsqr4x_inner
-
- xorq %r12,%r12
- addq %r11,%r13
- adcq $0,%r12
- mulq %r15
- addq %rax,%r13
- adcq %rdx,%r12
+ shlq $3+2,%r10
+ negq %r9
- movq %r13,(%rdi)
- movq %r12,8(%rdi)
- addq $16,%rbp
- jnz .Lsqr4x_outer
- movq -32(%rsi),%r14
- leaq 64(%rsp,%r9,2),%rdi
- movq -24(%rsi),%rax
- leaq -32(%rdi,%rbp,1),%rdi
- movq -16(%rsi),%rbx
- movq %rax,%r15
-
- xorq %r11,%r11
- mulq %r14
- addq %rax,%r10
- movq %rbx,%rax
- adcq %rdx,%r11
- movq %r10,-24(%rdi)
-
- xorq %r10,%r10
- addq %r13,%r11
- adcq $0,%r10
- mulq %r14
- addq %rax,%r11
- movq %rbx,%rax
- adcq %rdx,%r10
- movq %r11,-16(%rdi)
-
- movq -8(%rsi),%rbx
- mulq %r15
- addq %rax,%r12
- movq %rbx,%rax
- adcq $0,%rdx
-
- xorq %r11,%r11
- addq %r12,%r10
- movq %rdx,%r13
- adcq $0,%r11
- mulq %r14
- addq %rax,%r10
- movq %rbx,%rax
- adcq %rdx,%r11
- movq %r10,-8(%rdi)
- xorq %r12,%r12
- addq %r11,%r13
- adcq $0,%r12
- mulq %r15
- addq %rax,%r13
- movq -16(%rsi),%rax
- adcq %rdx,%r12
-
- movq %r13,(%rdi)
- movq %r12,8(%rdi)
-
- mulq %rbx
- addq $16,%rbp
- xorq %r14,%r14
- subq %r9,%rbp
- xorq %r15,%r15
-
- addq %r12,%rax
- adcq $0,%rdx
- movq %rax,8(%rdi)
- movq %rdx,16(%rdi)
- movq %r15,24(%rdi)
-
- movq -16(%rsi,%rbp,1),%rax
- leaq 64(%rsp,%r9,2),%rdi
- xorq %r10,%r10
- movq -24(%rdi,%rbp,2),%r11
-
- leaq (%r14,%r10,2),%r12
- shrq $63,%r10
- leaq (%rcx,%r11,2),%r13
- shrq $63,%r11
- orq %r10,%r13
- movq -16(%rdi,%rbp,2),%r10
- movq %r11,%r14
- mulq %rax
- negq %r15
- movq -8(%rdi,%rbp,2),%r11
- adcq %rax,%r12
- movq -8(%rsi,%rbp,1),%rax
- movq %r12,-32(%rdi,%rbp,2)
- adcq %rdx,%r13
-
- leaq (%r14,%r10,2),%rbx
- movq %r13,-24(%rdi,%rbp,2)
- sbbq %r15,%r15
- shrq $63,%r10
- leaq (%rcx,%r11,2),%r8
- shrq $63,%r11
- orq %r10,%r8
- movq 0(%rdi,%rbp,2),%r10
- movq %r11,%r14
- mulq %rax
- negq %r15
- movq 8(%rdi,%rbp,2),%r11
- adcq %rax,%rbx
- movq 0(%rsi,%rbp,1),%rax
- movq %rbx,-16(%rdi,%rbp,2)
- adcq %rdx,%r8
- leaq 16(%rbp),%rbp
- movq %r8,-40(%rdi,%rbp,2)
- sbbq %r15,%r15
- jmp .Lsqr4x_shift_n_add
-
-.align 16
-.Lsqr4x_shift_n_add:
- leaq (%r14,%r10,2),%r12
- shrq $63,%r10
- leaq (%rcx,%r11,2),%r13
- shrq $63,%r11
- orq %r10,%r13
- movq -16(%rdi,%rbp,2),%r10
- movq %r11,%r14
- mulq %rax
- negq %r15
- movq -8(%rdi,%rbp,2),%r11
- adcq %rax,%r12
- movq -8(%rsi,%rbp,1),%rax
- movq %r12,-32(%rdi,%rbp,2)
- adcq %rdx,%r13
-
- leaq (%r14,%r10,2),%rbx
- movq %r13,-24(%rdi,%rbp,2)
- sbbq %r15,%r15
- shrq $63,%r10
- leaq (%rcx,%r11,2),%r8
- shrq $63,%r11
- orq %r10,%r8
- movq 0(%rdi,%rbp,2),%r10
- movq %r11,%r14
- mulq %rax
- negq %r15
- movq 8(%rdi,%rbp,2),%r11
- adcq %rax,%rbx
- movq 0(%rsi,%rbp,1),%rax
- movq %rbx,-16(%rdi,%rbp,2)
- adcq %rdx,%r8
-
- leaq (%r14,%r10,2),%r12
- movq %r8,-8(%rdi,%rbp,2)
- sbbq %r15,%r15
- shrq $63,%r10
- leaq (%rcx,%r11,2),%r13
- shrq $63,%r11
- orq %r10,%r13
- movq 16(%rdi,%rbp,2),%r10
- movq %r11,%r14
- mulq %rax
- negq %r15
- movq 24(%rdi,%rbp,2),%r11
- adcq %rax,%r12
- movq 8(%rsi,%rbp,1),%rax
- movq %r12,0(%rdi,%rbp,2)
- adcq %rdx,%r13
-
- leaq (%r14,%r10,2),%rbx
- movq %r13,8(%rdi,%rbp,2)
- sbbq %r15,%r15
- shrq $63,%r10
- leaq (%rcx,%r11,2),%r8
- shrq $63,%r11
- orq %r10,%r8
- movq 32(%rdi,%rbp,2),%r10
- movq %r11,%r14
- mulq %rax
- negq %r15
- movq 40(%rdi,%rbp,2),%r11
- adcq %rax,%rbx
- movq 16(%rsi,%rbp,1),%rax
- movq %rbx,16(%rdi,%rbp,2)
- adcq %rdx,%r8
- movq %r8,24(%rdi,%rbp,2)
- sbbq %r15,%r15
- addq $32,%rbp
- jnz .Lsqr4x_shift_n_add
-
- leaq (%r14,%r10,2),%r12
- shrq $63,%r10
- leaq (%rcx,%r11,2),%r13
- shrq $63,%r11
- orq %r10,%r13
- movq -16(%rdi),%r10
- movq %r11,%r14
- mulq %rax
- negq %r15
- movq -8(%rdi),%r11
- adcq %rax,%r12
- movq -8(%rsi),%rax
- movq %r12,-32(%rdi)
- adcq %rdx,%r13
-
- leaq (%r14,%r10,2),%rbx
- movq %r13,-24(%rdi)
- sbbq %r15,%r15
- shrq $63,%r10
- leaq (%rcx,%r11,2),%r8
- shrq $63,%r11
- orq %r10,%r8
- mulq %rax
- negq %r15
- adcq %rax,%rbx
- adcq %rdx,%r8
- movq %rbx,-16(%rdi)
- movq %r8,-8(%rdi)
- movq 40(%rsp),%rsi
- movq 48(%rsp),%r8
- xorq %rcx,%rcx
- movq %r9,0(%rsp)
- subq %r9,%rcx
- movq 64(%rsp),%r10
- movq %r8,%r14
- leaq 64(%rsp,%r9,2),%rax
- leaq 64(%rsp,%r9,1),%rdi
- movq %rax,8(%rsp)
- leaq (%rsi,%r9,1),%rsi
- xorq %rbp,%rbp
-
- movq 0(%rsi,%rcx,1),%rax
- movq 8(%rsi,%rcx,1),%r9
- imulq %r10,%r14
- movq %rax,%rbx
- jmp .Lsqr4x_mont_outer
-
-.align 16
-.Lsqr4x_mont_outer:
- xorq %r11,%r11
- mulq %r14
- addq %rax,%r10
- movq %r9,%rax
- adcq %rdx,%r11
- movq %r8,%r15
-
- xorq %r10,%r10
- addq 8(%rdi,%rcx,1),%r11
- adcq $0,%r10
- mulq %r14
- addq %rax,%r11
- movq %rbx,%rax
- adcq %rdx,%r10
-
- imulq %r11,%r15
-
- movq 16(%rsi,%rcx,1),%rbx
- xorq %r13,%r13
- addq %r11,%r12
- adcq $0,%r13
- mulq %r15
- addq %rax,%r12
- movq %rbx,%rax
- adcq %rdx,%r13
- movq %r12,8(%rdi,%rcx,1)
-
- xorq %r11,%r11
- addq 16(%rdi,%rcx,1),%r10
- adcq $0,%r11
- mulq %r14
- addq %rax,%r10
- movq %r9,%rax
- adcq %rdx,%r11
-
- movq 24(%rsi,%rcx,1),%r9
- xorq %r12,%r12
- addq %r10,%r13
- adcq $0,%r12
- mulq %r15
- addq %rax,%r13
- movq %r9,%rax
- adcq %rdx,%r12
- movq %r13,16(%rdi,%rcx,1)
- xorq %r10,%r10
- addq 24(%rdi,%rcx,1),%r11
+ leaq -64(%rsp,%r9,4),%r11
+ movq (%r8),%r8
+ subq %rsi,%r11
+ andq $4095,%r11
+ cmpq %r11,%r10
+ jb .Lsqr8x_sp_alt
+ subq %r11,%rsp
+ leaq -64(%rsp,%r9,4),%rsp
+ jmp .Lsqr8x_sp_done
+
+.align 32
+.Lsqr8x_sp_alt:
+ leaq 4096-64(,%r9,4),%r10
+ leaq -64(%rsp,%r9,4),%rsp
+ subq %r10,%r11
+ movq $0,%r10
+ cmovcq %r10,%r11
+ subq %r11,%rsp
+.Lsqr8x_sp_done:
+ andq $-64,%rsp
+ movq %r9,%r10
+ negq %r9
+
+ leaq 64(%rsp,%r9,2),%r11
+ movq %r8,32(%rsp)
+ movq %rax,40(%rsp)
+.Lsqr8x_body:
+
+ movq %r9,%rbp
+.byte 102,73,15,110,211
+ shrq $3+2,%rbp
+ movl OPENSSL_ia32cap_P+8(%rip),%eax
+ jmp .Lsqr8x_copy_n
+
+.align 32
+.Lsqr8x_copy_n:
+ movq 0(%rcx),%xmm0
+ movq 8(%rcx),%xmm1
+ movq 16(%rcx),%xmm3
+ movq 24(%rcx),%xmm4
leaq 32(%rcx),%rcx
- adcq $0,%r10
- mulq %r14
- addq %rax,%r11
- movq %rbx,%rax
- adcq %rdx,%r10
- jmp .Lsqr4x_mont_inner
-
-.align 16
-.Lsqr4x_mont_inner:
- movq (%rsi,%rcx,1),%rbx
- xorq %r13,%r13
- addq %r11,%r12
- adcq $0,%r13
- mulq %r15
- addq %rax,%r12
- movq %rbx,%rax
- adcq %rdx,%r13
- movq %r12,-8(%rdi,%rcx,1)
-
- xorq %r11,%r11
- addq (%rdi,%rcx,1),%r10
- adcq $0,%r11
- mulq %r14
- addq %rax,%r10
- movq %r9,%rax
- adcq %rdx,%r11
-
- movq 8(%rsi,%rcx,1),%r9
- xorq %r12,%r12
- addq %r10,%r13
- adcq $0,%r12
- mulq %r15
- addq %rax,%r13
- movq %r9,%rax
- adcq %rdx,%r12
- movq %r13,(%rdi,%rcx,1)
-
- xorq %r10,%r10
- addq 8(%rdi,%rcx,1),%r11
- adcq $0,%r10
- mulq %r14
- addq %rax,%r11
- movq %rbx,%rax
- adcq %rdx,%r10
-
-
- movq 16(%rsi,%rcx,1),%rbx
- xorq %r13,%r13
- addq %r11,%r12
- adcq $0,%r13
- mulq %r15
- addq %rax,%r12
- movq %rbx,%rax
- adcq %rdx,%r13
- movq %r12,8(%rdi,%rcx,1)
-
- xorq %r11,%r11
- addq 16(%rdi,%rcx,1),%r10
- adcq $0,%r11
- mulq %r14
- addq %rax,%r10
- movq %r9,%rax
- adcq %rdx,%r11
-
- movq 24(%rsi,%rcx,1),%r9
- xorq %r12,%r12
- addq %r10,%r13
- adcq $0,%r12
- mulq %r15
- addq %rax,%r13
- movq %r9,%rax
- adcq %rdx,%r12
- movq %r13,16(%rdi,%rcx,1)
+ movdqa %xmm0,0(%r11)
+ movdqa %xmm1,16(%r11)
+ movdqa %xmm3,32(%r11)
+ movdqa %xmm4,48(%r11)
+ leaq 64(%r11),%r11
+ decq %rbp
+ jnz .Lsqr8x_copy_n
- xorq %r10,%r10
- addq 24(%rdi,%rcx,1),%r11
- leaq 32(%rcx),%rcx
- adcq $0,%r10
- mulq %r14
- addq %rax,%r11
- movq %rbx,%rax
- adcq %rdx,%r10
- cmpq $0,%rcx
- jne .Lsqr4x_mont_inner
-
- subq 0(%rsp),%rcx
- movq %r8,%r14
-
- xorq %r13,%r13
- addq %r11,%r12
- adcq $0,%r13
- mulq %r15
- addq %rax,%r12
- movq %r9,%rax
- adcq %rdx,%r13
- movq %r12,-8(%rdi)
-
- xorq %r11,%r11
- addq (%rdi),%r10
- adcq $0,%r11
- movq 0(%rsi,%rcx,1),%rbx
- addq %rbp,%r10
- adcq $0,%r11
+ pxor %xmm0,%xmm0
+.byte 102,72,15,110,207
+.byte 102,73,15,110,218
+ call bn_sqr8x_internal
- imulq 16(%rdi,%rcx,1),%r14
- xorq %r12,%r12
- movq 8(%rsi,%rcx,1),%r9
- addq %r10,%r13
- movq 16(%rdi,%rcx,1),%r10
- adcq $0,%r12
- mulq %r15
- addq %rax,%r13
- movq %rbx,%rax
- adcq %rdx,%r12
- movq %r13,(%rdi)
-
- xorq %rbp,%rbp
- addq 8(%rdi),%r12
- adcq %rbp,%rbp
- addq %r11,%r12
- leaq 16(%rdi),%rdi
- adcq $0,%rbp
- movq %r12,-8(%rdi)
- cmpq 8(%rsp),%rdi
- jb .Lsqr4x_mont_outer
-
- movq 0(%rsp),%r9
- movq %rbp,(%rdi)
- movq 64(%rsp,%r9,1),%rax
- leaq 64(%rsp,%r9,1),%rbx
+ pxor %xmm0,%xmm0
+ leaq 48(%rsp),%rax
+ leaq 64(%rsp,%r9,2),%rdx
+ shrq $3+2,%r9
movq 40(%rsp),%rsi
- shrq $5,%r9
- movq 8(%rbx),%rdx
- xorq %rbp,%rbp
-
- movq 32(%rsp),%rdi
- subq 0(%rsi),%rax
- movq 16(%rbx),%r10
- movq 24(%rbx),%r11
- sbbq 8(%rsi),%rdx
- leaq -1(%r9),%rcx
- jmp .Lsqr4x_sub
-.align 16
-.Lsqr4x_sub:
- movq %rax,0(%rdi,%rbp,8)
- movq %rdx,8(%rdi,%rbp,8)
- sbbq 16(%rsi,%rbp,8),%r10
- movq 32(%rbx,%rbp,8),%rax
- movq 40(%rbx,%rbp,8),%rdx
- sbbq 24(%rsi,%rbp,8),%r11
- movq %r10,16(%rdi,%rbp,8)
- movq %r11,24(%rdi,%rbp,8)
- sbbq 32(%rsi,%rbp,8),%rax
- movq 48(%rbx,%rbp,8),%r10
- movq 56(%rbx,%rbp,8),%r11
- sbbq 40(%rsi,%rbp,8),%rdx
- leaq 4(%rbp),%rbp
- decq %rcx
- jnz .Lsqr4x_sub
-
- movq %rax,0(%rdi,%rbp,8)
- movq 32(%rbx,%rbp,8),%rax
- sbbq 16(%rsi,%rbp,8),%r10
- movq %rdx,8(%rdi,%rbp,8)
- sbbq 24(%rsi,%rbp,8),%r11
- movq %r10,16(%rdi,%rbp,8)
+ jmp .Lsqr8x_zero
+
+.align 32
+.Lsqr8x_zero:
+ movdqa %xmm0,0(%rax)
+ movdqa %xmm0,16(%rax)
+ movdqa %xmm0,32(%rax)
+ movdqa %xmm0,48(%rax)
+ leaq 64(%rax),%rax
+ movdqa %xmm0,0(%rdx)
+ movdqa %xmm0,16(%rdx)
+ movdqa %xmm0,32(%rdx)
+ movdqa %xmm0,48(%rdx)
+ leaq 64(%rdx),%rdx
+ decq %r9
+ jnz .Lsqr8x_zero
- sbbq $0,%rax
- movq %r11,24(%rdi,%rbp,8)
- xorq %rbp,%rbp
- andq %rax,%rbx
- notq %rax
- movq %rdi,%rsi
- andq %rax,%rsi
- leaq -1(%r9),%rcx
- orq %rsi,%rbx
-
- pxor %xmm0,%xmm0
- leaq 64(%rsp,%r9,8),%rsi
- movdqu (%rbx),%xmm1
- leaq (%rsi,%r9,8),%rsi
- movdqa %xmm0,64(%rsp)
- movdqa %xmm0,(%rsi)
- movdqu %xmm1,(%rdi)
- jmp .Lsqr4x_copy
-.align 16
-.Lsqr4x_copy:
- movdqu 16(%rbx,%rbp,1),%xmm2
- movdqu 32(%rbx,%rbp,1),%xmm1
- movdqa %xmm0,80(%rsp,%rbp,1)
- movdqa %xmm0,96(%rsp,%rbp,1)
- movdqa %xmm0,16(%rsi,%rbp,1)
- movdqa %xmm0,32(%rsi,%rbp,1)
- movdqu %xmm2,16(%rdi,%rbp,1)
- movdqu %xmm1,32(%rdi,%rbp,1)
- leaq 32(%rbp),%rbp
- decq %rcx
- jnz .Lsqr4x_copy
-
- movdqu 16(%rbx,%rbp,1),%xmm2
- movdqa %xmm0,80(%rsp,%rbp,1)
- movdqa %xmm0,16(%rsi,%rbp,1)
- movdqu %xmm2,16(%rdi,%rbp,1)
- movq 56(%rsp),%rsi
movq $1,%rax
- movq 0(%rsi),%r15
- movq 8(%rsi),%r14
- movq 16(%rsi),%r13
- movq 24(%rsi),%r12
- movq 32(%rsi),%rbp
- movq 40(%rsi),%rbx
- leaq 48(%rsi),%rsp
-.Lsqr4x_epilogue:
+ movq -48(%rsi),%r15
+ movq -40(%rsi),%r14
+ movq -32(%rsi),%r13
+ movq -24(%rsi),%r12
+ movq -16(%rsi),%rbp
+ movq -8(%rsi),%rbx
+ leaq (%rsi),%rsp
+.Lsqr8x_epilogue:
.byte 0xf3,0xc3
-.size bn_sqr4x_mont,.-bn_sqr4x_mont
+.size bn_sqr8x_mont,.-bn_sqr8x_mont
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 16
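
The rewrite above (bn_sqr4x_mont becoming bn_sqr8x_mont, with the work moved into bn_sqr8x_internal) changes the unroll factor and register scheduling, but the algorithm is still word-serial Montgomery reduction. As a reference point, here is a minimal C sketch of one textbook reduction pass; it assumes GCC/Clang's unsigned __int128, and mont_reduce is an illustrative name, not a symbol from this code.

    #include <stdint.h>
    #include <stddef.h>

    typedef unsigned __int128 u128;            /* GCC/Clang extension */

    /* Reduce a 2n-word product t modulo an odd n-word m, where
     * n0 = -m[0]^-1 mod 2^64.  t carries one spare word (2n+1 words);
     * the result lands in t[n..2n]. */
    static void mont_reduce(uint64_t *t, const uint64_t *m,
                            size_t n, uint64_t n0)
    {
        for (size_t i = 0; i < n; i++) {
            uint64_t u = t[i] * n0;   /* makes t[i] + u*m[0] == 0 mod 2^64 */
            uint64_t c = 0;
            for (size_t j = 0; j < n; j++) {
                u128 acc = (u128)u * m[j] + t[i + j] + c;
                t[i + j] = (uint64_t)acc;
                c = (uint64_t)(acc >> 64);
            }
            for (size_t j = i + n; c != 0 && j <= 2 * n; j++) {
                u128 acc = (u128)t[j] + c;     /* ripple the carry up */
                t[j] = (uint64_t)acc;
                c = (uint64_t)(acc >> 64);
            }
        }
        /* t[n..2n] may still be >= m; the final subtract-if-needed is
         * done branch-free in the assembly (see .Lsqr4x_sub below). */
    }

The assembly fuses this loop with the squaring itself and keeps the final correction branch-free, so neither the branch pattern nor the load addresses depend on the operand values.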
diff --git a/secure/lib/libcrypto/amd64/x86_64-mont5.S b/secure/lib/libcrypto/amd64/x86_64-mont5.S
index b0b3442..6ab0922 100644
--- a/secure/lib/libcrypto/amd64/x86_64-mont5.S
+++ b/secure/lib/libcrypto/amd64/x86_64-mont5.S
@@ -1,19 +1,20 @@
# $FreeBSD$
.text
+
+
.globl bn_mul_mont_gather5
.type bn_mul_mont_gather5,@function
.align 64
bn_mul_mont_gather5:
- testl $3,%r9d
+ testl $7,%r9d
jnz .Lmul_enter
- cmpl $8,%r9d
- jb .Lmul_enter
jmp .Lmul4x_enter
.align 16
.Lmul_enter:
movl %r9d,%r9d
+ movq %rsp,%rax
movl 8(%rsp),%r10d
pushq %rbx
pushq %rbp
@@ -21,7 +22,6 @@ bn_mul_mont_gather5:
pushq %r13
pushq %r14
pushq %r15
- movq %rsp,%rax
leaq 2(%r9),%r11
negq %r11
leaq (%rsp,%r11,8),%rsp
@@ -222,7 +222,7 @@ bn_mul_mont_gather5:
leaq 1(%r14),%r14
cmpq %r9,%r14
- jl .Louter
+ jb .Louter
xorq %r14,%r14
movq (%rsp),%rax
@@ -256,477 +256,1526 @@ bn_mul_mont_gather5:
movq 8(%rsp,%r9,8),%rsi
movq $1,%rax
- movq (%rsi),%r15
- movq 8(%rsi),%r14
- movq 16(%rsi),%r13
- movq 24(%rsi),%r12
- movq 32(%rsi),%rbp
- movq 40(%rsi),%rbx
- leaq 48(%rsi),%rsp
+ movq -48(%rsi),%r15
+ movq -40(%rsi),%r14
+ movq -32(%rsi),%r13
+ movq -24(%rsi),%r12
+ movq -16(%rsi),%rbp
+ movq -8(%rsi),%rbx
+ leaq (%rsi),%rsp
.Lmul_epilogue:
.byte 0xf3,0xc3
.size bn_mul_mont_gather5,.-bn_mul_mont_gather5
.type bn_mul4x_mont_gather5,@function
-.align 16
+.align 32
bn_mul4x_mont_gather5:
.Lmul4x_enter:
- movl %r9d,%r9d
- movl 8(%rsp),%r10d
+.byte 0x67
+ movq %rsp,%rax
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
- movq %rsp,%rax
- leaq 4(%r9),%r11
- negq %r11
- leaq (%rsp,%r11,8),%rsp
- andq $-1024,%rsp
+.byte 0x67
+ movl %r9d,%r10d
+ shll $3,%r9d
+ shll $3+2,%r10d
+ negq %r9
- movq %rax,8(%rsp,%r9,8)
+
+
+
+
+
+
+
+ leaq -64(%rsp,%r9,2),%r11
+ subq %rsi,%r11
+ andq $4095,%r11
+ cmpq %r11,%r10
+ jb .Lmul4xsp_alt
+ subq %r11,%rsp
+ leaq -64(%rsp,%r9,2),%rsp
+ jmp .Lmul4xsp_done
+
+.align 32
+.Lmul4xsp_alt:
+ leaq 4096-64(,%r9,2),%r10
+ leaq -64(%rsp,%r9,2),%rsp
+ subq %r10,%r11
+ movq $0,%r10
+ cmovcq %r10,%r11
+ subq %r11,%rsp
+.Lmul4xsp_done:
+ andq $-64,%rsp
+ negq %r9
+
+ movq %rax,40(%rsp)
.Lmul4x_body:
- movq %rdi,16(%rsp,%r9,8)
- movq %rdx,%r12
+
+ call mul4x_internal
+
+ movq 40(%rsp),%rsi
+ movq $1,%rax
+ movq -48(%rsi),%r15
+ movq -40(%rsi),%r14
+ movq -32(%rsi),%r13
+ movq -24(%rsi),%r12
+ movq -16(%rsi),%rbp
+ movq -8(%rsi),%rbx
+ leaq (%rsi),%rsp
+.Lmul4x_epilogue:
+ .byte 0xf3,0xc3
+.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
+
+.type mul4x_internal,@function
+.align 32
+mul4x_internal:
+ shlq $5,%r9
+ movl 8(%rax),%r10d
+ leaq 256(%rdx,%r9,1),%r13
+	shrq	$5,%r9
movq %r10,%r11
shrq $3,%r10
andq $7,%r11
notq %r10
leaq .Lmagic_masks(%rip),%rax
andq $3,%r10
- leaq 96(%r12,%r11,8),%r12
+ leaq 96(%rdx,%r11,8),%r12
movq 0(%rax,%r10,8),%xmm4
movq 8(%rax,%r10,8),%xmm5
+ addq $7,%r11
movq 16(%rax,%r10,8),%xmm6
movq 24(%rax,%r10,8),%xmm7
+ andq $7,%r11
movq -96(%r12),%xmm0
+ leaq 256(%r12),%r14
movq -32(%r12),%xmm1
pand %xmm4,%xmm0
movq 32(%r12),%xmm2
pand %xmm5,%xmm1
movq 96(%r12),%xmm3
pand %xmm6,%xmm2
+.byte 0x67
por %xmm1,%xmm0
+ movq -96(%r14),%xmm1
+.byte 0x67
pand %xmm7,%xmm3
+.byte 0x67
por %xmm2,%xmm0
- leaq 256(%r12),%r12
+ movq -32(%r14),%xmm2
+.byte 0x67
+ pand %xmm4,%xmm1
+.byte 0x67
por %xmm3,%xmm0
+ movq 32(%r14),%xmm3
.byte 102,72,15,126,195
+ movq 96(%r14),%xmm0
+ movq %r13,16+8(%rsp)
+ movq %rdi,56+8(%rsp)
+
movq (%r8),%r8
movq (%rsi),%rax
-
- xorq %r14,%r14
- xorq %r15,%r15
-
- movq -96(%r12),%xmm0
- movq -32(%r12),%xmm1
- pand %xmm4,%xmm0
- movq 32(%r12),%xmm2
- pand %xmm5,%xmm1
+ leaq (%rsi,%r9,1),%rsi
+ negq %r9
movq %r8,%rbp
mulq %rbx
movq %rax,%r10
movq (%rcx),%rax
- movq 96(%r12),%xmm3
- pand %xmm6,%xmm2
- por %xmm1,%xmm0
- pand %xmm7,%xmm3
+ pand %xmm5,%xmm2
+ pand %xmm6,%xmm3
+ por %xmm2,%xmm1
imulq %r10,%rbp
+
+
+
+
+
+
+
+ leaq 64+8(%rsp,%r11,8),%r14
movq %rdx,%r11
- por %xmm2,%xmm0
- leaq 256(%r12),%r12
- por %xmm3,%xmm0
+ pand %xmm7,%xmm0
+ por %xmm3,%xmm1
+ leaq 512(%r12),%r12
+ por %xmm1,%xmm0
mulq %rbp
addq %rax,%r10
- movq 8(%rsi),%rax
+ movq 8(%rsi,%r9,1),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
- movq 8(%rcx),%rax
+ movq 16(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
- movq 16(%rsi),%rax
+ movq 16(%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
- leaq 4(%r15),%r15
+ leaq 32(%r9),%r15
+ leaq 64(%rcx),%rcx
adcq $0,%rdx
- movq %rdi,(%rsp)
+ movq %rdi,(%r14)
movq %rdx,%r13
jmp .L1st4x
-.align 16
+
+.align 32
.L1st4x:
mulq %rbx
addq %rax,%r10
- movq -16(%rcx,%r15,8),%rax
+ movq -32(%rcx),%rax
+ leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
- movq -8(%rsi,%r15,8),%rax
+ movq -8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
- movq %r13,-24(%rsp,%r15,8)
+ movq %r13,-24(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
- movq -8(%rcx,%r15,8),%rax
+ movq -16(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
- movq (%rsi,%r15,8),%rax
+ movq (%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
- movq %rdi,-16(%rsp,%r15,8)
+ movq %rdi,-16(%r14)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
- movq (%rcx,%r15,8),%rax
+ movq 0(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
- movq 8(%rsi,%r15,8),%rax
+ movq 8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
- movq %r13,-8(%rsp,%r15,8)
+ movq %r13,-8(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
- movq 8(%rcx,%r15,8),%rax
+ movq 16(%rcx),%rax
adcq $0,%rdx
- leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
- movq -16(%rsi,%r15,8),%rax
+ movq 16(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
+ leaq 64(%rcx),%rcx
adcq $0,%rdx
- movq %rdi,-32(%rsp,%r15,8)
+ movq %rdi,(%r14)
movq %rdx,%r13
- cmpq %r9,%r15
- jl .L1st4x
+
+ addq $32,%r15
+ jnz .L1st4x
mulq %rbx
addq %rax,%r10
- movq -16(%rcx,%r15,8),%rax
+ movq -32(%rcx),%rax
+ leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
- movq -8(%rsi,%r15,8),%rax
+ movq -8(%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
- movq %r13,-24(%rsp,%r15,8)
+ movq %r13,-24(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
- movq -8(%rcx,%r15,8),%rax
+ movq -16(%rcx),%rax
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
- movq (%rsi),%rax
+ movq (%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
- movq %rdi,-16(%rsp,%r15,8)
+ movq %rdi,-16(%r14)
movq %rdx,%r13
.byte 102,72,15,126,195
+ leaq (%rcx,%r9,2),%rcx
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
- movq %r13,-8(%rsp,%r15,8)
- movq %rdi,(%rsp,%r15,8)
+ movq %r13,-8(%r14)
- leaq 1(%r14),%r14
-.align 4
-.Louter4x:
- xorq %r15,%r15
- movq -96(%r12),%xmm0
- movq -32(%r12),%xmm1
- pand %xmm4,%xmm0
- movq 32(%r12),%xmm2
- pand %xmm5,%xmm1
+ jmp .Louter4x
- movq (%rsp),%r10
+.align 32
+.Louter4x:
+ movq (%r14,%r9,1),%r10
movq %r8,%rbp
mulq %rbx
addq %rax,%r10
movq (%rcx),%rax
adcq $0,%rdx
+ movq -96(%r12),%xmm0
+ movq -32(%r12),%xmm1
+ pand %xmm4,%xmm0
+ movq 32(%r12),%xmm2
+ pand %xmm5,%xmm1
movq 96(%r12),%xmm3
- pand %xmm6,%xmm2
- por %xmm1,%xmm0
- pand %xmm7,%xmm3
imulq %r10,%rbp
+.byte 0x67
movq %rdx,%r11
+ movq %rdi,(%r14)
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
por %xmm2,%xmm0
+ leaq (%r14,%r9,1),%r14
leaq 256(%r12),%r12
por %xmm3,%xmm0
mulq %rbp
addq %rax,%r10
- movq 8(%rsi),%rax
+ movq 8(%rsi,%r9,1),%rax
adcq $0,%rdx
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
- movq 8(%rcx),%rax
+ movq 16(%rcx),%rax
adcq $0,%rdx
- addq 8(%rsp),%r11
+ addq 8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
- movq 16(%rsi),%rax
+ movq 16(%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
- leaq 4(%r15),%r15
+ leaq 32(%r9),%r15
+ leaq 64(%rcx),%rcx
adcq $0,%rdx
movq %rdx,%r13
jmp .Linner4x
-.align 16
+
+.align 32
.Linner4x:
mulq %rbx
addq %rax,%r10
- movq -16(%rcx,%r15,8),%rax
+ movq -32(%rcx),%rax
adcq $0,%rdx
- addq -16(%rsp,%r15,8),%r10
+ addq 16(%r14),%r10
+ leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
- movq -8(%rsi,%r15,8),%rax
+ movq -8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
- movq %rdi,-32(%rsp,%r15,8)
+ movq %rdi,-32(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
- movq -8(%rcx,%r15,8),%rax
+ movq -16(%rcx),%rax
adcq $0,%rdx
- addq -8(%rsp,%r15,8),%r11
+ addq -8(%r14),%r11
adcq $0,%rdx
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
- movq (%rsi,%r15,8),%rax
+ movq (%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
- movq %r13,-24(%rsp,%r15,8)
+ movq %r13,-24(%r14)
movq %rdx,%r13
mulq %rbx
addq %rax,%r10
- movq (%rcx,%r15,8),%rax
+ movq 0(%rcx),%rax
adcq $0,%rdx
- addq (%rsp,%r15,8),%r10
+ addq (%r14),%r10
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
- movq 8(%rsi,%r15,8),%rax
+ movq 8(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
- movq %rdi,-16(%rsp,%r15,8)
+ movq %rdi,-16(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
- movq 8(%rcx,%r15,8),%rax
+ movq 16(%rcx),%rax
adcq $0,%rdx
- addq 8(%rsp,%r15,8),%r11
+ addq 8(%r14),%r11
adcq $0,%rdx
- leaq 4(%r15),%r15
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
- movq -16(%rsi,%r15,8),%rax
+ movq 16(%rsi,%r15,1),%rax
adcq $0,%rdx
addq %r11,%rdi
+ leaq 64(%rcx),%rcx
adcq $0,%rdx
- movq %r13,-40(%rsp,%r15,8)
+ movq %r13,-8(%r14)
movq %rdx,%r13
- cmpq %r9,%r15
- jl .Linner4x
+
+ addq $32,%r15
+ jnz .Linner4x
mulq %rbx
addq %rax,%r10
- movq -16(%rcx,%r15,8),%rax
+ movq -32(%rcx),%rax
adcq $0,%rdx
- addq -16(%rsp,%r15,8),%r10
+ addq 16(%r14),%r10
+ leaq 32(%r14),%r14
adcq $0,%rdx
movq %rdx,%r11
mulq %rbp
addq %rax,%r13
- movq -8(%rsi,%r15,8),%rax
+ movq -8(%rsi),%rax
adcq $0,%rdx
addq %r10,%r13
adcq $0,%rdx
- movq %rdi,-32(%rsp,%r15,8)
+ movq %rdi,-32(%r14)
movq %rdx,%rdi
mulq %rbx
addq %rax,%r11
- movq -8(%rcx,%r15,8),%rax
+ movq %rbp,%rax
+ movq -16(%rcx),%rbp
adcq $0,%rdx
- addq -8(%rsp,%r15,8),%r11
+ addq -8(%r14),%r11
adcq $0,%rdx
- leaq 1(%r14),%r14
movq %rdx,%r10
mulq %rbp
addq %rax,%rdi
- movq (%rsi),%rax
+ movq (%rsi,%r9,1),%rax
adcq $0,%rdx
addq %r11,%rdi
adcq $0,%rdx
- movq %r13,-24(%rsp,%r15,8)
+ movq %r13,-24(%r14)
movq %rdx,%r13
.byte 102,72,15,126,195
- movq %rdi,-16(%rsp,%r15,8)
+ movq %rdi,-16(%r14)
+ leaq (%rcx,%r9,2),%rcx
xorq %rdi,%rdi
addq %r10,%r13
adcq $0,%rdi
- addq (%rsp,%r9,8),%r13
+ addq (%r14),%r13
adcq $0,%rdi
- movq %r13,-8(%rsp,%r15,8)
- movq %rdi,(%rsp,%r15,8)
+ movq %r13,-8(%r14)
+
+ cmpq 16+8(%rsp),%r12
+ jb .Louter4x
+ subq %r13,%rbp
+ adcq %r15,%r15
+ orq %r15,%rdi
+ xorq $1,%rdi
+ leaq (%r14,%r9,1),%rbx
+ leaq (%rcx,%rdi,8),%rbp
+ movq %r9,%rcx
+ sarq $3+2,%rcx
+ movq 56+8(%rsp),%rdi
+ jmp .Lsqr4x_sub
+.size mul4x_internal,.-mul4x_internal
+.globl bn_power5
+.type bn_power5,@function
+.align 32
+bn_power5:
+ movq %rsp,%rax
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ movl %r9d,%r10d
+ shll $3,%r9d
+ shll $3+2,%r10d
+ negq %r9
+ movq (%r8),%r8
- cmpq %r9,%r14
- jl .Louter4x
- movq 16(%rsp,%r9,8),%rdi
- movq 0(%rsp),%rax
- pxor %xmm0,%xmm0
- movq 8(%rsp),%rdx
- shrq $2,%r9
- leaq (%rsp),%rsi
- xorq %r14,%r14
- subq 0(%rcx),%rax
- movq 16(%rsi),%rbx
- movq 24(%rsi),%rbp
- sbbq 8(%rcx),%rdx
- leaq -1(%r9),%r15
- jmp .Lsub4x
-.align 16
-.Lsub4x:
- movq %rax,0(%rdi,%r14,8)
- movq %rdx,8(%rdi,%r14,8)
- sbbq 16(%rcx,%r14,8),%rbx
- movq 32(%rsi,%r14,8),%rax
- movq 40(%rsi,%r14,8),%rdx
- sbbq 24(%rcx,%r14,8),%rbp
- movq %rbx,16(%rdi,%r14,8)
- movq %rbp,24(%rdi,%r14,8)
- sbbq 32(%rcx,%r14,8),%rax
- movq 48(%rsi,%r14,8),%rbx
- movq 56(%rsi,%r14,8),%rbp
- sbbq 40(%rcx,%r14,8),%rdx
- leaq 4(%r14),%r14
- decq %r15
- jnz .Lsub4x
- movq %rax,0(%rdi,%r14,8)
- movq 32(%rsi,%r14,8),%rax
- sbbq 16(%rcx,%r14,8),%rbx
- movq %rdx,8(%rdi,%r14,8)
- sbbq 24(%rcx,%r14,8),%rbp
- movq %rbx,16(%rdi,%r14,8)
- sbbq $0,%rax
- movq %rbp,24(%rdi,%r14,8)
+
+
+
+ leaq -64(%rsp,%r9,2),%r11
+ subq %rsi,%r11
+ andq $4095,%r11
+ cmpq %r11,%r10
+ jb .Lpwr_sp_alt
+ subq %r11,%rsp
+ leaq -64(%rsp,%r9,2),%rsp
+ jmp .Lpwr_sp_done
+
+.align 32
+.Lpwr_sp_alt:
+ leaq 4096-64(,%r9,2),%r10
+ leaq -64(%rsp,%r9,2),%rsp
+ subq %r10,%r11
+ movq $0,%r10
+ cmovcq %r10,%r11
+ subq %r11,%rsp
+.Lpwr_sp_done:
+ andq $-64,%rsp
+ movq %r9,%r10
+ negq %r9
+
+
+
+
+
+
+
+
+
+
+ movq %r8,32(%rsp)
+ movq %rax,40(%rsp)
+.Lpower5_body:
+.byte 102,72,15,110,207
+.byte 102,72,15,110,209
+.byte 102,73,15,110,218
+.byte 102,72,15,110,226
+
+ call __bn_sqr8x_internal
+ call __bn_sqr8x_internal
+ call __bn_sqr8x_internal
+ call __bn_sqr8x_internal
+ call __bn_sqr8x_internal
+
+.byte 102,72,15,126,209
+.byte 102,72,15,126,226
+ movq %rsi,%rdi
+ movq 40(%rsp),%rax
+ leaq 32(%rsp),%r8
+
+ call mul4x_internal
+
+ movq 40(%rsp),%rsi
+ movq $1,%rax
+ movq -48(%rsi),%r15
+ movq -40(%rsi),%r14
+ movq -32(%rsi),%r13
+ movq -24(%rsi),%r12
+ movq -16(%rsi),%rbp
+ movq -8(%rsi),%rbx
+ leaq (%rsi),%rsp
+.Lpower5_epilogue:
+ .byte 0xf3,0xc3
+.size bn_power5,.-bn_power5
+
+.globl bn_sqr8x_internal
+.hidden bn_sqr8x_internal
+.type bn_sqr8x_internal,@function
+.align 32
+bn_sqr8x_internal:
+__bn_sqr8x_internal:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ leaq 32(%r10),%rbp
+ leaq (%rsi,%r9,1),%rsi
+
+ movq %r9,%rcx
+
+
+ movq -32(%rsi,%rbp,1),%r14
+ leaq 48+8(%rsp,%r9,2),%rdi
+ movq -24(%rsi,%rbp,1),%rax
+ leaq -32(%rdi,%rbp,1),%rdi
+ movq -16(%rsi,%rbp,1),%rbx
+ movq %rax,%r15
+
+ mulq %r14
+ movq %rax,%r10
+ movq %rbx,%rax
+ movq %rdx,%r11
+ movq %r10,-24(%rdi,%rbp,1)
+
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ adcq $0,%rdx
+ movq %r11,-16(%rdi,%rbp,1)
+ movq %rdx,%r10
+
+
+ movq -8(%rsi,%rbp,1),%rbx
+ mulq %r15
+ movq %rax,%r12
+ movq %rbx,%rax
+ movq %rdx,%r13
+
+ leaq (%rbp),%rcx
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ movq %rdx,%r11
+ adcq $0,%r11
+ addq %r12,%r10
+ adcq $0,%r11
+ movq %r10,-8(%rdi,%rcx,1)
+ jmp .Lsqr4x_1st
+
+.align 32
+.Lsqr4x_1st:
+ movq (%rsi,%rcx,1),%rbx
+ mulq %r15
+ addq %rax,%r13
+ movq %rbx,%rax
+ movq %rdx,%r12
+ adcq $0,%r12
+
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ movq 8(%rsi,%rcx,1),%rbx
+ movq %rdx,%r10
+ adcq $0,%r10
+ addq %r13,%r11
+ adcq $0,%r10
+
+
+ mulq %r15
+ addq %rax,%r12
+ movq %rbx,%rax
+ movq %r11,(%rdi,%rcx,1)
+ movq %rdx,%r13
+ adcq $0,%r13
+
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ movq 16(%rsi,%rcx,1),%rbx
+ movq %rdx,%r11
+ adcq $0,%r11
+ addq %r12,%r10
+ adcq $0,%r11
+
+ mulq %r15
+ addq %rax,%r13
+ movq %rbx,%rax
+ movq %r10,8(%rdi,%rcx,1)
+ movq %rdx,%r12
+ adcq $0,%r12
+
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ movq 24(%rsi,%rcx,1),%rbx
+ movq %rdx,%r10
+ adcq $0,%r10
+ addq %r13,%r11
+ adcq $0,%r10
+
+
+ mulq %r15
+ addq %rax,%r12
+ movq %rbx,%rax
+ movq %r11,16(%rdi,%rcx,1)
+ movq %rdx,%r13
+ adcq $0,%r13
+ leaq 32(%rcx),%rcx
+
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ movq %rdx,%r11
+ adcq $0,%r11
+ addq %r12,%r10
+ adcq $0,%r11
+ movq %r10,-8(%rdi,%rcx,1)
+
+ cmpq $0,%rcx
+ jne .Lsqr4x_1st
+
+ mulq %r15
+ addq %rax,%r13
+ leaq 16(%rbp),%rbp
+ adcq $0,%rdx
+ addq %r11,%r13
+ adcq $0,%rdx
+
+ movq %r13,(%rdi)
+ movq %rdx,%r12
+ movq %rdx,8(%rdi)
+ jmp .Lsqr4x_outer
+
+.align 32
+.Lsqr4x_outer:
+ movq -32(%rsi,%rbp,1),%r14
+ leaq 48+8(%rsp,%r9,2),%rdi
+ movq -24(%rsi,%rbp,1),%rax
+ leaq -32(%rdi,%rbp,1),%rdi
+ movq -16(%rsi,%rbp,1),%rbx
+ movq %rax,%r15
+
+ mulq %r14
+ movq -24(%rdi,%rbp,1),%r10
+ addq %rax,%r10
+ movq %rbx,%rax
+ adcq $0,%rdx
+ movq %r10,-24(%rdi,%rbp,1)
+ movq %rdx,%r11
+
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ adcq $0,%rdx
+ addq -16(%rdi,%rbp,1),%r11
+ movq %rdx,%r10
+ adcq $0,%r10
+ movq %r11,-16(%rdi,%rbp,1)
+
+ xorq %r12,%r12
+
+ movq -8(%rsi,%rbp,1),%rbx
+ mulq %r15
+ addq %rax,%r12
+ movq %rbx,%rax
+ adcq $0,%rdx
+ addq -8(%rdi,%rbp,1),%r12
+ movq %rdx,%r13
+ adcq $0,%r13
+
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ adcq $0,%rdx
+ addq %r12,%r10
+ movq %rdx,%r11
+ adcq $0,%r11
+ movq %r10,-8(%rdi,%rbp,1)
+
+ leaq (%rbp),%rcx
+ jmp .Lsqr4x_inner
+
+.align 32
+.Lsqr4x_inner:
+ movq (%rsi,%rcx,1),%rbx
+ mulq %r15
+ addq %rax,%r13
+ movq %rbx,%rax
+ movq %rdx,%r12
+ adcq $0,%r12
+ addq (%rdi,%rcx,1),%r13
+ adcq $0,%r12
+
+.byte 0x67
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ movq 8(%rsi,%rcx,1),%rbx
+ movq %rdx,%r10
+ adcq $0,%r10
+ addq %r13,%r11
+ adcq $0,%r10
+
+ mulq %r15
+ addq %rax,%r12
+ movq %r11,(%rdi,%rcx,1)
+ movq %rbx,%rax
+ movq %rdx,%r13
+ adcq $0,%r13
+ addq 8(%rdi,%rcx,1),%r12
+ leaq 16(%rcx),%rcx
+ adcq $0,%r13
+
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ adcq $0,%rdx
+ addq %r12,%r10
+ movq %rdx,%r11
+ adcq $0,%r11
+ movq %r10,-8(%rdi,%rcx,1)
+
+ cmpq $0,%rcx
+ jne .Lsqr4x_inner
+
+.byte 0x67
+ mulq %r15
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %r11,%r13
+ adcq $0,%rdx
+
+ movq %r13,(%rdi)
+ movq %rdx,%r12
+ movq %rdx,8(%rdi)
+
+ addq $16,%rbp
+ jnz .Lsqr4x_outer
+
+
+ movq -32(%rsi),%r14
+ leaq 48+8(%rsp,%r9,2),%rdi
+ movq -24(%rsi),%rax
+ leaq -32(%rdi,%rbp,1),%rdi
+ movq -16(%rsi),%rbx
+ movq %rax,%r15
+
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ movq %rdx,%r11
+ adcq $0,%r11
+
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ movq %r10,-24(%rdi)
+ movq %rdx,%r10
+ adcq $0,%r10
+ addq %r13,%r11
+ movq -8(%rsi),%rbx
+ adcq $0,%r10
+
+ mulq %r15
+ addq %rax,%r12
+ movq %rbx,%rax
+ movq %r11,-16(%rdi)
+ movq %rdx,%r13
+ adcq $0,%r13
+
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ movq %rdx,%r11
+ adcq $0,%r11
+ addq %r12,%r10
+ adcq $0,%r11
+ movq %r10,-8(%rdi)
+
+ mulq %r15
+ addq %rax,%r13
+ movq -16(%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%r13
+ adcq $0,%rdx
+
+ movq %r13,(%rdi)
+ movq %rdx,%r12
+ movq %rdx,8(%rdi)
+
+ mulq %rbx
+ addq $16,%rbp
xorq %r14,%r14
- andq %rax,%rsi
- notq %rax
- movq %rdi,%rcx
- andq %rax,%rcx
- leaq -1(%r9),%r15
- orq %rcx,%rsi
+ subq %r9,%rbp
+ xorq %r15,%r15
+
+ addq %r12,%rax
+ adcq $0,%rdx
+ movq %rax,8(%rdi)
+ movq %rdx,16(%rdi)
+ movq %r15,24(%rdi)
+
+ movq -16(%rsi,%rbp,1),%rax
+ leaq 48+8(%rsp),%rdi
+ xorq %r10,%r10
+ movq 8(%rdi),%r11
+
+ leaq (%r14,%r10,2),%r12
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r13
+ shrq $63,%r11
+ orq %r10,%r13
+ movq 16(%rdi),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq 24(%rdi),%r11
+ adcq %rax,%r12
+ movq -8(%rsi,%rbp,1),%rax
+ movq %r12,(%rdi)
+ adcq %rdx,%r13
+
+ leaq (%r14,%r10,2),%rbx
+ movq %r13,8(%rdi)
+ sbbq %r15,%r15
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r8
+ shrq $63,%r11
+ orq %r10,%r8
+ movq 32(%rdi),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq 40(%rdi),%r11
+ adcq %rax,%rbx
+ movq 0(%rsi,%rbp,1),%rax
+ movq %rbx,16(%rdi)
+ adcq %rdx,%r8
+ leaq 16(%rbp),%rbp
+ movq %r8,24(%rdi)
+ sbbq %r15,%r15
+ leaq 64(%rdi),%rdi
+ jmp .Lsqr4x_shift_n_add
+
+.align 32
+.Lsqr4x_shift_n_add:
+ leaq (%r14,%r10,2),%r12
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r13
+ shrq $63,%r11
+ orq %r10,%r13
+ movq -16(%rdi),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq -8(%rdi),%r11
+ adcq %rax,%r12
+ movq -8(%rsi,%rbp,1),%rax
+ movq %r12,-32(%rdi)
+ adcq %rdx,%r13
+
+ leaq (%r14,%r10,2),%rbx
+ movq %r13,-24(%rdi)
+ sbbq %r15,%r15
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r8
+ shrq $63,%r11
+ orq %r10,%r8
+ movq 0(%rdi),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq 8(%rdi),%r11
+ adcq %rax,%rbx
+ movq 0(%rsi,%rbp,1),%rax
+ movq %rbx,-16(%rdi)
+ adcq %rdx,%r8
+
+ leaq (%r14,%r10,2),%r12
+ movq %r8,-8(%rdi)
+ sbbq %r15,%r15
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r13
+ shrq $63,%r11
+ orq %r10,%r13
+ movq 16(%rdi),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq 24(%rdi),%r11
+ adcq %rax,%r12
+ movq 8(%rsi,%rbp,1),%rax
+ movq %r12,0(%rdi)
+ adcq %rdx,%r13
+
+ leaq (%r14,%r10,2),%rbx
+ movq %r13,8(%rdi)
+ sbbq %r15,%r15
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r8
+ shrq $63,%r11
+ orq %r10,%r8
+ movq 32(%rdi),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq 40(%rdi),%r11
+ adcq %rax,%rbx
+ movq 16(%rsi,%rbp,1),%rax
+ movq %rbx,16(%rdi)
+ adcq %rdx,%r8
+ movq %r8,24(%rdi)
+ sbbq %r15,%r15
+ leaq 64(%rdi),%rdi
+ addq $32,%rbp
+ jnz .Lsqr4x_shift_n_add
+
+ leaq (%r14,%r10,2),%r12
+.byte 0x67
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r13
+ shrq $63,%r11
+ orq %r10,%r13
+ movq -16(%rdi),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq -8(%rdi),%r11
+ adcq %rax,%r12
+ movq -8(%rsi),%rax
+ movq %r12,-32(%rdi)
+ adcq %rdx,%r13
+
+ leaq (%r14,%r10,2),%rbx
+ movq %r13,-24(%rdi)
+ sbbq %r15,%r15
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r8
+ shrq $63,%r11
+ orq %r10,%r8
+ mulq %rax
+ negq %r15
+ adcq %rax,%rbx
+ adcq %rdx,%r8
+ movq %rbx,-16(%rdi)
+ movq %r8,-8(%rdi)
+.byte 102,72,15,126,213
+sqr8x_reduction:
+ xorq %rax,%rax
+ leaq (%rbp,%r9,2),%rcx
+ leaq 48+8(%rsp,%r9,2),%rdx
+ movq %rcx,0+8(%rsp)
+ leaq 48+8(%rsp,%r9,1),%rdi
+ movq %rdx,8+8(%rsp)
+ negq %r9
+ jmp .L8x_reduction_loop
+
+.align 32
+.L8x_reduction_loop:
+ leaq (%rdi,%r9,1),%rdi
+.byte 0x66
+ movq 0(%rdi),%rbx
+ movq 8(%rdi),%r9
+ movq 16(%rdi),%r10
+ movq 24(%rdi),%r11
+ movq 32(%rdi),%r12
+ movq 40(%rdi),%r13
+ movq 48(%rdi),%r14
+ movq 56(%rdi),%r15
+ movq %rax,(%rdx)
+ leaq 64(%rdi),%rdi
+
+.byte 0x67
+ movq %rbx,%r8
+ imulq 32+8(%rsp),%rbx
+ movq 0(%rbp),%rax
+ movl $8,%ecx
+ jmp .L8x_reduce
+
+.align 32
+.L8x_reduce:
+ mulq %rbx
+ movq 16(%rbp),%rax
+ negq %r8
+ movq %rdx,%r8
+ adcq $0,%r8
+
+ mulq %rbx
+ addq %rax,%r9
+ movq 32(%rbp),%rax
+ adcq $0,%rdx
+ addq %r9,%r8
+ movq %rbx,48-8+8(%rsp,%rcx,8)
+ movq %rdx,%r9
+ adcq $0,%r9
+
+ mulq %rbx
+ addq %rax,%r10
+ movq 48(%rbp),%rax
+ adcq $0,%rdx
+ addq %r10,%r9
+ movq 32+8(%rsp),%rsi
+ movq %rdx,%r10
+ adcq $0,%r10
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 64(%rbp),%rax
+ adcq $0,%rdx
+ imulq %r8,%rsi
+ addq %r11,%r10
+ movq %rdx,%r11
+ adcq $0,%r11
+ mulq %rbx
+ addq %rax,%r12
+ movq 80(%rbp),%rax
+ adcq $0,%rdx
+ addq %r12,%r11
+ movq %rdx,%r12
+ adcq $0,%r12
+
+ mulq %rbx
+ addq %rax,%r13
+ movq 96(%rbp),%rax
+ adcq $0,%rdx
+ addq %r13,%r12
+ movq %rdx,%r13
+ adcq $0,%r13
+
+ mulq %rbx
+ addq %rax,%r14
+ movq 112(%rbp),%rax
+ adcq $0,%rdx
+ addq %r14,%r13
+ movq %rdx,%r14
+ adcq $0,%r14
+
+ mulq %rbx
+ movq %rsi,%rbx
+ addq %rax,%r15
+ movq 0(%rbp),%rax
+ adcq $0,%rdx
+ addq %r15,%r14
+ movq %rdx,%r15
+ adcq $0,%r15
+
+ decl %ecx
+ jnz .L8x_reduce
+
+ leaq 128(%rbp),%rbp
+ xorq %rax,%rax
+ movq 8+8(%rsp),%rdx
+ cmpq 0+8(%rsp),%rbp
+ jae .L8x_no_tail
+
+.byte 0x66
+ addq 0(%rdi),%r8
+ adcq 8(%rdi),%r9
+ adcq 16(%rdi),%r10
+ adcq 24(%rdi),%r11
+ adcq 32(%rdi),%r12
+ adcq 40(%rdi),%r13
+ adcq 48(%rdi),%r14
+ adcq 56(%rdi),%r15
+ sbbq %rsi,%rsi
+
+ movq 48+56+8(%rsp),%rbx
+ movl $8,%ecx
+ movq 0(%rbp),%rax
+ jmp .L8x_tail
+
+.align 32
+.L8x_tail:
+ mulq %rbx
+ addq %rax,%r8
+ movq 16(%rbp),%rax
+ movq %r8,(%rdi)
+ movq %rdx,%r8
+ adcq $0,%r8
+
+ mulq %rbx
+ addq %rax,%r9
+ movq 32(%rbp),%rax
+ adcq $0,%rdx
+ addq %r9,%r8
+ leaq 8(%rdi),%rdi
+ movq %rdx,%r9
+ adcq $0,%r9
+
+ mulq %rbx
+ addq %rax,%r10
+ movq 48(%rbp),%rax
+ adcq $0,%rdx
+ addq %r10,%r9
+ movq %rdx,%r10
+ adcq $0,%r10
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 64(%rbp),%rax
+ adcq $0,%rdx
+ addq %r11,%r10
+ movq %rdx,%r11
+ adcq $0,%r11
+
+ mulq %rbx
+ addq %rax,%r12
+ movq 80(%rbp),%rax
+ adcq $0,%rdx
+ addq %r12,%r11
+ movq %rdx,%r12
+ adcq $0,%r12
+
+ mulq %rbx
+ addq %rax,%r13
+ movq 96(%rbp),%rax
+ adcq $0,%rdx
+ addq %r13,%r12
+ movq %rdx,%r13
+ adcq $0,%r13
+
+ mulq %rbx
+ addq %rax,%r14
+ movq 112(%rbp),%rax
+ adcq $0,%rdx
+ addq %r14,%r13
+ movq %rdx,%r14
+ adcq $0,%r14
+
+ mulq %rbx
+ movq 48-16+8(%rsp,%rcx,8),%rbx
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %r15,%r14
+ movq 0(%rbp),%rax
+ movq %rdx,%r15
+ adcq $0,%r15
+
+ decl %ecx
+ jnz .L8x_tail
+
+ leaq 128(%rbp),%rbp
+ movq 8+8(%rsp),%rdx
+ cmpq 0+8(%rsp),%rbp
+ jae .L8x_tail_done
+
+ movq 48+56+8(%rsp),%rbx
+ negq %rsi
+ movq 0(%rbp),%rax
+ adcq 0(%rdi),%r8
+ adcq 8(%rdi),%r9
+ adcq 16(%rdi),%r10
+ adcq 24(%rdi),%r11
+ adcq 32(%rdi),%r12
+ adcq 40(%rdi),%r13
+ adcq 48(%rdi),%r14
+ adcq 56(%rdi),%r15
+ sbbq %rsi,%rsi
+
+ movl $8,%ecx
+ jmp .L8x_tail
+
+.align 32
+.L8x_tail_done:
+ addq (%rdx),%r8
+ xorq %rax,%rax
+
+ negq %rsi
+.L8x_no_tail:
+ adcq 0(%rdi),%r8
+ adcq 8(%rdi),%r9
+ adcq 16(%rdi),%r10
+ adcq 24(%rdi),%r11
+ adcq 32(%rdi),%r12
+ adcq 40(%rdi),%r13
+ adcq 48(%rdi),%r14
+ adcq 56(%rdi),%r15
+ adcq $0,%rax
+ movq -16(%rbp),%rcx
+ xorq %rsi,%rsi
+
+.byte 102,72,15,126,213
+
+ movq %r8,0(%rdi)
+ movq %r9,8(%rdi)
+.byte 102,73,15,126,217
+ movq %r10,16(%rdi)
+ movq %r11,24(%rdi)
+ movq %r12,32(%rdi)
+ movq %r13,40(%rdi)
+ movq %r14,48(%rdi)
+ movq %r15,56(%rdi)
+ leaq 64(%rdi),%rdi
+
+ cmpq %rdx,%rdi
+ jb .L8x_reduction_loop
+
+ subq %r15,%rcx
+ leaq (%rdi,%r9,1),%rbx
+ adcq %rsi,%rsi
+ movq %r9,%rcx
+ orq %rsi,%rax
+.byte 102,72,15,126,207
+ xorq $1,%rax
+.byte 102,72,15,126,206
+ leaq (%rbp,%rax,8),%rbp
+ sarq $3+2,%rcx
+ jmp .Lsqr4x_sub
+
+.align 32
+.Lsqr4x_sub:
+.byte 0x66
+ movq 0(%rbx),%r12
+ movq 8(%rbx),%r13
+ sbbq 0(%rbp),%r12
+ movq 16(%rbx),%r14
+ sbbq 16(%rbp),%r13
+ movq 24(%rbx),%r15
+ leaq 32(%rbx),%rbx
+ sbbq 32(%rbp),%r14
+ movq %r12,0(%rdi)
+ sbbq 48(%rbp),%r15
+ leaq 64(%rbp),%rbp
+ movq %r13,8(%rdi)
+ movq %r14,16(%rdi)
+ movq %r15,24(%rdi)
+ leaq 32(%rdi),%rdi
+
+ incq %rcx
+ jnz .Lsqr4x_sub
+ movq %r9,%r10
+ negq %r9
+ .byte 0xf3,0xc3
+.size bn_sqr8x_internal,.-bn_sqr8x_internal
+.globl bn_from_montgomery
+.type bn_from_montgomery,@function
+.align 32
+bn_from_montgomery:
+ testl $7,%r9d
+ jz bn_from_mont8x
+ xorl %eax,%eax
+ .byte 0xf3,0xc3
+.size bn_from_montgomery,.-bn_from_montgomery
+
+.type bn_from_mont8x,@function
+.align 32
+bn_from_mont8x:
+.byte 0x67
+ movq %rsp,%rax
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+.byte 0x67
+ movl %r9d,%r10d
+ shll $3,%r9d
+ shll $3+2,%r10d
+ negq %r9
+ movq (%r8),%r8
+
+
+
+
+
+
+
+ leaq -64(%rsp,%r9,2),%r11
+ subq %rsi,%r11
+ andq $4095,%r11
+ cmpq %r11,%r10
+ jb .Lfrom_sp_alt
+ subq %r11,%rsp
+ leaq -64(%rsp,%r9,2),%rsp
+ jmp .Lfrom_sp_done
+
+.align 32
+.Lfrom_sp_alt:
+ leaq 4096-64(,%r9,2),%r10
+ leaq -64(%rsp,%r9,2),%rsp
+ subq %r10,%r11
+ movq $0,%r10
+ cmovcq %r10,%r11
+ subq %r11,%rsp
+.Lfrom_sp_done:
+ andq $-64,%rsp
+ movq %r9,%r10
+ negq %r9
+
+
+
+
+
+
+
+
+
+
+ movq %r8,32(%rsp)
+ movq %rax,40(%rsp)
+.Lfrom_body:
+ movq %r9,%r11
+ leaq 48(%rsp),%rax
+ pxor %xmm0,%xmm0
+ jmp .Lmul_by_1
+
+.align 32
+.Lmul_by_1:
movdqu (%rsi),%xmm1
- movdqa %xmm0,(%rsp)
- movdqu %xmm1,(%rdi)
- jmp .Lcopy4x
-.align 16
-.Lcopy4x:
- movdqu 16(%rsi,%r14,1),%xmm2
- movdqu 32(%rsi,%r14,1),%xmm1
- movdqa %xmm0,16(%rsp,%r14,1)
- movdqu %xmm2,16(%rdi,%r14,1)
- movdqa %xmm0,32(%rsp,%r14,1)
- movdqu %xmm1,32(%rdi,%r14,1)
- leaq 32(%r14),%r14
- decq %r15
- jnz .Lcopy4x
+ movdqu 16(%rsi),%xmm2
+ movdqu 32(%rsi),%xmm3
+ movdqa %xmm0,(%rax,%r9,1)
+ movdqu 48(%rsi),%xmm4
+ movdqa %xmm0,16(%rax,%r9,1)
+.byte 0x48,0x8d,0xb6,0x40,0x00,0x00,0x00
+ movdqa %xmm1,(%rax)
+ movdqa %xmm0,32(%rax,%r9,1)
+ movdqa %xmm2,16(%rax)
+ movdqa %xmm0,48(%rax,%r9,1)
+ movdqa %xmm3,32(%rax)
+ movdqa %xmm4,48(%rax)
+ leaq 64(%rax),%rax
+ subq $64,%r11
+ jnz .Lmul_by_1
+
+.byte 102,72,15,110,207
+.byte 102,72,15,110,209
+.byte 0x67
+ movq %rcx,%rbp
+.byte 102,73,15,110,218
+ call sqr8x_reduction
+
+ pxor %xmm0,%xmm0
+ leaq 48(%rsp),%rax
+ movq 40(%rsp),%rsi
+ jmp .Lfrom_mont_zero
+
+.align 32
+.Lfrom_mont_zero:
+ movdqa %xmm0,0(%rax)
+ movdqa %xmm0,16(%rax)
+ movdqa %xmm0,32(%rax)
+ movdqa %xmm0,48(%rax)
+ leaq 64(%rax),%rax
+ subq $32,%r9
+ jnz .Lfrom_mont_zero
- shlq $2,%r9
- movdqu 16(%rsi,%r14,1),%xmm2
- movdqa %xmm0,16(%rsp,%r14,1)
- movdqu %xmm2,16(%rdi,%r14,1)
- movq 8(%rsp,%r9,8),%rsi
movq $1,%rax
- movq (%rsi),%r15
- movq 8(%rsi),%r14
- movq 16(%rsi),%r13
- movq 24(%rsi),%r12
- movq 32(%rsi),%rbp
- movq 40(%rsi),%rbx
- leaq 48(%rsi),%rsp
-.Lmul4x_epilogue:
+ movq -48(%rsi),%r15
+ movq -40(%rsi),%r14
+ movq -32(%rsi),%r13
+ movq -24(%rsi),%r12
+ movq -16(%rsi),%rbp
+ movq -8(%rsi),%rbx
+ leaq (%rsi),%rsp
+.Lfrom_epilogue:
.byte 0xf3,0xc3
-.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
+.size bn_from_mont8x,.-bn_from_mont8x
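
bn_from_mont8x above takes a value out of Montgomery form: copy it into the scratch area (.Lmul_by_1), run sqr8x_reduction over it, then zero the temporaries. In terms of the illustrative mont_reduce() sketched earlier, this is just a reduction of the bare value, i.e. a Montgomery multiplication by 1; from_mont below is likewise an illustrative name.

    /* out, x_mont: n words; t: caller-supplied scratch of 2n+1 words.
     * Sketch only, reusing the earlier mont_reduce() outline. */
    static void from_mont(uint64_t *out, const uint64_t *x_mont,
                          uint64_t *t, const uint64_t *m,
                          size_t n, uint64_t n0)
    {
        for (size_t i = 0; i < n; i++) t[i] = x_mont[i];   /* low half   */
        for (size_t i = n; i <= 2 * n; i++) t[i] = 0;      /* pad high   */
        mont_reduce(t, m, n, n0);                          /* times R^-1 */
        for (size_t i = 0; i < n; i++) out[i] = t[n + i];
    }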
+.globl bn_get_bits5
+.type bn_get_bits5,@function
+.align 16
+bn_get_bits5:
+ leaq 0(%rdi),%r10
+ leaq 1(%rdi),%r11
+ movl %esi,%ecx
+ shrl $4,%esi
+ andl $15,%ecx
+ leal -8(%rcx),%eax
+ cmpl $11,%ecx
+ cmovaq %r11,%r10
+ cmoval %eax,%ecx
+ movzwl (%r10,%rsi,2),%eax
+ shrl %cl,%eax
+ andl $31,%eax
+ .byte 0xf3,0xc3
+.size bn_get_bits5,.-bn_get_bits5
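
bn_get_bits5 above extracts a 5-bit exponent window starting at an arbitrary bit offset: one 16-bit load, nudged up a byte via cmov (rather than a branch) whenever the window would run past the top of the load. The same indexing in plain C, with an if where the assembly uses cmov; get_bits5 is an illustrative name.

    #include <stdint.h>

    static int get_bits5(const uint8_t *a, unsigned off)
    {
        const uint8_t *p = a;
        unsigned word = off >> 4;       /* 16-bit word holding bit `off`   */
        unsigned bit  = off & 15;       /* position inside that word       */
        if (bit > 11) {                 /* window [bit, bit+4] would spill */
            p++;                        /* shift the 16-bit load up a byte */
            bit -= 8;
        }
        unsigned v = p[2 * word] | ((unsigned)p[2 * word + 1] << 8);
        return (int)((v >> bit) & 31);
    }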
+
.globl bn_scatter5
.type bn_scatter5,@function
.align 16
bn_scatter5:
- cmpq $0,%rsi
+ cmpl $0,%esi
jz .Lscatter_epilogue
leaq (%rdx,%rcx,8),%rdx
.Lscatter:
@@ -734,7 +1783,7 @@ bn_scatter5:
leaq 8(%rdi),%rdi
movq %rax,(%rdx)
leaq 256(%rdx),%rdx
- subq $1,%rsi
+ subl $1,%esi
jnz .Lscatter
.Lscatter_epilogue:
.byte 0xf3,0xc3
@@ -744,13 +1793,13 @@ bn_scatter5:
.type bn_gather5,@function
.align 16
bn_gather5:
- movq %rcx,%r11
- shrq $3,%rcx
+ movl %ecx,%r11d
+ shrl $3,%ecx
andq $7,%r11
- notq %rcx
+ notl %ecx
leaq .Lmagic_masks(%rip),%rax
- andq $3,%rcx
- leaq 96(%rdx,%r11,8),%rdx
+ andl $3,%ecx
+ leaq 128(%rdx,%r11,8),%rdx
movq 0(%rax,%rcx,8),%xmm4
movq 8(%rax,%rcx,8),%xmm5
movq 16(%rax,%rcx,8),%xmm6
@@ -758,22 +1807,23 @@ bn_gather5:
jmp .Lgather
.align 16
.Lgather:
- movq -96(%rdx),%xmm0
- movq -32(%rdx),%xmm1
+ movq -128(%rdx),%xmm0
+ movq -64(%rdx),%xmm1
pand %xmm4,%xmm0
- movq 32(%rdx),%xmm2
+ movq 0(%rdx),%xmm2
pand %xmm5,%xmm1
- movq 96(%rdx),%xmm3
+ movq 64(%rdx),%xmm3
pand %xmm6,%xmm2
por %xmm1,%xmm0
pand %xmm7,%xmm3
+.byte 0x67,0x67
por %xmm2,%xmm0
leaq 256(%rdx),%rdx
por %xmm3,%xmm0
movq %xmm0,(%rdi)
leaq 8(%rdi),%rdi
- subq $1,%rsi
+ subl $1,%esi
jnz .Lgather
.byte 0xf3,0xc3
.LSEH_end_bn_gather5:
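
The bn_scatter5/bn_gather5 changes above adjust the indexing (the gather base moves from 96(%rdx,%r11,8) to 128(%rdx,%r11,8)) but keep the approach: table entries are written with a 256-byte stride, and read back by loading several slots and combining them through pand/por masks, so the load addresses do not reveal which entry was wanted. Reduced to scalar C, the masking idea looks like the sketch below; ct_gather is an illustrative name, and the real code's strided layout is omitted.

    #include <stdint.h>
    #include <stddef.h>

    /* Select table[idx] while touching every entry, so the memory
     * trace is independent of the secret index. */
    static uint64_t ct_gather(const uint64_t *table, size_t n, size_t idx)
    {
        uint64_t r = 0;
        for (size_t i = 0; i < n; i++) {
            uint64_t d = (uint64_t)(i ^ idx);
            uint64_t mask = ((d | (0 - d)) >> 63) - 1; /* all-ones iff i == idx */
            r |= table[i] & mask;
        }
        return r;
    }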
diff --git a/secure/lib/libcrypto/amd64/x86_64cpuid.S b/secure/lib/libcrypto/amd64/x86_64cpuid.S
index c5d8399..93de516 100644
--- a/secure/lib/libcrypto/amd64/x86_64cpuid.S
+++ b/secure/lib/libcrypto/amd64/x86_64cpuid.S
@@ -5,7 +5,7 @@
call OPENSSL_cpuid_setup
.hidden OPENSSL_ia32cap_P
-.comm OPENSSL_ia32cap_P,8,4
+.comm OPENSSL_ia32cap_P,16,4
.text
@@ -15,11 +15,11 @@
OPENSSL_atomic_add:
movl (%rdi),%eax
.Lspin: leaq (%rsi,%rax,1),%r8
-.byte 0xf0
+.byte 0xf0
cmpxchgl %r8d,(%rdi)
jne .Lspin
movl %r8d,%eax
-.byte 0x48,0x98
+.byte 0x48,0x98
.byte 0xf3,0xc3
.size OPENSSL_atomic_add,.-OPENSSL_atomic_add
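
In the hunk above, the lone .byte 0xf0 is a bare lock prefix in front of cmpxchgl, making OPENSSL_atomic_add a standard compare-and-swap retry loop (the 0x48,0x98 is cltq, sign-extending the 32-bit result). The same shape with compiler builtins, as a sketch only:

    /* Atomically add v to *p and return the new value.  On failure
     * __atomic_compare_exchange_n refreshes `old` with the observed
     * value, so the loop simply retries. */
    static int atomic_add(volatile int *p, int v)
    {
        int old = *p;
        while (!__atomic_compare_exchange_n(p, &old, old + v,
                                            0, __ATOMIC_SEQ_CST,
                                            __ATOMIC_SEQ_CST))
            ;
        return old + v;
    }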
@@ -40,6 +40,7 @@ OPENSSL_ia32_cpuid:
movq %rbx,%r8
xorl %eax,%eax
+ movl %eax,8(%rdi)
cpuid
movl %eax,%r11d
@@ -107,6 +108,14 @@ OPENSSL_ia32_cpuid:
shrl $14,%r10d
andl $4095,%r10d
+ cmpl $7,%r11d
+ jb .Lnocacheinfo
+
+ movl $7,%eax
+ xorl %ecx,%ecx
+ cpuid
+ movl %ebx,8(%rdi)
+
.Lnocacheinfo:
movl $1,%eax
cpuid
@@ -139,13 +148,14 @@ OPENSSL_ia32_cpuid:
btl $27,%r9d
jnc .Lclear_avx
xorl %ecx,%ecx
-.byte 0x0f,0x01,0xd0
+.byte 0x0f,0x01,0xd0
andl $6,%eax
cmpl $6,%eax
je .Ldone
.Lclear_avx:
movl $4026525695,%eax
andl %eax,%r9d
+ andl $4294967263,8(%rdi)
.Ldone:
shlq $32,%r9
movl %r10d,%eax
@@ -233,3 +243,18 @@ OPENSSL_ia32_rdrand:
cmoveq %rcx,%rax
.byte 0xf3,0xc3
.size OPENSSL_ia32_rdrand,.-OPENSSL_ia32_rdrand
+
+.globl OPENSSL_ia32_rdseed
+.type OPENSSL_ia32_rdseed,@function
+.align 16
+OPENSSL_ia32_rdseed:
+ movl $8,%ecx
+.Loop_rdseed:
+.byte 72,15,199,248
+ jc .Lbreak_rdseed
+ loop .Loop_rdseed
+.Lbreak_rdseed:
+ cmpq $0,%rax
+ cmoveq %rcx,%rax
+ .byte 0xf3,0xc3
+.size OPENSSL_ia32_rdseed,.-OPENSSL_ia32_rdseed
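
The new OPENSSL_ia32_rdseed retries up to eight times because RDSEED (the 72,15,199,248 bytes encode rdseed %rax) can transiently fail with CF=0 when the entropy conditioner is drained. Roughly the same contract via the compiler intrinsic, as a sketch; it needs -mrdseed and simplifies away the assembly's handling of a successful all-zero sample.

    #include <immintrin.h>

    /* Return a 64-bit hardware seed, or 0 after 8 failed attempts;
     * _rdseed64_step() returns 1 when the carry flag signals success. */
    static unsigned long long rdseed64(void)
    {
        unsigned long long v;
        for (int i = 0; i < 8; i++)
            if (_rdseed64_step(&v))
                return v;
        return 0;
    }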
diff --git a/secure/lib/libcrypto/engines/Makefile b/secure/lib/libcrypto/engines/Makefile
index e29fb87..a41dd7b 100644
--- a/secure/lib/libcrypto/engines/Makefile
+++ b/secure/lib/libcrypto/engines/Makefile
@@ -1,6 +1,6 @@
# $FreeBSD$
-SUBDIR= lib4758cca libaep libatalla libchil libcswift libgost libnuron \
- libsureware libubsec
+SUBDIR= lib4758cca libaep libatalla libcapi libchil libcswift libgost \
+ libnuron libsureware libubsec
.include <bsd.subdir.mk>
diff --git a/secure/lib/libcrypto/engines/libcapi/Makefile b/secure/lib/libcrypto/engines/libcapi/Makefile
new file mode 100644
index 0000000..48543ab
--- /dev/null
+++ b/secure/lib/libcrypto/engines/libcapi/Makefile
@@ -0,0 +1,6 @@
+# $FreeBSD$
+
+SHLIB_NAME?= libcapi.so
+SRCS= e_capi.c
+
+.include <bsd.lib.mk>
diff --git a/secure/lib/libcrypto/engines/libgost/Makefile b/secure/lib/libcrypto/engines/libgost/Makefile
index 47ab05b..ca1c716 100644
--- a/secure/lib/libcrypto/engines/libgost/Makefile
+++ b/secure/lib/libcrypto/engines/libgost/Makefile
@@ -1,9 +1,9 @@
# $FreeBSD$
SHLIB_NAME?= libgost.so
-SRCS= gost2001.c gost2001_keyx.c gost89.c gost94_keyx.c gost_ameth.c \
- gost_asn1.c gost_crypt.c gost_ctl.c gost_eng.c gost_keywrap.c \
- gost_md.c gost_params.c gost_pmeth.c gost_sign.c gosthash.c \
- e_gost_err.c
+SRCS= e_gost_err.c gost2001.c gost2001_keyx.c gost89.c gost94_keyx.c \
+ gost_ameth.c gost_asn1.c gost_crypt.c gost_ctl.c gost_eng.c \
+ gost_keywrap.c gost_md.c gost_params.c gost_pmeth.c gost_sign.c \
+ gosthash.c
.include <bsd.lib.mk>
diff --git a/secure/lib/libcrypto/i386/aes-586.s b/secure/lib/libcrypto/i386/aes-586.s
index 704c53c..bb66276 100644
--- a/secure/lib/libcrypto/i386/aes-586.s
+++ b/secure/lib/libcrypto/i386/aes-586.s
@@ -101,74 +101,78 @@ _x86_AES_encrypt_compact:
xorl %ecx,%edx
movl %esi,%ecx
- movl %ecx,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ movl $2155905152,%ebp
+ andl %ecx,%ebp
leal (%ecx,%ecx,1),%edi
- subl %ebp,%esi
+ movl %ebp,%esi
+ shrl $7,%ebp
andl $4278124286,%edi
- andl $454761243,%esi
+ subl %ebp,%esi
movl %ecx,%ebp
+ andl $454761243,%esi
+ rorl $16,%ebp
xorl %edi,%esi
+ movl %ecx,%edi
xorl %esi,%ecx
+ rorl $24,%edi
+ xorl %ebp,%esi
roll $24,%ecx
+ xorl %edi,%esi
+ movl $2155905152,%ebp
xorl %esi,%ecx
- rorl $16,%ebp
- xorl %ebp,%ecx
- rorl $8,%ebp
- xorl %ebp,%ecx
- movl %edx,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ andl %edx,%ebp
leal (%edx,%edx,1),%edi
- subl %ebp,%esi
+ movl %ebp,%esi
+ shrl $7,%ebp
andl $4278124286,%edi
- andl $454761243,%esi
+ subl %ebp,%esi
movl %edx,%ebp
+ andl $454761243,%esi
+ rorl $16,%ebp
xorl %edi,%esi
+ movl %edx,%edi
xorl %esi,%edx
+ rorl $24,%edi
+ xorl %ebp,%esi
roll $24,%edx
+ xorl %edi,%esi
+ movl $2155905152,%ebp
xorl %esi,%edx
- rorl $16,%ebp
- xorl %ebp,%edx
- rorl $8,%ebp
- xorl %ebp,%edx
- movl %eax,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ andl %eax,%ebp
leal (%eax,%eax,1),%edi
- subl %ebp,%esi
+ movl %ebp,%esi
+ shrl $7,%ebp
andl $4278124286,%edi
- andl $454761243,%esi
+ subl %ebp,%esi
movl %eax,%ebp
+ andl $454761243,%esi
+ rorl $16,%ebp
xorl %edi,%esi
+ movl %eax,%edi
xorl %esi,%eax
+ rorl $24,%edi
+ xorl %ebp,%esi
roll $24,%eax
+ xorl %edi,%esi
+ movl $2155905152,%ebp
xorl %esi,%eax
- rorl $16,%ebp
- xorl %ebp,%eax
- rorl $8,%ebp
- xorl %ebp,%eax
- movl %ebx,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ andl %ebx,%ebp
leal (%ebx,%ebx,1),%edi
- subl %ebp,%esi
+ movl %ebp,%esi
+ shrl $7,%ebp
andl $4278124286,%edi
- andl $454761243,%esi
+ subl %ebp,%esi
movl %ebx,%ebp
+ andl $454761243,%esi
+ rorl $16,%ebp
xorl %edi,%esi
+ movl %ebx,%edi
xorl %esi,%ebx
+ rorl $24,%edi
+ xorl %ebp,%esi
roll $24,%ebx
+ xorl %edi,%esi
xorl %esi,%ebx
- rorl $16,%ebp
- xorl %ebp,%ebx
- rorl $8,%ebp
- xorl %ebp,%ebx
movl 20(%esp),%edi
movl 28(%esp),%ebp
addl $16,%edi
@@ -290,74 +294,76 @@ _sse_AES_encrypt_compact:
pshufw $13,%mm4,%mm5
movd %mm1,%eax
movd %mm5,%ebx
+ movl %edi,20(%esp)
movzbl %al,%esi
- movzbl -128(%ebp,%esi,1),%ecx
- pshufw $13,%mm0,%mm2
movzbl %ah,%edx
+ pshufw $13,%mm0,%mm2
+ movzbl -128(%ebp,%esi,1),%ecx
+ movzbl %bl,%edi
movzbl -128(%ebp,%edx,1),%edx
- shll $8,%edx
shrl $16,%eax
- movzbl %bl,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ shll $8,%edx
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %bh,%edi
shll $16,%esi
- orl %esi,%ecx
pshufw $8,%mm4,%mm6
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ orl %esi,%ecx
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %ah,%edi
shll $24,%esi
- orl %esi,%edx
shrl $16,%ebx
- movzbl %ah,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ orl %esi,%edx
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %bh,%edi
shll $8,%esi
orl %esi,%ecx
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %al,%edi
shll $24,%esi
orl %esi,%ecx
- movd %ecx,%mm0
- movzbl %al,%esi
- movzbl -128(%ebp,%esi,1),%ecx
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %bl,%edi
movd %mm2,%eax
- movzbl %bl,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $16,%esi
- orl %esi,%ecx
+ movd %ecx,%mm0
+ movzbl -128(%ebp,%edi,1),%ecx
+ movzbl %ah,%edi
+ shll $16,%ecx
movd %mm6,%ebx
- movzbl %ah,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ orl %esi,%ecx
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %bh,%edi
shll $24,%esi
orl %esi,%ecx
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %bl,%edi
shll $8,%esi
- orl %esi,%ecx
- movd %ecx,%mm1
- movzbl %bl,%esi
- movzbl -128(%ebp,%esi,1),%ecx
shrl $16,%ebx
- movzbl %al,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $16,%esi
orl %esi,%ecx
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %al,%edi
shrl $16,%eax
+ movd %ecx,%mm1
+ movzbl -128(%ebp,%edi,1),%ecx
+ movzbl %ah,%edi
+ shll $16,%ecx
+ andl $255,%eax
+ orl %esi,%ecx
punpckldq %mm1,%mm0
- movzbl %ah,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %bh,%edi
shll $24,%esi
- orl %esi,%ecx
- andl $255,%eax
+ andl $255,%ebx
movzbl -128(%ebp,%eax,1),%eax
+ orl %esi,%ecx
shll $16,%eax
+ movzbl -128(%ebp,%edi,1),%esi
orl %eax,%edx
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
shll $8,%esi
- orl %esi,%ecx
- movd %ecx,%mm4
- andl $255,%ebx
movzbl -128(%ebp,%ebx,1),%ebx
+ orl %esi,%ecx
orl %ebx,%edx
+ movl 20(%esp),%edi
+ movd %ecx,%mm4
movd %edx,%mm5
punpckldq %mm5,%mm4
addl $16,%edi
@@ -1130,28 +1136,28 @@ _x86_AES_decrypt_compact:
movzbl -128(%ebp,%eax,1),%eax
shll $24,%eax
xorl %eax,%edx
- movl %ecx,%esi
- andl $2155905152,%esi
- movl %esi,%edi
+ movl $2155905152,%edi
+ andl %ecx,%edi
+ movl %edi,%esi
shrl $7,%edi
leal (%ecx,%ecx,1),%eax
subl %edi,%esi
andl $4278124286,%eax
andl $454761243,%esi
- xorl %eax,%esi
- movl %esi,%eax
- andl $2155905152,%esi
- movl %esi,%edi
+ xorl %esi,%eax
+ movl $2155905152,%edi
+ andl %eax,%edi
+ movl %edi,%esi
shrl $7,%edi
leal (%eax,%eax,1),%ebx
subl %edi,%esi
andl $4278124286,%ebx
andl $454761243,%esi
xorl %ecx,%eax
- xorl %ebx,%esi
- movl %esi,%ebx
- andl $2155905152,%esi
- movl %esi,%edi
+ xorl %esi,%ebx
+ movl $2155905152,%edi
+ andl %ebx,%edi
+ movl %edi,%esi
shrl $7,%edi
leal (%ebx,%ebx,1),%ebp
subl %edi,%esi
@@ -1162,39 +1168,39 @@ _x86_AES_decrypt_compact:
xorl %esi,%ebp
xorl %eax,%ecx
xorl %ebp,%eax
- roll $24,%eax
xorl %ebx,%ecx
xorl %ebp,%ebx
- roll $16,%ebx
+ roll $24,%eax
xorl %ebp,%ecx
- roll $8,%ebp
+ roll $16,%ebx
xorl %eax,%ecx
+ roll $8,%ebp
xorl %ebx,%ecx
movl 4(%esp),%eax
xorl %ebp,%ecx
movl %ecx,12(%esp)
- movl %edx,%esi
- andl $2155905152,%esi
- movl %esi,%edi
+ movl $2155905152,%edi
+ andl %edx,%edi
+ movl %edi,%esi
shrl $7,%edi
leal (%edx,%edx,1),%ebx
subl %edi,%esi
andl $4278124286,%ebx
andl $454761243,%esi
- xorl %ebx,%esi
- movl %esi,%ebx
- andl $2155905152,%esi
- movl %esi,%edi
+ xorl %esi,%ebx
+ movl $2155905152,%edi
+ andl %ebx,%edi
+ movl %edi,%esi
shrl $7,%edi
leal (%ebx,%ebx,1),%ecx
subl %edi,%esi
andl $4278124286,%ecx
andl $454761243,%esi
xorl %edx,%ebx
- xorl %ecx,%esi
- movl %esi,%ecx
- andl $2155905152,%esi
- movl %esi,%edi
+ xorl %esi,%ecx
+ movl $2155905152,%edi
+ andl %ecx,%edi
+ movl %edi,%esi
shrl $7,%edi
leal (%ecx,%ecx,1),%ebp
subl %edi,%esi
@@ -1205,39 +1211,39 @@ _x86_AES_decrypt_compact:
xorl %esi,%ebp
xorl %ebx,%edx
xorl %ebp,%ebx
- roll $24,%ebx
xorl %ecx,%edx
xorl %ebp,%ecx
- roll $16,%ecx
+ roll $24,%ebx
xorl %ebp,%edx
- roll $8,%ebp
+ roll $16,%ecx
xorl %ebx,%edx
+ roll $8,%ebp
xorl %ecx,%edx
movl 8(%esp),%ebx
xorl %ebp,%edx
movl %edx,16(%esp)
- movl %eax,%esi
- andl $2155905152,%esi
- movl %esi,%edi
+ movl $2155905152,%edi
+ andl %eax,%edi
+ movl %edi,%esi
shrl $7,%edi
leal (%eax,%eax,1),%ecx
subl %edi,%esi
andl $4278124286,%ecx
andl $454761243,%esi
- xorl %ecx,%esi
- movl %esi,%ecx
- andl $2155905152,%esi
- movl %esi,%edi
+ xorl %esi,%ecx
+ movl $2155905152,%edi
+ andl %ecx,%edi
+ movl %edi,%esi
shrl $7,%edi
leal (%ecx,%ecx,1),%edx
subl %edi,%esi
andl $4278124286,%edx
andl $454761243,%esi
xorl %eax,%ecx
- xorl %edx,%esi
- movl %esi,%edx
- andl $2155905152,%esi
- movl %esi,%edi
+ xorl %esi,%edx
+ movl $2155905152,%edi
+ andl %edx,%edi
+ movl %edi,%esi
shrl $7,%edi
leal (%edx,%edx,1),%ebp
subl %edi,%esi
@@ -1248,37 +1254,37 @@ _x86_AES_decrypt_compact:
xorl %esi,%ebp
xorl %ecx,%eax
xorl %ebp,%ecx
- roll $24,%ecx
xorl %edx,%eax
xorl %ebp,%edx
- roll $16,%edx
+ roll $24,%ecx
xorl %ebp,%eax
- roll $8,%ebp
+ roll $16,%edx
xorl %ecx,%eax
+ roll $8,%ebp
xorl %edx,%eax
xorl %ebp,%eax
- movl %ebx,%esi
- andl $2155905152,%esi
- movl %esi,%edi
+ movl $2155905152,%edi
+ andl %ebx,%edi
+ movl %edi,%esi
shrl $7,%edi
leal (%ebx,%ebx,1),%ecx
subl %edi,%esi
andl $4278124286,%ecx
andl $454761243,%esi
- xorl %ecx,%esi
- movl %esi,%ecx
- andl $2155905152,%esi
- movl %esi,%edi
+ xorl %esi,%ecx
+ movl $2155905152,%edi
+ andl %ecx,%edi
+ movl %edi,%esi
shrl $7,%edi
leal (%ecx,%ecx,1),%edx
subl %edi,%esi
andl $4278124286,%edx
andl $454761243,%esi
xorl %ebx,%ecx
- xorl %edx,%esi
- movl %esi,%edx
- andl $2155905152,%esi
- movl %esi,%edi
+ xorl %esi,%edx
+ movl $2155905152,%edi
+ andl %edx,%edi
+ movl %edi,%esi
shrl $7,%edi
leal (%edx,%edx,1),%ebp
subl %edi,%esi
@@ -1289,13 +1295,13 @@ _x86_AES_decrypt_compact:
xorl %esi,%ebp
xorl %ecx,%ebx
xorl %ebp,%ecx
- roll $24,%ecx
xorl %edx,%ebx
xorl %ebp,%edx
- roll $16,%edx
+ roll $24,%ecx
xorl %ebp,%ebx
- roll $8,%ebp
+ roll $16,%edx
xorl %ecx,%ebx
+ roll $8,%ebp
xorl %edx,%ebx
movl 12(%esp),%ecx
xorl %ebp,%ebx
@@ -1414,77 +1420,79 @@ _sse_AES_decrypt_compact:
.align 16
.L007loop:
pshufw $12,%mm0,%mm1
- movd %mm1,%eax
pshufw $9,%mm4,%mm5
- movzbl %al,%esi
- movzbl -128(%ebp,%esi,1),%ecx
+ movd %mm1,%eax
movd %mm5,%ebx
+ movl %edi,20(%esp)
+ movzbl %al,%esi
movzbl %ah,%edx
+ pshufw $6,%mm0,%mm2
+ movzbl -128(%ebp,%esi,1),%ecx
+ movzbl %bl,%edi
movzbl -128(%ebp,%edx,1),%edx
+ shrl $16,%eax
shll $8,%edx
- pshufw $6,%mm0,%mm2
- movzbl %bl,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %bh,%edi
shll $16,%esi
+ pshufw $3,%mm4,%mm6
orl %esi,%ecx
- shrl $16,%eax
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %ah,%edi
shll $24,%esi
- orl %esi,%edx
shrl $16,%ebx
- pshufw $3,%mm4,%mm6
- movzbl %ah,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ orl %esi,%edx
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %bh,%edi
shll $24,%esi
orl %esi,%ecx
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %al,%edi
shll $8,%esi
- orl %esi,%ecx
- movd %ecx,%mm0
- movzbl %al,%esi
movd %mm2,%eax
- movzbl -128(%ebp,%esi,1),%ecx
- shll $16,%ecx
- movzbl %bl,%esi
+ orl %esi,%ecx
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %bl,%edi
+ shll $16,%esi
movd %mm6,%ebx
- movzbl -128(%ebp,%esi,1),%esi
+ movd %ecx,%mm0
+ movzbl -128(%ebp,%edi,1),%ecx
+ movzbl %al,%edi
orl %esi,%ecx
- movzbl %al,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %bl,%edi
orl %esi,%edx
- movzbl %bl,%esi
- movzbl -128(%ebp,%esi,1),%esi
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %ah,%edi
shll $16,%esi
- orl %esi,%edx
- movd %edx,%mm1
- movzbl %ah,%esi
- movzbl -128(%ebp,%esi,1),%edx
- shll $8,%edx
- movzbl %bh,%esi
shrl $16,%eax
- movzbl -128(%ebp,%esi,1),%esi
- shll $24,%esi
orl %esi,%edx
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %bh,%edi
shrl $16,%ebx
- punpckldq %mm1,%mm0
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
shll $8,%esi
- orl %esi,%ecx
+ movd %edx,%mm1
+ movzbl -128(%ebp,%edi,1),%edx
+ movzbl %bh,%edi
+ shll $24,%edx
andl $255,%ebx
+ orl %esi,%edx
+ punpckldq %mm1,%mm0
+ movzbl -128(%ebp,%edi,1),%esi
+ movzbl %al,%edi
+ shll $8,%esi
+ movzbl %ah,%eax
movzbl -128(%ebp,%ebx,1),%ebx
+ orl %esi,%ecx
+ movzbl -128(%ebp,%edi,1),%esi
orl %ebx,%edx
- movzbl %al,%esi
- movzbl -128(%ebp,%esi,1),%esi
shll $16,%esi
- orl %esi,%edx
- movd %edx,%mm4
- movzbl %ah,%eax
movzbl -128(%ebp,%eax,1),%eax
+ orl %esi,%edx
shll $24,%eax
orl %eax,%ecx
+ movl 20(%esp),%edi
+ movd %edx,%mm4
movd %ecx,%mm5
punpckldq %mm5,%mm4
addl $16,%edi
@@ -3046,30 +3054,30 @@ private_AES_set_decrypt_key:
.align 4
.L056permute:
addl $16,%edi
- movl %eax,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ movl $2155905152,%ebp
+ andl %eax,%ebp
leal (%eax,%eax,1),%ebx
+ movl %ebp,%esi
+ shrl $7,%ebp
subl %ebp,%esi
andl $4278124286,%ebx
andl $454761243,%esi
- xorl %ebx,%esi
- movl %esi,%ebx
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ xorl %esi,%ebx
+ movl $2155905152,%ebp
+ andl %ebx,%ebp
leal (%ebx,%ebx,1),%ecx
+ movl %ebp,%esi
+ shrl $7,%ebp
subl %ebp,%esi
andl $4278124286,%ecx
andl $454761243,%esi
xorl %eax,%ebx
- xorl %ecx,%esi
- movl %esi,%ecx
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ xorl %esi,%ecx
+ movl $2155905152,%ebp
+ andl %ecx,%ebp
leal (%ecx,%ecx,1),%edx
+ movl %ebp,%esi
+ shrl $7,%ebp
xorl %eax,%ecx
subl %ebp,%esi
andl $4278124286,%edx
@@ -3090,30 +3098,30 @@ private_AES_set_decrypt_key:
movl %ebp,%ebx
xorl %edx,%eax
movl %eax,(%edi)
- movl %ebx,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ movl $2155905152,%ebp
+ andl %ebx,%ebp
leal (%ebx,%ebx,1),%ecx
+ movl %ebp,%esi
+ shrl $7,%ebp
subl %ebp,%esi
andl $4278124286,%ecx
andl $454761243,%esi
- xorl %ecx,%esi
- movl %esi,%ecx
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ xorl %esi,%ecx
+ movl $2155905152,%ebp
+ andl %ecx,%ebp
leal (%ecx,%ecx,1),%edx
+ movl %ebp,%esi
+ shrl $7,%ebp
subl %ebp,%esi
andl $4278124286,%edx
andl $454761243,%esi
xorl %ebx,%ecx
- xorl %edx,%esi
- movl %esi,%edx
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ xorl %esi,%edx
+ movl $2155905152,%ebp
+ andl %edx,%ebp
leal (%edx,%edx,1),%eax
+ movl %ebp,%esi
+ shrl $7,%ebp
xorl %ebx,%edx
subl %ebp,%esi
andl $4278124286,%eax
@@ -3134,30 +3142,30 @@ private_AES_set_decrypt_key:
movl %ebp,%ecx
xorl %eax,%ebx
movl %ebx,4(%edi)
- movl %ecx,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ movl $2155905152,%ebp
+ andl %ecx,%ebp
leal (%ecx,%ecx,1),%edx
+ movl %ebp,%esi
+ shrl $7,%ebp
subl %ebp,%esi
andl $4278124286,%edx
andl $454761243,%esi
- xorl %edx,%esi
- movl %esi,%edx
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ xorl %esi,%edx
+ movl $2155905152,%ebp
+ andl %edx,%ebp
leal (%edx,%edx,1),%eax
+ movl %ebp,%esi
+ shrl $7,%ebp
subl %ebp,%esi
andl $4278124286,%eax
andl $454761243,%esi
xorl %ecx,%edx
- xorl %eax,%esi
- movl %esi,%eax
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ xorl %esi,%eax
+ movl $2155905152,%ebp
+ andl %eax,%ebp
leal (%eax,%eax,1),%ebx
+ movl %ebp,%esi
+ shrl $7,%ebp
xorl %ecx,%eax
subl %ebp,%esi
andl $4278124286,%ebx
@@ -3178,30 +3186,30 @@ private_AES_set_decrypt_key:
movl %ebp,%edx
xorl %ebx,%ecx
movl %ecx,8(%edi)
- movl %edx,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ movl $2155905152,%ebp
+ andl %edx,%ebp
leal (%edx,%edx,1),%eax
+ movl %ebp,%esi
+ shrl $7,%ebp
subl %ebp,%esi
andl $4278124286,%eax
andl $454761243,%esi
- xorl %eax,%esi
- movl %esi,%eax
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ xorl %esi,%eax
+ movl $2155905152,%ebp
+ andl %eax,%ebp
leal (%eax,%eax,1),%ebx
+ movl %ebp,%esi
+ shrl $7,%ebp
subl %ebp,%esi
andl $4278124286,%ebx
andl $454761243,%esi
xorl %edx,%eax
- xorl %ebx,%esi
- movl %esi,%ebx
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
+ xorl %esi,%ebx
+ movl $2155905152,%ebp
+ andl %ebx,%ebp
leal (%ebx,%ebx,1),%ecx
+ movl %ebp,%esi
+ shrl $7,%ebp
xorl %edx,%ebx
subl %ebp,%esi
andl $4278124286,%ecx
@@ -3234,4 +3242,4 @@ private_AES_set_decrypt_key:
.byte 65,69,83,32,102,111,114,32,120,56,54,44,32,67,82,89
.byte 80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114
.byte 111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
-.comm OPENSSL_ia32cap_P,8,4
+.comm OPENSSL_ia32cap_P,16,4
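
The reshuffled sequences in aes-586.s above are all built from one primitive: the AES "xtime" doubling in GF(2^8), done on four bytes packed into a 32-bit register with the constants 2155905152 (0x80808080), 4278124286 (0xfefefefe) and 454761243 (0x1b1b1b1b); the reordering changes the instruction scheduling, not the values computed. The packed doubling in C, as a sketch (xtime4 is an illustrative name):

    #include <stdint.h>

    /* Multiply each byte of x by 2 in GF(2^8), branch-free. */
    static uint32_t xtime4(uint32_t x)
    {
        uint32_t hi = x & 0x80808080u;        /* bytes about to overflow */
        uint32_t lo = (x + x) & 0xfefefefeu;  /* per-byte left shift     */
        /* each overflowing byte yields 0x80 - 0x01 = 0x7f; masking with
         * 0x1b selects the reduction polynomial x^8+x^4+x^3+x+1 */
        uint32_t rp = (hi - (hi >> 7)) & 0x1b1b1b1bu;
        return lo ^ rp;
    }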
diff --git a/secure/lib/libcrypto/i386/aesni-x86.s b/secure/lib/libcrypto/i386/aesni-x86.s
index e05096f..5b294bd 100644
--- a/secure/lib/libcrypto/i386/aesni-x86.s
+++ b/secure/lib/libcrypto/i386/aesni-x86.s
@@ -22,7 +22,10 @@ aesni_encrypt:
leal 16(%edx),%edx
jnz .L000enc1_loop_1
.byte 102,15,56,221,209
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
movups %xmm2,(%eax)
+ pxor %xmm2,%xmm2
ret
.size aesni_encrypt,.-.L_aesni_encrypt_begin
.globl aesni_decrypt
@@ -46,32 +49,90 @@ aesni_decrypt:
leal 16(%edx),%edx
jnz .L001dec1_loop_2
.byte 102,15,56,223,209
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
movups %xmm2,(%eax)
+ pxor %xmm2,%xmm2
ret
.size aesni_decrypt,.-.L_aesni_decrypt_begin
+.type _aesni_encrypt2,@function
+.align 16
+_aesni_encrypt2:
+ movups (%edx),%xmm0
+ shll $4,%ecx
+ movups 16(%edx),%xmm1
+ xorps %xmm0,%xmm2
+ pxor %xmm0,%xmm3
+ movups 32(%edx),%xmm0
+ leal 32(%edx,%ecx,1),%edx
+ negl %ecx
+ addl $16,%ecx
+.L002enc2_loop:
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+ movups (%edx,%ecx,1),%xmm1
+ addl $32,%ecx
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+ movups -16(%edx,%ecx,1),%xmm0
+ jnz .L002enc2_loop
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+.byte 102,15,56,221,208
+.byte 102,15,56,221,216
+ ret
+.size _aesni_encrypt2,.-_aesni_encrypt2
+.type _aesni_decrypt2,@function
+.align 16
+_aesni_decrypt2:
+ movups (%edx),%xmm0
+ shll $4,%ecx
+ movups 16(%edx),%xmm1
+ xorps %xmm0,%xmm2
+ pxor %xmm0,%xmm3
+ movups 32(%edx),%xmm0
+ leal 32(%edx,%ecx,1),%edx
+ negl %ecx
+ addl $16,%ecx
+.L003dec2_loop:
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+ movups (%edx,%ecx,1),%xmm1
+ addl $32,%ecx
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+ movups -16(%edx,%ecx,1),%xmm0
+ jnz .L003dec2_loop
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,223,208
+.byte 102,15,56,223,216
+ ret
+.size _aesni_decrypt2,.-_aesni_decrypt2
.type _aesni_encrypt3,@function
.align 16
_aesni_encrypt3:
movups (%edx),%xmm0
- shrl $1,%ecx
+ shll $4,%ecx
movups 16(%edx),%xmm1
- leal 32(%edx),%edx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
- movups (%edx),%xmm0
-.L002enc3_loop:
+ movups 32(%edx),%xmm0
+ leal 32(%edx,%ecx,1),%edx
+ negl %ecx
+ addl $16,%ecx
+.L004enc3_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %ecx
.byte 102,15,56,220,225
- movups 16(%edx),%xmm1
+ movups (%edx,%ecx,1),%xmm1
+ addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leal 32(%edx),%edx
.byte 102,15,56,220,224
- movups (%edx),%xmm0
- jnz .L002enc3_loop
+ movups -16(%edx,%ecx,1),%xmm0
+ jnz .L004enc3_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
@@ -84,25 +145,26 @@ _aesni_encrypt3:
.align 16
_aesni_decrypt3:
movups (%edx),%xmm0
- shrl $1,%ecx
+ shll $4,%ecx
movups 16(%edx),%xmm1
- leal 32(%edx),%edx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
- movups (%edx),%xmm0
-.L003dec3_loop:
+ movups 32(%edx),%xmm0
+ leal 32(%edx,%ecx,1),%edx
+ negl %ecx
+ addl $16,%ecx
+.L005dec3_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %ecx
.byte 102,15,56,222,225
- movups 16(%edx),%xmm1
+ movups (%edx,%ecx,1),%xmm1
+ addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leal 32(%edx),%edx
.byte 102,15,56,222,224
- movups (%edx),%xmm0
- jnz .L003dec3_loop
+ movups -16(%edx,%ecx,1),%xmm0
+ jnz .L005dec3_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
@@ -116,27 +178,29 @@ _aesni_decrypt3:
_aesni_encrypt4:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
- shrl $1,%ecx
- leal 32(%edx),%edx
+ shll $4,%ecx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
- movups (%edx),%xmm0
-.L004enc4_loop:
+ movups 32(%edx),%xmm0
+ leal 32(%edx,%ecx,1),%edx
+ negl %ecx
+.byte 15,31,64,0
+ addl $16,%ecx
+.L006enc4_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %ecx
.byte 102,15,56,220,225
.byte 102,15,56,220,233
- movups 16(%edx),%xmm1
+ movups (%edx,%ecx,1),%xmm1
+ addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leal 32(%edx),%edx
.byte 102,15,56,220,224
.byte 102,15,56,220,232
- movups (%edx),%xmm0
- jnz .L004enc4_loop
+ movups -16(%edx,%ecx,1),%xmm0
+ jnz .L006enc4_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
@@ -152,27 +216,29 @@ _aesni_encrypt4:
_aesni_decrypt4:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
- shrl $1,%ecx
- leal 32(%edx),%edx
+ shll $4,%ecx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
- movups (%edx),%xmm0
-.L005dec4_loop:
+ movups 32(%edx),%xmm0
+ leal 32(%edx,%ecx,1),%edx
+ negl %ecx
+.byte 15,31,64,0
+ addl $16,%ecx
+.L007dec4_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %ecx
.byte 102,15,56,222,225
.byte 102,15,56,222,233
- movups 16(%edx),%xmm1
+ movups (%edx,%ecx,1),%xmm1
+ addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leal 32(%edx),%edx
.byte 102,15,56,222,224
.byte 102,15,56,222,232
- movups (%edx),%xmm0
- jnz .L005dec4_loop
+ movups -16(%edx,%ecx,1),%xmm0
+ jnz .L007dec4_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
@@ -187,45 +253,42 @@ _aesni_decrypt4:
.align 16
_aesni_encrypt6:
movups (%edx),%xmm0
- shrl $1,%ecx
+ shll $4,%ecx
movups 16(%edx),%xmm1
- leal 32(%edx),%edx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
-.byte 102,15,56,220,209
pxor %xmm0,%xmm4
-.byte 102,15,56,220,217
+.byte 102,15,56,220,209
pxor %xmm0,%xmm5
- decl %ecx
-.byte 102,15,56,220,225
pxor %xmm0,%xmm6
-.byte 102,15,56,220,233
+.byte 102,15,56,220,217
+ leal 32(%edx,%ecx,1),%edx
+ negl %ecx
+.byte 102,15,56,220,225
pxor %xmm0,%xmm7
-.byte 102,15,56,220,241
- movups (%edx),%xmm0
-.byte 102,15,56,220,249
- jmp .L_aesni_encrypt6_enter
+ movups (%edx,%ecx,1),%xmm0
+ addl $16,%ecx
+ jmp .L008_aesni_encrypt6_inner
.align 16
-.L006enc6_loop:
+.L009enc6_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %ecx
.byte 102,15,56,220,225
+.L008_aesni_encrypt6_inner:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
-.align 16
.L_aesni_encrypt6_enter:
- movups 16(%edx),%xmm1
+ movups (%edx,%ecx,1),%xmm1
+ addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leal 32(%edx),%edx
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
- movups (%edx),%xmm0
- jnz .L006enc6_loop
+ movups -16(%edx,%ecx,1),%xmm0
+ jnz .L009enc6_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
@@ -244,45 +307,42 @@ _aesni_encrypt6:
.align 16
_aesni_decrypt6:
movups (%edx),%xmm0
- shrl $1,%ecx
+ shll $4,%ecx
movups 16(%edx),%xmm1
- leal 32(%edx),%edx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
-.byte 102,15,56,222,209
pxor %xmm0,%xmm4
-.byte 102,15,56,222,217
+.byte 102,15,56,222,209
pxor %xmm0,%xmm5
- decl %ecx
-.byte 102,15,56,222,225
pxor %xmm0,%xmm6
-.byte 102,15,56,222,233
+.byte 102,15,56,222,217
+ leal 32(%edx,%ecx,1),%edx
+ negl %ecx
+.byte 102,15,56,222,225
pxor %xmm0,%xmm7
-.byte 102,15,56,222,241
- movups (%edx),%xmm0
-.byte 102,15,56,222,249
- jmp .L_aesni_decrypt6_enter
+ movups (%edx,%ecx,1),%xmm0
+ addl $16,%ecx
+ jmp .L010_aesni_decrypt6_inner
.align 16
-.L007dec6_loop:
+.L011dec6_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %ecx
.byte 102,15,56,222,225
+.L010_aesni_decrypt6_inner:
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
-.align 16
.L_aesni_decrypt6_enter:
- movups 16(%edx),%xmm1
+ movups (%edx,%ecx,1),%xmm1
+ addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leal 32(%edx),%edx
.byte 102,15,56,222,224
.byte 102,15,56,222,232
.byte 102,15,56,222,240
.byte 102,15,56,222,248
- movups (%edx),%xmm0
- jnz .L007dec6_loop
+ movups -16(%edx,%ecx,1),%xmm0
+ jnz .L011dec6_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
@@ -312,14 +372,14 @@ aesni_ecb_encrypt:
movl 32(%esp),%edx
movl 36(%esp),%ebx
andl $-16,%eax
- jz .L008ecb_ret
+ jz .L012ecb_ret
movl 240(%edx),%ecx
testl %ebx,%ebx
- jz .L009ecb_decrypt
+ jz .L013ecb_decrypt
movl %edx,%ebp
movl %ecx,%ebx
cmpl $96,%eax
- jb .L010ecb_enc_tail
+ jb .L014ecb_enc_tail
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
@@ -328,9 +388,9 @@ aesni_ecb_encrypt:
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
subl $96,%eax
- jmp .L011ecb_enc_loop6_enter
+ jmp .L015ecb_enc_loop6_enter
.align 16
-.L012ecb_enc_loop6:
+.L016ecb_enc_loop6:
movups %xmm2,(%edi)
movdqu (%esi),%xmm2
movups %xmm3,16(%edi)
@@ -345,12 +405,12 @@ aesni_ecb_encrypt:
leal 96(%edi),%edi
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
-.L011ecb_enc_loop6_enter:
+.L015ecb_enc_loop6_enter:
call _aesni_encrypt6
movl %ebp,%edx
movl %ebx,%ecx
subl $96,%eax
- jnc .L012ecb_enc_loop6
+ jnc .L016ecb_enc_loop6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
@@ -359,18 +419,18 @@ aesni_ecb_encrypt:
movups %xmm7,80(%edi)
leal 96(%edi),%edi
addl $96,%eax
- jz .L008ecb_ret
-.L010ecb_enc_tail:
+ jz .L012ecb_ret
+.L014ecb_enc_tail:
movups (%esi),%xmm2
cmpl $32,%eax
- jb .L013ecb_enc_one
+ jb .L017ecb_enc_one
movups 16(%esi),%xmm3
- je .L014ecb_enc_two
+ je .L018ecb_enc_two
movups 32(%esi),%xmm4
cmpl $64,%eax
- jb .L015ecb_enc_three
+ jb .L019ecb_enc_three
movups 48(%esi),%xmm5
- je .L016ecb_enc_four
+ je .L020ecb_enc_four
movups 64(%esi),%xmm6
xorps %xmm7,%xmm7
call _aesni_encrypt6
@@ -379,50 +439,49 @@ aesni_ecb_encrypt:
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
- jmp .L008ecb_ret
+ jmp .L012ecb_ret
.align 16
-.L013ecb_enc_one:
+.L017ecb_enc_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
-.L017enc1_loop_3:
+.L021enc1_loop_3:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L017enc1_loop_3
+ jnz .L021enc1_loop_3
.byte 102,15,56,221,209
movups %xmm2,(%edi)
- jmp .L008ecb_ret
+ jmp .L012ecb_ret
.align 16
-.L014ecb_enc_two:
- xorps %xmm4,%xmm4
- call _aesni_encrypt3
+.L018ecb_enc_two:
+ call _aesni_encrypt2
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
- jmp .L008ecb_ret
+ jmp .L012ecb_ret
.align 16
-.L015ecb_enc_three:
+.L019ecb_enc_three:
call _aesni_encrypt3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
- jmp .L008ecb_ret
+ jmp .L012ecb_ret
.align 16
-.L016ecb_enc_four:
+.L020ecb_enc_four:
call _aesni_encrypt4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
- jmp .L008ecb_ret
+ jmp .L012ecb_ret
.align 16
-.L009ecb_decrypt:
+.L013ecb_decrypt:
movl %edx,%ebp
movl %ecx,%ebx
cmpl $96,%eax
- jb .L018ecb_dec_tail
+ jb .L022ecb_dec_tail
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
@@ -431,9 +490,9 @@ aesni_ecb_encrypt:
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
subl $96,%eax
- jmp .L019ecb_dec_loop6_enter
+ jmp .L023ecb_dec_loop6_enter
.align 16
-.L020ecb_dec_loop6:
+.L024ecb_dec_loop6:
movups %xmm2,(%edi)
movdqu (%esi),%xmm2
movups %xmm3,16(%edi)
@@ -448,12 +507,12 @@ aesni_ecb_encrypt:
leal 96(%edi),%edi
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
-.L019ecb_dec_loop6_enter:
+.L023ecb_dec_loop6_enter:
call _aesni_decrypt6
movl %ebp,%edx
movl %ebx,%ecx
subl $96,%eax
- jnc .L020ecb_dec_loop6
+ jnc .L024ecb_dec_loop6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
@@ -462,18 +521,18 @@ aesni_ecb_encrypt:
movups %xmm7,80(%edi)
leal 96(%edi),%edi
addl $96,%eax
- jz .L008ecb_ret
-.L018ecb_dec_tail:
+ jz .L012ecb_ret
+.L022ecb_dec_tail:
movups (%esi),%xmm2
cmpl $32,%eax
- jb .L021ecb_dec_one
+ jb .L025ecb_dec_one
movups 16(%esi),%xmm3
- je .L022ecb_dec_two
+ je .L026ecb_dec_two
movups 32(%esi),%xmm4
cmpl $64,%eax
- jb .L023ecb_dec_three
+ jb .L027ecb_dec_three
movups 48(%esi),%xmm5
- je .L024ecb_dec_four
+ je .L028ecb_dec_four
movups 64(%esi),%xmm6
xorps %xmm7,%xmm7
call _aesni_decrypt6
@@ -482,44 +541,51 @@ aesni_ecb_encrypt:
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
- jmp .L008ecb_ret
+ jmp .L012ecb_ret
.align 16
-.L021ecb_dec_one:
+.L025ecb_dec_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
-.L025dec1_loop_4:
+.L029dec1_loop_4:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L025dec1_loop_4
+ jnz .L029dec1_loop_4
.byte 102,15,56,223,209
movups %xmm2,(%edi)
- jmp .L008ecb_ret
+ jmp .L012ecb_ret
.align 16
-.L022ecb_dec_two:
- xorps %xmm4,%xmm4
- call _aesni_decrypt3
+.L026ecb_dec_two:
+ call _aesni_decrypt2
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
- jmp .L008ecb_ret
+ jmp .L012ecb_ret
.align 16
-.L023ecb_dec_three:
+.L027ecb_dec_three:
call _aesni_decrypt3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
- jmp .L008ecb_ret
+ jmp .L012ecb_ret
.align 16
-.L024ecb_dec_four:
+.L028ecb_dec_four:
call _aesni_decrypt4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
-.L008ecb_ret:
+.L012ecb_ret:
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
popl %edi
popl %esi
popl %ebx
@@ -558,48 +624,56 @@ aesni_ccm64_encrypt_blocks:
movl %ebp,20(%esp)
movl %ebp,24(%esp)
movl %ebp,28(%esp)
- shrl $1,%ecx
+ shll $4,%ecx
+ movl $16,%ebx
leal (%edx),%ebp
movdqa (%esp),%xmm5
movdqa %xmm7,%xmm2
- movl %ecx,%ebx
+ leal 32(%edx,%ecx,1),%edx
+ subl %ecx,%ebx
.byte 102,15,56,0,253
-.L026ccm64_enc_outer:
+.L030ccm64_enc_outer:
movups (%ebp),%xmm0
movl %ebx,%ecx
movups (%esi),%xmm6
xorps %xmm0,%xmm2
movups 16(%ebp),%xmm1
xorps %xmm6,%xmm0
- leal 32(%ebp),%edx
xorps %xmm0,%xmm3
- movups (%edx),%xmm0
-.L027ccm64_enc2_loop:
+ movups 32(%ebp),%xmm0
+.L031ccm64_enc2_loop:
.byte 102,15,56,220,209
- decl %ecx
.byte 102,15,56,220,217
- movups 16(%edx),%xmm1
+ movups (%edx,%ecx,1),%xmm1
+ addl $32,%ecx
.byte 102,15,56,220,208
- leal 32(%edx),%edx
.byte 102,15,56,220,216
- movups (%edx),%xmm0
- jnz .L027ccm64_enc2_loop
+ movups -16(%edx,%ecx,1),%xmm0
+ jnz .L031ccm64_enc2_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
paddq 16(%esp),%xmm7
+ decl %eax
.byte 102,15,56,221,208
.byte 102,15,56,221,216
- decl %eax
leal 16(%esi),%esi
xorps %xmm2,%xmm6
movdqa %xmm7,%xmm2
movups %xmm6,(%edi)
- leal 16(%edi),%edi
.byte 102,15,56,0,213
- jnz .L026ccm64_enc_outer
+ leal 16(%edi),%edi
+ jnz .L030ccm64_enc_outer
movl 48(%esp),%esp
movl 40(%esp),%edi
movups %xmm3,(%edi)
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
popl %edi
popl %esi
popl %ebx
@@ -647,71 +721,82 @@ aesni_ccm64_decrypt_blocks:
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
-.L028enc1_loop_5:
+.L032enc1_loop_5:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L028enc1_loop_5
+ jnz .L032enc1_loop_5
.byte 102,15,56,221,209
+ shll $4,%ebx
+ movl $16,%ecx
movups (%esi),%xmm6
paddq 16(%esp),%xmm7
leal 16(%esi),%esi
- jmp .L029ccm64_dec_outer
+ subl %ebx,%ecx
+ leal 32(%ebp,%ebx,1),%edx
+ movl %ecx,%ebx
+ jmp .L033ccm64_dec_outer
.align 16
-.L029ccm64_dec_outer:
+.L033ccm64_dec_outer:
xorps %xmm2,%xmm6
movdqa %xmm7,%xmm2
- movl %ebx,%ecx
movups %xmm6,(%edi)
leal 16(%edi),%edi
.byte 102,15,56,0,213
subl $1,%eax
- jz .L030ccm64_dec_break
+ jz .L034ccm64_dec_break
movups (%ebp),%xmm0
- shrl $1,%ecx
+ movl %ebx,%ecx
movups 16(%ebp),%xmm1
xorps %xmm0,%xmm6
- leal 32(%ebp),%edx
xorps %xmm0,%xmm2
xorps %xmm6,%xmm3
- movups (%edx),%xmm0
-.L031ccm64_dec2_loop:
+ movups 32(%ebp),%xmm0
+.L035ccm64_dec2_loop:
.byte 102,15,56,220,209
- decl %ecx
.byte 102,15,56,220,217
- movups 16(%edx),%xmm1
+ movups (%edx,%ecx,1),%xmm1
+ addl $32,%ecx
.byte 102,15,56,220,208
- leal 32(%edx),%edx
.byte 102,15,56,220,216
- movups (%edx),%xmm0
- jnz .L031ccm64_dec2_loop
+ movups -16(%edx,%ecx,1),%xmm0
+ jnz .L035ccm64_dec2_loop
movups (%esi),%xmm6
paddq 16(%esp),%xmm7
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- leal 16(%esi),%esi
.byte 102,15,56,221,208
.byte 102,15,56,221,216
- jmp .L029ccm64_dec_outer
+ leal 16(%esi),%esi
+ jmp .L033ccm64_dec_outer
.align 16
-.L030ccm64_dec_break:
+.L034ccm64_dec_break:
+ movl 240(%ebp),%ecx
movl %ebp,%edx
movups (%edx),%xmm0
movups 16(%edx),%xmm1
xorps %xmm0,%xmm6
leal 32(%edx),%edx
xorps %xmm6,%xmm3
-.L032enc1_loop_6:
+.L036enc1_loop_6:
.byte 102,15,56,220,217
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L032enc1_loop_6
+ jnz .L036enc1_loop_6
.byte 102,15,56,221,217
movl 48(%esp),%esp
movl 40(%esp),%edi
movups %xmm3,(%edi)
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
popl %edi
popl %esi
popl %ebx
@@ -737,7 +822,7 @@ aesni_ctr32_encrypt_blocks:
andl $-16,%esp
movl %ebp,80(%esp)
cmpl $1,%eax
- je .L033ctr32_one_shortcut
+ je .L037ctr32_one_shortcut
movdqu (%ebx),%xmm7
movl $202182159,(%esp)
movl $134810123,4(%esp)
@@ -753,63 +838,59 @@ aesni_ctr32_encrypt_blocks:
.byte 102,15,58,34,253,3
movl 240(%edx),%ecx
bswap %ebx
- pxor %xmm1,%xmm1
pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
movdqa (%esp),%xmm2
-.byte 102,15,58,34,203,0
+.byte 102,15,58,34,195,0
leal 3(%ebx),%ebp
-.byte 102,15,58,34,197,0
+.byte 102,15,58,34,205,0
incl %ebx
-.byte 102,15,58,34,203,1
+.byte 102,15,58,34,195,1
incl %ebp
-.byte 102,15,58,34,197,1
+.byte 102,15,58,34,205,1
incl %ebx
-.byte 102,15,58,34,203,2
+.byte 102,15,58,34,195,2
incl %ebp
-.byte 102,15,58,34,197,2
- movdqa %xmm1,48(%esp)
-.byte 102,15,56,0,202
- movdqa %xmm0,64(%esp)
+.byte 102,15,58,34,205,2
+ movdqa %xmm0,48(%esp)
.byte 102,15,56,0,194
- pshufd $192,%xmm1,%xmm2
- pshufd $128,%xmm1,%xmm3
+ movdqu (%edx),%xmm6
+ movdqa %xmm1,64(%esp)
+.byte 102,15,56,0,202
+ pshufd $192,%xmm0,%xmm2
+ pshufd $128,%xmm0,%xmm3
cmpl $6,%eax
- jb .L034ctr32_tail
+ jb .L038ctr32_tail
+ pxor %xmm6,%xmm7
+ shll $4,%ecx
+ movl $16,%ebx
movdqa %xmm7,32(%esp)
- shrl $1,%ecx
movl %edx,%ebp
- movl %ecx,%ebx
+ subl %ecx,%ebx
+ leal 32(%edx,%ecx,1),%edx
subl $6,%eax
- jmp .L035ctr32_loop6
-.align 16
-.L035ctr32_loop6:
- pshufd $64,%xmm1,%xmm4
- movdqa 32(%esp),%xmm1
- pshufd $192,%xmm0,%xmm5
- por %xmm1,%xmm2
- pshufd $128,%xmm0,%xmm6
- por %xmm1,%xmm3
- pshufd $64,%xmm0,%xmm7
- por %xmm1,%xmm4
- por %xmm1,%xmm5
- por %xmm1,%xmm6
- por %xmm1,%xmm7
- movups (%ebp),%xmm0
- movups 16(%ebp),%xmm1
- leal 32(%ebp),%edx
- decl %ecx
+ jmp .L039ctr32_loop6
+.align 16
+.L039ctr32_loop6:
+ pshufd $64,%xmm0,%xmm4
+ movdqa 32(%esp),%xmm0
+ pshufd $192,%xmm1,%xmm5
pxor %xmm0,%xmm2
+ pshufd $128,%xmm1,%xmm6
pxor %xmm0,%xmm3
-.byte 102,15,56,220,209
+ pshufd $64,%xmm1,%xmm7
+ movups 16(%ebp),%xmm1
pxor %xmm0,%xmm4
-.byte 102,15,56,220,217
pxor %xmm0,%xmm5
-.byte 102,15,56,220,225
+.byte 102,15,56,220,209
pxor %xmm0,%xmm6
-.byte 102,15,56,220,233
pxor %xmm0,%xmm7
+.byte 102,15,56,220,217
+ movups 32(%ebp),%xmm0
+ movl %ebx,%ecx
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
.byte 102,15,56,220,241
- movups (%edx),%xmm0
.byte 102,15,56,220,249
call .L_aesni_encrypt6_enter
movups (%esi),%xmm1
@@ -820,51 +901,51 @@ aesni_ctr32_encrypt_blocks:
movups %xmm2,(%edi)
movdqa 16(%esp),%xmm0
xorps %xmm1,%xmm4
- movdqa 48(%esp),%xmm1
+ movdqa 64(%esp),%xmm1
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
paddd %xmm0,%xmm1
- paddd 64(%esp),%xmm0
+ paddd 48(%esp),%xmm0
movdqa (%esp),%xmm2
movups 48(%esi),%xmm3
movups 64(%esi),%xmm4
xorps %xmm3,%xmm5
movups 80(%esi),%xmm3
leal 96(%esi),%esi
- movdqa %xmm1,48(%esp)
-.byte 102,15,56,0,202
+ movdqa %xmm0,48(%esp)
+.byte 102,15,56,0,194
xorps %xmm4,%xmm6
movups %xmm5,48(%edi)
xorps %xmm3,%xmm7
- movdqa %xmm0,64(%esp)
-.byte 102,15,56,0,194
+ movdqa %xmm1,64(%esp)
+.byte 102,15,56,0,202
movups %xmm6,64(%edi)
- pshufd $192,%xmm1,%xmm2
+ pshufd $192,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
- movl %ebx,%ecx
- pshufd $128,%xmm1,%xmm3
+ pshufd $128,%xmm0,%xmm3
subl $6,%eax
- jnc .L035ctr32_loop6
+ jnc .L039ctr32_loop6
addl $6,%eax
- jz .L036ctr32_ret
+ jz .L040ctr32_ret
+ movdqu (%ebp),%xmm7
movl %ebp,%edx
- leal 1(,%ecx,2),%ecx
- movdqa 32(%esp),%xmm7
-.L034ctr32_tail:
+ pxor 32(%esp),%xmm7
+ movl 240(%ebp),%ecx
+.L038ctr32_tail:
por %xmm7,%xmm2
cmpl $2,%eax
- jb .L037ctr32_one
- pshufd $64,%xmm1,%xmm4
+ jb .L041ctr32_one
+ pshufd $64,%xmm0,%xmm4
por %xmm7,%xmm3
- je .L038ctr32_two
- pshufd $192,%xmm0,%xmm5
+ je .L042ctr32_two
+ pshufd $192,%xmm1,%xmm5
por %xmm7,%xmm4
cmpl $4,%eax
- jb .L039ctr32_three
- pshufd $128,%xmm0,%xmm6
+ jb .L043ctr32_three
+ pshufd $128,%xmm1,%xmm6
por %xmm7,%xmm5
- je .L040ctr32_four
+ je .L044ctr32_four
por %xmm7,%xmm6
call _aesni_encrypt6
movups (%esi),%xmm1
@@ -882,39 +963,39 @@ aesni_ctr32_encrypt_blocks:
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
- jmp .L036ctr32_ret
+ jmp .L040ctr32_ret
.align 16
-.L033ctr32_one_shortcut:
+.L037ctr32_one_shortcut:
movups (%ebx),%xmm2
movl 240(%edx),%ecx
-.L037ctr32_one:
+.L041ctr32_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
-.L041enc1_loop_7:
+.L045enc1_loop_7:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L041enc1_loop_7
+ jnz .L045enc1_loop_7
.byte 102,15,56,221,209
movups (%esi),%xmm6
xorps %xmm2,%xmm6
movups %xmm6,(%edi)
- jmp .L036ctr32_ret
+ jmp .L040ctr32_ret
.align 16
-.L038ctr32_two:
- call _aesni_encrypt3
+.L042ctr32_two:
+ call _aesni_encrypt2
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
- jmp .L036ctr32_ret
+ jmp .L040ctr32_ret
.align 16
-.L039ctr32_three:
+.L043ctr32_three:
call _aesni_encrypt3
movups (%esi),%xmm5
movups 16(%esi),%xmm6
@@ -925,9 +1006,9 @@ aesni_ctr32_encrypt_blocks:
xorps %xmm7,%xmm4
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
- jmp .L036ctr32_ret
+ jmp .L040ctr32_ret
.align 16
-.L040ctr32_four:
+.L044ctr32_four:
call _aesni_encrypt4
movups (%esi),%xmm6
movups 16(%esi),%xmm7
@@ -941,7 +1022,18 @@ aesni_ctr32_encrypt_blocks:
xorps %xmm0,%xmm5
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
-.L036ctr32_ret:
+.L040ctr32_ret:
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ movdqa %xmm0,32(%esp)
+ pxor %xmm5,%xmm5
+ movdqa %xmm0,48(%esp)
+ pxor %xmm6,%xmm6
+ movdqa %xmm0,64(%esp)
+ pxor %xmm7,%xmm7
movl 80(%esp),%esp
popl %edi
popl %esi
@@ -966,12 +1058,12 @@ aesni_xts_encrypt:
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
-.L042enc1_loop_8:
+.L046enc1_loop_8:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L042enc1_loop_8
+ jnz .L046enc1_loop_8
.byte 102,15,56,221,209
movl 20(%esp),%esi
movl 24(%esp),%edi
@@ -995,12 +1087,14 @@ aesni_xts_encrypt:
movl %edx,%ebp
movl %ecx,%ebx
subl $96,%eax
- jc .L043xts_enc_short
- shrl $1,%ecx
- movl %ecx,%ebx
- jmp .L044xts_enc_loop6
+ jc .L047xts_enc_short
+ shll $4,%ecx
+ movl $16,%ebx
+ subl %ecx,%ebx
+ leal 32(%edx,%ecx,1),%edx
+ jmp .L048xts_enc_loop6
.align 16
-.L044xts_enc_loop6:
+.L048xts_enc_loop6:
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,(%esp)
@@ -1036,6 +1130,7 @@ aesni_xts_encrypt:
pand %xmm3,%xmm7
movups (%esi),%xmm2
pxor %xmm1,%xmm7
+ movl %ebx,%ecx
movdqu 16(%esi),%xmm3
xorps %xmm0,%xmm2
movdqu 32(%esi),%xmm4
@@ -1051,19 +1146,17 @@ aesni_xts_encrypt:
movdqa %xmm7,80(%esp)
pxor %xmm1,%xmm7
movups 16(%ebp),%xmm1
- leal 32(%ebp),%edx
pxor 16(%esp),%xmm3
-.byte 102,15,56,220,209
pxor 32(%esp),%xmm4
-.byte 102,15,56,220,217
+.byte 102,15,56,220,209
pxor 48(%esp),%xmm5
- decl %ecx
-.byte 102,15,56,220,225
pxor 64(%esp),%xmm6
-.byte 102,15,56,220,233
+.byte 102,15,56,220,217
pxor %xmm0,%xmm7
+ movups 32(%ebp),%xmm0
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
.byte 102,15,56,220,241
- movups (%edx),%xmm0
.byte 102,15,56,220,249
call .L_aesni_encrypt6_enter
movdqa 80(%esp),%xmm1
@@ -1088,26 +1181,25 @@ aesni_xts_encrypt:
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
- movl %ebx,%ecx
pxor %xmm2,%xmm1
subl $96,%eax
- jnc .L044xts_enc_loop6
- leal 1(,%ecx,2),%ecx
+ jnc .L048xts_enc_loop6
+ movl 240(%ebp),%ecx
movl %ebp,%edx
movl %ecx,%ebx
-.L043xts_enc_short:
+.L047xts_enc_short:
addl $96,%eax
- jz .L045xts_enc_done6x
+ jz .L049xts_enc_done6x
movdqa %xmm1,%xmm5
cmpl $32,%eax
- jb .L046xts_enc_one
+ jb .L050xts_enc_one
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
- je .L047xts_enc_two
+ je .L051xts_enc_two
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm6
@@ -1116,7 +1208,7 @@ aesni_xts_encrypt:
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
cmpl $64,%eax
- jb .L048xts_enc_three
+ jb .L052xts_enc_three
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm7
@@ -1126,7 +1218,7 @@ aesni_xts_encrypt:
pxor %xmm2,%xmm1
movdqa %xmm5,(%esp)
movdqa %xmm6,16(%esp)
- je .L049xts_enc_four
+ je .L053xts_enc_four
movdqa %xmm7,32(%esp)
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,48(%esp)
@@ -1158,9 +1250,9 @@ aesni_xts_encrypt:
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
leal 80(%edi),%edi
- jmp .L050xts_enc_done
+ jmp .L054xts_enc_done
.align 16
-.L046xts_enc_one:
+.L050xts_enc_one:
movups (%esi),%xmm2
leal 16(%esi),%esi
xorps %xmm5,%xmm2
@@ -1168,37 +1260,36 @@ aesni_xts_encrypt:
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
-.L051enc1_loop_9:
+.L055enc1_loop_9:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L051enc1_loop_9
+ jnz .L055enc1_loop_9
.byte 102,15,56,221,209
xorps %xmm5,%xmm2
movups %xmm2,(%edi)
leal 16(%edi),%edi
movdqa %xmm5,%xmm1
- jmp .L050xts_enc_done
+ jmp .L054xts_enc_done
.align 16
-.L047xts_enc_two:
+.L051xts_enc_two:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
leal 32(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
- xorps %xmm4,%xmm4
- call _aesni_encrypt3
+ call _aesni_encrypt2
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
leal 32(%edi),%edi
movdqa %xmm6,%xmm1
- jmp .L050xts_enc_done
+ jmp .L054xts_enc_done
.align 16
-.L048xts_enc_three:
+.L052xts_enc_three:
movaps %xmm1,%xmm7
movups (%esi),%xmm2
movups 16(%esi),%xmm3
@@ -1216,9 +1307,9 @@ aesni_xts_encrypt:
movups %xmm4,32(%edi)
leal 48(%edi),%edi
movdqa %xmm7,%xmm1
- jmp .L050xts_enc_done
+ jmp .L054xts_enc_done
.align 16
-.L049xts_enc_four:
+.L053xts_enc_four:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
@@ -1240,28 +1331,28 @@ aesni_xts_encrypt:
movups %xmm5,48(%edi)
leal 64(%edi),%edi
movdqa %xmm6,%xmm1
- jmp .L050xts_enc_done
+ jmp .L054xts_enc_done
.align 16
-.L045xts_enc_done6x:
+.L049xts_enc_done6x:
movl 112(%esp),%eax
andl $15,%eax
- jz .L052xts_enc_ret
+ jz .L056xts_enc_ret
movdqa %xmm1,%xmm5
movl %eax,112(%esp)
- jmp .L053xts_enc_steal
+ jmp .L057xts_enc_steal
.align 16
-.L050xts_enc_done:
+.L054xts_enc_done:
movl 112(%esp),%eax
pxor %xmm0,%xmm0
andl $15,%eax
- jz .L052xts_enc_ret
+ jz .L056xts_enc_ret
pcmpgtd %xmm1,%xmm0
movl %eax,112(%esp)
pshufd $19,%xmm0,%xmm5
paddq %xmm1,%xmm1
pand 96(%esp),%xmm5
pxor %xmm1,%xmm5
-.L053xts_enc_steal:
+.L057xts_enc_steal:
movzbl (%esi),%ecx
movzbl -16(%edi),%edx
leal 1(%esi),%esi
@@ -1269,7 +1360,7 @@ aesni_xts_encrypt:
movb %dl,(%edi)
leal 1(%edi),%edi
subl $1,%eax
- jnz .L053xts_enc_steal
+ jnz .L057xts_enc_steal
subl 112(%esp),%edi
movl %ebp,%edx
movl %ebx,%ecx
@@ -1279,16 +1370,30 @@ aesni_xts_encrypt:
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
-.L054enc1_loop_10:
+.L058enc1_loop_10:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L054enc1_loop_10
+ jnz .L058enc1_loop_10
.byte 102,15,56,221,209
xorps %xmm5,%xmm2
movups %xmm2,-16(%edi)
-.L052xts_enc_ret:
+.L056xts_enc_ret:
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ movdqa %xmm0,(%esp)
+ pxor %xmm3,%xmm3
+ movdqa %xmm0,16(%esp)
+ pxor %xmm4,%xmm4
+ movdqa %xmm0,32(%esp)
+ pxor %xmm5,%xmm5
+ movdqa %xmm0,48(%esp)
+ pxor %xmm6,%xmm6
+ movdqa %xmm0,64(%esp)
+ pxor %xmm7,%xmm7
+ movdqa %xmm0,80(%esp)
movl 116(%esp),%esp
popl %edi
popl %esi
@@ -1313,12 +1418,12 @@ aesni_xts_decrypt:
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
-.L055enc1_loop_11:
+.L059enc1_loop_11:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L055enc1_loop_11
+ jnz .L059enc1_loop_11
.byte 102,15,56,221,209
movl 20(%esp),%esi
movl 24(%esp),%edi
@@ -1347,12 +1452,14 @@ aesni_xts_decrypt:
pcmpgtd %xmm1,%xmm0
andl $-16,%eax
subl $96,%eax
- jc .L056xts_dec_short
- shrl $1,%ecx
- movl %ecx,%ebx
- jmp .L057xts_dec_loop6
+ jc .L060xts_dec_short
+ shll $4,%ecx
+ movl $16,%ebx
+ subl %ecx,%ebx
+ leal 32(%edx,%ecx,1),%edx
+ jmp .L061xts_dec_loop6
.align 16
-.L057xts_dec_loop6:
+.L061xts_dec_loop6:
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,(%esp)
@@ -1388,6 +1495,7 @@ aesni_xts_decrypt:
pand %xmm3,%xmm7
movups (%esi),%xmm2
pxor %xmm1,%xmm7
+ movl %ebx,%ecx
movdqu 16(%esi),%xmm3
xorps %xmm0,%xmm2
movdqu 32(%esi),%xmm4
@@ -1403,19 +1511,17 @@ aesni_xts_decrypt:
movdqa %xmm7,80(%esp)
pxor %xmm1,%xmm7
movups 16(%ebp),%xmm1
- leal 32(%ebp),%edx
pxor 16(%esp),%xmm3
-.byte 102,15,56,222,209
pxor 32(%esp),%xmm4
-.byte 102,15,56,222,217
+.byte 102,15,56,222,209
pxor 48(%esp),%xmm5
- decl %ecx
-.byte 102,15,56,222,225
pxor 64(%esp),%xmm6
-.byte 102,15,56,222,233
+.byte 102,15,56,222,217
pxor %xmm0,%xmm7
+ movups 32(%ebp),%xmm0
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
.byte 102,15,56,222,241
- movups (%edx),%xmm0
.byte 102,15,56,222,249
call .L_aesni_decrypt6_enter
movdqa 80(%esp),%xmm1
@@ -1440,26 +1546,25 @@ aesni_xts_decrypt:
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
- movl %ebx,%ecx
pxor %xmm2,%xmm1
subl $96,%eax
- jnc .L057xts_dec_loop6
- leal 1(,%ecx,2),%ecx
+ jnc .L061xts_dec_loop6
+ movl 240(%ebp),%ecx
movl %ebp,%edx
movl %ecx,%ebx
-.L056xts_dec_short:
+.L060xts_dec_short:
addl $96,%eax
- jz .L058xts_dec_done6x
+ jz .L062xts_dec_done6x
movdqa %xmm1,%xmm5
cmpl $32,%eax
- jb .L059xts_dec_one
+ jb .L063xts_dec_one
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
- je .L060xts_dec_two
+ je .L064xts_dec_two
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm6
@@ -1468,7 +1573,7 @@ aesni_xts_decrypt:
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
cmpl $64,%eax
- jb .L061xts_dec_three
+ jb .L065xts_dec_three
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm7
@@ -1478,7 +1583,7 @@ aesni_xts_decrypt:
pxor %xmm2,%xmm1
movdqa %xmm5,(%esp)
movdqa %xmm6,16(%esp)
- je .L062xts_dec_four
+ je .L066xts_dec_four
movdqa %xmm7,32(%esp)
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,48(%esp)
@@ -1510,9 +1615,9 @@ aesni_xts_decrypt:
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
leal 80(%edi),%edi
- jmp .L063xts_dec_done
+ jmp .L067xts_dec_done
.align 16
-.L059xts_dec_one:
+.L063xts_dec_one:
movups (%esi),%xmm2
leal 16(%esi),%esi
xorps %xmm5,%xmm2
@@ -1520,36 +1625,36 @@ aesni_xts_decrypt:
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
-.L064dec1_loop_12:
+.L068dec1_loop_12:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L064dec1_loop_12
+ jnz .L068dec1_loop_12
.byte 102,15,56,223,209
xorps %xmm5,%xmm2
movups %xmm2,(%edi)
leal 16(%edi),%edi
movdqa %xmm5,%xmm1
- jmp .L063xts_dec_done
+ jmp .L067xts_dec_done
.align 16
-.L060xts_dec_two:
+.L064xts_dec_two:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
leal 32(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
- call _aesni_decrypt3
+ call _aesni_decrypt2
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
leal 32(%edi),%edi
movdqa %xmm6,%xmm1
- jmp .L063xts_dec_done
+ jmp .L067xts_dec_done
.align 16
-.L061xts_dec_three:
+.L065xts_dec_three:
movaps %xmm1,%xmm7
movups (%esi),%xmm2
movups 16(%esi),%xmm3
@@ -1567,9 +1672,9 @@ aesni_xts_decrypt:
movups %xmm4,32(%edi)
leal 48(%edi),%edi
movdqa %xmm7,%xmm1
- jmp .L063xts_dec_done
+ jmp .L067xts_dec_done
.align 16
-.L062xts_dec_four:
+.L066xts_dec_four:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
@@ -1591,20 +1696,20 @@ aesni_xts_decrypt:
movups %xmm5,48(%edi)
leal 64(%edi),%edi
movdqa %xmm6,%xmm1
- jmp .L063xts_dec_done
+ jmp .L067xts_dec_done
.align 16
-.L058xts_dec_done6x:
+.L062xts_dec_done6x:
movl 112(%esp),%eax
andl $15,%eax
- jz .L065xts_dec_ret
+ jz .L069xts_dec_ret
movl %eax,112(%esp)
- jmp .L066xts_dec_only_one_more
+ jmp .L070xts_dec_only_one_more
.align 16
-.L063xts_dec_done:
+.L067xts_dec_done:
movl 112(%esp),%eax
pxor %xmm0,%xmm0
andl $15,%eax
- jz .L065xts_dec_ret
+ jz .L069xts_dec_ret
pcmpgtd %xmm1,%xmm0
movl %eax,112(%esp)
pshufd $19,%xmm0,%xmm2
@@ -1614,7 +1719,7 @@ aesni_xts_decrypt:
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
-.L066xts_dec_only_one_more:
+.L070xts_dec_only_one_more:
pshufd $19,%xmm0,%xmm5
movdqa %xmm1,%xmm6
paddq %xmm1,%xmm1
@@ -1628,16 +1733,16 @@ aesni_xts_decrypt:
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
-.L067dec1_loop_13:
+.L071dec1_loop_13:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L067dec1_loop_13
+ jnz .L071dec1_loop_13
.byte 102,15,56,223,209
xorps %xmm5,%xmm2
movups %xmm2,(%edi)
-.L068xts_dec_steal:
+.L072xts_dec_steal:
movzbl 16(%esi),%ecx
movzbl (%edi),%edx
leal 1(%esi),%esi
@@ -1645,7 +1750,7 @@ aesni_xts_decrypt:
movb %dl,16(%edi)
leal 1(%edi),%edi
subl $1,%eax
- jnz .L068xts_dec_steal
+ jnz .L072xts_dec_steal
subl 112(%esp),%edi
movl %ebp,%edx
movl %ebx,%ecx
@@ -1655,16 +1760,30 @@ aesni_xts_decrypt:
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
-.L069dec1_loop_14:
+.L073dec1_loop_14:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L069dec1_loop_14
+ jnz .L073dec1_loop_14
.byte 102,15,56,223,209
xorps %xmm6,%xmm2
movups %xmm2,(%edi)
-.L065xts_dec_ret:
+.L069xts_dec_ret:
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ movdqa %xmm0,(%esp)
+ pxor %xmm3,%xmm3
+ movdqa %xmm0,16(%esp)
+ pxor %xmm4,%xmm4
+ movdqa %xmm0,32(%esp)
+ pxor %xmm5,%xmm5
+ movdqa %xmm0,48(%esp)
+ pxor %xmm6,%xmm6
+ movdqa %xmm0,64(%esp)
+ pxor %xmm7,%xmm7
+ movdqa %xmm0,80(%esp)
movl 116(%esp),%esp
popl %edi
popl %esi
@@ -1690,7 +1809,7 @@ aesni_cbc_encrypt:
movl 32(%esp),%edx
movl 36(%esp),%ebp
testl %eax,%eax
- jz .L070cbc_abort
+ jz .L074cbc_abort
cmpl $0,40(%esp)
xchgl %esp,%ebx
movups (%ebp),%xmm7
@@ -1698,14 +1817,14 @@ aesni_cbc_encrypt:
movl %edx,%ebp
movl %ebx,16(%esp)
movl %ecx,%ebx
- je .L071cbc_decrypt
+ je .L075cbc_decrypt
movaps %xmm7,%xmm2
cmpl $16,%eax
- jb .L072cbc_enc_tail
+ jb .L076cbc_enc_tail
subl $16,%eax
- jmp .L073cbc_enc_loop
+ jmp .L077cbc_enc_loop
.align 16
-.L073cbc_enc_loop:
+.L077cbc_enc_loop:
movups (%esi),%xmm7
leal 16(%esi),%esi
movups (%edx),%xmm0
@@ -1713,24 +1832,25 @@ aesni_cbc_encrypt:
xorps %xmm0,%xmm7
leal 32(%edx),%edx
xorps %xmm7,%xmm2
-.L074enc1_loop_15:
+.L078enc1_loop_15:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L074enc1_loop_15
+ jnz .L078enc1_loop_15
.byte 102,15,56,221,209
movl %ebx,%ecx
movl %ebp,%edx
movups %xmm2,(%edi)
leal 16(%edi),%edi
subl $16,%eax
- jnc .L073cbc_enc_loop
+ jnc .L077cbc_enc_loop
addl $16,%eax
- jnz .L072cbc_enc_tail
+ jnz .L076cbc_enc_tail
movaps %xmm2,%xmm7
- jmp .L075cbc_ret
-.L072cbc_enc_tail:
+ pxor %xmm2,%xmm2
+ jmp .L079cbc_ret
+.L076cbc_enc_tail:
movl %eax,%ecx
.long 2767451785
movl $16,%ecx
@@ -1741,20 +1861,20 @@ aesni_cbc_encrypt:
movl %ebx,%ecx
movl %edi,%esi
movl %ebp,%edx
- jmp .L073cbc_enc_loop
+ jmp .L077cbc_enc_loop
.align 16
-.L071cbc_decrypt:
+.L075cbc_decrypt:
cmpl $80,%eax
- jbe .L076cbc_dec_tail
+ jbe .L080cbc_dec_tail
movaps %xmm7,(%esp)
subl $80,%eax
- jmp .L077cbc_dec_loop6_enter
+ jmp .L081cbc_dec_loop6_enter
.align 16
-.L078cbc_dec_loop6:
+.L082cbc_dec_loop6:
movaps %xmm0,(%esp)
movups %xmm7,(%edi)
leal 16(%edi),%edi
-.L077cbc_dec_loop6_enter:
+.L081cbc_dec_loop6_enter:
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
@@ -1784,28 +1904,28 @@ aesni_cbc_encrypt:
movups %xmm6,64(%edi)
leal 80(%edi),%edi
subl $96,%eax
- ja .L078cbc_dec_loop6
+ ja .L082cbc_dec_loop6
movaps %xmm7,%xmm2
movaps %xmm0,%xmm7
addl $80,%eax
- jle .L079cbc_dec_tail_collected
+ jle .L083cbc_dec_clear_tail_collected
movups %xmm2,(%edi)
leal 16(%edi),%edi
-.L076cbc_dec_tail:
+.L080cbc_dec_tail:
movups (%esi),%xmm2
movaps %xmm2,%xmm6
cmpl $16,%eax
- jbe .L080cbc_dec_one
+ jbe .L084cbc_dec_one
movups 16(%esi),%xmm3
movaps %xmm3,%xmm5
cmpl $32,%eax
- jbe .L081cbc_dec_two
+ jbe .L085cbc_dec_two
movups 32(%esi),%xmm4
cmpl $48,%eax
- jbe .L082cbc_dec_three
+ jbe .L086cbc_dec_three
movups 48(%esi),%xmm5
cmpl $64,%eax
- jbe .L083cbc_dec_four
+ jbe .L087cbc_dec_four
movups 64(%esi),%xmm6
movaps %xmm7,(%esp)
movups (%esi),%xmm2
@@ -1823,56 +1943,62 @@ aesni_cbc_encrypt:
xorps %xmm0,%xmm6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
+ pxor %xmm3,%xmm3
movups %xmm4,32(%edi)
+ pxor %xmm4,%xmm4
movups %xmm5,48(%edi)
+ pxor %xmm5,%xmm5
leal 64(%edi),%edi
movaps %xmm6,%xmm2
+ pxor %xmm6,%xmm6
subl $80,%eax
- jmp .L079cbc_dec_tail_collected
+ jmp .L088cbc_dec_tail_collected
.align 16
-.L080cbc_dec_one:
+.L084cbc_dec_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
-.L084dec1_loop_16:
+.L089dec1_loop_16:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
- jnz .L084dec1_loop_16
+ jnz .L089dec1_loop_16
.byte 102,15,56,223,209
xorps %xmm7,%xmm2
movaps %xmm6,%xmm7
subl $16,%eax
- jmp .L079cbc_dec_tail_collected
+ jmp .L088cbc_dec_tail_collected
.align 16
-.L081cbc_dec_two:
- xorps %xmm4,%xmm4
- call _aesni_decrypt3
+.L085cbc_dec_two:
+ call _aesni_decrypt2
xorps %xmm7,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movaps %xmm3,%xmm2
+ pxor %xmm3,%xmm3
leal 16(%edi),%edi
movaps %xmm5,%xmm7
subl $32,%eax
- jmp .L079cbc_dec_tail_collected
+ jmp .L088cbc_dec_tail_collected
.align 16
-.L082cbc_dec_three:
+.L086cbc_dec_three:
call _aesni_decrypt3
xorps %xmm7,%xmm2
xorps %xmm6,%xmm3
xorps %xmm5,%xmm4
movups %xmm2,(%edi)
movaps %xmm4,%xmm2
+ pxor %xmm4,%xmm4
movups %xmm3,16(%edi)
+ pxor %xmm3,%xmm3
leal 32(%edi),%edi
movups 32(%esi),%xmm7
subl $48,%eax
- jmp .L079cbc_dec_tail_collected
+ jmp .L088cbc_dec_tail_collected
.align 16
-.L083cbc_dec_four:
+.L087cbc_dec_four:
call _aesni_decrypt4
movups 16(%esi),%xmm1
movups 32(%esi),%xmm0
@@ -1882,28 +2008,44 @@ aesni_cbc_encrypt:
movups %xmm2,(%edi)
xorps %xmm1,%xmm4
movups %xmm3,16(%edi)
+ pxor %xmm3,%xmm3
xorps %xmm0,%xmm5
movups %xmm4,32(%edi)
+ pxor %xmm4,%xmm4
leal 48(%edi),%edi
movaps %xmm5,%xmm2
+ pxor %xmm5,%xmm5
subl $64,%eax
-.L079cbc_dec_tail_collected:
+ jmp .L088cbc_dec_tail_collected
+.align 16
+.L083cbc_dec_clear_tail_collected:
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+.L088cbc_dec_tail_collected:
andl $15,%eax
- jnz .L085cbc_dec_tail_partial
+ jnz .L090cbc_dec_tail_partial
movups %xmm2,(%edi)
- jmp .L075cbc_ret
+ pxor %xmm0,%xmm0
+ jmp .L079cbc_ret
.align 16
-.L085cbc_dec_tail_partial:
+.L090cbc_dec_tail_partial:
movaps %xmm2,(%esp)
+ pxor %xmm0,%xmm0
movl $16,%ecx
movl %esp,%esi
subl %eax,%ecx
.long 2767451785
-.L075cbc_ret:
+ movdqa %xmm2,(%esp)
+.L079cbc_ret:
movl 16(%esp),%esp
movl 36(%esp),%ebp
+ pxor %xmm2,%xmm2
+ pxor %xmm1,%xmm1
movups %xmm7,(%ebp)
-.L070cbc_abort:
+ pxor %xmm7,%xmm7
+.L074cbc_abort:
popl %edi
popl %esi
popl %ebx
@@ -1913,52 +2055,62 @@ aesni_cbc_encrypt:
.type _aesni_set_encrypt_key,@function
.align 16
_aesni_set_encrypt_key:
+ pushl %ebp
+ pushl %ebx
testl %eax,%eax
- jz .L086bad_pointer
+ jz .L091bad_pointer
testl %edx,%edx
- jz .L086bad_pointer
+ jz .L091bad_pointer
+ call .L092pic
+.L092pic:
+ popl %ebx
+ leal .Lkey_const-.L092pic(%ebx),%ebx
+ leal OPENSSL_ia32cap_P,%ebp
movups (%eax),%xmm0
xorps %xmm4,%xmm4
+ movl 4(%ebp),%ebp
leal 16(%edx),%edx
+ andl $268437504,%ebp
cmpl $256,%ecx
- je .L08714rounds
+ je .L09314rounds
cmpl $192,%ecx
- je .L08812rounds
+ je .L09412rounds
cmpl $128,%ecx
- jne .L089bad_keybits
+ jne .L095bad_keybits
.align 16
-.L09010rounds:
+.L09610rounds:
+ cmpl $268435456,%ebp
+ je .L09710rounds_alt
movl $9,%ecx
movups %xmm0,-16(%edx)
.byte 102,15,58,223,200,1
- call .L091key_128_cold
+ call .L098key_128_cold
.byte 102,15,58,223,200,2
- call .L092key_128
+ call .L099key_128
.byte 102,15,58,223,200,4
- call .L092key_128
+ call .L099key_128
.byte 102,15,58,223,200,8
- call .L092key_128
+ call .L099key_128
.byte 102,15,58,223,200,16
- call .L092key_128
+ call .L099key_128
.byte 102,15,58,223,200,32
- call .L092key_128
+ call .L099key_128
.byte 102,15,58,223,200,64
- call .L092key_128
+ call .L099key_128
.byte 102,15,58,223,200,128
- call .L092key_128
+ call .L099key_128
.byte 102,15,58,223,200,27
- call .L092key_128
+ call .L099key_128
.byte 102,15,58,223,200,54
- call .L092key_128
+ call .L099key_128
movups %xmm0,(%edx)
movl %ecx,80(%edx)
- xorl %eax,%eax
- ret
+ jmp .L100good_key
.align 16
-.L092key_128:
+.L099key_128:
movups %xmm0,(%edx)
leal 16(%edx),%edx
-.L091key_128_cold:
+.L098key_128_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
@@ -1967,38 +2119,91 @@ _aesni_set_encrypt_key:
xorps %xmm1,%xmm0
ret
.align 16
-.L08812rounds:
+.L09710rounds_alt:
+ movdqa (%ebx),%xmm5
+ movl $8,%ecx
+ movdqa 32(%ebx),%xmm4
+ movdqa %xmm0,%xmm2
+ movdqu %xmm0,-16(%edx)
+.L101loop_key128:
+.byte 102,15,56,0,197
+.byte 102,15,56,221,196
+ pslld $1,%xmm4
+ leal 16(%edx),%edx
+ movdqa %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm3,%xmm2
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,-16(%edx)
+ movdqa %xmm0,%xmm2
+ decl %ecx
+ jnz .L101loop_key128
+ movdqa 48(%ebx),%xmm4
+.byte 102,15,56,0,197
+.byte 102,15,56,221,196
+ pslld $1,%xmm4
+ movdqa %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm3,%xmm2
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,(%edx)
+ movdqa %xmm0,%xmm2
+.byte 102,15,56,0,197
+.byte 102,15,56,221,196
+ movdqa %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm3,%xmm2
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,16(%edx)
+ movl $9,%ecx
+ movl %ecx,96(%edx)
+ jmp .L100good_key
+.align 16
+.L09412rounds:
movq 16(%eax),%xmm2
+ cmpl $268435456,%ebp
+ je .L10212rounds_alt
movl $11,%ecx
movups %xmm0,-16(%edx)
.byte 102,15,58,223,202,1
- call .L093key_192a_cold
+ call .L103key_192a_cold
.byte 102,15,58,223,202,2
- call .L094key_192b
+ call .L104key_192b
.byte 102,15,58,223,202,4
- call .L095key_192a
+ call .L105key_192a
.byte 102,15,58,223,202,8
- call .L094key_192b
+ call .L104key_192b
.byte 102,15,58,223,202,16
- call .L095key_192a
+ call .L105key_192a
.byte 102,15,58,223,202,32
- call .L094key_192b
+ call .L104key_192b
.byte 102,15,58,223,202,64
- call .L095key_192a
+ call .L105key_192a
.byte 102,15,58,223,202,128
- call .L094key_192b
+ call .L104key_192b
movups %xmm0,(%edx)
movl %ecx,48(%edx)
- xorl %eax,%eax
- ret
+ jmp .L100good_key
.align 16
-.L095key_192a:
+.L105key_192a:
movups %xmm0,(%edx)
leal 16(%edx),%edx
.align 16
-.L093key_192a_cold:
+.L103key_192a_cold:
movaps %xmm2,%xmm5
-.L096key_192b_warm:
+.L106key_192b_warm:
shufps $16,%xmm0,%xmm4
movdqa %xmm2,%xmm3
xorps %xmm4,%xmm0
@@ -2012,56 +2217,90 @@ _aesni_set_encrypt_key:
pxor %xmm3,%xmm2
ret
.align 16
-.L094key_192b:
+.L104key_192b:
movaps %xmm0,%xmm3
shufps $68,%xmm0,%xmm5
movups %xmm5,(%edx)
shufps $78,%xmm2,%xmm3
movups %xmm3,16(%edx)
leal 32(%edx),%edx
- jmp .L096key_192b_warm
+ jmp .L106key_192b_warm
+.align 16
+.L10212rounds_alt:
+ movdqa 16(%ebx),%xmm5
+ movdqa 32(%ebx),%xmm4
+ movl $8,%ecx
+ movdqu %xmm0,-16(%edx)
+.L107loop_key192:
+ movq %xmm2,(%edx)
+ movdqa %xmm2,%xmm1
+.byte 102,15,56,0,213
+.byte 102,15,56,221,212
+ pslld $1,%xmm4
+ leal 24(%edx),%edx
+ movdqa %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm3,%xmm0
+ pshufd $255,%xmm0,%xmm3
+ pxor %xmm1,%xmm3
+ pslldq $4,%xmm1
+ pxor %xmm1,%xmm3
+ pxor %xmm2,%xmm0
+ pxor %xmm3,%xmm2
+ movdqu %xmm0,-16(%edx)
+ decl %ecx
+ jnz .L107loop_key192
+ movl $11,%ecx
+ movl %ecx,32(%edx)
+ jmp .L100good_key
.align 16
-.L08714rounds:
+.L09314rounds:
movups 16(%eax),%xmm2
- movl $13,%ecx
leal 16(%edx),%edx
+ cmpl $268435456,%ebp
+ je .L10814rounds_alt
+ movl $13,%ecx
movups %xmm0,-32(%edx)
movups %xmm2,-16(%edx)
.byte 102,15,58,223,202,1
- call .L097key_256a_cold
+ call .L109key_256a_cold
.byte 102,15,58,223,200,1
- call .L098key_256b
+ call .L110key_256b
.byte 102,15,58,223,202,2
- call .L099key_256a
+ call .L111key_256a
.byte 102,15,58,223,200,2
- call .L098key_256b
+ call .L110key_256b
.byte 102,15,58,223,202,4
- call .L099key_256a
+ call .L111key_256a
.byte 102,15,58,223,200,4
- call .L098key_256b
+ call .L110key_256b
.byte 102,15,58,223,202,8
- call .L099key_256a
+ call .L111key_256a
.byte 102,15,58,223,200,8
- call .L098key_256b
+ call .L110key_256b
.byte 102,15,58,223,202,16
- call .L099key_256a
+ call .L111key_256a
.byte 102,15,58,223,200,16
- call .L098key_256b
+ call .L110key_256b
.byte 102,15,58,223,202,32
- call .L099key_256a
+ call .L111key_256a
.byte 102,15,58,223,200,32
- call .L098key_256b
+ call .L110key_256b
.byte 102,15,58,223,202,64
- call .L099key_256a
+ call .L111key_256a
movups %xmm0,(%edx)
movl %ecx,16(%edx)
xorl %eax,%eax
- ret
+ jmp .L100good_key
.align 16
-.L099key_256a:
+.L111key_256a:
movups %xmm2,(%edx)
leal 16(%edx),%edx
-.L097key_256a_cold:
+.L109key_256a_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
@@ -2070,7 +2309,7 @@ _aesni_set_encrypt_key:
xorps %xmm1,%xmm0
ret
.align 16
-.L098key_256b:
+.L110key_256b:
movups %xmm0,(%edx)
leal 16(%edx),%edx
shufps $16,%xmm2,%xmm4
@@ -2080,13 +2319,70 @@ _aesni_set_encrypt_key:
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
+.align 16
+.L10814rounds_alt:
+ movdqa (%ebx),%xmm5
+ movdqa 32(%ebx),%xmm4
+ movl $7,%ecx
+ movdqu %xmm0,-32(%edx)
+ movdqa %xmm2,%xmm1
+ movdqu %xmm2,-16(%edx)
+.L112loop_key256:
+.byte 102,15,56,0,213
+.byte 102,15,56,221,212
+ movdqa %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm3,%xmm0
+ pslld $1,%xmm4
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,(%edx)
+ decl %ecx
+ jz .L113done_key256
+ pshufd $255,%xmm0,%xmm2
+ pxor %xmm3,%xmm3
+.byte 102,15,56,221,211
+ movdqa %xmm1,%xmm3
+ pslldq $4,%xmm1
+ pxor %xmm1,%xmm3
+ pslldq $4,%xmm1
+ pxor %xmm1,%xmm3
+ pslldq $4,%xmm1
+ pxor %xmm3,%xmm1
+ pxor %xmm1,%xmm2
+ movdqu %xmm2,16(%edx)
+ leal 32(%edx),%edx
+ movdqa %xmm2,%xmm1
+ jmp .L112loop_key256
+.L113done_key256:
+ movl $13,%ecx
+ movl %ecx,16(%edx)
+.L100good_key:
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ xorl %eax,%eax
+ popl %ebx
+ popl %ebp
+ ret
.align 4
-.L086bad_pointer:
+.L091bad_pointer:
movl $-1,%eax
+ popl %ebx
+ popl %ebp
ret
.align 4
-.L089bad_keybits:
+.L095bad_keybits:
+ pxor %xmm0,%xmm0
movl $-2,%eax
+ popl %ebx
+ popl %ebp
ret
.size _aesni_set_encrypt_key,.-_aesni_set_encrypt_key
.globl aesni_set_encrypt_key
@@ -2112,7 +2408,7 @@ aesni_set_decrypt_key:
movl 12(%esp),%edx
shll $4,%ecx
testl %eax,%eax
- jnz .L100dec_key_ret
+ jnz .L114dec_key_ret
leal 16(%edx,%ecx,1),%eax
movups (%edx),%xmm0
movups (%eax),%xmm1
@@ -2120,7 +2416,7 @@ aesni_set_decrypt_key:
movups %xmm1,(%edx)
leal 16(%edx),%edx
leal -16(%eax),%eax
-.L101dec_key_inverse:
+.L115dec_key_inverse:
movups (%edx),%xmm0
movups (%eax),%xmm1
.byte 102,15,56,219,192
@@ -2130,15 +2426,24 @@ aesni_set_decrypt_key:
movups %xmm0,16(%eax)
movups %xmm1,-16(%edx)
cmpl %edx,%eax
- ja .L101dec_key_inverse
+ ja .L115dec_key_inverse
movups (%edx),%xmm0
.byte 102,15,56,219,192
movups %xmm0,(%edx)
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
xorl %eax,%eax
-.L100dec_key_ret:
+.L114dec_key_ret:
ret
.size aesni_set_decrypt_key,.-.L_aesni_set_decrypt_key_begin
+.align 64
+.Lkey_const:
+.long 202313229,202313229,202313229,202313229
+.long 67569157,67569157,67569157,67569157
+.long 1,1,1,1
+.long 27,27,27,27
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69
.byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83
.byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115
.byte 115,108,46,111,114,103,62,0
+.comm OPENSSL_ia32cap_P,16,4
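The aesni-x86.s hunks above make two systematic changes. First, every multi-block helper (_aesni_encrypt2 through _aesni_decrypt6) drops the old shrl $1 half-count loop in favor of byte-offset indexing: the round count is scaled to a key-schedule offset (shll $4), %edx is parked 32 bytes past the end of the schedule, and a negated offset in %ecx is walked upward by 32 per iteration, fetching two round keys per pass at (%edx,%ecx,1) and -16(%edx,%ecx,1). Second, each public entry point now clears the XMM register bank (and, where round keys or counters were spilled, the stack scratch area) with pxor/movdqa stores before returning, presumably so key material does not linger after the call. Below is a minimal C model of the new indexing, assuming what this file's key setup shows, namely that the rounds slot at 240(key) holds one less than the AES round count (the setup code stores 9, 11 or 13); the function names are illustrative, not OpenSSL's:

#include <stdio.h>

/* Hedged sketch of the round-key walk in the rewritten _aesni_*crypt2..6
 * helpers.  nr1 models the value this i386 code keeps at 240(key): one
 * less than the AES round count.  Offsets are bytes into the key
 * schedule; each round key is 16 bytes. */
static void walk_round_keys(int nr1)
{
    long cx  = (long)nr1 << 4;      /* shll $4,%ecx                        */
    long end = 32 + cx;             /* leal 32(%edx,%ecx,1),%edx           */
    cx = 16 - cx;                   /* negl %ecx; addl $16,%ecx            */
    /* key 0 whitens the block before the loop; the keys at offsets
     * 16 and 32 are preloaded into xmm1/xmm0 */
    printf("preloaded: 0 16 32\n");
    do {
        printf("load %3ld\n", end + cx);        /* movups (%edx,%ecx,1),%xmm1    */
        cx += 32;                               /* addl $32,%ecx                 */
        printf("load %3ld\n", end + cx - 16);   /* movups -16(%edx,%ecx,1),%xmm0 */
    } while (cx != 0);                          /* jnz .L..._loop                */
    /* exits with offsets 16*nr1 and 16*(nr1+1) just loaded, i.e. the
     * two keys for the closing aesenc/aesenclast pair */
}

int main(void)
{
    walk_round_keys(9);             /* AES-128 */
    return 0;
}

The offset reaches zero exactly when the last two round keys have been fetched, which only works out for the odd counts stored here (k = (nr1-1)/2 loop iterations); the trailing aesenc/aesenclast pair outside the loop then consumes them.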
diff --git a/secure/lib/libcrypto/i386/bn-586.s b/secure/lib/libcrypto/i386/bn-586.s
index b40296e..3200c6d 100644
--- a/secure/lib/libcrypto/i386/bn-586.s
+++ b/secure/lib/libcrypto/i386/bn-586.s
@@ -1519,4 +1519,4 @@ bn_sub_part_words:
popl %ebp
ret
.size bn_sub_part_words,.-.L_bn_sub_part_words_begin
-.comm OPENSSL_ia32cap_P,8,4
+.comm OPENSSL_ia32cap_P,16,4
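The one-line bn-586.s change is part of the same ABI shift: the common symbol OPENSSL_ia32cap_P grows from two to four 32-bit words (8 to 16 bytes), with the upper words carrying extended CPUID feature bits used by the new AVX2 code paths. A sketch of how C code would view the enlarged vector follows; the word layout matches how the 1.0.2 assembly tests it, but the accessor names are illustrative:

/* The real symbol is defined in assembly (.comm OPENSSL_ia32cap_P,16,4)
 * and filled in by OPENSSL_ia32_cpuid().  Words 0-1 mirror CPUID.1
 * EDX/ECX; the 1.0.2 layout adds words 2-3 for extended features
 * (CPUID leaf 7), which the AVX2 routines test. */
extern unsigned int OPENSSL_ia32cap_P[4];

static int cpu_has_aesni(void)
{
    return (OPENSSL_ia32cap_P[1] >> 25) & 1;    /* CPUID.1:ECX bit 25 */
}

static int cpu_has_avx2(void)
{
    return (OPENSSL_ia32cap_P[2] >> 5) & 1;     /* CPUID.7:EBX bit 5 */
}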
diff --git a/secure/lib/libcrypto/i386/cast-586.s b/secure/lib/libcrypto/i386/cast-586.s
deleted file mode 100644
index 2d65735..0000000
--- a/secure/lib/libcrypto/i386/cast-586.s
+++ /dev/null
@@ -1,934 +0,0 @@
- # $FreeBSD$
-.file "cast-586.s"
-.text
-.globl CAST_encrypt
-.type CAST_encrypt,@function
-.align 16
-CAST_encrypt:
-.L_CAST_encrypt_begin:
-
- pushl %ebp
- pushl %ebx
- movl 12(%esp),%ebx
- movl 16(%esp),%ebp
- pushl %esi
- pushl %edi
-
- movl (%ebx),%edi
- movl 4(%ebx),%esi
-
- movl 128(%ebp),%eax
- pushl %eax
- xorl %eax,%eax
-
- movl (%ebp),%edx
- movl 4(%ebp),%ecx
- addl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- addl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 8(%ebp),%edx
- movl 12(%ebp),%ecx
- xorl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- xorl %ebx,%ecx
- xorl %ecx,%esi
-
- movl 16(%ebp),%edx
- movl 20(%ebp),%ecx
- subl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- subl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 24(%ebp),%edx
- movl 28(%ebp),%ecx
- addl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- addl %ebx,%ecx
- xorl %ecx,%esi
-
- movl 32(%ebp),%edx
- movl 36(%ebp),%ecx
- xorl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- xorl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 40(%ebp),%edx
- movl 44(%ebp),%ecx
- subl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- subl %ebx,%ecx
- xorl %ecx,%esi
-
- movl 48(%ebp),%edx
- movl 52(%ebp),%ecx
- addl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- addl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 56(%ebp),%edx
- movl 60(%ebp),%ecx
- xorl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- xorl %ebx,%ecx
- xorl %ecx,%esi
-
- movl 64(%ebp),%edx
- movl 68(%ebp),%ecx
- subl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- subl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 72(%ebp),%edx
- movl 76(%ebp),%ecx
- addl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- addl %ebx,%ecx
- xorl %ecx,%esi
-
- movl 80(%ebp),%edx
- movl 84(%ebp),%ecx
- xorl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- xorl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 88(%ebp),%edx
- movl 92(%ebp),%ecx
- subl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- subl %ebx,%ecx
- xorl %ecx,%esi
-
- popl %edx
- orl %edx,%edx
- jnz .L000cast_enc_done
-
- movl 96(%ebp),%edx
- movl 100(%ebp),%ecx
- addl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- addl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 104(%ebp),%edx
- movl 108(%ebp),%ecx
- xorl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- xorl %ebx,%ecx
- xorl %ecx,%esi
-
- movl 112(%ebp),%edx
- movl 116(%ebp),%ecx
- subl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- subl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 120(%ebp),%edx
- movl 124(%ebp),%ecx
- addl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- addl %ebx,%ecx
- xorl %ecx,%esi
-.L000cast_enc_done:
- nop
- movl 20(%esp),%eax
- movl %edi,4(%eax)
- movl %esi,(%eax)
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size CAST_encrypt,.-.L_CAST_encrypt_begin
-.globl CAST_decrypt
-.type CAST_decrypt,@function
-.align 16
-CAST_decrypt:
-.L_CAST_decrypt_begin:
-
- pushl %ebp
- pushl %ebx
- movl 12(%esp),%ebx
- movl 16(%esp),%ebp
- pushl %esi
- pushl %edi
-
- movl (%ebx),%edi
- movl 4(%ebx),%esi
-
- movl 128(%ebp),%eax
- orl %eax,%eax
- jnz .L001cast_dec_skip
- xorl %eax,%eax
-
- movl 120(%ebp),%edx
- movl 124(%ebp),%ecx
- addl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- addl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 112(%ebp),%edx
- movl 116(%ebp),%ecx
- subl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- subl %ebx,%ecx
- xorl %ecx,%esi
-
- movl 104(%ebp),%edx
- movl 108(%ebp),%ecx
- xorl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- xorl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 96(%ebp),%edx
- movl 100(%ebp),%ecx
- addl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- addl %ebx,%ecx
- xorl %ecx,%esi
-.L001cast_dec_skip:
-
- movl 88(%ebp),%edx
- movl 92(%ebp),%ecx
- subl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- subl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 80(%ebp),%edx
- movl 84(%ebp),%ecx
- xorl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- xorl %ebx,%ecx
- xorl %ecx,%esi
-
- movl 72(%ebp),%edx
- movl 76(%ebp),%ecx
- addl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- addl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 64(%ebp),%edx
- movl 68(%ebp),%ecx
- subl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- subl %ebx,%ecx
- xorl %ecx,%esi
-
- movl 56(%ebp),%edx
- movl 60(%ebp),%ecx
- xorl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- xorl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 48(%ebp),%edx
- movl 52(%ebp),%ecx
- addl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- addl %ebx,%ecx
- xorl %ecx,%esi
-
- movl 40(%ebp),%edx
- movl 44(%ebp),%ecx
- subl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- subl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 32(%ebp),%edx
- movl 36(%ebp),%ecx
- xorl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- xorl %ebx,%ecx
- xorl %ecx,%esi
-
- movl 24(%ebp),%edx
- movl 28(%ebp),%ecx
- addl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- addl %ebx,%ecx
- xorl %ecx,%edi
-
- movl 16(%ebp),%edx
- movl 20(%ebp),%ecx
- subl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- subl %ebx,%ecx
- xorl %ecx,%esi
-
- movl 8(%ebp),%edx
- movl 12(%ebp),%ecx
- xorl %esi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- addl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- xorl %ebx,%ecx
- xorl %ecx,%edi
-
- movl (%ebp),%edx
- movl 4(%ebp),%ecx
- addl %edi,%edx
- roll %cl,%edx
- movl %edx,%ebx
- xorl %ecx,%ecx
- movb %dh,%cl
- andl $255,%ebx
- shrl $16,%edx
- xorl %eax,%eax
- movb %dh,%al
- andl $255,%edx
- movl CAST_S_table0(,%ecx,4),%ecx
- movl CAST_S_table1(,%ebx,4),%ebx
- xorl %ebx,%ecx
- movl CAST_S_table2(,%eax,4),%ebx
- subl %ebx,%ecx
- movl CAST_S_table3(,%edx,4),%ebx
- addl %ebx,%ecx
- xorl %ecx,%esi
- nop
- movl 20(%esp),%eax
- movl %edi,4(%eax)
- movl %esi,(%eax)
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size CAST_decrypt,.-.L_CAST_decrypt_begin
-.globl CAST_cbc_encrypt
-.type CAST_cbc_encrypt,@function
-.align 16
-CAST_cbc_encrypt:
-.L_CAST_cbc_encrypt_begin:
-
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 28(%esp),%ebp
-
- movl 36(%esp),%ebx
- movl (%ebx),%esi
- movl 4(%ebx),%edi
- pushl %edi
- pushl %esi
- pushl %edi
- pushl %esi
- movl %esp,%ebx
- movl 36(%esp),%esi
- movl 40(%esp),%edi
-
- movl 56(%esp),%ecx
-
- movl 48(%esp),%eax
- pushl %eax
- pushl %ebx
- cmpl $0,%ecx
- jz .L002decrypt
- andl $4294967288,%ebp
- movl 8(%esp),%eax
- movl 12(%esp),%ebx
- jz .L003encrypt_finish
-.L004encrypt_loop:
- movl (%esi),%ecx
- movl 4(%esi),%edx
- xorl %ecx,%eax
- xorl %edx,%ebx
- bswap %eax
- bswap %ebx
- movl %eax,8(%esp)
- movl %ebx,12(%esp)
- call .L_CAST_encrypt_begin
- movl 8(%esp),%eax
- movl 12(%esp),%ebx
- bswap %eax
- bswap %ebx
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- addl $8,%esi
- addl $8,%edi
- subl $8,%ebp
- jnz .L004encrypt_loop
-.L003encrypt_finish:
- movl 52(%esp),%ebp
- andl $7,%ebp
- jz .L005finish
- call .L006PIC_point
-.L006PIC_point:
- popl %edx
- leal .L007cbc_enc_jmp_table-.L006PIC_point(%edx),%ecx
- movl (%ecx,%ebp,4),%ebp
- addl %edx,%ebp
- xorl %ecx,%ecx
- xorl %edx,%edx
- jmp *%ebp
-.L008ej7:
- movb 6(%esi),%dh
- shll $8,%edx
-.L009ej6:
- movb 5(%esi),%dh
-.L010ej5:
- movb 4(%esi),%dl
-.L011ej4:
- movl (%esi),%ecx
- jmp .L012ejend
-.L013ej3:
- movb 2(%esi),%ch
- shll $8,%ecx
-.L014ej2:
- movb 1(%esi),%ch
-.L015ej1:
- movb (%esi),%cl
-.L012ejend:
- xorl %ecx,%eax
- xorl %edx,%ebx
- bswap %eax
- bswap %ebx
- movl %eax,8(%esp)
- movl %ebx,12(%esp)
- call .L_CAST_encrypt_begin
- movl 8(%esp),%eax
- movl 12(%esp),%ebx
- bswap %eax
- bswap %ebx
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- jmp .L005finish
-.L002decrypt:
- andl $4294967288,%ebp
- movl 16(%esp),%eax
- movl 20(%esp),%ebx
- jz .L016decrypt_finish
-.L017decrypt_loop:
- movl (%esi),%eax
- movl 4(%esi),%ebx
- bswap %eax
- bswap %ebx
- movl %eax,8(%esp)
- movl %ebx,12(%esp)
- call .L_CAST_decrypt_begin
- movl 8(%esp),%eax
- movl 12(%esp),%ebx
- bswap %eax
- bswap %ebx
- movl 16(%esp),%ecx
- movl 20(%esp),%edx
- xorl %eax,%ecx
- xorl %ebx,%edx
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl %ecx,(%edi)
- movl %edx,4(%edi)
- movl %eax,16(%esp)
- movl %ebx,20(%esp)
- addl $8,%esi
- addl $8,%edi
- subl $8,%ebp
- jnz .L017decrypt_loop
-.L016decrypt_finish:
- movl 52(%esp),%ebp
- andl $7,%ebp
- jz .L005finish
- movl (%esi),%eax
- movl 4(%esi),%ebx
- bswap %eax
- bswap %ebx
- movl %eax,8(%esp)
- movl %ebx,12(%esp)
- call .L_CAST_decrypt_begin
- movl 8(%esp),%eax
- movl 12(%esp),%ebx
- bswap %eax
- bswap %ebx
- movl 16(%esp),%ecx
- movl 20(%esp),%edx
- xorl %eax,%ecx
- xorl %ebx,%edx
- movl (%esi),%eax
- movl 4(%esi),%ebx
-.L018dj7:
- rorl $16,%edx
- movb %dl,6(%edi)
- shrl $16,%edx
-.L019dj6:
- movb %dh,5(%edi)
-.L020dj5:
- movb %dl,4(%edi)
-.L021dj4:
- movl %ecx,(%edi)
- jmp .L022djend
-.L023dj3:
- rorl $16,%ecx
- movb %cl,2(%edi)
- shll $16,%ecx
-.L024dj2:
- movb %ch,1(%esi)
-.L025dj1:
- movb %cl,(%esi)
-.L022djend:
- jmp .L005finish
-.L005finish:
- movl 60(%esp),%ecx
- addl $24,%esp
- movl %eax,(%ecx)
- movl %ebx,4(%ecx)
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.align 64
-.L007cbc_enc_jmp_table:
-.long 0
-.long .L015ej1-.L006PIC_point
-.long .L014ej2-.L006PIC_point
-.long .L013ej3-.L006PIC_point
-.long .L011ej4-.L006PIC_point
-.long .L010ej5-.L006PIC_point
-.long .L009ej6-.L006PIC_point
-.long .L008ej7-.L006PIC_point
-.align 64
-.size CAST_cbc_encrypt,.-.L_CAST_cbc_encrypt_begin
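
The removed CAST_cbc_encrypt code above finishes a partial final block through the
.L007cbc_enc_jmp_table dispatch: %ecx/%edx are zeroed, the trailing 1..7 input bytes
are gathered into them, and the zero-padded 8-byte block is XORed into the running IV
before the final CAST_encrypt call. A minimal C sketch of that gather, assuming the
zero padding and 32-bit little-endian loads visible in the code; all names here are
illustrative, not OpenSSL's:

#include <stdint.h>
#include <string.h>

static void cbc_gather_tail(const uint8_t *in, size_t rem, uint32_t iv[2])
{
    uint8_t block[8] = {0};     /* unread bytes stay zero, like %ecx/%edx */
    memcpy(block, in, rem);     /* rem is 1..7, per the jump table */
    uint32_t c, d;
    memcpy(&c, block, 4);       /* 32-bit little-endian loads */
    memcpy(&d, block + 4, 4);
    iv[0] ^= c;                 /* XOR into the running IV; the result is    */
    iv[1] ^= d;                 /* then byte-swapped and encrypted in place   */
}
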
diff --git a/secure/lib/libcrypto/i386/des-586.s b/secure/lib/libcrypto/i386/des-586.s
index 868b2ca..83bf07a 100644
--- a/secure/lib/libcrypto/i386/des-586.s
+++ b/secure/lib/libcrypto/i386/des-586.s
@@ -1001,7 +1001,7 @@ DES_encrypt1:
call .L000pic_point
.L000pic_point:
popl %ebp
- leal DES_SPtrans-.L000pic_point(%ebp),%ebp
+ leal .Ldes_sptrans-.L000pic_point(%ebp),%ebp
movl 24(%esp),%ecx
cmpl $0,%ebx
je .L001decrypt
@@ -1078,7 +1078,7 @@ DES_encrypt2:
call .L003pic_point
.L003pic_point:
popl %ebp
- leal DES_SPtrans-.L003pic_point(%ebp),%ebp
+ leal .Ldes_sptrans-.L003pic_point(%ebp),%ebp
movl 24(%esp),%ecx
cmpl $0,%ebx
je .L004decrypt
@@ -1708,6 +1708,7 @@ DES_ede3_cbc_encrypt:
.size DES_ede3_cbc_encrypt,.-.L_DES_ede3_cbc_encrypt_begin
.align 64
DES_SPtrans:
+.Ldes_sptrans:
.long 34080768,524288,33554434,34080770
.long 33554432,526338,524290,33554434
.long 526338,34080768,34078720,2050
diff --git a/secure/lib/libcrypto/i386/ghash-x86.s b/secure/lib/libcrypto/i386/ghash-x86.s
index a200a97..53d5e3f 100644
--- a/secure/lib/libcrypto/i386/ghash-x86.s
+++ b/secure/lib/libcrypto/i386/ghash-x86.s
@@ -946,27 +946,34 @@ gcm_init_clmul:
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
+ movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
- psllq $1,%xmm0
- pxor %xmm3,%xmm0
psllq $5,%xmm0
+ pxor %xmm0,%xmm3
+ psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
- movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
pslldq $8,%xmm0
- psrldq $8,%xmm4
- pxor %xmm3,%xmm0
- pxor %xmm4,%xmm1
- movdqa %xmm0,%xmm4
- psrlq $5,%xmm0
+ psrldq $8,%xmm3
pxor %xmm4,%xmm0
+ pxor %xmm3,%xmm1
+ movdqa %xmm0,%xmm4
psrlq $1,%xmm0
+ pxor %xmm4,%xmm1
+ pxor %xmm0,%xmm4
+ psrlq $5,%xmm0
pxor %xmm4,%xmm0
- pxor %xmm1,%xmm4
psrlq $1,%xmm0
- pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
+ pshufd $78,%xmm2,%xmm3
+ pshufd $78,%xmm0,%xmm4
+ pxor %xmm2,%xmm3
movdqu %xmm2,(%edx)
+ pxor %xmm0,%xmm4
movdqu %xmm0,16(%edx)
+.byte 102,15,58,15,227,8
+ movdqu %xmm4,32(%edx)
ret
.size gcm_init_clmul,.-.L_gcm_init_clmul_begin
.globl gcm_gmult_clmul
@@ -984,11 +991,10 @@ gcm_gmult_clmul:
movdqa (%ecx),%xmm5
movups (%edx),%xmm2
.byte 102,15,56,0,197
+ movups 32(%edx),%xmm4
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
- pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
- pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
@@ -999,25 +1005,26 @@ gcm_gmult_clmul:
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
+ movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
- psllq $1,%xmm0
- pxor %xmm3,%xmm0
psllq $5,%xmm0
+ pxor %xmm0,%xmm3
+ psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
- movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
pslldq $8,%xmm0
- psrldq $8,%xmm4
- pxor %xmm3,%xmm0
- pxor %xmm4,%xmm1
- movdqa %xmm0,%xmm4
- psrlq $5,%xmm0
+ psrldq $8,%xmm3
pxor %xmm4,%xmm0
+ pxor %xmm3,%xmm1
+ movdqa %xmm0,%xmm4
psrlq $1,%xmm0
+ pxor %xmm4,%xmm1
+ pxor %xmm0,%xmm4
+ psrlq $5,%xmm0
pxor %xmm4,%xmm0
- pxor %xmm1,%xmm4
psrlq $1,%xmm0
- pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
.byte 102,15,56,0,197
movdqu %xmm0,(%eax)
ret
@@ -1049,127 +1056,115 @@ gcm_ghash_clmul:
movdqu 16(%esi),%xmm6
.byte 102,15,56,0,221
.byte 102,15,56,0,245
+ movdqu 32(%edx),%xmm5
pxor %xmm3,%xmm0
- movdqa %xmm6,%xmm7
pshufd $78,%xmm6,%xmm3
- pshufd $78,%xmm2,%xmm4
+ movdqa %xmm6,%xmm7
pxor %xmm6,%xmm3
- pxor %xmm2,%xmm4
+ leal 32(%esi),%esi
.byte 102,15,58,68,242,0
.byte 102,15,58,68,250,17
-.byte 102,15,58,68,220,0
- xorps %xmm6,%xmm3
- xorps %xmm7,%xmm3
- movdqa %xmm3,%xmm4
- psrldq $8,%xmm3
- pslldq $8,%xmm4
- pxor %xmm3,%xmm7
- pxor %xmm4,%xmm6
+.byte 102,15,58,68,221,0
movups 16(%edx),%xmm2
- leal 32(%esi),%esi
+ nop
subl $32,%ebx
jbe .L014even_tail
+ jmp .L015mod_loop
+.align 32
.L015mod_loop:
+ pshufd $78,%xmm0,%xmm4
movdqa %xmm0,%xmm1
- pshufd $78,%xmm0,%xmm3
- pshufd $78,%xmm2,%xmm4
- pxor %xmm0,%xmm3
- pxor %xmm2,%xmm4
+ pxor %xmm0,%xmm4
+ nop
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
-.byte 102,15,58,68,220,0
- xorps %xmm0,%xmm3
- xorps %xmm1,%xmm3
- movdqa %xmm3,%xmm4
- psrldq $8,%xmm3
- pslldq $8,%xmm4
- pxor %xmm3,%xmm1
- pxor %xmm4,%xmm0
- movdqu (%esi),%xmm3
+.byte 102,15,58,68,229,16
movups (%edx),%xmm2
- pxor %xmm6,%xmm0
- pxor %xmm7,%xmm1
+ xorps %xmm6,%xmm0
+ movdqa (%ecx),%xmm5
+ xorps %xmm7,%xmm1
+ movdqu (%esi),%xmm7
+ pxor %xmm0,%xmm3
movdqu 16(%esi),%xmm6
-.byte 102,15,56,0,221
+ pxor %xmm1,%xmm3
+.byte 102,15,56,0,253
+ pxor %xmm3,%xmm4
+ movdqa %xmm4,%xmm3
+ psrldq $8,%xmm4
+ pslldq $8,%xmm3
+ pxor %xmm4,%xmm1
+ pxor %xmm3,%xmm0
.byte 102,15,56,0,245
- movdqa %xmm6,%xmm5
+ pxor %xmm7,%xmm1
movdqa %xmm6,%xmm7
- pxor %xmm3,%xmm1
+ movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
- psllq $1,%xmm0
- pxor %xmm3,%xmm0
psllq $5,%xmm0
+ pxor %xmm0,%xmm3
+ psllq $1,%xmm0
pxor %xmm3,%xmm0
.byte 102,15,58,68,242,0
+ movups 32(%edx),%xmm5
psllq $57,%xmm0
- movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
pslldq $8,%xmm0
- psrldq $8,%xmm4
- pxor %xmm3,%xmm0
- pshufd $78,%xmm5,%xmm3
+ psrldq $8,%xmm3
+ pxor %xmm4,%xmm0
+ pxor %xmm3,%xmm1
+ pshufd $78,%xmm7,%xmm3
+ movdqa %xmm0,%xmm4
+ psrlq $1,%xmm0
+ pxor %xmm7,%xmm3
pxor %xmm4,%xmm1
- pxor %xmm5,%xmm3
- pshufd $78,%xmm2,%xmm5
- pxor %xmm2,%xmm5
.byte 102,15,58,68,250,17
- movdqa %xmm0,%xmm4
+ movups 16(%edx),%xmm2
+ pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
- pxor %xmm4,%xmm0
- pxor %xmm1,%xmm4
- psrlq $1,%xmm0
- pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
.byte 102,15,58,68,221,0
- movups 16(%edx),%xmm2
- xorps %xmm6,%xmm3
- xorps %xmm7,%xmm3
- movdqa %xmm3,%xmm5
- psrldq $8,%xmm3
- pslldq $8,%xmm5
- pxor %xmm3,%xmm7
- pxor %xmm5,%xmm6
- movdqa (%ecx),%xmm5
leal 32(%esi),%esi
subl $32,%ebx
ja .L015mod_loop
.L014even_tail:
+ pshufd $78,%xmm0,%xmm4
movdqa %xmm0,%xmm1
- pshufd $78,%xmm0,%xmm3
- pshufd $78,%xmm2,%xmm4
- pxor %xmm0,%xmm3
- pxor %xmm2,%xmm4
+ pxor %xmm0,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
-.byte 102,15,58,68,220,0
- xorps %xmm0,%xmm3
- xorps %xmm1,%xmm3
- movdqa %xmm3,%xmm4
- psrldq $8,%xmm3
- pslldq $8,%xmm4
- pxor %xmm3,%xmm1
- pxor %xmm4,%xmm0
- pxor %xmm6,%xmm0
- pxor %xmm7,%xmm1
- movdqa %xmm0,%xmm3
- psllq $1,%xmm0
+.byte 102,15,58,68,229,16
+ movdqa (%ecx),%xmm5
+ xorps %xmm6,%xmm0
+ xorps %xmm7,%xmm1
+ pxor %xmm0,%xmm3
+ pxor %xmm1,%xmm3
+ pxor %xmm3,%xmm4
+ movdqa %xmm4,%xmm3
+ psrldq $8,%xmm4
+ pslldq $8,%xmm3
+ pxor %xmm4,%xmm1
pxor %xmm3,%xmm0
+ movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
psllq $5,%xmm0
+ pxor %xmm0,%xmm3
+ psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
- movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
pslldq $8,%xmm0
- psrldq $8,%xmm4
- pxor %xmm3,%xmm0
- pxor %xmm4,%xmm1
- movdqa %xmm0,%xmm4
- psrlq $5,%xmm0
+ psrldq $8,%xmm3
pxor %xmm4,%xmm0
+ pxor %xmm3,%xmm1
+ movdqa %xmm0,%xmm4
psrlq $1,%xmm0
+ pxor %xmm4,%xmm1
+ pxor %xmm0,%xmm4
+ psrlq $5,%xmm0
pxor %xmm4,%xmm0
- pxor %xmm1,%xmm4
psrlq $1,%xmm0
- pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
testl %ebx,%ebx
jnz .L016done
movups (%edx),%xmm2
@@ -1192,25 +1187,26 @@ gcm_ghash_clmul:
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
+ movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
- psllq $1,%xmm0
- pxor %xmm3,%xmm0
psllq $5,%xmm0
+ pxor %xmm0,%xmm3
+ psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
- movdqa %xmm0,%xmm4
+ movdqa %xmm0,%xmm3
pslldq $8,%xmm0
- psrldq $8,%xmm4
- pxor %xmm3,%xmm0
- pxor %xmm4,%xmm1
- movdqa %xmm0,%xmm4
- psrlq $5,%xmm0
+ psrldq $8,%xmm3
pxor %xmm4,%xmm0
+ pxor %xmm3,%xmm1
+ movdqa %xmm0,%xmm4
psrlq $1,%xmm0
+ pxor %xmm4,%xmm1
+ pxor %xmm0,%xmm4
+ psrlq $5,%xmm0
pxor %xmm4,%xmm0
- pxor %xmm1,%xmm4
psrlq $1,%xmm0
- pxor %xmm4,%xmm0
+ pxor %xmm1,%xmm0
.L016done:
.byte 102,15,56,0,197
movdqu %xmm0,(%eax)
@@ -1225,12 +1221,6 @@ gcm_ghash_clmul:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194
.align 64
-.Lrem_4bit:
-.long 0,0,0,471859200,0,943718400,0,610271232
-.long 0,1887436800,0,1822425088,0,1220542464,0,1423966208
-.long 0,3774873600,0,4246732800,0,3644850176,0,3311403008
-.long 0,2441084928,0,2376073216,0,2847932416,0,3051356160
-.align 64
.Lrem_8bit:
.value 0,450,900,582,1800,1738,1164,1358
.value 3600,4050,3476,3158,2328,2266,2716,2910
@@ -1264,6 +1254,12 @@ gcm_ghash_clmul:
.value 42960,42514,42068,42390,41176,41242,41820,41630
.value 46560,46114,46692,47014,45800,45866,45420,45230
.value 48112,47666,47220,47542,48376,48442,49020,48830
+.align 64
+.Lrem_4bit:
+.long 0,0,0,471859200,0,943718400,0,610271232
+.long 0,1887436800,0,1822425088,0,1220542464,0,1423966208
+.long 0,3774873600,0,4246732800,0,3644850176,0,3311403008
+.long 0,2441084928,0,2376073216,0,2847932416,0,3051356160
.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67
.byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112
.byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62
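
The gcm_init_clmul/gcm_gmult_clmul/gcm_ghash_clmul changes above hoist the
pshufd $78 / pxor "half-sum" of H out of the per-block path: init now stores it
at offset 32 of the key table (the new movdqu %xmm4,32(%edx)) and the multiply
routines load it once. A hedged sketch of the three-multiplication Karatsuba
pattern the PCLMULQDQ byte sequences (102,15,58,68,...) implement, written with
intrinsics; the polynomial reduction step is omitted and the names are illustrative:

#include <emmintrin.h>
#include <wmmintrin.h>   /* PCLMULQDQ; compile with -mpclmul */

static inline void ghash_karatsuba(__m128i X, __m128i H, __m128i Hk,
                                   __m128i *lo, __m128i *hi)
{
    /* Hk is the precomputed (Hhi ^ Hlo) value that gcm_init_clmul now
       stores at offset 32 of the table. */
    __m128i Xk = _mm_xor_si128(X, _mm_shuffle_epi32(X, 0x4e)); /* pshufd $78 */
    __m128i t0 = _mm_clmulepi64_si128(X, H, 0x00);    /* Xlo * Hlo          */
    __m128i t2 = _mm_clmulepi64_si128(X, H, 0x11);    /* Xhi * Hhi          */
    __m128i t1 = _mm_clmulepi64_si128(Xk, Hk, 0x00);  /* (Xhi^Xlo)*(Hhi^Hlo) */
    t1  = _mm_xor_si128(t1, _mm_xor_si128(t0, t2));   /* Karatsuba middle   */
    *lo = _mm_xor_si128(t0, _mm_slli_si128(t1, 8));   /* pslldq $8          */
    *hi = _mm_xor_si128(t2, _mm_srli_si128(t1, 8));   /* psrldq $8          */
}
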
diff --git a/secure/lib/libcrypto/i386/rc4-586.s b/secure/lib/libcrypto/i386/rc4-586.s
index c1f70a6..e9603ae 100644
--- a/secure/lib/libcrypto/i386/rc4-586.s
+++ b/secure/lib/libcrypto/i386/rc4-586.s
@@ -30,8 +30,8 @@ RC4:
movl (%edi,%eax,4),%ecx
andl $-4,%edx
jz .L002loop1
- testl $-8,%edx
movl %ebp,32(%esp)
+ testl $-8,%edx
jz .L003go4loop4
leal OPENSSL_ia32cap_P,%ebp
btl $26,(%ebp)
@@ -370,4 +370,4 @@ RC4_options:
.byte 111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 64
.size RC4_options,.-.L_RC4_options_begin
-.comm OPENSSL_ia32cap_P,8,4
+.comm OPENSSL_ia32cap_P,16,4
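
The .comm change above widens OPENSSL_ia32cap_P from 8 to 16 bytes: OpenSSL 1.0.2
keeps four 32-bit capability words instead of two, with word 2 caching
CPUID.(EAX=7,ECX=0).EBX for the new code paths (the sha1-586.s hunks below test
$536870912, i.e. bit 29, the SHA-extensions flag, and $512 in word 1, i.e. SSSE3).
A hedged C restatement of those two tests, assuming the four-word layout; the
helper names are illustrative:

extern unsigned int OPENSSL_ia32cap_P[4];   /* grown from [2]; .comm is now 16 bytes */

static int cpu_has_ssse3(void) { return (OPENSSL_ia32cap_P[1] >> 9)  & 1; } /* testl $512       */
static int cpu_has_sha(void)   { return (OPENSSL_ia32cap_P[2] >> 29) & 1; } /* testl $536870912 */
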
diff --git a/secure/lib/libcrypto/i386/sha1-586.s b/secure/lib/libcrypto/i386/sha1-586.s
index ffb8883..74b1ab4 100644
--- a/secure/lib/libcrypto/i386/sha1-586.s
+++ b/secure/lib/libcrypto/i386/sha1-586.s
@@ -19,8 +19,11 @@ sha1_block_data_order:
movl 4(%esi),%edx
testl $512,%edx
jz .L001x86
+ movl 8(%esi),%ecx
testl $16777216,%eax
jz .L001x86
+ testl $536870912,%ecx
+ jnz .Lshaext_shortcut
jmp .Lssse3_shortcut
.align 16
.L001x86:
@@ -1389,9 +1392,9 @@ sha1_block_data_order:
popl %ebp
ret
.size sha1_block_data_order,.-.L_sha1_block_data_order_begin
-.type _sha1_block_data_order_ssse3,@function
+.type _sha1_block_data_order_shaext,@function
.align 16
-_sha1_block_data_order_ssse3:
+_sha1_block_data_order_shaext:
pushl %ebp
pushl %ebx
pushl %esi
@@ -1400,6 +1403,176 @@ _sha1_block_data_order_ssse3:
.L003pic_point:
popl %ebp
leal .LK_XX_XX-.L003pic_point(%ebp),%ebp
+.Lshaext_shortcut:
+ movl 20(%esp),%edi
+ movl %esp,%ebx
+ movl 24(%esp),%esi
+ movl 28(%esp),%ecx
+ subl $32,%esp
+ movdqu (%edi),%xmm0
+ movd 16(%edi),%xmm1
+ andl $-32,%esp
+ movdqa 80(%ebp),%xmm3
+ movdqu (%esi),%xmm4
+ pshufd $27,%xmm0,%xmm0
+ movdqu 16(%esi),%xmm5
+ pshufd $27,%xmm1,%xmm1
+ movdqu 32(%esi),%xmm6
+.byte 102,15,56,0,227
+ movdqu 48(%esi),%xmm7
+.byte 102,15,56,0,235
+.byte 102,15,56,0,243
+.byte 102,15,56,0,251
+ jmp .L004loop_shaext
+.align 16
+.L004loop_shaext:
+ decl %ecx
+ leal 64(%esi),%eax
+ movdqa %xmm1,(%esp)
+ paddd %xmm4,%xmm1
+ cmovnel %eax,%esi
+ movdqa %xmm0,16(%esp)
+.byte 15,56,201,229
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,0
+.byte 15,56,200,213
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+.byte 15,56,202,231
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,0
+.byte 15,56,200,206
+ pxor %xmm7,%xmm5
+.byte 15,56,202,236
+.byte 15,56,201,247
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,0
+.byte 15,56,200,215
+ pxor %xmm4,%xmm6
+.byte 15,56,201,252
+.byte 15,56,202,245
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,0
+.byte 15,56,200,204
+ pxor %xmm5,%xmm7
+.byte 15,56,202,254
+.byte 15,56,201,229
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,0
+.byte 15,56,200,213
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+.byte 15,56,202,231
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,1
+.byte 15,56,200,206
+ pxor %xmm7,%xmm5
+.byte 15,56,202,236
+.byte 15,56,201,247
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,1
+.byte 15,56,200,215
+ pxor %xmm4,%xmm6
+.byte 15,56,201,252
+.byte 15,56,202,245
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,1
+.byte 15,56,200,204
+ pxor %xmm5,%xmm7
+.byte 15,56,202,254
+.byte 15,56,201,229
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,1
+.byte 15,56,200,213
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+.byte 15,56,202,231
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,1
+.byte 15,56,200,206
+ pxor %xmm7,%xmm5
+.byte 15,56,202,236
+.byte 15,56,201,247
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,2
+.byte 15,56,200,215
+ pxor %xmm4,%xmm6
+.byte 15,56,201,252
+.byte 15,56,202,245
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,2
+.byte 15,56,200,204
+ pxor %xmm5,%xmm7
+.byte 15,56,202,254
+.byte 15,56,201,229
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,2
+.byte 15,56,200,213
+ pxor %xmm6,%xmm4
+.byte 15,56,201,238
+.byte 15,56,202,231
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,2
+.byte 15,56,200,206
+ pxor %xmm7,%xmm5
+.byte 15,56,202,236
+.byte 15,56,201,247
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,2
+.byte 15,56,200,215
+ pxor %xmm4,%xmm6
+.byte 15,56,201,252
+.byte 15,56,202,245
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,3
+.byte 15,56,200,204
+ pxor %xmm5,%xmm7
+.byte 15,56,202,254
+ movdqu (%esi),%xmm4
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,3
+.byte 15,56,200,213
+ movdqu 16(%esi),%xmm5
+.byte 102,15,56,0,227
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,3
+.byte 15,56,200,206
+ movdqu 32(%esi),%xmm6
+.byte 102,15,56,0,235
+ movdqa %xmm0,%xmm2
+.byte 15,58,204,193,3
+.byte 15,56,200,215
+ movdqu 48(%esi),%xmm7
+.byte 102,15,56,0,243
+ movdqa %xmm0,%xmm1
+.byte 15,58,204,194,3
+ movdqa (%esp),%xmm2
+.byte 102,15,56,0,251
+.byte 15,56,200,202
+ paddd 16(%esp),%xmm0
+ jnz .L004loop_shaext
+ pshufd $27,%xmm0,%xmm0
+ pshufd $27,%xmm1,%xmm1
+ movdqu %xmm0,(%edi)
+ movd %xmm1,16(%edi)
+ movl %ebx,%esp
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.size _sha1_block_data_order_shaext,.-_sha1_block_data_order_shaext
+.type _sha1_block_data_order_ssse3,@function
+.align 16
+_sha1_block_data_order_ssse3:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+ call .L005pic_point
+.L005pic_point:
+ popl %ebp
+ leal .LK_XX_XX-.L005pic_point(%ebp),%ebp
.Lssse3_shortcut:
movdqa (%ebp),%xmm7
movdqa 16(%ebp),%xmm0
@@ -1447,936 +1620,917 @@ _sha1_block_data_order_ssse3:
movdqa %xmm1,16(%esp)
psubd %xmm7,%xmm1
movdqa %xmm2,32(%esp)
+ movl %ecx,%ebp
psubd %xmm7,%xmm2
- movdqa %xmm1,%xmm4
- jmp .L004loop
+ xorl %edx,%ebp
+ pshufd $238,%xmm0,%xmm4
+ andl %ebp,%esi
+ jmp .L006loop
.align 16
-.L004loop:
- addl (%esp),%edi
- xorl %edx,%ecx
-.byte 102,15,58,15,224,8
- movdqa %xmm3,%xmm6
+.L006loop:
+ rorl $2,%ebx
+ xorl %edx,%esi
movl %eax,%ebp
- roll $5,%eax
+ punpcklqdq %xmm1,%xmm4
+ movdqa %xmm3,%xmm6
+ addl (%esp),%edi
+ xorl %ecx,%ebx
paddd %xmm3,%xmm7
movdqa %xmm0,64(%esp)
- andl %ecx,%esi
- xorl %edx,%ecx
+ roll $5,%eax
+ addl %esi,%edi
psrldq $4,%xmm6
- xorl %edx,%esi
- addl %eax,%edi
+ andl %ebx,%ebp
+ xorl %ecx,%ebx
pxor %xmm0,%xmm4
- rorl $2,%ebx
- addl %esi,%edi
+ addl %eax,%edi
+ rorl $7,%eax
pxor %xmm2,%xmm6
- addl 4(%esp),%edx
- xorl %ecx,%ebx
+ xorl %ecx,%ebp
movl %edi,%esi
- roll $5,%edi
+ addl 4(%esp),%edx
pxor %xmm6,%xmm4
- andl %ebx,%ebp
- xorl %ecx,%ebx
+ xorl %ebx,%eax
+ roll $5,%edi
movdqa %xmm7,48(%esp)
- xorl %ecx,%ebp
- addl %edi,%edx
- movdqa %xmm4,%xmm0
- movdqa %xmm4,%xmm6
- rorl $7,%eax
addl %ebp,%edx
- addl 8(%esp),%ecx
+ andl %eax,%esi
+ movdqa %xmm4,%xmm0
xorl %ebx,%eax
+ addl %edi,%edx
+ rorl $7,%edi
+ movdqa %xmm4,%xmm6
+ xorl %ebx,%esi
pslldq $12,%xmm0
paddd %xmm4,%xmm4
movl %edx,%ebp
- roll $5,%edx
- andl %eax,%esi
- xorl %ebx,%eax
+ addl 8(%esp),%ecx
psrld $31,%xmm6
- xorl %ebx,%esi
- addl %edx,%ecx
+ xorl %eax,%edi
+ roll $5,%edx
movdqa %xmm0,%xmm7
- rorl $7,%edi
addl %esi,%ecx
+ andl %edi,%ebp
+ xorl %eax,%edi
psrld $30,%xmm0
+ addl %edx,%ecx
+ rorl $7,%edx
por %xmm6,%xmm4
- addl 12(%esp),%ebx
- xorl %eax,%edi
+ xorl %eax,%ebp
movl %ecx,%esi
- roll $5,%ecx
+ addl 12(%esp),%ebx
pslld $2,%xmm7
+ xorl %edi,%edx
+ roll $5,%ecx
pxor %xmm0,%xmm4
- andl %edi,%ebp
- xorl %eax,%edi
movdqa 96(%esp),%xmm0
- xorl %eax,%ebp
- addl %ecx,%ebx
- pxor %xmm7,%xmm4
- movdqa %xmm2,%xmm5
- rorl $7,%edx
addl %ebp,%ebx
- addl 16(%esp),%eax
+ andl %edx,%esi
+ pxor %xmm7,%xmm4
+ pshufd $238,%xmm1,%xmm5
xorl %edi,%edx
-.byte 102,15,58,15,233,8
- movdqa %xmm4,%xmm7
+ addl %ecx,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
movl %ebx,%ebp
- roll $5,%ebx
+ punpcklqdq %xmm2,%xmm5
+ movdqa %xmm4,%xmm7
+ addl 16(%esp),%eax
+ xorl %edx,%ecx
paddd %xmm4,%xmm0
movdqa %xmm1,80(%esp)
- andl %edx,%esi
- xorl %edi,%edx
+ roll $5,%ebx
+ addl %esi,%eax
psrldq $4,%xmm7
- xorl %edi,%esi
- addl %ebx,%eax
+ andl %ecx,%ebp
+ xorl %edx,%ecx
pxor %xmm1,%xmm5
- rorl $7,%ecx
- addl %esi,%eax
+ addl %ebx,%eax
+ rorl $7,%ebx
pxor %xmm3,%xmm7
- addl 20(%esp),%edi
- xorl %edx,%ecx
+ xorl %edx,%ebp
movl %eax,%esi
- roll $5,%eax
+ addl 20(%esp),%edi
pxor %xmm7,%xmm5
- andl %ecx,%ebp
- xorl %edx,%ecx
+ xorl %ecx,%ebx
+ roll $5,%eax
movdqa %xmm0,(%esp)
- xorl %edx,%ebp
- addl %eax,%edi
- movdqa %xmm5,%xmm1
- movdqa %xmm5,%xmm7
- rorl $7,%ebx
addl %ebp,%edi
- addl 24(%esp),%edx
+ andl %ebx,%esi
+ movdqa %xmm5,%xmm1
xorl %ecx,%ebx
+ addl %eax,%edi
+ rorl $7,%eax
+ movdqa %xmm5,%xmm7
+ xorl %ecx,%esi
pslldq $12,%xmm1
paddd %xmm5,%xmm5
movl %edi,%ebp
- roll $5,%edi
- andl %ebx,%esi
- xorl %ecx,%ebx
+ addl 24(%esp),%edx
psrld $31,%xmm7
- xorl %ecx,%esi
- addl %edi,%edx
+ xorl %ebx,%eax
+ roll $5,%edi
movdqa %xmm1,%xmm0
- rorl $7,%eax
addl %esi,%edx
+ andl %eax,%ebp
+ xorl %ebx,%eax
psrld $30,%xmm1
+ addl %edi,%edx
+ rorl $7,%edi
por %xmm7,%xmm5
- addl 28(%esp),%ecx
- xorl %ebx,%eax
+ xorl %ebx,%ebp
movl %edx,%esi
- roll $5,%edx
+ addl 28(%esp),%ecx
pslld $2,%xmm0
+ xorl %eax,%edi
+ roll $5,%edx
pxor %xmm1,%xmm5
- andl %eax,%ebp
- xorl %ebx,%eax
movdqa 112(%esp),%xmm1
- xorl %ebx,%ebp
- addl %edx,%ecx
- pxor %xmm0,%xmm5
- movdqa %xmm3,%xmm6
- rorl $7,%edi
addl %ebp,%ecx
- addl 32(%esp),%ebx
+ andl %edi,%esi
+ pxor %xmm0,%xmm5
+ pshufd $238,%xmm2,%xmm6
xorl %eax,%edi
-.byte 102,15,58,15,242,8
- movdqa %xmm5,%xmm0
+ addl %edx,%ecx
+ rorl $7,%edx
+ xorl %eax,%esi
movl %ecx,%ebp
- roll $5,%ecx
+ punpcklqdq %xmm3,%xmm6
+ movdqa %xmm5,%xmm0
+ addl 32(%esp),%ebx
+ xorl %edi,%edx
paddd %xmm5,%xmm1
movdqa %xmm2,96(%esp)
- andl %edi,%esi
- xorl %eax,%edi
+ roll $5,%ecx
+ addl %esi,%ebx
psrldq $4,%xmm0
- xorl %eax,%esi
- addl %ecx,%ebx
+ andl %edx,%ebp
+ xorl %edi,%edx
pxor %xmm2,%xmm6
- rorl $7,%edx
- addl %esi,%ebx
+ addl %ecx,%ebx
+ rorl $7,%ecx
pxor %xmm4,%xmm0
- addl 36(%esp),%eax
- xorl %edi,%edx
+ xorl %edi,%ebp
movl %ebx,%esi
- roll $5,%ebx
+ addl 36(%esp),%eax
pxor %xmm0,%xmm6
- andl %edx,%ebp
- xorl %edi,%edx
+ xorl %edx,%ecx
+ roll $5,%ebx
movdqa %xmm1,16(%esp)
- xorl %edi,%ebp
- addl %ebx,%eax
- movdqa %xmm6,%xmm2
- movdqa %xmm6,%xmm0
- rorl $7,%ecx
addl %ebp,%eax
- addl 40(%esp),%edi
+ andl %ecx,%esi
+ movdqa %xmm6,%xmm2
xorl %edx,%ecx
+ addl %ebx,%eax
+ rorl $7,%ebx
+ movdqa %xmm6,%xmm0
+ xorl %edx,%esi
pslldq $12,%xmm2
paddd %xmm6,%xmm6
movl %eax,%ebp
- roll $5,%eax
- andl %ecx,%esi
- xorl %edx,%ecx
+ addl 40(%esp),%edi
psrld $31,%xmm0
- xorl %edx,%esi
- addl %eax,%edi
+ xorl %ecx,%ebx
+ roll $5,%eax
movdqa %xmm2,%xmm1
- rorl $7,%ebx
addl %esi,%edi
+ andl %ebx,%ebp
+ xorl %ecx,%ebx
psrld $30,%xmm2
+ addl %eax,%edi
+ rorl $7,%eax
por %xmm0,%xmm6
- addl 44(%esp),%edx
- xorl %ecx,%ebx
+ xorl %ecx,%ebp
movdqa 64(%esp),%xmm0
movl %edi,%esi
- roll $5,%edi
+ addl 44(%esp),%edx
pslld $2,%xmm1
+ xorl %ebx,%eax
+ roll $5,%edi
pxor %xmm2,%xmm6
- andl %ebx,%ebp
- xorl %ecx,%ebx
movdqa 112(%esp),%xmm2
- xorl %ecx,%ebp
- addl %edi,%edx
- pxor %xmm1,%xmm6
- movdqa %xmm4,%xmm7
- rorl $7,%eax
addl %ebp,%edx
- addl 48(%esp),%ecx
+ andl %eax,%esi
+ pxor %xmm1,%xmm6
+ pshufd $238,%xmm3,%xmm7
xorl %ebx,%eax
-.byte 102,15,58,15,251,8
- movdqa %xmm6,%xmm1
+ addl %edi,%edx
+ rorl $7,%edi
+ xorl %ebx,%esi
movl %edx,%ebp
- roll $5,%edx
+ punpcklqdq %xmm4,%xmm7
+ movdqa %xmm6,%xmm1
+ addl 48(%esp),%ecx
+ xorl %eax,%edi
paddd %xmm6,%xmm2
movdqa %xmm3,64(%esp)
- andl %eax,%esi
- xorl %ebx,%eax
+ roll $5,%edx
+ addl %esi,%ecx
psrldq $4,%xmm1
- xorl %ebx,%esi
- addl %edx,%ecx
+ andl %edi,%ebp
+ xorl %eax,%edi
pxor %xmm3,%xmm7
- rorl $7,%edi
- addl %esi,%ecx
+ addl %edx,%ecx
+ rorl $7,%edx
pxor %xmm5,%xmm1
- addl 52(%esp),%ebx
- xorl %eax,%edi
+ xorl %eax,%ebp
movl %ecx,%esi
- roll $5,%ecx
+ addl 52(%esp),%ebx
pxor %xmm1,%xmm7
- andl %edi,%ebp
- xorl %eax,%edi
+ xorl %edi,%edx
+ roll $5,%ecx
movdqa %xmm2,32(%esp)
- xorl %eax,%ebp
- addl %ecx,%ebx
- movdqa %xmm7,%xmm3
- movdqa %xmm7,%xmm1
- rorl $7,%edx
addl %ebp,%ebx
- addl 56(%esp),%eax
+ andl %edx,%esi
+ movdqa %xmm7,%xmm3
xorl %edi,%edx
+ addl %ecx,%ebx
+ rorl $7,%ecx
+ movdqa %xmm7,%xmm1
+ xorl %edi,%esi
pslldq $12,%xmm3
paddd %xmm7,%xmm7
movl %ebx,%ebp
- roll $5,%ebx
- andl %edx,%esi
- xorl %edi,%edx
+ addl 56(%esp),%eax
psrld $31,%xmm1
- xorl %edi,%esi
- addl %ebx,%eax
+ xorl %edx,%ecx
+ roll $5,%ebx
movdqa %xmm3,%xmm2
- rorl $7,%ecx
addl %esi,%eax
+ andl %ecx,%ebp
+ xorl %edx,%ecx
psrld $30,%xmm3
+ addl %ebx,%eax
+ rorl $7,%ebx
por %xmm1,%xmm7
- addl 60(%esp),%edi
- xorl %edx,%ecx
+ xorl %edx,%ebp
movdqa 80(%esp),%xmm1
movl %eax,%esi
- roll $5,%eax
+ addl 60(%esp),%edi
pslld $2,%xmm2
+ xorl %ecx,%ebx
+ roll $5,%eax
pxor %xmm3,%xmm7
- andl %ecx,%ebp
- xorl %edx,%ecx
movdqa 112(%esp),%xmm3
- xorl %edx,%ebp
- addl %eax,%edi
- pxor %xmm2,%xmm7
- rorl $7,%ebx
addl %ebp,%edi
- movdqa %xmm7,%xmm2
- addl (%esp),%edx
- pxor %xmm4,%xmm0
-.byte 102,15,58,15,214,8
+ andl %ebx,%esi
+ pxor %xmm2,%xmm7
+ pshufd $238,%xmm6,%xmm2
xorl %ecx,%ebx
+ addl %eax,%edi
+ rorl $7,%eax
+ pxor %xmm4,%xmm0
+ punpcklqdq %xmm7,%xmm2
+ xorl %ecx,%esi
movl %edi,%ebp
- roll $5,%edi
+ addl (%esp),%edx
pxor %xmm1,%xmm0
movdqa %xmm4,80(%esp)
- andl %ebx,%esi
- xorl %ecx,%ebx
+ xorl %ebx,%eax
+ roll $5,%edi
movdqa %xmm3,%xmm4
+ addl %esi,%edx
paddd %xmm7,%xmm3
- xorl %ecx,%esi
- addl %edi,%edx
+ andl %eax,%ebp
pxor %xmm2,%xmm0
- rorl $7,%eax
- addl %esi,%edx
- addl 4(%esp),%ecx
xorl %ebx,%eax
+ addl %edi,%edx
+ rorl $7,%edi
+ xorl %ebx,%ebp
movdqa %xmm0,%xmm2
movdqa %xmm3,48(%esp)
movl %edx,%esi
+ addl 4(%esp),%ecx
+ xorl %eax,%edi
roll $5,%edx
- andl %eax,%ebp
- xorl %ebx,%eax
pslld $2,%xmm0
- xorl %ebx,%ebp
- addl %edx,%ecx
- psrld $30,%xmm2
- rorl $7,%edi
addl %ebp,%ecx
- addl 8(%esp),%ebx
+ andl %edi,%esi
+ psrld $30,%xmm2
xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%edx
+ xorl %eax,%esi
movl %ecx,%ebp
+ addl 8(%esp),%ebx
+ xorl %edi,%edx
roll $5,%ecx
por %xmm2,%xmm0
- andl %edi,%esi
- xorl %eax,%edi
+ addl %esi,%ebx
+ andl %edx,%ebp
movdqa 96(%esp),%xmm2
- xorl %eax,%esi
+ xorl %edi,%edx
addl %ecx,%ebx
- rorl $7,%edx
- addl %esi,%ebx
addl 12(%esp),%eax
- movdqa %xmm0,%xmm3
- xorl %edi,%edx
+ xorl %edi,%ebp
movl %ebx,%esi
+ pshufd $238,%xmm7,%xmm3
roll $5,%ebx
- andl %edx,%ebp
- xorl %edi,%edx
- xorl %edi,%ebp
- addl %ebx,%eax
- rorl $7,%ecx
addl %ebp,%eax
+ xorl %edx,%esi
+ rorl $7,%ecx
+ addl %ebx,%eax
addl 16(%esp),%edi
pxor %xmm5,%xmm1
-.byte 102,15,58,15,223,8
- xorl %edx,%esi
+ punpcklqdq %xmm0,%xmm3
+ xorl %ecx,%esi
movl %eax,%ebp
roll $5,%eax
pxor %xmm2,%xmm1
movdqa %xmm5,96(%esp)
- xorl %ecx,%esi
- addl %eax,%edi
+ addl %esi,%edi
+ xorl %ecx,%ebp
movdqa %xmm4,%xmm5
- paddd %xmm0,%xmm4
rorl $7,%ebx
- addl %esi,%edi
+ paddd %xmm0,%xmm4
+ addl %eax,%edi
pxor %xmm3,%xmm1
addl 20(%esp),%edx
- xorl %ecx,%ebp
+ xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
movdqa %xmm1,%xmm3
movdqa %xmm4,(%esp)
- xorl %ebx,%ebp
- addl %edi,%edx
- rorl $7,%eax
addl %ebp,%edx
+ xorl %ebx,%esi
+ rorl $7,%eax
+ addl %edi,%edx
pslld $2,%xmm1
addl 24(%esp),%ecx
- xorl %ebx,%esi
+ xorl %eax,%esi
psrld $30,%xmm3
movl %edx,%ebp
roll $5,%edx
- xorl %eax,%esi
- addl %edx,%ecx
- rorl $7,%edi
addl %esi,%ecx
+ xorl %eax,%ebp
+ rorl $7,%edi
+ addl %edx,%ecx
por %xmm3,%xmm1
addl 28(%esp),%ebx
- xorl %eax,%ebp
+ xorl %edi,%ebp
movdqa 64(%esp),%xmm3
movl %ecx,%esi
roll $5,%ecx
- xorl %edi,%ebp
- addl %ecx,%ebx
- rorl $7,%edx
- movdqa %xmm1,%xmm4
addl %ebp,%ebx
+ xorl %edi,%esi
+ rorl $7,%edx
+ pshufd $238,%xmm0,%xmm4
+ addl %ecx,%ebx
addl 32(%esp),%eax
pxor %xmm6,%xmm2
-.byte 102,15,58,15,224,8
- xorl %edi,%esi
+ punpcklqdq %xmm1,%xmm4
+ xorl %edx,%esi
movl %ebx,%ebp
roll $5,%ebx
pxor %xmm3,%xmm2
movdqa %xmm6,64(%esp)
- xorl %edx,%esi
- addl %ebx,%eax
+ addl %esi,%eax
+ xorl %edx,%ebp
movdqa 128(%esp),%xmm6
- paddd %xmm1,%xmm5
rorl $7,%ecx
- addl %esi,%eax
+ paddd %xmm1,%xmm5
+ addl %ebx,%eax
pxor %xmm4,%xmm2
addl 36(%esp),%edi
- xorl %edx,%ebp
+ xorl %ecx,%ebp
movl %eax,%esi
roll $5,%eax
movdqa %xmm2,%xmm4
movdqa %xmm5,16(%esp)
- xorl %ecx,%ebp
- addl %eax,%edi
- rorl $7,%ebx
addl %ebp,%edi
+ xorl %ecx,%esi
+ rorl $7,%ebx
+ addl %eax,%edi
pslld $2,%xmm2
addl 40(%esp),%edx
- xorl %ecx,%esi
+ xorl %ebx,%esi
psrld $30,%xmm4
movl %edi,%ebp
roll $5,%edi
- xorl %ebx,%esi
- addl %edi,%edx
- rorl $7,%eax
addl %esi,%edx
+ xorl %ebx,%ebp
+ rorl $7,%eax
+ addl %edi,%edx
por %xmm4,%xmm2
addl 44(%esp),%ecx
- xorl %ebx,%ebp
+ xorl %eax,%ebp
movdqa 80(%esp),%xmm4
movl %edx,%esi
roll $5,%edx
- xorl %eax,%ebp
- addl %edx,%ecx
- rorl $7,%edi
- movdqa %xmm2,%xmm5
addl %ebp,%ecx
+ xorl %eax,%esi
+ rorl $7,%edi
+ pshufd $238,%xmm1,%xmm5
+ addl %edx,%ecx
addl 48(%esp),%ebx
pxor %xmm7,%xmm3
-.byte 102,15,58,15,233,8
- xorl %eax,%esi
+ punpcklqdq %xmm2,%xmm5
+ xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
pxor %xmm4,%xmm3
movdqa %xmm7,80(%esp)
- xorl %edi,%esi
- addl %ecx,%ebx
+ addl %esi,%ebx
+ xorl %edi,%ebp
movdqa %xmm6,%xmm7
- paddd %xmm2,%xmm6
rorl $7,%edx
- addl %esi,%ebx
+ paddd %xmm2,%xmm6
+ addl %ecx,%ebx
pxor %xmm5,%xmm3
addl 52(%esp),%eax
- xorl %edi,%ebp
+ xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
movdqa %xmm3,%xmm5
movdqa %xmm6,32(%esp)
- xorl %edx,%ebp
- addl %ebx,%eax
- rorl $7,%ecx
addl %ebp,%eax
+ xorl %edx,%esi
+ rorl $7,%ecx
+ addl %ebx,%eax
pslld $2,%xmm3
addl 56(%esp),%edi
- xorl %edx,%esi
+ xorl %ecx,%esi
psrld $30,%xmm5
movl %eax,%ebp
roll $5,%eax
- xorl %ecx,%esi
- addl %eax,%edi
- rorl $7,%ebx
addl %esi,%edi
+ xorl %ecx,%ebp
+ rorl $7,%ebx
+ addl %eax,%edi
por %xmm5,%xmm3
addl 60(%esp),%edx
- xorl %ecx,%ebp
+ xorl %ebx,%ebp
movdqa 96(%esp),%xmm5
movl %edi,%esi
roll $5,%edi
- xorl %ebx,%ebp
- addl %edi,%edx
- rorl $7,%eax
- movdqa %xmm3,%xmm6
addl %ebp,%edx
+ xorl %ebx,%esi
+ rorl $7,%eax
+ pshufd $238,%xmm2,%xmm6
+ addl %edi,%edx
addl (%esp),%ecx
pxor %xmm0,%xmm4
-.byte 102,15,58,15,242,8
- xorl %ebx,%esi
+ punpcklqdq %xmm3,%xmm6
+ xorl %eax,%esi
movl %edx,%ebp
roll $5,%edx
pxor %xmm5,%xmm4
movdqa %xmm0,96(%esp)
- xorl %eax,%esi
- addl %edx,%ecx
+ addl %esi,%ecx
+ xorl %eax,%ebp
movdqa %xmm7,%xmm0
- paddd %xmm3,%xmm7
rorl $7,%edi
- addl %esi,%ecx
+ paddd %xmm3,%xmm7
+ addl %edx,%ecx
pxor %xmm6,%xmm4
addl 4(%esp),%ebx
- xorl %eax,%ebp
+ xorl %edi,%ebp
movl %ecx,%esi
roll $5,%ecx
movdqa %xmm4,%xmm6
movdqa %xmm7,48(%esp)
- xorl %edi,%ebp
- addl %ecx,%ebx
- rorl $7,%edx
addl %ebp,%ebx
+ xorl %edi,%esi
+ rorl $7,%edx
+ addl %ecx,%ebx
pslld $2,%xmm4
addl 8(%esp),%eax
- xorl %edi,%esi
+ xorl %edx,%esi
psrld $30,%xmm6
movl %ebx,%ebp
roll $5,%ebx
- xorl %edx,%esi
- addl %ebx,%eax
- rorl $7,%ecx
addl %esi,%eax
+ xorl %edx,%ebp
+ rorl $7,%ecx
+ addl %ebx,%eax
por %xmm6,%xmm4
addl 12(%esp),%edi
- xorl %edx,%ebp
+ xorl %ecx,%ebp
movdqa 64(%esp),%xmm6
movl %eax,%esi
roll $5,%eax
- xorl %ecx,%ebp
- addl %eax,%edi
- rorl $7,%ebx
- movdqa %xmm4,%xmm7
addl %ebp,%edi
+ xorl %ecx,%esi
+ rorl $7,%ebx
+ pshufd $238,%xmm3,%xmm7
+ addl %eax,%edi
addl 16(%esp),%edx
pxor %xmm1,%xmm5
-.byte 102,15,58,15,251,8
- xorl %ecx,%esi
+ punpcklqdq %xmm4,%xmm7
+ xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
pxor %xmm6,%xmm5
movdqa %xmm1,64(%esp)
- xorl %ebx,%esi
- addl %edi,%edx
+ addl %esi,%edx
+ xorl %ebx,%ebp
movdqa %xmm0,%xmm1
- paddd %xmm4,%xmm0
rorl $7,%eax
- addl %esi,%edx
+ paddd %xmm4,%xmm0
+ addl %edi,%edx
pxor %xmm7,%xmm5
addl 20(%esp),%ecx
- xorl %ebx,%ebp
+ xorl %eax,%ebp
movl %edx,%esi
roll $5,%edx
movdqa %xmm5,%xmm7
movdqa %xmm0,(%esp)
- xorl %eax,%ebp
- addl %edx,%ecx
- rorl $7,%edi
addl %ebp,%ecx
+ xorl %eax,%esi
+ rorl $7,%edi
+ addl %edx,%ecx
pslld $2,%xmm5
addl 24(%esp),%ebx
- xorl %eax,%esi
+ xorl %edi,%esi
psrld $30,%xmm7
movl %ecx,%ebp
roll $5,%ecx
- xorl %edi,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
+ xorl %edi,%ebp
+ rorl $7,%edx
+ addl %ecx,%ebx
por %xmm7,%xmm5
addl 28(%esp),%eax
- xorl %edi,%ebp
movdqa 80(%esp),%xmm7
+ rorl $7,%ecx
movl %ebx,%esi
- roll $5,%ebx
xorl %edx,%ebp
- addl %ebx,%eax
- rorl $7,%ecx
- movdqa %xmm5,%xmm0
+ roll $5,%ebx
+ pshufd $238,%xmm4,%xmm0
addl %ebp,%eax
- movl %ecx,%ebp
- pxor %xmm2,%xmm6
-.byte 102,15,58,15,196,8
+ xorl %ecx,%esi
xorl %edx,%ecx
+ addl %ebx,%eax
addl 32(%esp),%edi
- andl %edx,%ebp
- pxor %xmm7,%xmm6
- movdqa %xmm2,80(%esp)
+ pxor %xmm2,%xmm6
+ punpcklqdq %xmm5,%xmm0
andl %ecx,%esi
+ xorl %edx,%ecx
rorl $7,%ebx
- movdqa %xmm1,%xmm2
- paddd %xmm5,%xmm1
- addl %ebp,%edi
+ pxor %xmm7,%xmm6
+ movdqa %xmm2,80(%esp)
movl %eax,%ebp
- pxor %xmm0,%xmm6
+ xorl %ecx,%esi
roll $5,%eax
+ movdqa %xmm1,%xmm2
addl %esi,%edi
- xorl %edx,%ecx
+ paddd %xmm5,%xmm1
+ xorl %ebx,%ebp
+ pxor %xmm0,%xmm6
+ xorl %ecx,%ebx
addl %eax,%edi
+ addl 36(%esp),%edx
+ andl %ebx,%ebp
movdqa %xmm6,%xmm0
movdqa %xmm1,16(%esp)
- movl %ebx,%esi
xorl %ecx,%ebx
- addl 36(%esp),%edx
- andl %ecx,%esi
- pslld $2,%xmm6
- andl %ebx,%ebp
rorl $7,%eax
- psrld $30,%xmm0
- addl %esi,%edx
movl %edi,%esi
+ xorl %ebx,%ebp
roll $5,%edi
+ pslld $2,%xmm6
addl %ebp,%edx
- xorl %ecx,%ebx
- addl %edi,%edx
- por %xmm0,%xmm6
- movl %eax,%ebp
+ xorl %eax,%esi
+ psrld $30,%xmm0
xorl %ebx,%eax
- movdqa 96(%esp),%xmm0
+ addl %edi,%edx
addl 40(%esp),%ecx
- andl %ebx,%ebp
andl %eax,%esi
+ xorl %ebx,%eax
rorl $7,%edi
- addl %ebp,%ecx
- movdqa %xmm6,%xmm1
+ por %xmm0,%xmm6
movl %edx,%ebp
+ xorl %eax,%esi
+ movdqa 96(%esp),%xmm0
roll $5,%edx
addl %esi,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- movl %edi,%esi
+ xorl %edi,%ebp
xorl %eax,%edi
+ addl %edx,%ecx
+ pshufd $238,%xmm5,%xmm1
addl 44(%esp),%ebx
- andl %eax,%esi
andl %edi,%ebp
+ xorl %eax,%edi
rorl $7,%edx
- addl %esi,%ebx
movl %ecx,%esi
+ xorl %edi,%ebp
roll $5,%ecx
addl %ebp,%ebx
- xorl %eax,%edi
+ xorl %edx,%esi
+ xorl %edi,%edx
addl %ecx,%ebx
- movl %edx,%ebp
+ addl 48(%esp),%eax
pxor %xmm3,%xmm7
-.byte 102,15,58,15,205,8
+ punpcklqdq %xmm6,%xmm1
+ andl %edx,%esi
xorl %edi,%edx
- addl 48(%esp),%eax
- andl %edi,%ebp
+ rorl $7,%ecx
pxor %xmm0,%xmm7
movdqa %xmm3,96(%esp)
- andl %edx,%esi
- rorl $7,%ecx
- movdqa 144(%esp),%xmm3
- paddd %xmm6,%xmm2
- addl %ebp,%eax
movl %ebx,%ebp
- pxor %xmm1,%xmm7
+ xorl %edx,%esi
roll $5,%ebx
+ movdqa 144(%esp),%xmm3
addl %esi,%eax
- xorl %edi,%edx
+ paddd %xmm6,%xmm2
+ xorl %ecx,%ebp
+ pxor %xmm1,%xmm7
+ xorl %edx,%ecx
addl %ebx,%eax
+ addl 52(%esp),%edi
+ andl %ecx,%ebp
movdqa %xmm7,%xmm1
movdqa %xmm2,32(%esp)
- movl %ecx,%esi
xorl %edx,%ecx
- addl 52(%esp),%edi
- andl %edx,%esi
- pslld $2,%xmm7
- andl %ecx,%ebp
rorl $7,%ebx
- psrld $30,%xmm1
- addl %esi,%edi
movl %eax,%esi
+ xorl %ecx,%ebp
roll $5,%eax
+ pslld $2,%xmm7
addl %ebp,%edi
- xorl %edx,%ecx
- addl %eax,%edi
- por %xmm1,%xmm7
- movl %ebx,%ebp
+ xorl %ebx,%esi
+ psrld $30,%xmm1
xorl %ecx,%ebx
- movdqa 64(%esp),%xmm1
+ addl %eax,%edi
addl 56(%esp),%edx
- andl %ecx,%ebp
andl %ebx,%esi
+ xorl %ecx,%ebx
rorl $7,%eax
- addl %ebp,%edx
- movdqa %xmm7,%xmm2
+ por %xmm1,%xmm7
movl %edi,%ebp
+ xorl %ebx,%esi
+ movdqa 64(%esp),%xmm1
roll $5,%edi
addl %esi,%edx
- xorl %ecx,%ebx
- addl %edi,%edx
- movl %eax,%esi
+ xorl %eax,%ebp
xorl %ebx,%eax
+ addl %edi,%edx
+ pshufd $238,%xmm6,%xmm2
addl 60(%esp),%ecx
- andl %ebx,%esi
andl %eax,%ebp
+ xorl %ebx,%eax
rorl $7,%edi
- addl %esi,%ecx
movl %edx,%esi
+ xorl %eax,%ebp
roll $5,%edx
addl %ebp,%ecx
- xorl %ebx,%eax
+ xorl %edi,%esi
+ xorl %eax,%edi
addl %edx,%ecx
- movl %edi,%ebp
+ addl (%esp),%ebx
pxor %xmm4,%xmm0
-.byte 102,15,58,15,214,8
+ punpcklqdq %xmm7,%xmm2
+ andl %edi,%esi
xorl %eax,%edi
- addl (%esp),%ebx
- andl %eax,%ebp
+ rorl $7,%edx
pxor %xmm1,%xmm0
movdqa %xmm4,64(%esp)
- andl %edi,%esi
- rorl $7,%edx
- movdqa %xmm3,%xmm4
- paddd %xmm7,%xmm3
- addl %ebp,%ebx
movl %ecx,%ebp
- pxor %xmm2,%xmm0
+ xorl %edi,%esi
roll $5,%ecx
+ movdqa %xmm3,%xmm4
addl %esi,%ebx
- xorl %eax,%edi
+ paddd %xmm7,%xmm3
+ xorl %edx,%ebp
+ pxor %xmm2,%xmm0
+ xorl %edi,%edx
addl %ecx,%ebx
+ addl 4(%esp),%eax
+ andl %edx,%ebp
movdqa %xmm0,%xmm2
movdqa %xmm3,48(%esp)
- movl %edx,%esi
xorl %edi,%edx
- addl 4(%esp),%eax
- andl %edi,%esi
- pslld $2,%xmm0
- andl %edx,%ebp
rorl $7,%ecx
- psrld $30,%xmm2
- addl %esi,%eax
movl %ebx,%esi
+ xorl %edx,%ebp
roll $5,%ebx
+ pslld $2,%xmm0
addl %ebp,%eax
- xorl %edi,%edx
- addl %ebx,%eax
- por %xmm2,%xmm0
- movl %ecx,%ebp
+ xorl %ecx,%esi
+ psrld $30,%xmm2
xorl %edx,%ecx
- movdqa 80(%esp),%xmm2
+ addl %ebx,%eax
addl 8(%esp),%edi
- andl %edx,%ebp
andl %ecx,%esi
+ xorl %edx,%ecx
rorl $7,%ebx
- addl %ebp,%edi
- movdqa %xmm0,%xmm3
+ por %xmm2,%xmm0
movl %eax,%ebp
+ xorl %ecx,%esi
+ movdqa 80(%esp),%xmm2
roll $5,%eax
addl %esi,%edi
- xorl %edx,%ecx
- addl %eax,%edi
- movl %ebx,%esi
+ xorl %ebx,%ebp
xorl %ecx,%ebx
+ addl %eax,%edi
+ pshufd $238,%xmm7,%xmm3
addl 12(%esp),%edx
- andl %ecx,%esi
andl %ebx,%ebp
+ xorl %ecx,%ebx
rorl $7,%eax
- addl %esi,%edx
movl %edi,%esi
+ xorl %ebx,%ebp
roll $5,%edi
addl %ebp,%edx
- xorl %ecx,%ebx
+ xorl %eax,%esi
+ xorl %ebx,%eax
addl %edi,%edx
- movl %eax,%ebp
+ addl 16(%esp),%ecx
pxor %xmm5,%xmm1
-.byte 102,15,58,15,223,8
+ punpcklqdq %xmm0,%xmm3
+ andl %eax,%esi
xorl %ebx,%eax
- addl 16(%esp),%ecx
- andl %ebx,%ebp
+ rorl $7,%edi
pxor %xmm2,%xmm1
movdqa %xmm5,80(%esp)
- andl %eax,%esi
- rorl $7,%edi
- movdqa %xmm4,%xmm5
- paddd %xmm0,%xmm4
- addl %ebp,%ecx
movl %edx,%ebp
- pxor %xmm3,%xmm1
+ xorl %eax,%esi
roll $5,%edx
+ movdqa %xmm4,%xmm5
addl %esi,%ecx
- xorl %ebx,%eax
+ paddd %xmm0,%xmm4
+ xorl %edi,%ebp
+ pxor %xmm3,%xmm1
+ xorl %eax,%edi
addl %edx,%ecx
+ addl 20(%esp),%ebx
+ andl %edi,%ebp
movdqa %xmm1,%xmm3
movdqa %xmm4,(%esp)
- movl %edi,%esi
xorl %eax,%edi
- addl 20(%esp),%ebx
- andl %eax,%esi
- pslld $2,%xmm1
- andl %edi,%ebp
rorl $7,%edx
- psrld $30,%xmm3
- addl %esi,%ebx
movl %ecx,%esi
+ xorl %edi,%ebp
roll $5,%ecx
+ pslld $2,%xmm1
addl %ebp,%ebx
- xorl %eax,%edi
- addl %ecx,%ebx
- por %xmm3,%xmm1
- movl %edx,%ebp
+ xorl %edx,%esi
+ psrld $30,%xmm3
xorl %edi,%edx
- movdqa 96(%esp),%xmm3
+ addl %ecx,%ebx
addl 24(%esp),%eax
- andl %edi,%ebp
andl %edx,%esi
+ xorl %edi,%edx
rorl $7,%ecx
- addl %ebp,%eax
- movdqa %xmm1,%xmm4
+ por %xmm3,%xmm1
movl %ebx,%ebp
+ xorl %edx,%esi
+ movdqa 96(%esp),%xmm3
roll $5,%ebx
addl %esi,%eax
- xorl %edi,%edx
- addl %ebx,%eax
- movl %ecx,%esi
+ xorl %ecx,%ebp
xorl %edx,%ecx
+ addl %ebx,%eax
+ pshufd $238,%xmm0,%xmm4
addl 28(%esp),%edi
- andl %edx,%esi
andl %ecx,%ebp
+ xorl %edx,%ecx
rorl $7,%ebx
- addl %esi,%edi
movl %eax,%esi
+ xorl %ecx,%ebp
roll $5,%eax
addl %ebp,%edi
- xorl %edx,%ecx
+ xorl %ebx,%esi
+ xorl %ecx,%ebx
addl %eax,%edi
- movl %ebx,%ebp
+ addl 32(%esp),%edx
pxor %xmm6,%xmm2
-.byte 102,15,58,15,224,8
+ punpcklqdq %xmm1,%xmm4
+ andl %ebx,%esi
xorl %ecx,%ebx
- addl 32(%esp),%edx
- andl %ecx,%ebp
+ rorl $7,%eax
pxor %xmm3,%xmm2
movdqa %xmm6,96(%esp)
- andl %ebx,%esi
- rorl $7,%eax
- movdqa %xmm5,%xmm6
- paddd %xmm1,%xmm5
- addl %ebp,%edx
movl %edi,%ebp
- pxor %xmm4,%xmm2
+ xorl %ebx,%esi
roll $5,%edi
+ movdqa %xmm5,%xmm6
addl %esi,%edx
- xorl %ecx,%ebx
+ paddd %xmm1,%xmm5
+ xorl %eax,%ebp
+ pxor %xmm4,%xmm2
+ xorl %ebx,%eax
addl %edi,%edx
+ addl 36(%esp),%ecx
+ andl %eax,%ebp
movdqa %xmm2,%xmm4
movdqa %xmm5,16(%esp)
- movl %eax,%esi
xorl %ebx,%eax
- addl 36(%esp),%ecx
- andl %ebx,%esi
- pslld $2,%xmm2
- andl %eax,%ebp
rorl $7,%edi
- psrld $30,%xmm4
- addl %esi,%ecx
movl %edx,%esi
+ xorl %eax,%ebp
roll $5,%edx
+ pslld $2,%xmm2
addl %ebp,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- por %xmm4,%xmm2
- movl %edi,%ebp
+ xorl %edi,%esi
+ psrld $30,%xmm4
xorl %eax,%edi
- movdqa 64(%esp),%xmm4
+ addl %edx,%ecx
addl 40(%esp),%ebx
- andl %eax,%ebp
andl %edi,%esi
+ xorl %eax,%edi
rorl $7,%edx
- addl %ebp,%ebx
- movdqa %xmm2,%xmm5
+ por %xmm4,%xmm2
movl %ecx,%ebp
+ xorl %edi,%esi
+ movdqa 64(%esp),%xmm4
roll $5,%ecx
addl %esi,%ebx
- xorl %eax,%edi
- addl %ecx,%ebx
- movl %edx,%esi
+ xorl %edx,%ebp
xorl %edi,%edx
+ addl %ecx,%ebx
+ pshufd $238,%xmm1,%xmm5
addl 44(%esp),%eax
- andl %edi,%esi
andl %edx,%ebp
+ xorl %edi,%edx
rorl $7,%ecx
- addl %esi,%eax
movl %ebx,%esi
+ xorl %edx,%ebp
roll $5,%ebx
addl %ebp,%eax
- xorl %edi,%edx
+ xorl %edx,%esi
addl %ebx,%eax
addl 48(%esp),%edi
pxor %xmm7,%xmm3
-.byte 102,15,58,15,233,8
- xorl %edx,%esi
+ punpcklqdq %xmm2,%xmm5
+ xorl %ecx,%esi
movl %eax,%ebp
roll $5,%eax
pxor %xmm4,%xmm3
movdqa %xmm7,64(%esp)
- xorl %ecx,%esi
- addl %eax,%edi
+ addl %esi,%edi
+ xorl %ecx,%ebp
movdqa %xmm6,%xmm7
- paddd %xmm2,%xmm6
rorl $7,%ebx
- addl %esi,%edi
+ paddd %xmm2,%xmm6
+ addl %eax,%edi
pxor %xmm5,%xmm3
addl 52(%esp),%edx
- xorl %ecx,%ebp
+ xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
movdqa %xmm3,%xmm5
movdqa %xmm6,32(%esp)
- xorl %ebx,%ebp
- addl %edi,%edx
- rorl $7,%eax
addl %ebp,%edx
+ xorl %ebx,%esi
+ rorl $7,%eax
+ addl %edi,%edx
pslld $2,%xmm3
addl 56(%esp),%ecx
- xorl %ebx,%esi
+ xorl %eax,%esi
psrld $30,%xmm5
movl %edx,%ebp
roll $5,%edx
- xorl %eax,%esi
- addl %edx,%ecx
- rorl $7,%edi
addl %esi,%ecx
+ xorl %eax,%ebp
+ rorl $7,%edi
+ addl %edx,%ecx
por %xmm5,%xmm3
addl 60(%esp),%ebx
- xorl %eax,%ebp
+ xorl %edi,%ebp
movl %ecx,%esi
roll $5,%ecx
- xorl %edi,%ebp
- addl %ecx,%ebx
- rorl $7,%edx
addl %ebp,%ebx
- addl (%esp),%eax
- paddd %xmm3,%xmm7
xorl %edi,%esi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl (%esp),%eax
+ xorl %edx,%esi
movl %ebx,%ebp
roll $5,%ebx
- xorl %edx,%esi
- movdqa %xmm7,48(%esp)
- addl %ebx,%eax
- rorl $7,%ecx
addl %esi,%eax
- addl 4(%esp),%edi
xorl %edx,%ebp
+ rorl $7,%ecx
+ paddd %xmm3,%xmm7
+ addl %ebx,%eax
+ addl 4(%esp),%edi
+ xorl %ecx,%ebp
movl %eax,%esi
+ movdqa %xmm7,48(%esp)
roll $5,%eax
- xorl %ecx,%ebp
- addl %eax,%edi
- rorl $7,%ebx
addl %ebp,%edi
- addl 8(%esp),%edx
xorl %ecx,%esi
+ rorl $7,%ebx
+ addl %eax,%edi
+ addl 8(%esp),%edx
+ xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
- xorl %ebx,%esi
- addl %edi,%edx
- rorl $7,%eax
addl %esi,%edx
- addl 12(%esp),%ecx
xorl %ebx,%ebp
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 12(%esp),%ecx
+ xorl %eax,%ebp
movl %edx,%esi
roll $5,%edx
- xorl %eax,%ebp
- addl %edx,%ecx
- rorl $7,%edi
addl %ebp,%ecx
+ xorl %eax,%esi
+ rorl $7,%edi
+ addl %edx,%ecx
movl 196(%esp),%ebp
cmpl 200(%esp),%ebp
- je .L005done
+ je .L007done
movdqa 160(%esp),%xmm7
movdqa 176(%esp),%xmm6
movdqu (%ebp),%xmm0
@@ -2388,113 +2542,112 @@ _sha1_block_data_order_ssse3:
movl %ebp,196(%esp)
movdqa %xmm7,96(%esp)
addl 16(%esp),%ebx
- xorl %eax,%esi
-.byte 102,15,56,0,206
+ xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
- paddd %xmm7,%xmm0
- xorl %edi,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
- movdqa %xmm0,(%esp)
- addl 20(%esp),%eax
xorl %edi,%ebp
- psubd %xmm7,%xmm0
+ rorl $7,%edx
+.byte 102,15,56,0,206
+ addl %ecx,%ebx
+ addl 20(%esp),%eax
+ xorl %edx,%ebp
movl %ebx,%esi
+ paddd %xmm7,%xmm0
roll $5,%ebx
- xorl %edx,%ebp
- addl %ebx,%eax
- rorl $7,%ecx
addl %ebp,%eax
- addl 24(%esp),%edi
xorl %edx,%esi
+ rorl $7,%ecx
+ movdqa %xmm0,(%esp)
+ addl %ebx,%eax
+ addl 24(%esp),%edi
+ xorl %ecx,%esi
movl %eax,%ebp
+ psubd %xmm7,%xmm0
roll $5,%eax
- xorl %ecx,%esi
- addl %eax,%edi
- rorl $7,%ebx
addl %esi,%edi
- addl 28(%esp),%edx
xorl %ecx,%ebp
+ rorl $7,%ebx
+ addl %eax,%edi
+ addl 28(%esp),%edx
+ xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
- xorl %ebx,%ebp
- addl %edi,%edx
- rorl $7,%eax
addl %ebp,%edx
- addl 32(%esp),%ecx
xorl %ebx,%esi
-.byte 102,15,56,0,214
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 32(%esp),%ecx
+ xorl %eax,%esi
movl %edx,%ebp
roll $5,%edx
- paddd %xmm7,%xmm1
- xorl %eax,%esi
- addl %edx,%ecx
- rorl $7,%edi
addl %esi,%ecx
- movdqa %xmm1,16(%esp)
- addl 36(%esp),%ebx
xorl %eax,%ebp
- psubd %xmm7,%xmm1
+ rorl $7,%edi
+.byte 102,15,56,0,214
+ addl %edx,%ecx
+ addl 36(%esp),%ebx
+ xorl %edi,%ebp
movl %ecx,%esi
+ paddd %xmm7,%xmm1
roll $5,%ecx
- xorl %edi,%ebp
- addl %ecx,%ebx
- rorl $7,%edx
addl %ebp,%ebx
- addl 40(%esp),%eax
xorl %edi,%esi
+ rorl $7,%edx
+ movdqa %xmm1,16(%esp)
+ addl %ecx,%ebx
+ addl 40(%esp),%eax
+ xorl %edx,%esi
movl %ebx,%ebp
+ psubd %xmm7,%xmm1
roll $5,%ebx
- xorl %edx,%esi
- addl %ebx,%eax
- rorl $7,%ecx
addl %esi,%eax
- addl 44(%esp),%edi
xorl %edx,%ebp
+ rorl $7,%ecx
+ addl %ebx,%eax
+ addl 44(%esp),%edi
+ xorl %ecx,%ebp
movl %eax,%esi
roll $5,%eax
- xorl %ecx,%ebp
- addl %eax,%edi
- rorl $7,%ebx
addl %ebp,%edi
- addl 48(%esp),%edx
xorl %ecx,%esi
-.byte 102,15,56,0,222
+ rorl $7,%ebx
+ addl %eax,%edi
+ addl 48(%esp),%edx
+ xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
- paddd %xmm7,%xmm2
- xorl %ebx,%esi
- addl %edi,%edx
- rorl $7,%eax
addl %esi,%edx
- movdqa %xmm2,32(%esp)
- addl 52(%esp),%ecx
xorl %ebx,%ebp
- psubd %xmm7,%xmm2
+ rorl $7,%eax
+.byte 102,15,56,0,222
+ addl %edi,%edx
+ addl 52(%esp),%ecx
+ xorl %eax,%ebp
movl %edx,%esi
+ paddd %xmm7,%xmm2
roll $5,%edx
- xorl %eax,%ebp
- addl %edx,%ecx
- rorl $7,%edi
addl %ebp,%ecx
- addl 56(%esp),%ebx
xorl %eax,%esi
+ rorl $7,%edi
+ movdqa %xmm2,32(%esp)
+ addl %edx,%ecx
+ addl 56(%esp),%ebx
+ xorl %edi,%esi
movl %ecx,%ebp
+ psubd %xmm7,%xmm2
roll $5,%ecx
- xorl %edi,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
- addl 60(%esp),%eax
xorl %edi,%ebp
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 60(%esp),%eax
+ xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
- xorl %edx,%ebp
- addl %ebx,%eax
- rorl $7,%ecx
addl %ebp,%eax
+ rorl $7,%ecx
+ addl %ebx,%eax
movl 192(%esp),%ebp
addl (%ebp),%eax
addl 4(%ebp),%esi
@@ -2504,109 +2657,112 @@ _sha1_block_data_order_ssse3:
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,8(%ebp)
- movl %esi,%ebx
+ movl %ecx,%ebx
movl %edx,12(%ebp)
+ xorl %edx,%ebx
movl %edi,16(%ebp)
- movdqa %xmm1,%xmm4
- jmp .L004loop
+ movl %esi,%ebp
+ pshufd $238,%xmm0,%xmm4
+ andl %ebx,%esi
+ movl %ebp,%ebx
+ jmp .L006loop
.align 16
-.L005done:
+.L007done:
addl 16(%esp),%ebx
- xorl %eax,%esi
+ xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
- xorl %edi,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
- addl 20(%esp),%eax
xorl %edi,%ebp
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 20(%esp),%eax
+ xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
- xorl %edx,%ebp
- addl %ebx,%eax
- rorl $7,%ecx
addl %ebp,%eax
- addl 24(%esp),%edi
xorl %edx,%esi
+ rorl $7,%ecx
+ addl %ebx,%eax
+ addl 24(%esp),%edi
+ xorl %ecx,%esi
movl %eax,%ebp
roll $5,%eax
- xorl %ecx,%esi
- addl %eax,%edi
- rorl $7,%ebx
addl %esi,%edi
- addl 28(%esp),%edx
xorl %ecx,%ebp
+ rorl $7,%ebx
+ addl %eax,%edi
+ addl 28(%esp),%edx
+ xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
- xorl %ebx,%ebp
- addl %edi,%edx
- rorl $7,%eax
addl %ebp,%edx
- addl 32(%esp),%ecx
xorl %ebx,%esi
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 32(%esp),%ecx
+ xorl %eax,%esi
movl %edx,%ebp
roll $5,%edx
- xorl %eax,%esi
- addl %edx,%ecx
- rorl $7,%edi
addl %esi,%ecx
- addl 36(%esp),%ebx
xorl %eax,%ebp
+ rorl $7,%edi
+ addl %edx,%ecx
+ addl 36(%esp),%ebx
+ xorl %edi,%ebp
movl %ecx,%esi
roll $5,%ecx
- xorl %edi,%ebp
- addl %ecx,%ebx
- rorl $7,%edx
addl %ebp,%ebx
- addl 40(%esp),%eax
xorl %edi,%esi
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 40(%esp),%eax
+ xorl %edx,%esi
movl %ebx,%ebp
roll $5,%ebx
- xorl %edx,%esi
- addl %ebx,%eax
- rorl $7,%ecx
addl %esi,%eax
- addl 44(%esp),%edi
xorl %edx,%ebp
+ rorl $7,%ecx
+ addl %ebx,%eax
+ addl 44(%esp),%edi
+ xorl %ecx,%ebp
movl %eax,%esi
roll $5,%eax
- xorl %ecx,%ebp
- addl %eax,%edi
- rorl $7,%ebx
addl %ebp,%edi
- addl 48(%esp),%edx
xorl %ecx,%esi
+ rorl $7,%ebx
+ addl %eax,%edi
+ addl 48(%esp),%edx
+ xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
- xorl %ebx,%esi
- addl %edi,%edx
- rorl $7,%eax
addl %esi,%edx
- addl 52(%esp),%ecx
xorl %ebx,%ebp
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 52(%esp),%ecx
+ xorl %eax,%ebp
movl %edx,%esi
roll $5,%edx
- xorl %eax,%ebp
- addl %edx,%ecx
- rorl $7,%edi
addl %ebp,%ecx
- addl 56(%esp),%ebx
xorl %eax,%esi
+ rorl $7,%edi
+ addl %edx,%ecx
+ addl 56(%esp),%ebx
+ xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
- xorl %edi,%esi
- addl %ecx,%ebx
- rorl $7,%edx
addl %esi,%ebx
- addl 60(%esp),%eax
xorl %edi,%ebp
+ rorl $7,%edx
+ addl %ecx,%ebx
+ addl 60(%esp),%eax
+ xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
- xorl %edx,%ebp
- addl %ebx,%eax
- rorl $7,%ecx
addl %ebp,%eax
+ rorl $7,%ecx
+ addl %ebx,%eax
movl 192(%esp),%ebp
addl (%ebp),%eax
movl 204(%esp),%esp
@@ -2632,8 +2788,9 @@ _sha1_block_data_order_ssse3:
.long 2400959708,2400959708,2400959708,2400959708
.long 3395469782,3395469782,3395469782,3395469782
.long 66051,67438087,134810123,202182159
+.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115
.byte 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82
.byte 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112
.byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
-.comm OPENSSL_ia32cap_P,8,4
+.comm OPENSSL_ia32cap_P,16,4
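
The new _sha1_block_data_order_shaext body above is built from four SHA-NI
instructions emitted as .byte sequences: 15,58,204 is sha1rnds4, 15,56,200 is
sha1nexte, and 15,56,201/15,56,202 are sha1msg1/sha1msg2. A hedged sketch of the
recurring 4-round group, using the corresponding intrinsics; the real loop cycles
the sha1rnds4 immediate through 0..3 (one value per twenty rounds), 0 is shown
here because the intrinsic requires a literal constant, and the names are illustrative:

#include <immintrin.h>   /* SHA-NI intrinsics; compile with -msha */

static void sha1_group(__m128i *abcd, __m128i *e_w, __m128i w_next,
                       __m128i *w0, __m128i w1, __m128i w2, __m128i w3)
{
    __m128i saved = *abcd;                        /* movdqa %xmm0,%xmm1/%xmm2 */
    *abcd = _mm_sha1rnds4_epu32(*abcd, *e_w, 0);  /* four rounds of one K group */
    *e_w  = _mm_sha1nexte_epu32(saved, w_next);   /* fold old state into next E+W */
    *w0   = _mm_sha1msg2_epu32(                   /* message schedule update:   */
                _mm_xor_si128(_mm_sha1msg1_epu32(*w0, w1), w2), w3);
}
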
diff --git a/secure/lib/libcrypto/i386/sha256-586.s b/secure/lib/libcrypto/i386/sha256-586.s
index 7ea3748..a3b82f8 100644
--- a/secure/lib/libcrypto/i386/sha256-586.s
+++ b/secure/lib/libcrypto/i386/sha256-586.s
@@ -26,234 +26,4553 @@ sha256_block_data_order:
movl %edi,4(%esp)
movl %eax,8(%esp)
movl %ebx,12(%esp)
+ leal OPENSSL_ia32cap_P,%edx
+ movl (%edx),%ecx
+ movl 4(%edx),%ebx
+ testl $1048576,%ecx
+ jnz .L002loop
+ movl 8(%edx),%edx
+ testl $16777216,%ecx
+ jz .L003no_xmm
+ andl $1073741824,%ecx
+ andl $268435968,%ebx
+ testl $536870912,%edx
+ jnz .L004shaext
+ orl %ebx,%ecx
+ andl $1342177280,%ecx
+ cmpl $1342177280,%ecx
+ testl $512,%ebx
+ jnz .L005SSSE3
+.L003no_xmm:
+ subl %edi,%eax
+ cmpl $256,%eax
+ jae .L006unrolled
+ jmp .L002loop
.align 16
.L002loop:
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
- movl 12(%edi),%edx
bswap %eax
+ movl 12(%edi),%edx
bswap %ebx
- bswap %ecx
- bswap %edx
pushl %eax
+ bswap %ecx
pushl %ebx
+ bswap %edx
pushl %ecx
pushl %edx
movl 16(%edi),%eax
movl 20(%edi),%ebx
movl 24(%edi),%ecx
- movl 28(%edi),%edx
bswap %eax
+ movl 28(%edi),%edx
bswap %ebx
- bswap %ecx
- bswap %edx
pushl %eax
+ bswap %ecx
pushl %ebx
+ bswap %edx
pushl %ecx
pushl %edx
movl 32(%edi),%eax
movl 36(%edi),%ebx
movl 40(%edi),%ecx
- movl 44(%edi),%edx
bswap %eax
+ movl 44(%edi),%edx
bswap %ebx
- bswap %ecx
- bswap %edx
pushl %eax
+ bswap %ecx
pushl %ebx
+ bswap %edx
pushl %ecx
pushl %edx
movl 48(%edi),%eax
movl 52(%edi),%ebx
movl 56(%edi),%ecx
- movl 60(%edi),%edx
bswap %eax
+ movl 60(%edi),%edx
bswap %ebx
- bswap %ecx
- bswap %edx
pushl %eax
+ bswap %ecx
pushl %ebx
+ bswap %edx
pushl %ecx
pushl %edx
addl $64,%edi
- subl $32,%esp
- movl %edi,100(%esp)
+ leal -36(%esp),%esp
+ movl %edi,104(%esp)
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edi
- movl %ebx,4(%esp)
- movl %ecx,8(%esp)
- movl %edi,12(%esp)
+ movl %ebx,8(%esp)
+ xorl %ecx,%ebx
+ movl %ecx,12(%esp)
+ movl %edi,16(%esp)
+ movl %ebx,(%esp)
movl 16(%esi),%edx
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edi
- movl %ebx,20(%esp)
- movl %ecx,24(%esp)
- movl %edi,28(%esp)
+ movl %ebx,24(%esp)
+ movl %ecx,28(%esp)
+ movl %edi,32(%esp)
.align 16
-.L00300_15:
- movl 92(%esp),%ebx
+.L00700_15:
movl %edx,%ecx
+ movl 24(%esp),%esi
rorl $14,%ecx
- movl 20(%esp),%esi
+ movl 28(%esp),%edi
xorl %edx,%ecx
+ xorl %edi,%esi
+ movl 96(%esp),%ebx
rorl $5,%ecx
- xorl %edx,%ecx
- rorl $6,%ecx
- movl 24(%esp),%edi
- addl %ecx,%ebx
+ andl %edx,%esi
+ movl %edx,20(%esp)
+ xorl %ecx,%edx
+ addl 32(%esp),%ebx
xorl %edi,%esi
- movl %edx,16(%esp)
+ rorl $6,%edx
movl %eax,%ecx
+ addl %esi,%ebx
+ rorl $9,%ecx
+ addl %edx,%ebx
+ movl 8(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,4(%esp)
+ leal -4(%esp),%esp
+ rorl $11,%ecx
+ movl (%ebp),%esi
+ xorl %eax,%ecx
+ movl 20(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %esi,%ebx
+ movl %eax,(%esp)
+ addl %ebx,%edx
+ andl 4(%esp),%eax
+ addl %ecx,%ebx
+ xorl %edi,%eax
+ addl $4,%ebp
+ addl %ebx,%eax
+ cmpl $3248222580,%esi
+ jne .L00700_15
+ movl 156(%esp),%ecx
+ jmp .L00816_63
+.align 16
+.L00816_63:
+ movl %ecx,%ebx
+ movl 104(%esp),%esi
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 160(%esp),%ebx
+ shrl $10,%edi
+ addl 124(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 24(%esp),%esi
+ rorl $14,%ecx
+ addl %edi,%ebx
+ movl 28(%esp),%edi
+ xorl %edx,%ecx
+ xorl %edi,%esi
+ movl %ebx,96(%esp)
+ rorl $5,%ecx
andl %edx,%esi
- movl 12(%esp),%edx
+ movl %edx,20(%esp)
+ xorl %ecx,%edx
+ addl 32(%esp),%ebx
xorl %edi,%esi
- movl %eax,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
addl %esi,%ebx
rorl $9,%ecx
+ addl %edx,%ebx
+ movl 8(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,4(%esp)
+ leal -4(%esp),%esp
+ rorl $11,%ecx
+ movl (%ebp),%esi
+ xorl %eax,%ecx
+ movl 20(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %esi,%ebx
+ movl %eax,(%esp)
+ addl %ebx,%edx
+ andl 4(%esp),%eax
+ addl %ecx,%ebx
+ xorl %edi,%eax
+ movl 156(%esp),%ecx
+ addl $4,%ebp
+ addl %ebx,%eax
+ cmpl $3329325298,%esi
+ jne .L00816_63
+ movl 356(%esp),%esi
+ movl 8(%esp),%ebx
+ movl 16(%esp),%ecx
+ addl (%esi),%eax
+ addl 4(%esi),%ebx
+ addl 8(%esi),%edi
+ addl 12(%esi),%ecx
+ movl %eax,(%esi)
+ movl %ebx,4(%esi)
+ movl %edi,8(%esi)
+ movl %ecx,12(%esi)
+ movl 24(%esp),%eax
+ movl 28(%esp),%ebx
+ movl 32(%esp),%ecx
+ movl 360(%esp),%edi
+ addl 16(%esi),%edx
+ addl 20(%esi),%eax
+ addl 24(%esi),%ebx
+ addl 28(%esi),%ecx
+ movl %edx,16(%esi)
+ movl %eax,20(%esi)
+ movl %ebx,24(%esi)
+ movl %ecx,28(%esi)
+ leal 356(%esp),%esp
+ subl $256,%ebp
+ cmpl 8(%esp),%edi
+ jb .L002loop
+ movl 12(%esp),%esp
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.align 64
+.L001K256:
+.long 1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298
+.long 66051,67438087,134810123,202182159
+.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97
+.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32
+.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97
+.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103
+.byte 62,0
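
The scalar rounds above and the unrolled path that follows compute the SHA-256
round functions with fused rotate chains: the rorl $14 / $5 / $6 sequence in
.L00700_15 evaluates ror6(e ^ ror5(e ^ ror14(e))), which equals Sigma1(e), and the
ror11/ror7/shr3 chain in .L00816_63 is sigma0. For reference, the standard
definitions those chains expand to, as a hedged C restatement rather than OpenSSL code:

#include <stdint.h>

static inline uint32_t ror32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

static inline uint32_t Sigma0(uint32_t a) { return ror32(a, 2)  ^ ror32(a, 13) ^ ror32(a, 22); }
static inline uint32_t Sigma1(uint32_t e) { return ror32(e, 6)  ^ ror32(e, 11) ^ ror32(e, 25); }
static inline uint32_t sigma0(uint32_t x) { return ror32(x, 7)  ^ ror32(x, 18) ^ (x >> 3);  }
static inline uint32_t sigma1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); }
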
+.align 16
+.L006unrolled:
+ leal -96(%esp),%esp
+ movl (%esi),%eax
+ movl 4(%esi),%ebp
+ movl 8(%esi),%ecx
+ movl 12(%esi),%ebx
+ movl %ebp,4(%esp)
+ xorl %ecx,%ebp
+ movl %ecx,8(%esp)
+ movl %ebx,12(%esp)
+ movl 16(%esi),%edx
+ movl 20(%esi),%ebx
+ movl 24(%esi),%ecx
+ movl 28(%esi),%esi
+ movl %ebx,20(%esp)
+ movl %ecx,24(%esp)
+ movl %esi,28(%esp)
+ jmp .L009grand_loop
+.align 16
+.L009grand_loop:
+ movl (%edi),%ebx
+ movl 4(%edi),%ecx
+ bswap %ebx
+ movl 8(%edi),%esi
+ bswap %ecx
+ movl %ebx,32(%esp)
+ bswap %esi
+ movl %ecx,36(%esp)
+ movl %esi,40(%esp)
+ movl 12(%edi),%ebx
+ movl 16(%edi),%ecx
+ bswap %ebx
+ movl 20(%edi),%esi
+ bswap %ecx
+ movl %ebx,44(%esp)
+ bswap %esi
+ movl %ecx,48(%esp)
+ movl %esi,52(%esp)
+ movl 24(%edi),%ebx
+ movl 28(%edi),%ecx
+ bswap %ebx
+ movl 32(%edi),%esi
+ bswap %ecx
+ movl %ebx,56(%esp)
+ bswap %esi
+ movl %ecx,60(%esp)
+ movl %esi,64(%esp)
+ movl 36(%edi),%ebx
+ movl 40(%edi),%ecx
+ bswap %ebx
+ movl 44(%edi),%esi
+ bswap %ecx
+ movl %ebx,68(%esp)
+ bswap %esi
+ movl %ecx,72(%esp)
+ movl %esi,76(%esp)
+ movl 48(%edi),%ebx
+ movl 52(%edi),%ecx
+ bswap %ebx
+ movl 56(%edi),%esi
+ bswap %ecx
+ movl %ebx,80(%esp)
+ bswap %esi
+ movl %ecx,84(%esp)
+ movl %esi,88(%esp)
+ movl 60(%edi),%ebx
+ addl $64,%edi
+ bswap %ebx
+ movl %edi,100(%esp)
+ movl %ebx,92(%esp)
+ movl %edx,%ecx
+ movl 20(%esp),%esi
+ rorl $14,%edx
+ movl 24(%esp),%edi
+ xorl %ecx,%edx
+ movl 32(%esp),%ebx
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,16(%esp)
+ xorl %ecx,%edx
addl 28(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 4(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 1116352408(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 12(%esp),%edx
+ addl %ecx,%ebp
+ movl %edx,%esi
+ movl 16(%esp),%ecx
+ rorl $14,%edx
+ movl 20(%esp),%edi
+ xorl %esi,%edx
+ movl 36(%esp),%ebx
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,12(%esp)
+ xorl %esi,%edx
+ addl 24(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl (%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,28(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 1899447441(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 8(%esp),%edx
+ addl %esi,%eax
+ movl %edx,%ecx
+ movl 12(%esp),%esi
+ rorl $14,%edx
+ movl 16(%esp),%edi
+ xorl %ecx,%edx
+ movl 40(%esp),%ebx
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,8(%esp)
+ xorl %ecx,%edx
+ addl 20(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 28(%esp),%edi
xorl %eax,%ecx
+ movl %eax,24(%esp)
+ xorl %edi,%eax
rorl $11,%ecx
+ andl %eax,%ebp
+ leal 3049323471(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 4(%esp),%edx
+ addl %ecx,%ebp
+ movl %edx,%esi
+ movl 8(%esp),%ecx
+ rorl $14,%edx
+ movl 12(%esp),%edi
+ xorl %esi,%edx
+ movl 44(%esp),%ebx
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,4(%esp)
+ xorl %esi,%edx
+ addl 16(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 24(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,20(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 3921009573(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ rorl $2,%esi
+ addl %edx,%eax
+ addl (%esp),%edx
+ addl %esi,%eax
+ movl %edx,%ecx
movl 4(%esp),%esi
+ rorl $14,%edx
+ movl 8(%esp),%edi
+ xorl %ecx,%edx
+ movl 48(%esp),%ebx
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,(%esp)
+ xorl %ecx,%edx
+ addl 12(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 20(%esp),%edi
xorl %eax,%ecx
+ movl %eax,16(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 961987163(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
rorl $2,%ecx
- addl %ebx,%edx
+ addl %edx,%ebp
+ addl 28(%esp),%edx
+ addl %ecx,%ebp
+ movl %edx,%esi
+ movl (%esp),%ecx
+ rorl $14,%edx
+ movl 4(%esp),%edi
+ xorl %esi,%edx
+ movl 52(%esp),%ebx
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,28(%esp)
+ xorl %esi,%edx
+ addl 8(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 16(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,12(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 1508970993(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 24(%esp),%edx
+ addl %esi,%eax
+ movl %edx,%ecx
+ movl 28(%esp),%esi
+ rorl $14,%edx
+ movl (%esp),%edi
+ xorl %ecx,%edx
+ movl 56(%esp),%ebx
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,24(%esp)
+ xorl %ecx,%edx
+ addl 4(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 12(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,8(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 2453635748(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 20(%esp),%edx
+ addl %ecx,%ebp
+ movl %edx,%esi
+ movl 24(%esp),%ecx
+ rorl $14,%edx
+ movl 28(%esp),%edi
+ xorl %esi,%edx
+ movl 60(%esp),%ebx
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,20(%esp)
+ xorl %esi,%edx
+ addl (%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
movl 8(%esp),%edi
- addl %ecx,%ebx
+ xorl %ebp,%esi
+ movl %ebp,4(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 2870763221(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 16(%esp),%edx
+ addl %esi,%eax
+ movl %edx,%ecx
+ movl 20(%esp),%esi
+ rorl $14,%edx
+ movl 24(%esp),%edi
+ xorl %ecx,%edx
+ movl 64(%esp),%ebx
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,16(%esp)
+ xorl %ecx,%edx
+ addl 28(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 4(%esp),%edi
+ xorl %eax,%ecx
movl %eax,(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 3624381080(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 12(%esp),%edx
+ addl %ecx,%ebp
+ movl %edx,%esi
+ movl 16(%esp),%ecx
+ rorl $14,%edx
+ movl 20(%esp),%edi
+ xorl %esi,%edx
+ movl 68(%esp),%ebx
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,12(%esp)
+ xorl %esi,%edx
+ addl 24(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl (%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,28(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 310598401(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 8(%esp),%edx
+ addl %esi,%eax
+ movl %edx,%ecx
+ movl 12(%esp),%esi
+ rorl $14,%edx
+ movl 16(%esp),%edi
+ xorl %ecx,%edx
+ movl 72(%esp),%ebx
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,8(%esp)
+ xorl %ecx,%edx
+ addl 20(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
movl %eax,%ecx
- subl $4,%esp
- orl %esi,%eax
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 28(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,24(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 607225278(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 4(%esp),%edx
+ addl %ecx,%ebp
+ movl %edx,%esi
+ movl 8(%esp),%ecx
+ rorl $14,%edx
+ movl 12(%esp),%edi
+ xorl %esi,%edx
+ movl 76(%esp),%ebx
+ xorl %edi,%ecx
+ rorl $5,%edx
andl %esi,%ecx
- andl %edi,%eax
- movl (%ebp),%esi
- orl %ecx,%eax
- addl $4,%ebp
- addl %ebx,%eax
- addl %esi,%edx
+ movl %esi,4(%esp)
+ xorl %esi,%edx
+ addl 16(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 24(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,20(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 1426881987(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ rorl $2,%esi
+ addl %edx,%eax
+ addl (%esp),%edx
addl %esi,%eax
- cmpl $3248222580,%esi
- jne .L00300_15
- movl 152(%esp),%ebx
-.align 16
-.L00416_63:
- movl %ebx,%esi
- movl 100(%esp),%ecx
+ movl %edx,%ecx
+ movl 4(%esp),%esi
+ rorl $14,%edx
+ movl 8(%esp),%edi
+ xorl %ecx,%edx
+ movl 80(%esp),%ebx
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,(%esp)
+ xorl %ecx,%edx
+ addl 12(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 20(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,16(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 1925078388(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 28(%esp),%edx
+ addl %ecx,%ebp
+ movl %edx,%esi
+ movl (%esp),%ecx
+ rorl $14,%edx
+ movl 4(%esp),%edi
+ xorl %esi,%edx
+ movl 84(%esp),%ebx
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,28(%esp)
+ xorl %esi,%edx
+ addl 8(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 16(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,12(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 2162078206(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 24(%esp),%edx
+ addl %esi,%eax
+ movl %edx,%ecx
+ movl 28(%esp),%esi
+ rorl $14,%edx
+ movl (%esp),%edi
+ xorl %ecx,%edx
+ movl 88(%esp),%ebx
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,24(%esp)
+ xorl %ecx,%edx
+ addl 4(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 12(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,8(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 2614888103(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 20(%esp),%edx
+ addl %ecx,%ebp
+ movl %edx,%esi
+ movl 24(%esp),%ecx
+ rorl $14,%edx
+ movl 28(%esp),%edi
+ xorl %esi,%edx
+ movl 92(%esp),%ebx
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,20(%esp)
+ xorl %esi,%edx
+ addl (%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 8(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,4(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 3248222580(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 36(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 16(%esp),%edx
+ addl %esi,%eax
+ movl 88(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 32(%esp),%ebx
+ shrl $10,%edi
+ addl 68(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 20(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 24(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,32(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,16(%esp)
+ xorl %ecx,%edx
+ addl 28(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 4(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 3835390401(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 40(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 12(%esp),%edx
+ addl %ecx,%ebp
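+# from round 16 on, each round first extends the schedule in place:
+# W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16], with
+# sigma0 = ror7^ror18^shr3 and sigma1 = ror17^ror19^shr10 assembled from the
+# ror/shr pairs above and written back into the 32..92(%esp) ring.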
+ movl 92(%esp),%ecx
+ movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
+ rorl $2,%ecx
xorl %ebx,%esi
+ shrl $3,%ebx
rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 36(%esp),%ebx
+ shrl $10,%edi
+ addl 72(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 16(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 20(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,36(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,12(%esp)
+ xorl %esi,%edx
+ addl 24(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl (%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,28(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 4022224774(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 44(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 8(%esp),%edx
+ addl %esi,%eax
+ movl 32(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 40(%esp),%ebx
+ shrl $10,%edi
+ addl 76(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 12(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 16(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,40(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,8(%esp)
+ xorl %ecx,%edx
+ addl 20(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 28(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,24(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 264347078(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 48(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 4(%esp),%edx
+ addl %ecx,%ebp
+ movl 36(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
shrl $3,%ebx
- rorl $2,%edi
+ rorl $7,%esi
+ xorl %edi,%ecx
xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 44(%esp),%ebx
+ shrl $10,%edi
+ addl 80(%esp),%ebx
+ movl %edx,%esi
xorl %ecx,%edi
- rorl $17,%edi
- shrl $10,%ecx
- addl 156(%esp),%ebx
+ movl 8(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 12(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,44(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,4(%esp)
+ xorl %esi,%edx
+ addl 16(%esp),%ebx
xorl %ecx,%edi
- addl 120(%esp),%ebx
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 24(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,20(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 604807628(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 52(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl (%esp),%edx
+ addl %esi,%eax
+ movl 40(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 48(%esp),%ebx
+ shrl $10,%edi
+ addl 84(%esp),%ebx
movl %edx,%ecx
+ xorl %esi,%edi
+ movl 4(%esp),%esi
+ rorl $14,%edx
addl %edi,%ebx
- rorl $14,%ecx
+ movl 8(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,48(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,(%esp)
+ xorl %ecx,%edx
+ addl 12(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 20(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,16(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 770255983(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 56(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 28(%esp),%edx
+ addl %ecx,%ebp
+ movl 44(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 52(%esp),%ebx
+ shrl $10,%edi
+ addl 88(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl (%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 4(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,52(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,28(%esp)
+ xorl %esi,%edx
+ addl 8(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 16(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,12(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 1249150122(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 60(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 24(%esp),%edx
+ addl %esi,%eax
+ movl 48(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 56(%esp),%ebx
+ shrl $10,%edi
+ addl 92(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 28(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl (%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,56(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,24(%esp)
+ xorl %ecx,%edx
+ addl 4(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 12(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,8(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 1555081692(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 64(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 20(%esp),%edx
+ addl %ecx,%ebp
+ movl 52(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 60(%esp),%ebx
+ shrl $10,%edi
+ addl 32(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 24(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 28(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,60(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,20(%esp)
+ xorl %esi,%edx
+ addl (%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 8(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,4(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 1996064986(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 68(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 16(%esp),%edx
+ addl %esi,%eax
+ movl 56(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 64(%esp),%ebx
+ shrl $10,%edi
+ addl 36(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
movl 20(%esp),%esi
- xorl %edx,%ecx
- rorl $5,%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 24(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,64(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,16(%esp)
+ xorl %ecx,%edx
+ addl 28(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 4(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 2554220882(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 72(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 12(%esp),%edx
+ addl %ecx,%ebp
+ movl 60(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 68(%esp),%ebx
+ shrl $10,%edi
+ addl 40(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 16(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 20(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,68(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,12(%esp)
+ xorl %esi,%edx
+ addl 24(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl (%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,28(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 2821834349(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 76(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 8(%esp),%edx
+ addl %esi,%eax
+ movl 64(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 72(%esp),%ebx
+ shrl $10,%edi
+ addl 44(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 12(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 16(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,72(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,8(%esp)
+ xorl %ecx,%edx
+ addl 20(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 28(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,24(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 2952996808(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 80(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 4(%esp),%edx
+ addl %ecx,%ebp
+ movl 68(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 76(%esp),%ebx
+ shrl $10,%edi
+ addl 48(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 8(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 12(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,76(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,4(%esp)
+ xorl %esi,%edx
+ addl 16(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 24(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,20(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 3210313671(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 84(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl (%esp),%edx
+ addl %esi,%eax
+ movl 72(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 80(%esp),%ebx
+ shrl $10,%edi
+ addl 52(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 4(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 8(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,80(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,(%esp)
+ xorl %ecx,%edx
+ addl 12(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 20(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,16(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 3336571891(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 88(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 28(%esp),%edx
+ addl %ecx,%ebp
+ movl 76(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 84(%esp),%ebx
+ shrl $10,%edi
+ addl 56(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl (%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 4(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,84(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,28(%esp)
+ xorl %esi,%edx
+ addl 8(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 16(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,12(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 3584528711(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 92(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 24(%esp),%edx
+ addl %esi,%eax
+ movl 80(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 88(%esp),%ebx
+ shrl $10,%edi
+ addl 60(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 28(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl (%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,88(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,24(%esp)
+ xorl %ecx,%edx
+ addl 4(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 12(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,8(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 113926993(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 32(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 20(%esp),%edx
+ addl %ecx,%ebp
+ movl 84(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 92(%esp),%ebx
+ shrl $10,%edi
+ addl 64(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 24(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 28(%esp),%edi
+ xorl %esi,%edx
movl %ebx,92(%esp)
- xorl %edx,%ecx
- rorl $6,%ecx
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,20(%esp)
+ xorl %esi,%edx
+ addl (%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 8(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,4(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 338241895(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 36(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 16(%esp),%edx
+ addl %esi,%eax
+ movl 88(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 32(%esp),%ebx
+ shrl $10,%edi
+ addl 68(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 20(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
movl 24(%esp),%edi
- addl %ecx,%ebx
+ xorl %ecx,%edx
+ movl %ebx,32(%esp)
xorl %edi,%esi
- movl %edx,16(%esp)
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,16(%esp)
+ xorl %ecx,%edx
+ addl 28(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
movl %eax,%ecx
- andl %edx,%esi
- movl 12(%esp),%edx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 4(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 666307205(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 40(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 12(%esp),%edx
+ addl %ecx,%ebp
+ movl 92(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 36(%esp),%ebx
+ shrl $10,%edi
+ addl 72(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 16(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 20(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,36(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,12(%esp)
+ xorl %esi,%edx
+ addl 24(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl (%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,28(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 773529912(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 44(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 8(%esp),%edx
+ addl %esi,%eax
+ movl 32(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
xorl %edi,%esi
- movl %eax,%edi
- addl %esi,%ebx
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 40(%esp),%ebx
+ shrl $10,%edi
+ addl 76(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 12(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 16(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,40(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,8(%esp)
+ xorl %ecx,%edx
+ addl 20(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 28(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,24(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 1294757372(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 48(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 4(%esp),%edx
+ addl %ecx,%ebp
+ movl 36(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 44(%esp),%ebx
+ shrl $10,%edi
+ addl 80(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 8(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 12(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,44(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,4(%esp)
+ xorl %esi,%edx
+ addl 16(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 24(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,20(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 1396182291(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 52(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl (%esp),%edx
+ addl %esi,%eax
+ movl 40(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 48(%esp),%ebx
+ shrl $10,%edi
+ addl 84(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 4(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 8(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,48(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,(%esp)
+ xorl %ecx,%edx
+ addl 12(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
rorl $9,%ecx
+ movl %eax,%esi
+ movl 20(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,16(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 1695183700(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 56(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 28(%esp),%edx
+ addl %ecx,%ebp
+ movl 44(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 52(%esp),%ebx
+ shrl $10,%edi
+ addl 88(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl (%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 4(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,52(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,28(%esp)
+ xorl %esi,%edx
+ addl 8(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 16(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,12(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 1986661051(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 60(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 24(%esp),%edx
+ addl %esi,%eax
+ movl 48(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 56(%esp),%ebx
+ shrl $10,%edi
+ addl 92(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 28(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl (%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,56(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,24(%esp)
+ xorl %ecx,%edx
+ addl 4(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 12(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,8(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 2177026350(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 64(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 20(%esp),%edx
+ addl %ecx,%ebp
+ movl 52(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 60(%esp),%ebx
+ shrl $10,%edi
+ addl 32(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 24(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 28(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,60(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,20(%esp)
+ xorl %esi,%edx
+ addl (%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 8(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,4(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 2456956037(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 68(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 16(%esp),%edx
+ addl %esi,%eax
+ movl 56(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 64(%esp),%ebx
+ shrl $10,%edi
+ addl 36(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 20(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 24(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,64(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,16(%esp)
+ xorl %ecx,%edx
addl 28(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 4(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 2730485921(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 72(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 12(%esp),%edx
+ addl %ecx,%ebp
+ movl 60(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 68(%esp),%ebx
+ shrl $10,%edi
+ addl 40(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 16(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 20(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,68(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,12(%esp)
+ xorl %esi,%edx
+ addl 24(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl (%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,28(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 2820302411(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 76(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 8(%esp),%edx
+ addl %esi,%eax
+ movl 64(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 72(%esp),%ebx
+ shrl $10,%edi
+ addl 44(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 12(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 16(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,72(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,8(%esp)
+ xorl %ecx,%edx
+ addl 20(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 28(%esp),%edi
xorl %eax,%ecx
+ movl %eax,24(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 3259730800(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 80(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 4(%esp),%edx
+ addl %ecx,%ebp
+ movl 68(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 76(%esp),%ebx
+ shrl $10,%edi
+ addl 48(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 8(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 12(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,76(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,4(%esp)
+ xorl %esi,%edx
+ addl 16(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 24(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,20(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 3345764771(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 84(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl (%esp),%edx
+ addl %esi,%eax
+ movl 72(%esp),%esi
+ movl %ecx,%ebx
rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 80(%esp),%ebx
+ shrl $10,%edi
+ addl 52(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
movl 4(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 8(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,80(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,(%esp)
+ xorl %ecx,%edx
+ addl 12(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 20(%esp),%edi
xorl %eax,%ecx
+ movl %eax,16(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 3516065817(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 88(%esp),%esi
rorl $2,%ecx
- addl %ebx,%edx
+ addl %edx,%ebp
+ addl 28(%esp),%edx
+ addl %ecx,%ebp
+ movl 76(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 84(%esp),%ebx
+ shrl $10,%edi
+ addl 56(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl (%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 4(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,84(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,28(%esp)
+ xorl %esi,%edx
+ addl 8(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 16(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,12(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 3600352804(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 92(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 24(%esp),%edx
+ addl %esi,%eax
+ movl 80(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 88(%esp),%ebx
+ shrl $10,%edi
+ addl 60(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 28(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl (%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,88(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,24(%esp)
+ xorl %ecx,%edx
+ addl 4(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 12(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,8(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 4094571909(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 32(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 20(%esp),%edx
+ addl %ecx,%ebp
+ movl 84(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 92(%esp),%ebx
+ shrl $10,%edi
+ addl 64(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 24(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 28(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,92(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,20(%esp)
+ xorl %esi,%edx
+ addl (%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
movl 8(%esp),%edi
- addl %ecx,%ebx
+ xorl %ebp,%esi
+ movl %ebp,4(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 275423344(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 36(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 16(%esp),%edx
+ addl %esi,%eax
+ movl 88(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 32(%esp),%ebx
+ shrl $10,%edi
+ addl 68(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 20(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 24(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,32(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,16(%esp)
+ xorl %ecx,%edx
+ addl 28(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 4(%esp),%edi
+ xorl %eax,%ecx
movl %eax,(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 430227734(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 40(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 12(%esp),%edx
+ addl %ecx,%ebp
+ movl 92(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 36(%esp),%ebx
+ shrl $10,%edi
+ addl 72(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 16(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 20(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,36(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,12(%esp)
+ xorl %esi,%edx
+ addl 24(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl (%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,28(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 506948616(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 44(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 8(%esp),%edx
+ addl %esi,%eax
+ movl 32(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 40(%esp),%ebx
+ shrl $10,%edi
+ addl 76(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 12(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 16(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,40(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,8(%esp)
+ xorl %ecx,%edx
+ addl 20(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
movl %eax,%ecx
- subl $4,%esp
- orl %esi,%eax
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 28(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,24(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 659060556(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 48(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 4(%esp),%edx
+ addl %ecx,%ebp
+ movl 36(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 44(%esp),%ebx
+ shrl $10,%edi
+ addl 80(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 8(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 12(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,44(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
andl %esi,%ecx
- andl %edi,%eax
- movl (%ebp),%esi
- orl %ecx,%eax
- addl $4,%ebp
- addl %ebx,%eax
- movl 152(%esp),%ebx
- addl %esi,%edx
+ movl %esi,4(%esp)
+ xorl %esi,%edx
+ addl 16(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 24(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,20(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 883997877(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 52(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl (%esp),%edx
addl %esi,%eax
- cmpl $3329325298,%esi
- jne .L00416_63
- movl 352(%esp),%esi
- movl 4(%esp),%ebx
+ movl 40(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 48(%esp),%ebx
+ shrl $10,%edi
+ addl 84(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 4(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 8(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,48(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,(%esp)
+ xorl %ecx,%edx
+ addl 12(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 20(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,16(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 958139571(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 56(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 28(%esp),%edx
+ addl %ecx,%ebp
+ movl 44(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 52(%esp),%ebx
+ shrl $10,%edi
+ addl 88(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl (%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 4(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,52(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,28(%esp)
+ xorl %esi,%edx
+ addl 8(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 16(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,12(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 1322822218(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 60(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 24(%esp),%edx
+ addl %esi,%eax
+ movl 48(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 56(%esp),%ebx
+ shrl $10,%edi
+ addl 92(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 28(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl (%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,56(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,24(%esp)
+ xorl %ecx,%edx
+ addl 4(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 12(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,8(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 1537002063(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 64(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 20(%esp),%edx
+ addl %ecx,%ebp
+ movl 52(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 60(%esp),%ebx
+ shrl $10,%edi
+ addl 32(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 24(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 28(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,60(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,20(%esp)
+ xorl %esi,%edx
+ addl (%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 8(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,4(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 1747873779(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 68(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 16(%esp),%edx
+ addl %esi,%eax
+ movl 56(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 64(%esp),%ebx
+ shrl $10,%edi
+ addl 36(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 20(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 24(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,64(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,16(%esp)
+ xorl %ecx,%edx
+ addl 28(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 4(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 1955562222(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 72(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 12(%esp),%edx
+ addl %ecx,%ebp
+ movl 60(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 68(%esp),%ebx
+ shrl $10,%edi
+ addl 40(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 16(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 20(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,68(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,12(%esp)
+ xorl %esi,%edx
+ addl 24(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl (%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,28(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 2024104815(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 76(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 8(%esp),%edx
+ addl %esi,%eax
+ movl 64(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 72(%esp),%ebx
+ shrl $10,%edi
+ addl 44(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 12(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 16(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,72(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,8(%esp)
+ xorl %ecx,%edx
+ addl 20(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 28(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,24(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 2227730452(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 80(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 4(%esp),%edx
+ addl %ecx,%ebp
+ movl 68(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 76(%esp),%ebx
+ shrl $10,%edi
+ addl 48(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
movl 8(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 12(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,76(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,4(%esp)
+ xorl %esi,%edx
+ addl 16(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 24(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,20(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 2361852424(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 84(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl (%esp),%edx
+ addl %esi,%eax
+ movl 72(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 80(%esp),%ebx
+ shrl $10,%edi
+ addl 52(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 4(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 8(%esp),%edi
+ xorl %ecx,%edx
+ movl %ebx,80(%esp)
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,(%esp)
+ xorl %ecx,%edx
+ addl 12(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
+ movl 20(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,16(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 2428436474(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 88(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 28(%esp),%edx
+ addl %ecx,%ebp
+ movl 76(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 84(%esp),%ebx
+ shrl $10,%edi
+ addl 56(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl (%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 4(%esp),%edi
+ xorl %esi,%edx
+ movl %ebx,84(%esp)
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,28(%esp)
+ xorl %esi,%edx
+ addl 8(%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 16(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,12(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 2756734187(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ movl 92(%esp),%ecx
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 24(%esp),%edx
+ addl %esi,%eax
+ movl 80(%esp),%esi
+ movl %ecx,%ebx
+ rorl $11,%ecx
+ movl %esi,%edi
+ rorl $2,%esi
+ xorl %ebx,%ecx
+ shrl $3,%ebx
+ rorl $7,%ecx
+ xorl %edi,%esi
+ xorl %ecx,%ebx
+ rorl $17,%esi
+ addl 88(%esp),%ebx
+ shrl $10,%edi
+ addl 60(%esp),%ebx
+ movl %edx,%ecx
+ xorl %esi,%edi
+ movl 28(%esp),%esi
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl (%esp),%edi
+ xorl %ecx,%edx
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,24(%esp)
+ xorl %ecx,%edx
+ addl 4(%esp),%ebx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%ebx
+ rorl $9,%ecx
+ movl %eax,%esi
movl 12(%esp),%edi
+ xorl %eax,%ecx
+ movl %eax,8(%esp)
+ xorl %edi,%eax
+ rorl $11,%ecx
+ andl %eax,%ebp
+ leal 3204031479(%ebx,%edx,1),%edx
+ xorl %esi,%ecx
+ xorl %edi,%ebp
+ movl 32(%esp),%esi
+ rorl $2,%ecx
+ addl %edx,%ebp
+ addl 20(%esp),%edx
+ addl %ecx,%ebp
+ movl 84(%esp),%ecx
+ movl %esi,%ebx
+ rorl $11,%esi
+ movl %ecx,%edi
+ rorl $2,%ecx
+ xorl %ebx,%esi
+ shrl $3,%ebx
+ rorl $7,%esi
+ xorl %edi,%ecx
+ xorl %esi,%ebx
+ rorl $17,%ecx
+ addl 92(%esp),%ebx
+ shrl $10,%edi
+ addl 64(%esp),%ebx
+ movl %edx,%esi
+ xorl %ecx,%edi
+ movl 24(%esp),%ecx
+ rorl $14,%edx
+ addl %edi,%ebx
+ movl 28(%esp),%edi
+ xorl %esi,%edx
+ xorl %edi,%ecx
+ rorl $5,%edx
+ andl %esi,%ecx
+ movl %esi,20(%esp)
+ xorl %esi,%edx
+ addl (%esp),%ebx
+ xorl %ecx,%edi
+ rorl $6,%edx
+ movl %ebp,%esi
+ addl %edi,%ebx
+ rorl $9,%esi
+ movl %ebp,%ecx
+ movl 8(%esp),%edi
+ xorl %ebp,%esi
+ movl %ebp,4(%esp)
+ xorl %edi,%ebp
+ rorl $11,%esi
+ andl %ebp,%eax
+ leal 3329325298(%ebx,%edx,1),%edx
+ xorl %ecx,%esi
+ xorl %edi,%eax
+ rorl $2,%esi
+ addl %edx,%eax
+ addl 16(%esp),%edx
+ addl %esi,%eax
+ movl 96(%esp),%esi
+ xorl %edi,%ebp
+ movl 12(%esp),%ecx
addl (%esi),%eax
- addl 4(%esi),%ebx
- addl 8(%esi),%ecx
- addl 12(%esi),%edi
+ addl 4(%esi),%ebp
+ addl 8(%esi),%edi
+ addl 12(%esi),%ecx
movl %eax,(%esi)
- movl %ebx,4(%esi)
- movl %ecx,8(%esi)
- movl %edi,12(%esi)
- movl 20(%esp),%eax
+ movl %ebp,4(%esi)
+ movl %edi,8(%esi)
+ movl %ecx,12(%esi)
+ movl %ebp,4(%esp)
+ xorl %edi,%ebp
+ movl %edi,8(%esp)
+ movl %ecx,12(%esp)
+ movl 20(%esp),%edi
movl 24(%esp),%ebx
movl 28(%esp),%ecx
- movl 356(%esp),%edi
addl 16(%esi),%edx
- addl 20(%esi),%eax
+ addl 20(%esi),%edi
addl 24(%esi),%ebx
addl 28(%esi),%ecx
movl %edx,16(%esi)
- movl %eax,20(%esi)
+ movl %edi,20(%esi)
movl %ebx,24(%esi)
movl %ecx,28(%esi)
- addl $352,%esp
- subl $256,%ebp
- cmpl 8(%esp),%edi
- jb .L002loop
- movl 12(%esp),%esp
+ movl %edi,20(%esp)
+ movl 100(%esp),%edi
+ movl %ebx,24(%esp)
+ movl %ecx,28(%esp)
+ cmpl 104(%esp),%edi
+ jb .L009grand_loop
+ movl 108(%esp),%esp
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
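+# .L004shaext: SHA-NI path. The eight state words are permuted with
+# pshufd/palignr/punpcklqdq into the ABEF/CDGH lane order sha256rnds2 expects,
+# then each 64-byte block is processed in .L010loop_shaext.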
+.align 32
+.L004shaext:
+ subl $32,%esp
+ movdqu (%esi),%xmm1
+ leal 128(%ebp),%ebp
+ movdqu 16(%esi),%xmm2
+ movdqa 128(%ebp),%xmm7
+ pshufd $27,%xmm1,%xmm0
+ pshufd $177,%xmm1,%xmm1
+ pshufd $27,%xmm2,%xmm2
+.byte 102,15,58,15,202,8
+ punpcklqdq %xmm0,%xmm2
+ jmp .L010loop_shaext
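+# the .byte sequences encode instructions older assemblers have no mnemonics
+# for: 15,56,203 is sha256rnds2; 15,56,204 sha256msg1; 15,56,205 sha256msg2;
+# 102,15,56,0 pshufb; 102,15,58,15 palignr.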
+.align 16
+.L010loop_shaext:
+ movdqu (%edi),%xmm3
+ movdqu 16(%edi),%xmm4
+ movdqu 32(%edi),%xmm5
+.byte 102,15,56,0,223
+ movdqu 48(%edi),%xmm6
+ movdqa %xmm2,16(%esp)
+ movdqa -128(%ebp),%xmm0
+ paddd %xmm3,%xmm0
+.byte 102,15,56,0,231
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ nop
+ movdqa %xmm1,(%esp)
+.byte 15,56,203,202
+ movdqa -112(%ebp),%xmm0
+ paddd %xmm4,%xmm0
+.byte 102,15,56,0,239
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ leal 64(%edi),%edi
+.byte 15,56,204,220
+.byte 15,56,203,202
+ movdqa -96(%ebp),%xmm0
+ paddd %xmm5,%xmm0
+.byte 102,15,56,0,247
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm6,%xmm7
+.byte 102,15,58,15,253,4
+ nop
+ paddd %xmm7,%xmm3
+.byte 15,56,204,229
+.byte 15,56,203,202
+ movdqa -80(%ebp),%xmm0
+ paddd %xmm6,%xmm0
+.byte 15,56,205,222
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm3,%xmm7
+.byte 102,15,58,15,254,4
+ nop
+ paddd %xmm7,%xmm4
+.byte 15,56,204,238
+.byte 15,56,203,202
+ movdqa -64(%ebp),%xmm0
+ paddd %xmm3,%xmm0
+.byte 15,56,205,227
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm4,%xmm7
+.byte 102,15,58,15,251,4
+ nop
+ paddd %xmm7,%xmm5
+.byte 15,56,204,243
+.byte 15,56,203,202
+ movdqa -48(%ebp),%xmm0
+ paddd %xmm4,%xmm0
+.byte 15,56,205,236
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm5,%xmm7
+.byte 102,15,58,15,252,4
+ nop
+ paddd %xmm7,%xmm6
+.byte 15,56,204,220
+.byte 15,56,203,202
+ movdqa -32(%ebp),%xmm0
+ paddd %xmm5,%xmm0
+.byte 15,56,205,245
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm6,%xmm7
+.byte 102,15,58,15,253,4
+ nop
+ paddd %xmm7,%xmm3
+.byte 15,56,204,229
+.byte 15,56,203,202
+ movdqa -16(%ebp),%xmm0
+ paddd %xmm6,%xmm0
+.byte 15,56,205,222
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm3,%xmm7
+.byte 102,15,58,15,254,4
+ nop
+ paddd %xmm7,%xmm4
+.byte 15,56,204,238
+.byte 15,56,203,202
+ movdqa (%ebp),%xmm0
+ paddd %xmm3,%xmm0
+.byte 15,56,205,227
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm4,%xmm7
+.byte 102,15,58,15,251,4
+ nop
+ paddd %xmm7,%xmm5
+.byte 15,56,204,243
+.byte 15,56,203,202
+ movdqa 16(%ebp),%xmm0
+ paddd %xmm4,%xmm0
+.byte 15,56,205,236
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm5,%xmm7
+.byte 102,15,58,15,252,4
+ nop
+ paddd %xmm7,%xmm6
+.byte 15,56,204,220
+.byte 15,56,203,202
+ movdqa 32(%ebp),%xmm0
+ paddd %xmm5,%xmm0
+.byte 15,56,205,245
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm6,%xmm7
+.byte 102,15,58,15,253,4
+ nop
+ paddd %xmm7,%xmm3
+.byte 15,56,204,229
+.byte 15,56,203,202
+ movdqa 48(%ebp),%xmm0
+ paddd %xmm6,%xmm0
+.byte 15,56,205,222
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm3,%xmm7
+.byte 102,15,58,15,254,4
+ nop
+ paddd %xmm7,%xmm4
+.byte 15,56,204,238
+.byte 15,56,203,202
+ movdqa 64(%ebp),%xmm0
+ paddd %xmm3,%xmm0
+.byte 15,56,205,227
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm4,%xmm7
+.byte 102,15,58,15,251,4
+ nop
+ paddd %xmm7,%xmm5
+.byte 15,56,204,243
+.byte 15,56,203,202
+ movdqa 80(%ebp),%xmm0
+ paddd %xmm4,%xmm0
+.byte 15,56,205,236
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ movdqa %xmm5,%xmm7
+.byte 102,15,58,15,252,4
+.byte 15,56,203,202
+ paddd %xmm7,%xmm6
+ movdqa 96(%ebp),%xmm0
+ paddd %xmm5,%xmm0
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+.byte 15,56,205,245
+ movdqa 128(%ebp),%xmm7
+.byte 15,56,203,202
+ movdqa 112(%ebp),%xmm0
+ paddd %xmm6,%xmm0
+ nop
+.byte 15,56,203,209
+ pshufd $14,%xmm0,%xmm0
+ cmpl %edi,%eax
+ nop
+.byte 15,56,203,202
+ paddd 16(%esp),%xmm2
+ paddd (%esp),%xmm1
+ jnz .L010loop_shaext
+ pshufd $177,%xmm2,%xmm2
+ pshufd $27,%xmm1,%xmm7
+ pshufd $177,%xmm1,%xmm1
+ punpckhqdq %xmm2,%xmm1
+.byte 102,15,58,15,215,8
+ movl 44(%esp),%esp
+ movdqu %xmm1,(%esi)
+ movdqu %xmm2,16(%esi)
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
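+# .L005SSSE3: scalar/vector hybrid. Same 96-byte frame and register roles as
+# the unrolled path (the b^c cache sits in %ebx here, %ebp points at K256),
+# with the message schedule and the W+K additions done in xmm registers.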
+.align 32
+.L005SSSE3:
+ leal -96(%esp),%esp
+ movl (%esi),%eax
+ movl 4(%esi),%ebx
+ movl 8(%esi),%ecx
+ movl 12(%esi),%edi
+ movl %ebx,4(%esp)
+ xorl %ecx,%ebx
+ movl %ecx,8(%esp)
+ movl %edi,12(%esp)
+ movl 16(%esi),%edx
+ movl 20(%esi),%edi
+ movl 24(%esi),%ecx
+ movl 28(%esi),%esi
+ movl %edi,20(%esp)
+ movl 100(%esp),%edi
+ movl %ecx,24(%esp)
+ movl %esi,28(%esp)
+ movdqa 256(%ebp),%xmm7
+ jmp .L011grand_ssse3
+.align 16
+.L011grand_ssse3:
+ movdqu (%edi),%xmm0
+ movdqu 16(%edi),%xmm1
+ movdqu 32(%edi),%xmm2
+ movdqu 48(%edi),%xmm3
+ addl $64,%edi
+.byte 102,15,56,0,199
+ movl %edi,100(%esp)
+.byte 102,15,56,0,207
+ movdqa (%ebp),%xmm4
+.byte 102,15,56,0,215
+ movdqa 16(%ebp),%xmm5
+ paddd %xmm0,%xmm4
+.byte 102,15,56,0,223
+ movdqa 32(%ebp),%xmm6
+ paddd %xmm1,%xmm5
+ movdqa 48(%ebp),%xmm7
+ movdqa %xmm4,32(%esp)
+ paddd %xmm2,%xmm6
+ movdqa %xmm5,48(%esp)
+ paddd %xmm3,%xmm7
+ movdqa %xmm6,64(%esp)
+ movdqa %xmm7,80(%esp)
+ jmp .L012ssse3_00_47
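+# .L012ssse3_00_47: rounds 0..47 interleave four scalar rounds with SSSE3
+# computation of the next four schedule words (palignr/psrld/pslld/pxor for
+# sigma0, psrlq/psrld/pshufd for sigma1); paddd pre-adds K, so each round just
+# pulls a ready W+K word from 32..80(%esp).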
+.align 16
+.L012ssse3_00_47:
+ addl $64,%ebp
+ movl %edx,%ecx
+ movdqa %xmm1,%xmm4
+ rorl $14,%edx
+ movl 20(%esp),%esi
+ movdqa %xmm3,%xmm7
+ xorl %ecx,%edx
+ movl 24(%esp),%edi
+.byte 102,15,58,15,224,4
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+.byte 102,15,58,15,250,4
+ movl %ecx,16(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ movdqa %xmm4,%xmm5
+ rorl $6,%edx
+ movl %eax,%ecx
+ movdqa %xmm4,%xmm6
+ addl %edi,%edx
+ movl 4(%esp),%edi
+ psrld $3,%xmm4
+ movl %eax,%esi
+ rorl $9,%ecx
+ paddd %xmm7,%xmm0
+ movl %eax,(%esp)
+ xorl %eax,%ecx
+ psrld $7,%xmm6
+ xorl %edi,%eax
+ addl 28(%esp),%edx
+ rorl $11,%ecx
+ andl %eax,%ebx
+ pshufd $250,%xmm3,%xmm7
+ xorl %esi,%ecx
+ addl 32(%esp),%edx
+ pslld $14,%xmm5
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ pxor %xmm6,%xmm4
+ addl %edx,%ebx
+ addl 12(%esp),%edx
+ psrld $11,%xmm6
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ pxor %xmm5,%xmm4
+ movl 16(%esp),%esi
+ xorl %ecx,%edx
+ pslld $11,%xmm5
+ movl 20(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ pxor %xmm6,%xmm4
+ andl %ecx,%esi
+ movl %ecx,12(%esp)
+ movdqa %xmm7,%xmm6
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ pxor %xmm5,%xmm4
+ movl %ebx,%ecx
+ addl %edi,%edx
+ psrld $10,%xmm7
+ movl (%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ paddd %xmm4,%xmm0
+ movl %ebx,28(%esp)
+ xorl %ebx,%ecx
+ psrlq $17,%xmm6
+ xorl %edi,%ebx
+ addl 24(%esp),%edx
+ rorl $11,%ecx
+ pxor %xmm6,%xmm7
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ psrlq $2,%xmm6
+ addl 36(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ pxor %xmm6,%xmm7
+ addl %edx,%eax
+ addl 8(%esp),%edx
+ pshufd $128,%xmm7,%xmm7
+ addl %ecx,%eax
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 12(%esp),%esi
+ xorl %ecx,%edx
+ movl 16(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ psrldq $8,%xmm7
+ movl %ecx,8(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ paddd %xmm7,%xmm0
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%edx
+ movl 28(%esp),%edi
+ movl %eax,%esi
+ rorl $9,%ecx
+ movl %eax,24(%esp)
+ pshufd $80,%xmm0,%xmm7
+ xorl %eax,%ecx
+ xorl %edi,%eax
+ addl 20(%esp),%edx
+ movdqa %xmm7,%xmm6
+ rorl $11,%ecx
+ psrld $10,%xmm7
+ andl %eax,%ebx
+ psrlq $17,%xmm6
+ xorl %esi,%ecx
+ addl 40(%esp),%edx
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ pxor %xmm6,%xmm7
+ addl %edx,%ebx
+ addl 4(%esp),%edx
+ psrlq $2,%xmm6
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ pxor %xmm6,%xmm7
+ movl 8(%esp),%esi
+ xorl %ecx,%edx
+ movl 12(%esp),%edi
+ pshufd $8,%xmm7,%xmm7
+ xorl %edi,%esi
+ rorl $5,%edx
+ movdqa (%ebp),%xmm6
+ andl %ecx,%esi
+ movl %ecx,4(%esp)
+ pslldq $8,%xmm7
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %ebx,%ecx
+ addl %edi,%edx
+ movl 24(%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ paddd %xmm7,%xmm0
+ movl %ebx,20(%esp)
+ xorl %ebx,%ecx
+ xorl %edi,%ebx
+ addl 16(%esp),%edx
+ paddd %xmm0,%xmm6
+ rorl $11,%ecx
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ addl 44(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %edx,%eax
+ addl (%esp),%edx
+ addl %ecx,%eax
+ movdqa %xmm6,32(%esp)
+ movl %edx,%ecx
+ movdqa %xmm2,%xmm4
+ rorl $14,%edx
+ movl 4(%esp),%esi
+ movdqa %xmm0,%xmm7
+ xorl %ecx,%edx
+ movl 8(%esp),%edi
+.byte 102,15,58,15,225,4
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+.byte 102,15,58,15,251,4
+ movl %ecx,(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ movdqa %xmm4,%xmm5
+ rorl $6,%edx
+ movl %eax,%ecx
+ movdqa %xmm4,%xmm6
+ addl %edi,%edx
+ movl 20(%esp),%edi
+ psrld $3,%xmm4
+ movl %eax,%esi
+ rorl $9,%ecx
+ paddd %xmm7,%xmm1
+ movl %eax,16(%esp)
+ xorl %eax,%ecx
+ psrld $7,%xmm6
+ xorl %edi,%eax
+ addl 12(%esp),%edx
+ rorl $11,%ecx
+ andl %eax,%ebx
+ pshufd $250,%xmm0,%xmm7
+ xorl %esi,%ecx
+ addl 48(%esp),%edx
+ pslld $14,%xmm5
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ pxor %xmm6,%xmm4
+ addl %edx,%ebx
+ addl 28(%esp),%edx
+ psrld $11,%xmm6
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ pxor %xmm5,%xmm4
+ movl (%esp),%esi
+ xorl %ecx,%edx
+ pslld $11,%xmm5
+ movl 4(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ pxor %xmm6,%xmm4
+ andl %ecx,%esi
+ movl %ecx,28(%esp)
+ movdqa %xmm7,%xmm6
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ pxor %xmm5,%xmm4
+ movl %ebx,%ecx
+ addl %edi,%edx
+ psrld $10,%xmm7
+ movl 16(%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ paddd %xmm4,%xmm1
+ movl %ebx,12(%esp)
+ xorl %ebx,%ecx
+ psrlq $17,%xmm6
+ xorl %edi,%ebx
+ addl 8(%esp),%edx
+ rorl $11,%ecx
+ pxor %xmm6,%xmm7
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ psrlq $2,%xmm6
+ addl 52(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ pxor %xmm6,%xmm7
+ addl %edx,%eax
+ addl 24(%esp),%edx
+ pshufd $128,%xmm7,%xmm7
+ addl %ecx,%eax
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 28(%esp),%esi
+ xorl %ecx,%edx
+ movl (%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ psrldq $8,%xmm7
+ movl %ecx,24(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ paddd %xmm7,%xmm1
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%edx
+ movl 12(%esp),%edi
+ movl %eax,%esi
+ rorl $9,%ecx
+ movl %eax,8(%esp)
+ pshufd $80,%xmm1,%xmm7
+ xorl %eax,%ecx
+ xorl %edi,%eax
+ addl 4(%esp),%edx
+ movdqa %xmm7,%xmm6
+ rorl $11,%ecx
+ psrld $10,%xmm7
+ andl %eax,%ebx
+ psrlq $17,%xmm6
+ xorl %esi,%ecx
+ addl 56(%esp),%edx
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ pxor %xmm6,%xmm7
+ addl %edx,%ebx
+ addl 20(%esp),%edx
+ psrlq $2,%xmm6
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ pxor %xmm6,%xmm7
+ movl 24(%esp),%esi
+ xorl %ecx,%edx
+ movl 28(%esp),%edi
+ pshufd $8,%xmm7,%xmm7
+ xorl %edi,%esi
+ rorl $5,%edx
+ movdqa 16(%ebp),%xmm6
+ andl %ecx,%esi
+ movl %ecx,20(%esp)
+ pslldq $8,%xmm7
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %ebx,%ecx
+ addl %edi,%edx
+ movl 8(%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ paddd %xmm7,%xmm1
+ movl %ebx,4(%esp)
+ xorl %ebx,%ecx
+ xorl %edi,%ebx
+ addl (%esp),%edx
+ paddd %xmm1,%xmm6
+ rorl $11,%ecx
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ addl 60(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %edx,%eax
+ addl 16(%esp),%edx
+ addl %ecx,%eax
+ movdqa %xmm6,48(%esp)
+ movl %edx,%ecx
+ movdqa %xmm3,%xmm4
+ rorl $14,%edx
+ movl 20(%esp),%esi
+ movdqa %xmm1,%xmm7
+ xorl %ecx,%edx
+ movl 24(%esp),%edi
+.byte 102,15,58,15,226,4
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+.byte 102,15,58,15,248,4
+ movl %ecx,16(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ movdqa %xmm4,%xmm5
+ rorl $6,%edx
+ movl %eax,%ecx
+ movdqa %xmm4,%xmm6
+ addl %edi,%edx
+ movl 4(%esp),%edi
+ psrld $3,%xmm4
+ movl %eax,%esi
+ rorl $9,%ecx
+ paddd %xmm7,%xmm2
+ movl %eax,(%esp)
+ xorl %eax,%ecx
+ psrld $7,%xmm6
+ xorl %edi,%eax
+ addl 28(%esp),%edx
+ rorl $11,%ecx
+ andl %eax,%ebx
+ pshufd $250,%xmm1,%xmm7
+ xorl %esi,%ecx
+ addl 64(%esp),%edx
+ pslld $14,%xmm5
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ pxor %xmm6,%xmm4
+ addl %edx,%ebx
+ addl 12(%esp),%edx
+ psrld $11,%xmm6
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ pxor %xmm5,%xmm4
+ movl 16(%esp),%esi
+ xorl %ecx,%edx
+ pslld $11,%xmm5
+ movl 20(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ pxor %xmm6,%xmm4
+ andl %ecx,%esi
+ movl %ecx,12(%esp)
+ movdqa %xmm7,%xmm6
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ pxor %xmm5,%xmm4
+ movl %ebx,%ecx
+ addl %edi,%edx
+ psrld $10,%xmm7
+ movl (%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ paddd %xmm4,%xmm2
+ movl %ebx,28(%esp)
+ xorl %ebx,%ecx
+ psrlq $17,%xmm6
+ xorl %edi,%ebx
+ addl 24(%esp),%edx
+ rorl $11,%ecx
+ pxor %xmm6,%xmm7
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ psrlq $2,%xmm6
+ addl 68(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ pxor %xmm6,%xmm7
+ addl %edx,%eax
+ addl 8(%esp),%edx
+ pshufd $128,%xmm7,%xmm7
+ addl %ecx,%eax
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 12(%esp),%esi
+ xorl %ecx,%edx
+ movl 16(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ psrldq $8,%xmm7
+ movl %ecx,8(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ paddd %xmm7,%xmm2
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%edx
+ movl 28(%esp),%edi
+ movl %eax,%esi
+ rorl $9,%ecx
+ movl %eax,24(%esp)
+ pshufd $80,%xmm2,%xmm7
+ xorl %eax,%ecx
+ xorl %edi,%eax
+ addl 20(%esp),%edx
+ movdqa %xmm7,%xmm6
+ rorl $11,%ecx
+ psrld $10,%xmm7
+ andl %eax,%ebx
+ psrlq $17,%xmm6
+ xorl %esi,%ecx
+ addl 72(%esp),%edx
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ pxor %xmm6,%xmm7
+ addl %edx,%ebx
+ addl 4(%esp),%edx
+ psrlq $2,%xmm6
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ pxor %xmm6,%xmm7
+ movl 8(%esp),%esi
+ xorl %ecx,%edx
+ movl 12(%esp),%edi
+ pshufd $8,%xmm7,%xmm7
+ xorl %edi,%esi
+ rorl $5,%edx
+ movdqa 32(%ebp),%xmm6
+ andl %ecx,%esi
+ movl %ecx,4(%esp)
+ pslldq $8,%xmm7
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %ebx,%ecx
+ addl %edi,%edx
+ movl 24(%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ paddd %xmm7,%xmm2
+ movl %ebx,20(%esp)
+ xorl %ebx,%ecx
+ xorl %edi,%ebx
+ addl 16(%esp),%edx
+ paddd %xmm2,%xmm6
+ rorl $11,%ecx
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ addl 76(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %edx,%eax
+ addl (%esp),%edx
+ addl %ecx,%eax
+ movdqa %xmm6,64(%esp)
+ movl %edx,%ecx
+ movdqa %xmm0,%xmm4
+ rorl $14,%edx
+ movl 4(%esp),%esi
+ movdqa %xmm2,%xmm7
+ xorl %ecx,%edx
+ movl 8(%esp),%edi
+.byte 102,15,58,15,227,4
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+.byte 102,15,58,15,249,4
+ movl %ecx,(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ movdqa %xmm4,%xmm5
+ rorl $6,%edx
+ movl %eax,%ecx
+ movdqa %xmm4,%xmm6
+ addl %edi,%edx
+ movl 20(%esp),%edi
+ psrld $3,%xmm4
+ movl %eax,%esi
+ rorl $9,%ecx
+ paddd %xmm7,%xmm3
+ movl %eax,16(%esp)
+ xorl %eax,%ecx
+ psrld $7,%xmm6
+ xorl %edi,%eax
+ addl 12(%esp),%edx
+ rorl $11,%ecx
+ andl %eax,%ebx
+ pshufd $250,%xmm2,%xmm7
+ xorl %esi,%ecx
+ addl 80(%esp),%edx
+ pslld $14,%xmm5
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ pxor %xmm6,%xmm4
+ addl %edx,%ebx
+ addl 28(%esp),%edx
+ psrld $11,%xmm6
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ pxor %xmm5,%xmm4
+ movl (%esp),%esi
+ xorl %ecx,%edx
+ pslld $11,%xmm5
+ movl 4(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ pxor %xmm6,%xmm4
+ andl %ecx,%esi
+ movl %ecx,28(%esp)
+ movdqa %xmm7,%xmm6
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ pxor %xmm5,%xmm4
+ movl %ebx,%ecx
+ addl %edi,%edx
+ psrld $10,%xmm7
+ movl 16(%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ paddd %xmm4,%xmm3
+ movl %ebx,12(%esp)
+ xorl %ebx,%ecx
+ psrlq $17,%xmm6
+ xorl %edi,%ebx
+ addl 8(%esp),%edx
+ rorl $11,%ecx
+ pxor %xmm6,%xmm7
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ psrlq $2,%xmm6
+ addl 84(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ pxor %xmm6,%xmm7
+ addl %edx,%eax
+ addl 24(%esp),%edx
+ pshufd $128,%xmm7,%xmm7
+ addl %ecx,%eax
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 28(%esp),%esi
+ xorl %ecx,%edx
+ movl (%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ psrldq $8,%xmm7
+ movl %ecx,24(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ paddd %xmm7,%xmm3
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%edx
+ movl 12(%esp),%edi
+ movl %eax,%esi
+ rorl $9,%ecx
+ movl %eax,8(%esp)
+ pshufd $80,%xmm3,%xmm7
+ xorl %eax,%ecx
+ xorl %edi,%eax
+ addl 4(%esp),%edx
+ movdqa %xmm7,%xmm6
+ rorl $11,%ecx
+ psrld $10,%xmm7
+ andl %eax,%ebx
+ psrlq $17,%xmm6
+ xorl %esi,%ecx
+ addl 88(%esp),%edx
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ pxor %xmm6,%xmm7
+ addl %edx,%ebx
+ addl 20(%esp),%edx
+ psrlq $2,%xmm6
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ pxor %xmm6,%xmm7
+ movl 24(%esp),%esi
+ xorl %ecx,%edx
+ movl 28(%esp),%edi
+ pshufd $8,%xmm7,%xmm7
+ xorl %edi,%esi
+ rorl $5,%edx
+ movdqa 48(%ebp),%xmm6
+ andl %ecx,%esi
+ movl %ecx,20(%esp)
+ pslldq $8,%xmm7
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %ebx,%ecx
+ addl %edi,%edx
+ movl 8(%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ paddd %xmm7,%xmm3
+ movl %ebx,4(%esp)
+ xorl %ebx,%ecx
+ xorl %edi,%ebx
+ addl (%esp),%edx
+ paddd %xmm3,%xmm6
+ rorl $11,%ecx
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ addl 92(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %edx,%eax
+ addl 16(%esp),%edx
+ addl %ecx,%eax
+ movdqa %xmm6,80(%esp)
+ cmpl $66051,64(%ebp)
+ jne .L012ssse3_00_47
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 20(%esp),%esi
+ xorl %ecx,%edx
+ movl 24(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,16(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%edx
+ movl 4(%esp),%edi
+ movl %eax,%esi
+ rorl $9,%ecx
+ movl %eax,(%esp)
+ xorl %eax,%ecx
+ xorl %edi,%eax
+ addl 28(%esp),%edx
+ rorl $11,%ecx
+ andl %eax,%ebx
+ xorl %esi,%ecx
+ addl 32(%esp),%edx
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ addl %edx,%ebx
+ addl 12(%esp),%edx
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 16(%esp),%esi
+ xorl %ecx,%edx
+ movl 20(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,12(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %ebx,%ecx
+ addl %edi,%edx
+ movl (%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ movl %ebx,28(%esp)
+ xorl %ebx,%ecx
+ xorl %edi,%ebx
+ addl 24(%esp),%edx
+ rorl $11,%ecx
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ addl 36(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %edx,%eax
+ addl 8(%esp),%edx
+ addl %ecx,%eax
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 12(%esp),%esi
+ xorl %ecx,%edx
+ movl 16(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,8(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%edx
+ movl 28(%esp),%edi
+ movl %eax,%esi
+ rorl $9,%ecx
+ movl %eax,24(%esp)
+ xorl %eax,%ecx
+ xorl %edi,%eax
+ addl 20(%esp),%edx
+ rorl $11,%ecx
+ andl %eax,%ebx
+ xorl %esi,%ecx
+ addl 40(%esp),%edx
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ addl %edx,%ebx
+ addl 4(%esp),%edx
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 8(%esp),%esi
+ xorl %ecx,%edx
+ movl 12(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,4(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %ebx,%ecx
+ addl %edi,%edx
+ movl 24(%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ movl %ebx,20(%esp)
+ xorl %ebx,%ecx
+ xorl %edi,%ebx
+ addl 16(%esp),%edx
+ rorl $11,%ecx
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ addl 44(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %edx,%eax
+ addl (%esp),%edx
+ addl %ecx,%eax
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 4(%esp),%esi
+ xorl %ecx,%edx
+ movl 8(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%edx
+ movl 20(%esp),%edi
+ movl %eax,%esi
+ rorl $9,%ecx
+ movl %eax,16(%esp)
+ xorl %eax,%ecx
+ xorl %edi,%eax
+ addl 12(%esp),%edx
+ rorl $11,%ecx
+ andl %eax,%ebx
+ xorl %esi,%ecx
+ addl 48(%esp),%edx
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ addl %edx,%ebx
+ addl 28(%esp),%edx
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl (%esp),%esi
+ xorl %ecx,%edx
+ movl 4(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,28(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %ebx,%ecx
+ addl %edi,%edx
+ movl 16(%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ movl %ebx,12(%esp)
+ xorl %ebx,%ecx
+ xorl %edi,%ebx
+ addl 8(%esp),%edx
+ rorl $11,%ecx
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ addl 52(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %edx,%eax
+ addl 24(%esp),%edx
+ addl %ecx,%eax
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 28(%esp),%esi
+ xorl %ecx,%edx
+ movl (%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,24(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%edx
+ movl 12(%esp),%edi
+ movl %eax,%esi
+ rorl $9,%ecx
+ movl %eax,8(%esp)
+ xorl %eax,%ecx
+ xorl %edi,%eax
+ addl 4(%esp),%edx
+ rorl $11,%ecx
+ andl %eax,%ebx
+ xorl %esi,%ecx
+ addl 56(%esp),%edx
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ addl %edx,%ebx
+ addl 20(%esp),%edx
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 24(%esp),%esi
+ xorl %ecx,%edx
+ movl 28(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,20(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %ebx,%ecx
+ addl %edi,%edx
+ movl 8(%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ movl %ebx,4(%esp)
+ xorl %ebx,%ecx
+ xorl %edi,%ebx
+ addl (%esp),%edx
+ rorl $11,%ecx
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ addl 60(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %edx,%eax
+ addl 16(%esp),%edx
+ addl %ecx,%eax
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 20(%esp),%esi
+ xorl %ecx,%edx
+ movl 24(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,16(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%edx
+ movl 4(%esp),%edi
+ movl %eax,%esi
+ rorl $9,%ecx
+ movl %eax,(%esp)
+ xorl %eax,%ecx
+ xorl %edi,%eax
+ addl 28(%esp),%edx
+ rorl $11,%ecx
+ andl %eax,%ebx
+ xorl %esi,%ecx
+ addl 64(%esp),%edx
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ addl %edx,%ebx
+ addl 12(%esp),%edx
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 16(%esp),%esi
+ xorl %ecx,%edx
+ movl 20(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,12(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %ebx,%ecx
+ addl %edi,%edx
+ movl (%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ movl %ebx,28(%esp)
+ xorl %ebx,%ecx
+ xorl %edi,%ebx
+ addl 24(%esp),%edx
+ rorl $11,%ecx
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ addl 68(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %edx,%eax
+ addl 8(%esp),%edx
+ addl %ecx,%eax
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 12(%esp),%esi
+ xorl %ecx,%edx
+ movl 16(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,8(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%edx
+ movl 28(%esp),%edi
+ movl %eax,%esi
+ rorl $9,%ecx
+ movl %eax,24(%esp)
+ xorl %eax,%ecx
+ xorl %edi,%eax
+ addl 20(%esp),%edx
+ rorl $11,%ecx
+ andl %eax,%ebx
+ xorl %esi,%ecx
+ addl 72(%esp),%edx
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ addl %edx,%ebx
+ addl 4(%esp),%edx
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 8(%esp),%esi
+ xorl %ecx,%edx
+ movl 12(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,4(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %ebx,%ecx
+ addl %edi,%edx
+ movl 24(%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ movl %ebx,20(%esp)
+ xorl %ebx,%ecx
+ xorl %edi,%ebx
+ addl 16(%esp),%edx
+ rorl $11,%ecx
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ addl 76(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %edx,%eax
+ addl (%esp),%edx
+ addl %ecx,%eax
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 4(%esp),%esi
+ xorl %ecx,%edx
+ movl 8(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%edx
+ movl 20(%esp),%edi
+ movl %eax,%esi
+ rorl $9,%ecx
+ movl %eax,16(%esp)
+ xorl %eax,%ecx
+ xorl %edi,%eax
+ addl 12(%esp),%edx
+ rorl $11,%ecx
+ andl %eax,%ebx
+ xorl %esi,%ecx
+ addl 80(%esp),%edx
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ addl %edx,%ebx
+ addl 28(%esp),%edx
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl (%esp),%esi
+ xorl %ecx,%edx
+ movl 4(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,28(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %ebx,%ecx
+ addl %edi,%edx
+ movl 16(%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ movl %ebx,12(%esp)
+ xorl %ebx,%ecx
+ xorl %edi,%ebx
+ addl 8(%esp),%edx
+ rorl $11,%ecx
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ addl 84(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %edx,%eax
+ addl 24(%esp),%edx
+ addl %ecx,%eax
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 28(%esp),%esi
+ xorl %ecx,%edx
+ movl (%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,24(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %eax,%ecx
+ addl %edi,%edx
+ movl 12(%esp),%edi
+ movl %eax,%esi
+ rorl $9,%ecx
+ movl %eax,8(%esp)
+ xorl %eax,%ecx
+ xorl %edi,%eax
+ addl 4(%esp),%edx
+ rorl $11,%ecx
+ andl %eax,%ebx
+ xorl %esi,%ecx
+ addl 88(%esp),%edx
+ xorl %edi,%ebx
+ rorl $2,%ecx
+ addl %edx,%ebx
+ addl 20(%esp),%edx
+ addl %ecx,%ebx
+ movl %edx,%ecx
+ rorl $14,%edx
+ movl 24(%esp),%esi
+ xorl %ecx,%edx
+ movl 28(%esp),%edi
+ xorl %edi,%esi
+ rorl $5,%edx
+ andl %ecx,%esi
+ movl %ecx,20(%esp)
+ xorl %ecx,%edx
+ xorl %esi,%edi
+ rorl $6,%edx
+ movl %ebx,%ecx
+ addl %edi,%edx
+ movl 8(%esp),%edi
+ movl %ebx,%esi
+ rorl $9,%ecx
+ movl %ebx,4(%esp)
+ xorl %ebx,%ecx
+ xorl %edi,%ebx
+ addl (%esp),%edx
+ rorl $11,%ecx
+ andl %ebx,%eax
+ xorl %esi,%ecx
+ addl 92(%esp),%edx
+ xorl %edi,%eax
+ rorl $2,%ecx
+ addl %edx,%eax
+ addl 16(%esp),%edx
+ addl %ecx,%eax
+ movl 96(%esp),%esi
+ xorl %edi,%ebx
+ movl 12(%esp),%ecx
+ addl (%esi),%eax
+ addl 4(%esi),%ebx
+ addl 8(%esi),%edi
+ addl 12(%esi),%ecx
+ movl %eax,(%esi)
+ movl %ebx,4(%esi)
+ movl %edi,8(%esi)
+ movl %ecx,12(%esi)
+ movl %ebx,4(%esp)
+ xorl %edi,%ebx
+ movl %edi,8(%esp)
+ movl %ecx,12(%esp)
+ movl 20(%esp),%edi
+ movl 24(%esp),%ecx
+ addl 16(%esi),%edx
+ addl 20(%esi),%edi
+ addl 24(%esi),%ecx
+ movl %edx,16(%esi)
+ movl %edi,20(%esi)
+ movl %edi,20(%esp)
+ movl 28(%esp),%edi
+ movl %ecx,24(%esi)
+ addl 28(%esi),%edi
+ movl %ecx,24(%esp)
+ movl %edi,28(%esi)
+ movl %edi,28(%esp)
+ movl 100(%esp),%edi
+ movdqa 64(%ebp),%xmm7
+ subl $192,%ebp
+ cmpl 104(%esp),%edi
+ jb .L011grand_ssse3
+ movl 108(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
-.align 64
-.L001K256:
-.long 1116352408,1899447441,3049323471,3921009573
-.long 961987163,1508970993,2453635748,2870763221
-.long 3624381080,310598401,607225278,1426881987
-.long 1925078388,2162078206,2614888103,3248222580
-.long 3835390401,4022224774,264347078,604807628
-.long 770255983,1249150122,1555081692,1996064986
-.long 2554220882,2821834349,2952996808,3210313671
-.long 3336571891,3584528711,113926993,338241895
-.long 666307205,773529912,1294757372,1396182291
-.long 1695183700,1986661051,2177026350,2456956037
-.long 2730485921,2820302411,3259730800,3345764771
-.long 3516065817,3600352804,4094571909,275423344
-.long 430227734,506948616,659060556,883997877
-.long 958139571,1322822218,1537002063,1747873779
-.long 1955562222,2024104815,2227730452,2361852424
-.long 2428436474,2756734187,3204031479,3329325298
.size sha256_block_data_order,.-.L_sha256_block_data_order_begin
-.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97
-.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32
-.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97
-.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103
-.byte 62,0
+.comm OPENSSL_ia32cap_P,16,4
diff --git a/secure/lib/libcrypto/i386/sha512-586.s b/secure/lib/libcrypto/i386/sha512-586.s
index a37f850..2dc6f1a 100644
--- a/secure/lib/libcrypto/i386/sha512-586.s
+++ b/secure/lib/libcrypto/i386/sha512-586.s
@@ -27,249 +27,2243 @@ sha512_block_data_order:
movl %eax,8(%esp)
movl %ebx,12(%esp)
leal OPENSSL_ia32cap_P,%edx
- btl $26,(%edx)
- jnc .L002loop_x86
+ movl (%edx),%ecx
+ testl $67108864,%ecx
+ jz .L002loop_x86
+ movl 4(%edx),%edx
movq (%esi),%mm0
+ andl $16777216,%ecx
movq 8(%esi),%mm1
+ andl $512,%edx
movq 16(%esi),%mm2
+ orl %edx,%ecx
movq 24(%esi),%mm3
movq 32(%esi),%mm4
movq 40(%esi),%mm5
movq 48(%esi),%mm6
movq 56(%esi),%mm7
+ cmpl $16777728,%ecx
+ je .L003SSSE3
subl $80,%esp
+ jmp .L004loop_sse2
.align 16
-.L003loop_sse2:
+.L004loop_sse2:
movq %mm1,8(%esp)
movq %mm2,16(%esp)
movq %mm3,24(%esp)
movq %mm5,40(%esp)
movq %mm6,48(%esp)
+ pxor %mm1,%mm2
movq %mm7,56(%esp)
- movl (%edi),%ecx
- movl 4(%edi),%edx
+ movq %mm0,%mm3
+ movl (%edi),%eax
+ movl 4(%edi),%ebx
addl $8,%edi
- bswap %ecx
- bswap %edx
- movl %ecx,76(%esp)
- movl %edx,72(%esp)
+ movl $15,%edx
+ bswap %eax
+ bswap %ebx
+ jmp .L00500_14_sse2
.align 16
-.L00400_14_sse2:
+.L00500_14_sse2:
+ movd %eax,%mm1
movl (%edi),%eax
+ movd %ebx,%mm7
movl 4(%edi),%ebx
addl $8,%edi
bswap %eax
bswap %ebx
- movl %eax,68(%esp)
- movl %ebx,64(%esp)
+ punpckldq %mm1,%mm7
+ movq %mm4,%mm1
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,32(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ movq %mm3,%mm0
+ movq %mm7,72(%esp)
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 56(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ paddq (%ebp),%mm7
+ pxor %mm4,%mm3
+ movq 24(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 8(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ subl $8,%esp
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
movq 40(%esp),%mm5
+ paddq %mm2,%mm3
+ movq %mm0,%mm2
+ addl $8,%ebp
+ paddq %mm6,%mm3
movq 48(%esp),%mm6
- movq 56(%esp),%mm7
+ decl %edx
+ jnz .L00500_14_sse2
+ movd %eax,%mm1
+ movd %ebx,%mm7
+ punpckldq %mm1,%mm7
movq %mm4,%mm1
- movq %mm4,%mm2
+ pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
- psllq $23,%mm2
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ movq %mm3,%mm0
+ movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
- pxor %mm2,%mm3
- psllq $23,%mm2
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
pxor %mm1,%mm3
+ movq %mm0,(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
psrlq $23,%mm1
- pxor %mm2,%mm3
- psllq $4,%mm2
+ paddq 56(%esp),%mm7
pxor %mm1,%mm3
+ psllq $4,%mm4
paddq (%ebp),%mm7
- pxor %mm2,%mm3
+ pxor %mm4,%mm3
+ movq 24(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 8(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ subl $8,%esp
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 192(%esp),%mm7
+ paddq %mm2,%mm3
+ movq %mm0,%mm2
+ addl $8,%ebp
+ paddq %mm6,%mm3
+ pxor %mm0,%mm0
+ movl $32,%edx
+ jmp .L00616_79_sse2
+.align 16
+.L00616_79_sse2:
+ movq 88(%esp),%mm5
+ movq %mm7,%mm1
+ psrlq $1,%mm7
+ movq %mm5,%mm6
+ psrlq $6,%mm5
+ psllq $56,%mm1
+ paddq %mm3,%mm0
+ movq %mm7,%mm3
+ psrlq $6,%mm7
+ pxor %mm1,%mm3
+ psllq $7,%mm1
+ pxor %mm7,%mm3
+ psrlq $1,%mm7
+ pxor %mm1,%mm3
+ movq %mm5,%mm1
+ psrlq $13,%mm5
+ pxor %mm3,%mm7
+ psllq $3,%mm6
+ pxor %mm5,%mm1
+ paddq 200(%esp),%mm7
+ pxor %mm6,%mm1
+ psrlq $42,%mm5
+ paddq 128(%esp),%mm7
+ pxor %mm5,%mm1
+ psllq $42,%mm6
+ movq 40(%esp),%mm5
+ pxor %mm6,%mm1
+ movq 48(%esp),%mm6
+ paddq %mm1,%mm7
+ movq %mm4,%mm1
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,32(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ movq %mm7,72(%esp)
+ movq %mm1,%mm3
+ psrlq $4,%mm1
pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 56(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ paddq (%ebp),%mm7
+ pxor %mm4,%mm3
+ movq 24(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
movq 8(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ subl $8,%esp
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 192(%esp),%mm7
+ paddq %mm6,%mm2
+ addl $8,%ebp
+ movq 88(%esp),%mm5
+ movq %mm7,%mm1
+ psrlq $1,%mm7
+ movq %mm5,%mm6
+ psrlq $6,%mm5
+ psllq $56,%mm1
+ paddq %mm3,%mm2
+ movq %mm7,%mm3
+ psrlq $6,%mm7
+ pxor %mm1,%mm3
+ psllq $7,%mm1
+ pxor %mm7,%mm3
+ psrlq $1,%mm7
+ pxor %mm1,%mm3
+ movq %mm5,%mm1
+ psrlq $13,%mm5
+ pxor %mm3,%mm7
+ psllq $3,%mm6
+ pxor %mm5,%mm1
+ paddq 200(%esp),%mm7
+ pxor %mm6,%mm1
+ psrlq $42,%mm5
+ paddq 128(%esp),%mm7
+ pxor %mm5,%mm1
+ psllq $42,%mm6
+ movq 40(%esp),%mm5
+ pxor %mm6,%mm1
+ movq 48(%esp),%mm6
+ paddq %mm1,%mm7
+ movq %mm4,%mm1
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,32(%esp)
pand %mm4,%mm5
- movq 16(%esp),%mm2
+ psllq $23,%mm4
+ movq %mm7,72(%esp)
+ movq %mm1,%mm3
+ psrlq $4,%mm1
pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 56(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ paddq (%ebp),%mm7
+ pxor %mm4,%mm3
movq 24(%esp),%mm4
- paddq %mm5,%mm3
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 8(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ subl $8,%esp
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 192(%esp),%mm7
+ paddq %mm6,%mm0
+ addl $8,%ebp
+ decl %edx
+ jnz .L00616_79_sse2
+ paddq %mm3,%mm0
+ movq 8(%esp),%mm1
+ movq 24(%esp),%mm3
+ movq 40(%esp),%mm5
+ movq 48(%esp),%mm6
+ movq 56(%esp),%mm7
+ pxor %mm1,%mm2
+ paddq (%esi),%mm0
+ paddq 8(%esi),%mm1
+ paddq 16(%esi),%mm2
+ paddq 24(%esi),%mm3
+ paddq 32(%esi),%mm4
+ paddq 40(%esi),%mm5
+ paddq 48(%esi),%mm6
+ paddq 56(%esi),%mm7
+ movl $640,%eax
+ movq %mm0,(%esi)
+ movq %mm1,8(%esi)
+ movq %mm2,16(%esi)
+ movq %mm3,24(%esi)
+ movq %mm4,32(%esi)
+ movq %mm5,40(%esi)
+ movq %mm6,48(%esi)
+ movq %mm7,56(%esi)
+ leal (%esp,%eax,1),%esp
+ subl %eax,%ebp
+ cmpl 88(%esp),%edi
+ jb .L004loop_sse2
+ movl 92(%esp),%esp
+ emms
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.align 32
+.L003SSSE3:
+ leal -64(%esp),%edx
+ subl $256,%esp
+ movdqa 640(%ebp),%xmm1
+ movdqu (%edi),%xmm0
+.byte 102,15,56,0,193
+ movdqa (%ebp),%xmm3
+ movdqa %xmm1,%xmm2
+ movdqu 16(%edi),%xmm1
+ paddq %xmm0,%xmm3
+.byte 102,15,56,0,202
+ movdqa %xmm3,-128(%edx)
+ movdqa 16(%ebp),%xmm4
+ movdqa %xmm2,%xmm3
+ movdqu 32(%edi),%xmm2
+ paddq %xmm1,%xmm4
+.byte 102,15,56,0,211
+ movdqa %xmm4,-112(%edx)
+ movdqa 32(%ebp),%xmm5
+ movdqa %xmm3,%xmm4
+ movdqu 48(%edi),%xmm3
+ paddq %xmm2,%xmm5
+.byte 102,15,56,0,220
+ movdqa %xmm5,-96(%edx)
+ movdqa 48(%ebp),%xmm6
+ movdqa %xmm4,%xmm5
+ movdqu 64(%edi),%xmm4
+ paddq %xmm3,%xmm6
+.byte 102,15,56,0,229
+ movdqa %xmm6,-80(%edx)
+ movdqa 64(%ebp),%xmm7
+ movdqa %xmm5,%xmm6
+ movdqu 80(%edi),%xmm5
+ paddq %xmm4,%xmm7
+.byte 102,15,56,0,238
+ movdqa %xmm7,-64(%edx)
+ movdqa %xmm0,(%edx)
+ movdqa 80(%ebp),%xmm0
+ movdqa %xmm6,%xmm7
+ movdqu 96(%edi),%xmm6
+ paddq %xmm5,%xmm0
+.byte 102,15,56,0,247
+ movdqa %xmm0,-48(%edx)
+ movdqa %xmm1,16(%edx)
+ movdqa 96(%ebp),%xmm1
+ movdqa %xmm7,%xmm0
+ movdqu 112(%edi),%xmm7
+ paddq %xmm6,%xmm1
+.byte 102,15,56,0,248
+ movdqa %xmm1,-32(%edx)
+ movdqa %xmm2,32(%edx)
+ movdqa 112(%ebp),%xmm2
+ movdqa (%edx),%xmm0
+ paddq %xmm7,%xmm2
+ movdqa %xmm2,-16(%edx)
+ nop
+.align 32
+.L007loop_ssse3:
+ movdqa 16(%edx),%xmm2
+ movdqa %xmm3,48(%edx)
+ leal 128(%ebp),%ebp
+ movq %mm1,8(%esp)
+ movl %edi,%ebx
+ movq %mm2,16(%esp)
+ leal 128(%edi),%edi
+ movq %mm3,24(%esp)
+ cmpl %eax,%edi
+ movq %mm5,40(%esp)
+ cmovbl %edi,%ebx
+ movq %mm6,48(%esp)
+ movl $4,%ecx
+ pxor %mm1,%mm2
+ movq %mm7,56(%esp)
+ pxor %mm3,%mm3
+ jmp .L00800_47_ssse3
+.align 32
+.L00800_47_ssse3:
+ movdqa %xmm5,%xmm3
+ movdqa %xmm2,%xmm1
+.byte 102,15,58,15,208,8
+ movdqa %xmm4,(%edx)
+.byte 102,15,58,15,220,8
+ movdqa %xmm2,%xmm4
+ psrlq $7,%xmm2
+ paddq %xmm3,%xmm0
+ movdqa %xmm4,%xmm3
+ psrlq $1,%xmm4
+ psllq $56,%xmm3
+ pxor %xmm4,%xmm2
+ psrlq $7,%xmm4
+ pxor %xmm3,%xmm2
+ psllq $7,%xmm3
+ pxor %xmm4,%xmm2
+ movdqa %xmm7,%xmm4
+ pxor %xmm3,%xmm2
+ movdqa %xmm7,%xmm3
+ psrlq $6,%xmm4
+ paddq %xmm2,%xmm0
+ movdqa %xmm7,%xmm2
+ psrlq $19,%xmm3
+ psllq $3,%xmm2
+ pxor %xmm3,%xmm4
+ psrlq $42,%xmm3
+ pxor %xmm2,%xmm4
+ psllq $42,%xmm2
+ pxor %xmm3,%xmm4
+ movdqa 32(%edx),%xmm3
+ pxor %xmm2,%xmm4
+ movdqa (%ebp),%xmm2
+ movq %mm4,%mm1
+ paddq %xmm4,%xmm0
+ movq -128(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,32(%esp)
+ paddq %xmm0,%xmm2
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
movq %mm0,(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 56(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
movq %mm0,%mm6
- paddq 72(%esp),%mm3
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 8(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 32(%esp),%mm5
+ paddq %mm6,%mm2
+ movq 40(%esp),%mm6
+ movq %mm4,%mm1
+ movq -120(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,24(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,56(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 48(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 16(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
psllq $25,%mm6
+ movq (%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 24(%esp),%mm5
+ paddq %mm6,%mm0
+ movq 32(%esp),%mm6
+ movdqa %xmm2,-128(%edx)
+ movdqa %xmm6,%xmm4
+ movdqa %xmm3,%xmm2
+.byte 102,15,58,15,217,8
+ movdqa %xmm5,16(%edx)
+.byte 102,15,58,15,229,8
+ movdqa %xmm3,%xmm5
+ psrlq $7,%xmm3
+ paddq %xmm4,%xmm1
+ movdqa %xmm5,%xmm4
+ psrlq $1,%xmm5
+ psllq $56,%xmm4
+ pxor %xmm5,%xmm3
+ psrlq $7,%xmm5
+ pxor %xmm4,%xmm3
+ psllq $7,%xmm4
+ pxor %xmm5,%xmm3
+ movdqa %xmm0,%xmm5
+ pxor %xmm4,%xmm3
+ movdqa %xmm0,%xmm4
+ psrlq $6,%xmm5
+ paddq %xmm3,%xmm1
+ movdqa %xmm0,%xmm3
+ psrlq $19,%xmm4
+ psllq $3,%xmm3
+ pxor %xmm4,%xmm5
+ psrlq $42,%xmm4
+ pxor %xmm3,%xmm5
+ psllq $42,%xmm3
+ pxor %xmm4,%xmm5
+ movdqa 48(%edx),%xmm4
+ pxor %xmm3,%xmm5
+ movdqa 16(%ebp),%xmm3
+ movq %mm4,%mm1
+ paddq %xmm5,%xmm1
+ movq -112(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,16(%esp)
+ paddq %xmm1,%xmm3
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,48(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 40(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 8(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
+ pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
+ pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
- subl $8,%esp
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 16(%esp),%mm5
+ paddq %mm6,%mm2
+ movq 24(%esp),%mm6
+ movq %mm4,%mm1
+ movq -104(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,8(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,40(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 32(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq (%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 48(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 8(%esp),%mm5
+ paddq %mm6,%mm0
+ movq 16(%esp),%mm6
+ movdqa %xmm3,-112(%edx)
+ movdqa %xmm7,%xmm5
+ movdqa %xmm4,%xmm3
+.byte 102,15,58,15,226,8
+ movdqa %xmm6,32(%edx)
+.byte 102,15,58,15,238,8
+ movdqa %xmm4,%xmm6
+ psrlq $7,%xmm4
+ paddq %xmm5,%xmm2
+ movdqa %xmm6,%xmm5
+ psrlq $1,%xmm6
+ psllq $56,%xmm5
+ pxor %xmm6,%xmm4
+ psrlq $7,%xmm6
+ pxor %xmm5,%xmm4
+ psllq $7,%xmm5
+ pxor %xmm6,%xmm4
+ movdqa %xmm1,%xmm6
+ pxor %xmm5,%xmm4
+ movdqa %xmm1,%xmm5
+ psrlq $6,%xmm6
+ paddq %xmm4,%xmm2
+ movdqa %xmm1,%xmm4
+ psrlq $19,%xmm5
+ psllq $3,%xmm4
+ pxor %xmm5,%xmm6
+ psrlq $42,%xmm5
+ pxor %xmm4,%xmm6
+ psllq $42,%xmm4
+ pxor %xmm5,%xmm6
+ movdqa (%edx),%xmm5
+ pxor %xmm4,%xmm6
+ movdqa 32(%ebp),%xmm4
+ movq %mm4,%mm1
+ paddq %xmm6,%xmm2
+ movq -96(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,(%esp)
+ paddq %xmm2,%xmm4
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,32(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 24(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 56(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 40(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq (%esp),%mm5
+ paddq %mm6,%mm2
+ movq 8(%esp),%mm6
+ movq %mm4,%mm1
+ movq -88(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,56(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,24(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 16(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 48(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 32(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 56(%esp),%mm5
+ paddq %mm6,%mm0
+ movq (%esp),%mm6
+ movdqa %xmm4,-96(%edx)
+ movdqa %xmm0,%xmm6
+ movdqa %xmm5,%xmm4
+.byte 102,15,58,15,235,8
+ movdqa %xmm7,48(%edx)
+.byte 102,15,58,15,247,8
+ movdqa %xmm5,%xmm7
+ psrlq $7,%xmm5
+ paddq %xmm6,%xmm3
+ movdqa %xmm7,%xmm6
+ psrlq $1,%xmm7
+ psllq $56,%xmm6
+ pxor %xmm7,%xmm5
+ psrlq $7,%xmm7
+ pxor %xmm6,%xmm5
+ psllq $7,%xmm6
+ pxor %xmm7,%xmm5
+ movdqa %xmm2,%xmm7
+ pxor %xmm6,%xmm5
+ movdqa %xmm2,%xmm6
+ psrlq $6,%xmm7
+ paddq %xmm5,%xmm3
+ movdqa %xmm2,%xmm5
+ psrlq $19,%xmm6
+ psllq $3,%xmm5
+ pxor %xmm6,%xmm7
+ psrlq $42,%xmm6
+ pxor %xmm5,%xmm7
+ psllq $42,%xmm5
+ pxor %xmm6,%xmm7
+ movdqa 16(%edx),%xmm6
+ pxor %xmm5,%xmm7
+ movdqa 48(%ebp),%xmm5
+ movq %mm4,%mm1
+ paddq %xmm7,%xmm3
+ movq -80(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,48(%esp)
+ paddq %xmm3,%xmm5
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,16(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 8(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 40(%esp),%mm4
+ paddq %mm7,%mm3
movq %mm0,%mm5
- por %mm2,%mm0
- pand %mm2,%mm5
- pand %mm1,%mm0
- por %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 24(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 48(%esp),%mm5
+ paddq %mm6,%mm2
+ movq 56(%esp),%mm6
+ movq %mm4,%mm1
+ movq -72(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,40(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,8(%esp)
paddq %mm5,%mm7
- movq %mm3,%mm0
- movb (%ebp),%dl
- paddq %mm7,%mm0
- addl $8,%ebp
- cmpb $53,%dl
- jne .L00400_14_sse2
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq (%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 32(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 16(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
movq 40(%esp),%mm5
+ paddq %mm6,%mm0
movq 48(%esp),%mm6
- movq 56(%esp),%mm7
+ movdqa %xmm5,-80(%edx)
+ movdqa %xmm1,%xmm7
+ movdqa %xmm6,%xmm5
+.byte 102,15,58,15,244,8
+ movdqa %xmm0,(%edx)
+.byte 102,15,58,15,248,8
+ movdqa %xmm6,%xmm0
+ psrlq $7,%xmm6
+ paddq %xmm7,%xmm4
+ movdqa %xmm0,%xmm7
+ psrlq $1,%xmm0
+ psllq $56,%xmm7
+ pxor %xmm0,%xmm6
+ psrlq $7,%xmm0
+ pxor %xmm7,%xmm6
+ psllq $7,%xmm7
+ pxor %xmm0,%xmm6
+ movdqa %xmm3,%xmm0
+ pxor %xmm7,%xmm6
+ movdqa %xmm3,%xmm7
+ psrlq $6,%xmm0
+ paddq %xmm6,%xmm4
+ movdqa %xmm3,%xmm6
+ psrlq $19,%xmm7
+ psllq $3,%xmm6
+ pxor %xmm7,%xmm0
+ psrlq $42,%xmm7
+ pxor %xmm6,%xmm0
+ psllq $42,%xmm6
+ pxor %xmm7,%xmm0
+ movdqa 32(%edx),%xmm7
+ pxor %xmm6,%xmm0
+ movdqa 64(%ebp),%xmm6
movq %mm4,%mm1
- movq %mm4,%mm2
+ paddq %xmm0,%xmm4
+ movq -64(%edx),%mm7
+ pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
- psllq $23,%mm2
+ paddq %xmm4,%xmm6
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
- pxor %mm2,%mm3
- psllq $23,%mm2
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
pxor %mm1,%mm3
+ movq %mm0,(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
psrlq $23,%mm1
- pxor %mm2,%mm3
- psllq $4,%mm2
+ paddq 56(%esp),%mm7
pxor %mm1,%mm3
- paddq (%ebp),%mm7
- pxor %mm2,%mm3
- pxor %mm6,%mm5
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 24(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
movq 8(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 32(%esp),%mm5
+ paddq %mm6,%mm2
+ movq 40(%esp),%mm6
+ movq %mm4,%mm1
+ movq -56(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,24(%esp)
pand %mm4,%mm5
- movq 16(%esp),%mm2
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
pxor %mm6,%mm5
- movq 24(%esp),%mm4
- paddq %mm5,%mm3
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,56(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 48(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 16(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq (%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 24(%esp),%mm5
+ paddq %mm6,%mm0
+ movq 32(%esp),%mm6
+ movdqa %xmm6,-64(%edx)
+ movdqa %xmm2,%xmm0
+ movdqa %xmm7,%xmm6
+.byte 102,15,58,15,253,8
+ movdqa %xmm1,16(%edx)
+.byte 102,15,58,15,193,8
+ movdqa %xmm7,%xmm1
+ psrlq $7,%xmm7
+ paddq %xmm0,%xmm5
+ movdqa %xmm1,%xmm0
+ psrlq $1,%xmm1
+ psllq $56,%xmm0
+ pxor %xmm1,%xmm7
+ psrlq $7,%xmm1
+ pxor %xmm0,%xmm7
+ psllq $7,%xmm0
+ pxor %xmm1,%xmm7
+ movdqa %xmm4,%xmm1
+ pxor %xmm0,%xmm7
+ movdqa %xmm4,%xmm0
+ psrlq $6,%xmm1
+ paddq %xmm7,%xmm5
+ movdqa %xmm4,%xmm7
+ psrlq $19,%xmm0
+ psllq $3,%xmm7
+ pxor %xmm0,%xmm1
+ psrlq $42,%xmm0
+ pxor %xmm7,%xmm1
+ psllq $42,%xmm7
+ pxor %xmm0,%xmm1
+ movdqa 48(%edx),%xmm0
+ pxor %xmm7,%xmm1
+ movdqa 80(%ebp),%xmm7
+ movq %mm4,%mm1
+ paddq %xmm1,%xmm5
+ movq -48(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,16(%esp)
+ paddq %xmm5,%xmm7
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,48(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 40(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 8(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 56(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 16(%esp),%mm5
+ paddq %mm6,%mm2
+ movq 24(%esp),%mm6
+ movq %mm4,%mm1
+ movq -40(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,8(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,40(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 32(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq (%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 48(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 8(%esp),%mm5
+ paddq %mm6,%mm0
+ movq 16(%esp),%mm6
+ movdqa %xmm7,-48(%edx)
+ movdqa %xmm3,%xmm1
+ movdqa %xmm0,%xmm7
+.byte 102,15,58,15,198,8
+ movdqa %xmm2,32(%edx)
+.byte 102,15,58,15,202,8
+ movdqa %xmm0,%xmm2
+ psrlq $7,%xmm0
+ paddq %xmm1,%xmm6
+ movdqa %xmm2,%xmm1
+ psrlq $1,%xmm2
+ psllq $56,%xmm1
+ pxor %xmm2,%xmm0
+ psrlq $7,%xmm2
+ pxor %xmm1,%xmm0
+ psllq $7,%xmm1
+ pxor %xmm2,%xmm0
+ movdqa %xmm5,%xmm2
+ pxor %xmm1,%xmm0
+ movdqa %xmm5,%xmm1
+ psrlq $6,%xmm2
+ paddq %xmm0,%xmm6
+ movdqa %xmm5,%xmm0
+ psrlq $19,%xmm1
+ psllq $3,%xmm0
+ pxor %xmm1,%xmm2
+ psrlq $42,%xmm1
+ pxor %xmm0,%xmm2
+ psllq $42,%xmm0
+ pxor %xmm1,%xmm2
+ movdqa (%edx),%xmm1
+ pxor %xmm0,%xmm2
+ movdqa 96(%ebp),%xmm0
+ movq %mm4,%mm1
+ paddq %xmm2,%xmm6
+ movq -32(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,(%esp)
+ paddq %xmm6,%xmm0
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,32(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 24(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 56(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 40(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq (%esp),%mm5
+ paddq %mm6,%mm2
+ movq 8(%esp),%mm6
+ movq %mm4,%mm1
+ movq -24(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,56(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,24(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 16(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 48(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 32(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 56(%esp),%mm5
+ paddq %mm6,%mm0
+ movq (%esp),%mm6
+ movdqa %xmm0,-32(%edx)
+ movdqa %xmm4,%xmm2
+ movdqa %xmm1,%xmm0
+.byte 102,15,58,15,207,8
+ movdqa %xmm3,48(%edx)
+.byte 102,15,58,15,211,8
+ movdqa %xmm1,%xmm3
+ psrlq $7,%xmm1
+ paddq %xmm2,%xmm7
+ movdqa %xmm3,%xmm2
+ psrlq $1,%xmm3
+ psllq $56,%xmm2
+ pxor %xmm3,%xmm1
+ psrlq $7,%xmm3
+ pxor %xmm2,%xmm1
+ psllq $7,%xmm2
+ pxor %xmm3,%xmm1
+ movdqa %xmm6,%xmm3
+ pxor %xmm2,%xmm1
+ movdqa %xmm6,%xmm2
+ psrlq $6,%xmm3
+ paddq %xmm1,%xmm7
+ movdqa %xmm6,%xmm1
+ psrlq $19,%xmm2
+ psllq $3,%xmm1
+ pxor %xmm2,%xmm3
+ psrlq $42,%xmm2
+ pxor %xmm1,%xmm3
+ psllq $42,%xmm1
+ pxor %xmm2,%xmm3
+ movdqa 16(%edx),%xmm2
+ pxor %xmm1,%xmm3
+ movdqa 112(%ebp),%xmm1
+ movq %mm4,%mm1
+ paddq %xmm3,%xmm7
+ movq -16(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,48(%esp)
+ paddq %xmm7,%xmm1
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,16(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 8(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 40(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 24(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 48(%esp),%mm5
+ paddq %mm6,%mm2
+ movq 56(%esp),%mm6
+ movq %mm4,%mm1
+ movq -8(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,40(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,8(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq (%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 32(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 16(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 40(%esp),%mm5
+ paddq %mm6,%mm0
+ movq 48(%esp),%mm6
+ movdqa %xmm1,-16(%edx)
+ leal 128(%ebp),%ebp
+ decl %ecx
+ jnz .L00800_47_ssse3
+ movdqa (%ebp),%xmm1
+ leal -640(%ebp),%ebp
+ movdqu (%ebx),%xmm0
+.byte 102,15,56,0,193
+ movdqa (%ebp),%xmm3
+ movdqa %xmm1,%xmm2
+ movdqu 16(%ebx),%xmm1
+ paddq %xmm0,%xmm3
+.byte 102,15,56,0,202
+ movq %mm4,%mm1
+ movq -128(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,32(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
movq %mm0,(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 56(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
movq %mm0,%mm6
- paddq 72(%esp),%mm3
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 8(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 32(%esp),%mm5
+ paddq %mm6,%mm2
+ movq 40(%esp),%mm6
+ movq %mm4,%mm1
+ movq -120(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,24(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,56(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 48(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 16(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
psllq $25,%mm6
+ movq (%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 24(%esp),%mm5
+ paddq %mm6,%mm0
+ movq 32(%esp),%mm6
+ movdqa %xmm3,-128(%edx)
+ movdqa 16(%ebp),%xmm4
+ movdqa %xmm2,%xmm3
+ movdqu 32(%ebx),%xmm2
+ paddq %xmm1,%xmm4
+.byte 102,15,56,0,211
+ movq %mm4,%mm1
+ movq -112(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,16(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,48(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 40(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 8(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
+ pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
+ pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
- subl $8,%esp
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 16(%esp),%mm5
+ paddq %mm6,%mm2
+ movq 24(%esp),%mm6
+ movq %mm4,%mm1
+ movq -104(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,8(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,40(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 32(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq (%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 48(%esp),%mm1
+ psrlq $6,%mm5
pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 8(%esp),%mm5
+ paddq %mm6,%mm0
+ movq 16(%esp),%mm6
+ movdqa %xmm4,-112(%edx)
+ movdqa 32(%ebp),%xmm5
+ movdqa %xmm3,%xmm4
+ movdqu 48(%ebx),%xmm3
+ paddq %xmm2,%xmm5
+.byte 102,15,56,0,220
+ movq %mm4,%mm1
+ movq -96(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,32(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 24(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 56(%esp),%mm4
+ paddq %mm7,%mm3
movq %mm0,%mm5
- por %mm2,%mm0
- movq 88(%esp),%mm6
- pand %mm2,%mm5
- pand %mm1,%mm0
- movq 192(%esp),%mm2
- por %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 40(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq (%esp),%mm5
+ paddq %mm6,%mm2
+ movq 8(%esp),%mm6
+ movq %mm4,%mm1
+ movq -88(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,56(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,24(%esp)
paddq %mm5,%mm7
- movq %mm3,%mm0
- movb (%ebp),%dl
- paddq %mm7,%mm0
- addl $8,%ebp
-.align 16
-.L00516_79_sse2:
- movq %mm2,%mm1
- psrlq $1,%mm2
- movq %mm6,%mm7
- psrlq $6,%mm6
- movq %mm2,%mm3
- psrlq $6,%mm2
- movq %mm6,%mm5
- psrlq $13,%mm6
- pxor %mm2,%mm3
- psrlq $1,%mm2
- pxor %mm6,%mm5
- psrlq $42,%mm6
- pxor %mm2,%mm3
- movq 200(%esp),%mm2
- psllq $56,%mm1
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 16(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 48(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 32(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 56(%esp),%mm5
+ paddq %mm6,%mm0
+ movq (%esp),%mm6
+ movdqa %xmm5,-96(%edx)
+ movdqa 48(%ebp),%xmm6
+ movdqa %xmm4,%xmm5
+ movdqu 64(%ebx),%xmm4
+ paddq %xmm3,%xmm6
+.byte 102,15,56,0,229
+ movq %mm4,%mm1
+ movq -80(%edx),%mm7
pxor %mm6,%mm5
- psllq $3,%mm7
+ psrlq $14,%mm1
+ movq %mm4,48(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
pxor %mm1,%mm3
- paddq 128(%esp),%mm2
- psllq $7,%mm1
- pxor %mm7,%mm5
- psllq $42,%mm7
+ movq %mm0,16(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 8(%esp),%mm7
pxor %mm1,%mm3
- pxor %mm7,%mm5
- paddq %mm5,%mm3
- paddq %mm2,%mm3
- movq %mm3,72(%esp)
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 40(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 24(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 48(%esp),%mm5
+ paddq %mm6,%mm2
+ movq 56(%esp),%mm6
+ movq %mm4,%mm1
+ movq -72(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,40(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,8(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq (%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 32(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 16(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
movq 40(%esp),%mm5
+ paddq %mm6,%mm0
movq 48(%esp),%mm6
- movq 56(%esp),%mm7
+ movdqa %xmm6,-80(%edx)
+ movdqa 64(%ebp),%xmm7
+ movdqa %xmm5,%xmm6
+ movdqu 80(%ebx),%xmm5
+ paddq %xmm4,%xmm7
+.byte 102,15,56,0,238
movq %mm4,%mm1
- movq %mm4,%mm2
+ movq -64(%edx),%mm7
+ pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
- psllq $23,%mm2
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
- pxor %mm2,%mm3
- psllq $23,%mm2
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
pxor %mm1,%mm3
+ movq %mm0,(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
psrlq $23,%mm1
- pxor %mm2,%mm3
- psllq $4,%mm2
+ paddq 56(%esp),%mm7
pxor %mm1,%mm3
- paddq (%ebp),%mm7
- pxor %mm2,%mm3
- pxor %mm6,%mm5
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 24(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
movq 8(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 32(%esp),%mm5
+ paddq %mm6,%mm2
+ movq 40(%esp),%mm6
+ movq %mm4,%mm1
+ movq -56(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,24(%esp)
pand %mm4,%mm5
- movq 16(%esp),%mm2
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
pxor %mm6,%mm5
- movq 24(%esp),%mm4
- paddq %mm5,%mm3
- movq %mm0,(%esp)
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,56(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 48(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 16(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq (%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 24(%esp),%mm5
+ paddq %mm6,%mm0
+ movq 32(%esp),%mm6
+ movdqa %xmm7,-64(%edx)
+ movdqa %xmm0,(%edx)
+ movdqa 80(%ebp),%xmm0
+ movdqa %xmm6,%xmm7
+ movdqu 96(%ebx),%xmm6
+ paddq %xmm5,%xmm0
+.byte 102,15,56,0,247
+ movq %mm4,%mm1
+ movq -48(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,16(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,48(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 40(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
movq %mm0,%mm6
- paddq 72(%esp),%mm3
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 56(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 16(%esp),%mm5
+ paddq %mm6,%mm2
+ movq 24(%esp),%mm6
+ movq %mm4,%mm1
+ movq -40(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,8(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,40(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 32(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq (%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
psllq $25,%mm6
+ movq 48(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 8(%esp),%mm5
+ paddq %mm6,%mm0
+ movq 16(%esp),%mm6
+ movdqa %xmm0,-48(%edx)
+ movdqa %xmm1,16(%edx)
+ movdqa 96(%ebp),%xmm1
+ movdqa %xmm7,%xmm0
+ movdqu 112(%ebx),%xmm7
+ paddq %xmm6,%xmm1
+.byte 102,15,56,0,248
+ movq %mm4,%mm1
+ movq -32(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,32(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 24(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 56(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
+ pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
+ pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
- subl $8,%esp
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq (%esp),%mm5
+ paddq %mm6,%mm2
+ movq 8(%esp),%mm6
+ movq %mm4,%mm1
+ movq -24(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,56(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,24(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 16(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 48(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 32(%esp),%mm1
+ psrlq $6,%mm5
pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
+ movq 56(%esp),%mm5
+ paddq %mm6,%mm0
+ movq (%esp),%mm6
+ movdqa %xmm1,-32(%edx)
+ movdqa %xmm2,32(%edx)
+ movdqa 112(%ebp),%xmm2
+ movdqa (%edx),%xmm0
+ paddq %xmm7,%xmm2
+ movq %mm4,%mm1
+ movq -16(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,48(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm0
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm0,16(%esp)
+ paddq %mm5,%mm7
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq 8(%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 40(%esp),%mm4
+ paddq %mm7,%mm3
movq %mm0,%mm5
- por %mm2,%mm0
- movq 88(%esp),%mm6
- pand %mm2,%mm5
- pand %mm1,%mm0
- movq 192(%esp),%mm2
- por %mm0,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm0,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 24(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm0,%mm2
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ pxor %mm7,%mm6
+ movq 48(%esp),%mm5
+ paddq %mm6,%mm2
+ movq 56(%esp),%mm6
+ movq %mm4,%mm1
+ movq -8(%edx),%mm7
+ pxor %mm6,%mm5
+ psrlq $14,%mm1
+ movq %mm4,40(%esp)
+ pand %mm4,%mm5
+ psllq $23,%mm4
+ paddq %mm3,%mm2
+ movq %mm1,%mm3
+ psrlq $4,%mm1
+ pxor %mm6,%mm5
+ pxor %mm4,%mm3
+ psllq $23,%mm4
+ pxor %mm1,%mm3
+ movq %mm2,8(%esp)
paddq %mm5,%mm7
- movq %mm3,%mm0
- movb (%ebp),%dl
- paddq %mm7,%mm0
- addl $8,%ebp
- cmpb $23,%dl
- jne .L00516_79_sse2
- movq 8(%esp),%mm1
- movq 16(%esp),%mm2
- movq 24(%esp),%mm3
+ pxor %mm4,%mm3
+ psrlq $23,%mm1
+ paddq (%esp),%mm7
+ pxor %mm1,%mm3
+ psllq $4,%mm4
+ pxor %mm4,%mm3
+ movq 32(%esp),%mm4
+ paddq %mm7,%mm3
+ movq %mm2,%mm5
+ psrlq $28,%mm5
+ paddq %mm3,%mm4
+ movq %mm2,%mm6
+ movq %mm5,%mm7
+ psllq $25,%mm6
+ movq 16(%esp),%mm1
+ psrlq $6,%mm5
+ pxor %mm6,%mm7
+ psllq $5,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm2
+ psrlq $5,%mm5
+ pxor %mm6,%mm7
+ pand %mm2,%mm0
+ psllq $6,%mm6
+ pxor %mm5,%mm7
+ pxor %mm1,%mm0
+ pxor %mm7,%mm6
movq 40(%esp),%mm5
+ paddq %mm6,%mm0
movq 48(%esp),%mm6
+ movdqa %xmm2,-16(%edx)
+ movq 8(%esp),%mm1
+ paddq %mm3,%mm0
+ movq 24(%esp),%mm3
movq 56(%esp),%mm7
+ pxor %mm1,%mm2
paddq (%esi),%mm0
paddq 8(%esi),%mm1
paddq 16(%esi),%mm2
@@ -286,12 +2280,10 @@ sha512_block_data_order:
movq %mm5,40(%esi)
movq %mm6,48(%esi)
movq %mm7,56(%esi)
- addl $640,%esp
- subl $640,%ebp
- cmpl 88(%esp),%edi
- jb .L003loop_sse2
+ cmpl %eax,%edi
+ jb .L007loop_ssse3
+ movl 76(%edx),%esp
emms
- movl 92(%esp),%esp
popl %edi
popl %esi
popl %ebx
@@ -402,7 +2394,7 @@ sha512_block_data_order:
movl $16,%ecx
.long 2784229001
.align 16
-.L00600_15_x86:
+.L00900_15_x86:
movl 40(%esp),%ecx
movl 44(%esp),%edx
movl %ecx,%esi
@@ -509,9 +2501,9 @@ sha512_block_data_order:
subl $8,%esp
leal 8(%ebp),%ebp
cmpb $148,%dl
- jne .L00600_15_x86
+ jne .L00900_15_x86
.align 16
-.L00716_79_x86:
+.L01016_79_x86:
movl 312(%esp),%ecx
movl 316(%esp),%edx
movl %ecx,%esi
@@ -684,7 +2676,7 @@ sha512_block_data_order:
subl $8,%esp
leal 8(%ebp),%ebp
cmpb $23,%dl
- jne .L00716_79_x86
+ jne .L01016_79_x86
movl 840(%esp),%esi
movl 844(%esp),%edi
movl (%esi),%eax
@@ -827,10 +2819,12 @@ sha512_block_data_order:
.long 4234509866,1501505948
.long 987167468,1607167915
.long 1246189591,1816402316
+.long 67438087,66051
+.long 202182159,134810123
.size sha512_block_data_order,.-.L_sha512_block_data_order_begin
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97
.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32
.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97
.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103
.byte 62,0
-.comm OPENSSL_ia32cap_P,8,4
+.comm OPENSSL_ia32cap_P,16,4
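The psrlq/psllq/pxor chains in the sha512-586.s hunks above are the SHA-512
big-sigma functions evaluated in MMX registers: the right-shift pairs sum to
rotations of 14/18/41 and 28/34/39, and the matching left shifts supply the
wrapped-around bits. A hedged C sketch of what each chain computes (rotr is an
illustrative helper, not from the source):

    #include <stdint.h>

    static inline uint64_t rotr(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }

    /* psrlq $28/$6/$5 with psllq $25/$5/$6: rotations by 28, 34, 39 */
    static inline uint64_t Sigma0(uint64_t a)
    {
        return rotr(a, 28) ^ rotr(a, 34) ^ rotr(a, 39);
    }

    /* psrlq $14/$4/$23 with psllq $23/$23/$4: rotations by 14, 18, 41 */
    static inline uint64_t Sigma1(uint64_t e)
    {
        return rotr(e, 14) ^ rotr(e, 18) ^ rotr(e, 41);
    }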
diff --git a/secure/lib/libcrypto/i386/vpaes-x86.s b/secure/lib/libcrypto/i386/vpaes-x86.s
index 7264297..e1dda0f 100644
--- a/secure/lib/libcrypto/i386/vpaes-x86.s
+++ b/secure/lib/libcrypto/i386/vpaes-x86.s
@@ -74,33 +74,33 @@ _vpaes_encrypt_core:
movdqa %xmm6,%xmm1
movdqa (%ebp),%xmm2
pandn %xmm0,%xmm1
- movdqu (%edx),%xmm5
- psrld $4,%xmm1
pand %xmm6,%xmm0
+ movdqu (%edx),%xmm5
.byte 102,15,56,0,208
movdqa 16(%ebp),%xmm0
-.byte 102,15,56,0,193
pxor %xmm5,%xmm2
- pxor %xmm2,%xmm0
+ psrld $4,%xmm1
addl $16,%edx
+.byte 102,15,56,0,193
leal 192(%ebp),%ebx
+ pxor %xmm2,%xmm0
jmp .L000enc_entry
.align 16
.L001enc_loop:
movdqa 32(%ebp),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm5,%xmm4
movdqa 48(%ebp),%xmm0
+.byte 102,15,56,0,226
.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
+ pxor %xmm5,%xmm4
movdqa 64(%ebp),%xmm5
-.byte 102,15,56,0,234
+ pxor %xmm4,%xmm0
movdqa -64(%ebx,%ecx,1),%xmm1
+.byte 102,15,56,0,234
movdqa 80(%ebp),%xmm2
-.byte 102,15,56,0,211
- pxor %xmm5,%xmm2
movdqa (%ebx,%ecx,1),%xmm4
+.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
+ pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addl $16,%edx
pxor %xmm2,%xmm0
@@ -109,28 +109,28 @@ _vpaes_encrypt_core:
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andl $48,%ecx
- pxor %xmm3,%xmm0
subl $1,%eax
+ pxor %xmm3,%xmm0
.L000enc_entry:
movdqa %xmm6,%xmm1
+ movdqa -32(%ebp),%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm6,%xmm0
- movdqa -32(%ebp),%xmm5
.byte 102,15,56,0,232
- pxor %xmm1,%xmm0
movdqa %xmm7,%xmm3
+ pxor %xmm1,%xmm0
.byte 102,15,56,0,217
- pxor %xmm5,%xmm3
movdqa %xmm7,%xmm4
+ pxor %xmm5,%xmm3
.byte 102,15,56,0,224
- pxor %xmm5,%xmm4
movdqa %xmm7,%xmm2
+ pxor %xmm5,%xmm4
.byte 102,15,56,0,211
- pxor %xmm0,%xmm2
movdqa %xmm7,%xmm3
- movdqu (%edx),%xmm5
+ pxor %xmm0,%xmm2
.byte 102,15,56,0,220
+ movdqu (%edx),%xmm5
pxor %xmm1,%xmm3
jnz .L001enc_loop
movdqa 96(%ebp),%xmm4
@@ -146,8 +146,8 @@ _vpaes_encrypt_core:
.type _vpaes_decrypt_core,@function
.align 16
_vpaes_decrypt_core:
- movl 240(%edx),%eax
leal 608(%ebp),%ebx
+ movl 240(%edx),%eax
movdqa %xmm6,%xmm1
movdqa -64(%ebx),%xmm2
pandn %xmm0,%xmm1
@@ -170,56 +170,56 @@ _vpaes_decrypt_core:
.align 16
.L003dec_loop:
movdqa -32(%ebx),%xmm4
+ movdqa -16(%ebx),%xmm1
.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa -16(%ebx),%xmm0
-.byte 102,15,56,0,195
+.byte 102,15,56,0,203
pxor %xmm4,%xmm0
- addl $16,%edx
-.byte 102,15,56,0,197
movdqa (%ebx),%xmm4
+ pxor %xmm1,%xmm0
+ movdqa 16(%ebx),%xmm1
.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 16(%ebx),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
- subl $1,%eax
.byte 102,15,56,0,197
+.byte 102,15,56,0,203
+ pxor %xmm4,%xmm0
movdqa 32(%ebx),%xmm4
+ pxor %xmm1,%xmm0
+ movdqa 48(%ebx),%xmm1
.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 48(%ebx),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
.byte 102,15,56,0,197
+.byte 102,15,56,0,203
+ pxor %xmm4,%xmm0
movdqa 64(%ebx),%xmm4
+ pxor %xmm1,%xmm0
+ movdqa 80(%ebx),%xmm1
.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 80(%ebx),%xmm0
-.byte 102,15,56,0,195
+.byte 102,15,56,0,197
+.byte 102,15,56,0,203
pxor %xmm4,%xmm0
+ addl $16,%edx
.byte 102,15,58,15,237,12
+ pxor %xmm1,%xmm0
+ subl $1,%eax
.L002dec_entry:
movdqa %xmm6,%xmm1
+ movdqa -32(%ebp),%xmm2
pandn %xmm0,%xmm1
- psrld $4,%xmm1
pand %xmm6,%xmm0
- movdqa -32(%ebp),%xmm2
+ psrld $4,%xmm1
.byte 102,15,56,0,208
- pxor %xmm1,%xmm0
movdqa %xmm7,%xmm3
+ pxor %xmm1,%xmm0
.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
movdqa %xmm7,%xmm4
+ pxor %xmm2,%xmm3
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm7,%xmm2
.byte 102,15,56,0,211
- pxor %xmm0,%xmm2
movdqa %xmm7,%xmm3
+ pxor %xmm0,%xmm2
.byte 102,15,56,0,220
- pxor %xmm1,%xmm3
movdqu (%edx),%xmm0
+ pxor %xmm1,%xmm3
jnz .L003dec_loop
movdqa 96(%ebx),%xmm4
.byte 102,15,56,0,226
@@ -328,12 +328,12 @@ _vpaes_schedule_core:
.type _vpaes_schedule_192_smear,@function
.align 16
_vpaes_schedule_192_smear:
- pshufd $128,%xmm6,%xmm0
- pxor %xmm0,%xmm6
+ pshufd $128,%xmm6,%xmm1
pshufd $254,%xmm7,%xmm0
+ pxor %xmm1,%xmm6
+ pxor %xmm1,%xmm1
pxor %xmm0,%xmm6
movdqa %xmm6,%xmm0
- pxor %xmm1,%xmm1
movhlps %xmm1,%xmm6
ret
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
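The _vpaes_schedule_192_smear hunk only reorders the shuffle/xor sequence so
the two pshufd results land in separate registers before being folded in; the
value computed is unchanged. A hedged SSE2-intrinsics rendering of the
rewritten sequence (names are illustrative, not from the source):

    #include <emmintrin.h>

    /* fold two broadcasts into x6, publish the result, then clear the
     * low quadword of x6 for the next smear round */
    static __m128i smear192(__m128i *x6, __m128i x7)
    {
        __m128i t1  = _mm_shuffle_epi32(*x6, 0x80);  /* pshufd $128 */
        __m128i t0  = _mm_shuffle_epi32(x7, 0xFE);   /* pshufd $254 */
        __m128i z   = _mm_setzero_si128();           /* pxor %xmm1,%xmm1 */
        __m128i out;
        *x6 = _mm_xor_si128(_mm_xor_si128(*x6, t1), t0);
        out = *x6;                                   /* movdqa %xmm6,%xmm0 */
        /* movhlps %xmm1,%xmm6: low qword of x6 := high qword of z (= 0) */
        *x6 = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(*x6),
                                             _mm_castsi128_ps(z)));
        return out;
    }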
diff --git a/secure/lib/libcrypto/i386/wp-mmx.s b/secure/lib/libcrypto/i386/wp-mmx.s
index c0a42fd..6ceae96 100644
--- a/secure/lib/libcrypto/i386/wp-mmx.s
+++ b/secure/lib/libcrypto/i386/wp-mmx.s
@@ -67,228 +67,230 @@ whirlpool_block_mmx:
movq 4096(%ebp,%esi,8),%mm0
movl (%esp),%eax
movl 4(%esp),%ebx
- movb %al,%cl
- movb %ah,%dl
+ movzbl %al,%ecx
+ movzbl %ah,%edx
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm0
movq 7(%ebp,%edi,8),%mm1
- movb %al,%cl
- movb %ah,%dl
movl 8(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
movq 6(%ebp,%esi,8),%mm2
movq 5(%ebp,%edi,8),%mm3
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
movq 4(%ebp,%esi,8),%mm4
movq 3(%ebp,%edi,8),%mm5
- movb %bl,%cl
- movb %bh,%dl
movl 12(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
movq 2(%ebp,%esi,8),%mm6
movq 1(%ebp,%edi,8),%mm7
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm1
pxor 7(%ebp,%edi,8),%mm2
- movb %al,%cl
- movb %ah,%dl
movl 16(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm3
pxor 5(%ebp,%edi,8),%mm4
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm5
pxor 3(%ebp,%edi,8),%mm6
- movb %bl,%cl
- movb %bh,%dl
movl 20(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm7
pxor 1(%ebp,%edi,8),%mm0
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm2
pxor 7(%ebp,%edi,8),%mm3
- movb %al,%cl
- movb %ah,%dl
movl 24(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm4
pxor 5(%ebp,%edi,8),%mm5
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm6
pxor 3(%ebp,%edi,8),%mm7
- movb %bl,%cl
- movb %bh,%dl
movl 28(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm0
pxor 1(%ebp,%edi,8),%mm1
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm3
pxor 7(%ebp,%edi,8),%mm4
- movb %al,%cl
- movb %ah,%dl
movl 32(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm5
pxor 5(%ebp,%edi,8),%mm6
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm7
pxor 3(%ebp,%edi,8),%mm0
- movb %bl,%cl
- movb %bh,%dl
movl 36(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm1
pxor 1(%ebp,%edi,8),%mm2
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm4
pxor 7(%ebp,%edi,8),%mm5
- movb %al,%cl
- movb %ah,%dl
movl 40(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm6
pxor 5(%ebp,%edi,8),%mm7
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm0
pxor 3(%ebp,%edi,8),%mm1
- movb %bl,%cl
- movb %bh,%dl
movl 44(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm2
pxor 1(%ebp,%edi,8),%mm3
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm5
pxor 7(%ebp,%edi,8),%mm6
- movb %al,%cl
- movb %ah,%dl
movl 48(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm7
pxor 5(%ebp,%edi,8),%mm0
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm1
pxor 3(%ebp,%edi,8),%mm2
- movb %bl,%cl
- movb %bh,%dl
movl 52(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm3
pxor 1(%ebp,%edi,8),%mm4
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm6
pxor 7(%ebp,%edi,8),%mm7
- movb %al,%cl
- movb %ah,%dl
movl 56(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm0
pxor 5(%ebp,%edi,8),%mm1
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm2
pxor 3(%ebp,%edi,8),%mm3
- movb %bl,%cl
- movb %bh,%dl
movl 60(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm4
pxor 1(%ebp,%edi,8),%mm5
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm7
pxor 7(%ebp,%edi,8),%mm0
- movb %al,%cl
- movb %ah,%dl
movl 64(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm1
pxor 5(%ebp,%edi,8),%mm2
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm3
pxor 3(%ebp,%edi,8),%mm4
- movb %bl,%cl
- movb %bh,%dl
movl 68(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm5
pxor 1(%ebp,%edi,8),%mm6
movq %mm0,(%esp)
@@ -299,226 +301,226 @@ whirlpool_block_mmx:
movq %mm5,40(%esp)
movq %mm6,48(%esp)
movq %mm7,56(%esp)
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm0
pxor 7(%ebp,%edi,8),%mm1
- movb %al,%cl
- movb %ah,%dl
movl 72(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm2
pxor 5(%ebp,%edi,8),%mm3
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm4
pxor 3(%ebp,%edi,8),%mm5
- movb %bl,%cl
- movb %bh,%dl
movl 76(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm6
pxor 1(%ebp,%edi,8),%mm7
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm1
pxor 7(%ebp,%edi,8),%mm2
- movb %al,%cl
- movb %ah,%dl
movl 80(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm3
pxor 5(%ebp,%edi,8),%mm4
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm5
pxor 3(%ebp,%edi,8),%mm6
- movb %bl,%cl
- movb %bh,%dl
movl 84(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm7
pxor 1(%ebp,%edi,8),%mm0
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm2
pxor 7(%ebp,%edi,8),%mm3
- movb %al,%cl
- movb %ah,%dl
movl 88(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm4
pxor 5(%ebp,%edi,8),%mm5
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm6
pxor 3(%ebp,%edi,8),%mm7
- movb %bl,%cl
- movb %bh,%dl
movl 92(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm0
pxor 1(%ebp,%edi,8),%mm1
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm3
pxor 7(%ebp,%edi,8),%mm4
- movb %al,%cl
- movb %ah,%dl
movl 96(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm5
pxor 5(%ebp,%edi,8),%mm6
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm7
pxor 3(%ebp,%edi,8),%mm0
- movb %bl,%cl
- movb %bh,%dl
movl 100(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm1
pxor 1(%ebp,%edi,8),%mm2
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm4
pxor 7(%ebp,%edi,8),%mm5
- movb %al,%cl
- movb %ah,%dl
movl 104(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm6
pxor 5(%ebp,%edi,8),%mm7
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm0
pxor 3(%ebp,%edi,8),%mm1
- movb %bl,%cl
- movb %bh,%dl
movl 108(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm2
pxor 1(%ebp,%edi,8),%mm3
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm5
pxor 7(%ebp,%edi,8),%mm6
- movb %al,%cl
- movb %ah,%dl
movl 112(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm7
pxor 5(%ebp,%edi,8),%mm0
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm1
pxor 3(%ebp,%edi,8),%mm2
- movb %bl,%cl
- movb %bh,%dl
movl 116(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm3
pxor 1(%ebp,%edi,8),%mm4
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm6
pxor 7(%ebp,%edi,8),%mm7
- movb %al,%cl
- movb %ah,%dl
movl 120(%esp),%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm0
pxor 5(%ebp,%edi,8),%mm1
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm2
pxor 3(%ebp,%edi,8),%mm3
- movb %bl,%cl
- movb %bh,%dl
movl 124(%esp),%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm4
pxor 1(%ebp,%edi,8),%mm5
- movb %al,%cl
- movb %ah,%dl
+ shrl $16,%eax
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%eax
+ movzbl %ah,%edx
pxor (%ebp,%esi,8),%mm7
pxor 7(%ebp,%edi,8),%mm0
- movb %al,%cl
- movb %ah,%dl
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %bh,%edx
pxor 6(%ebp,%esi,8),%mm1
pxor 5(%ebp,%edi,8),%mm2
- movb %bl,%cl
- movb %bh,%dl
+ shrl $16,%ebx
leal (%ecx,%ecx,1),%esi
+ movzbl %bl,%ecx
leal (%edx,%edx,1),%edi
- shrl $16,%ebx
+ movzbl %bh,%edx
pxor 4(%ebp,%esi,8),%mm3
pxor 3(%ebp,%edi,8),%mm4
- movb %bl,%cl
- movb %bh,%dl
leal (%ecx,%ecx,1),%esi
+ movzbl %al,%ecx
leal (%edx,%edx,1),%edi
+ movzbl %ah,%edx
pxor 2(%ebp,%esi,8),%mm5
pxor 1(%ebp,%edi,8),%mm6
leal 128(%esp),%ebx
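The whirlpool_block_mmx rewrite above replaces the movb-into-%cl/%dl byte
extraction with movzbl and hoists the shrl earlier, so each table index comes
from a zero-extending move instead of a partial-register write. A hedged C
sketch of the access pattern each extracted byte drives (the names and memcpy
helper are illustrative, not from the source):

    #include <stdint.h>
    #include <string.h>

    /* leal (%ecx,%ecx,1),%esi ; movq/pxor column(%ebp,%esi,8):
     * one 64-bit entry from the interleaved table at stride 16 bytes,
     * column displacements 0..7 (columns 1..7 are deliberately unaligned) */
    static uint64_t wp_table_entry(const unsigned char *Cx,
                                   unsigned byte, unsigned column)
    {
        uint64_t v;
        memcpy(&v, Cx + column + 16u * (uint64_t)byte, sizeof v);
        return v;
    }

    static unsigned lo_byte(uint32_t x) { return x & 0xff; }        /* movzbl %al */
    static unsigned hi_byte(uint32_t x) { return (x >> 8) & 0xff; } /* movzbl %ah */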
diff --git a/secure/lib/libcrypto/i386/x86-gf2m.s b/secure/lib/libcrypto/i386/x86-gf2m.s
index 1fcfe38..b7177ed 100644
--- a/secure/lib/libcrypto/i386/x86-gf2m.s
+++ b/secure/lib/libcrypto/i386/x86-gf2m.s
@@ -341,4 +341,4 @@ bn_GF2m_mul_2x2:
.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97
.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103
.byte 62,0
-.comm OPENSSL_ia32cap_P,8,4
+.comm OPENSSL_ia32cap_P,16,4
diff --git a/secure/lib/libcrypto/i386/x86-mont.s b/secure/lib/libcrypto/i386/x86-mont.s
index 0eec75d..26f84f8 100644
--- a/secure/lib/libcrypto/i386/x86-mont.s
+++ b/secure/lib/libcrypto/i386/x86-mont.s
@@ -454,4 +454,4 @@ bn_mul_mont:
.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121
.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46
.byte 111,114,103,62,0
-.comm OPENSSL_ia32cap_P,8,4
+.comm OPENSSL_ia32cap_P,16,4
diff --git a/secure/lib/libcrypto/i386/x86cpuid.s b/secure/lib/libcrypto/i386/x86cpuid.s
index 815ff69..69622ec 100644
--- a/secure/lib/libcrypto/i386/x86cpuid.s
+++ b/secure/lib/libcrypto/i386/x86cpuid.s
@@ -23,6 +23,8 @@ OPENSSL_ia32_cpuid:
xorl %eax,%eax
btl $21,%ecx
jnc .L000nocpuid
+ movl 20(%esp),%esi
+ movl %eax,8(%esi)
.byte 0x0f,0xa2
movl %eax,%edi
xorl %eax,%eax
@@ -73,28 +75,36 @@ OPENSSL_ia32_cpuid:
andl $4026531839,%edx
jmp .L002generic
.L001intel:
+ cmpl $7,%edi
+ jb .L003cacheinfo
+ movl 20(%esp),%esi
+ movl $7,%eax
+ xorl %ecx,%ecx
+ .byte 0x0f,0xa2
+ movl %ebx,8(%esi)
+.L003cacheinfo:
cmpl $4,%edi
movl $-1,%edi
- jb .L003nocacheinfo
+ jb .L004nocacheinfo
movl $4,%eax
movl $0,%ecx
.byte 0x0f,0xa2
movl %eax,%edi
shrl $14,%edi
andl $4095,%edi
-.L003nocacheinfo:
+.L004nocacheinfo:
movl $1,%eax
xorl %ecx,%ecx
.byte 0x0f,0xa2
andl $3220176895,%edx
cmpl $0,%ebp
- jne .L004notintel
+ jne .L005notintel
orl $1073741824,%edx
andb $15,%ah
cmpb $15,%ah
- jne .L004notintel
+ jne .L005notintel
orl $1048576,%edx
-.L004notintel:
+.L005notintel:
btl $28,%edx
jnc .L002generic
andl $4026531839,%edx
@@ -111,20 +121,22 @@ OPENSSL_ia32_cpuid:
movl %edx,%esi
orl %ecx,%ebp
btl $27,%ecx
- jnc .L005clear_avx
+ jnc .L006clear_avx
xorl %ecx,%ecx
.byte 15,1,208
andl $6,%eax
cmpl $6,%eax
- je .L006done
+ je .L007done
cmpl $2,%eax
- je .L005clear_avx
-.L007clear_xmm:
+ je .L006clear_avx
+.L008clear_xmm:
andl $4261412861,%ebp
andl $4278190079,%esi
-.L005clear_avx:
+.L006clear_avx:
andl $4026525695,%ebp
-.L006done:
+ movl 20(%esp),%edi
+ andl $4294967263,8(%edi)
+.L007done:
movl %esi,%eax
movl %ebp,%edx
.L000nocpuid:
@@ -143,9 +155,9 @@ OPENSSL_rdtsc:
xorl %edx,%edx
leal OPENSSL_ia32cap_P,%ecx
btl $4,(%ecx)
- jnc .L008notsc
+ jnc .L009notsc
.byte 0x0f,0x31
-.L008notsc:
+.L009notsc:
ret
.size OPENSSL_rdtsc,.-.L_OPENSSL_rdtsc_begin
.globl OPENSSL_instrument_halt
@@ -155,14 +167,14 @@ OPENSSL_instrument_halt:
.L_OPENSSL_instrument_halt_begin:
leal OPENSSL_ia32cap_P,%ecx
btl $4,(%ecx)
- jnc .L009nohalt
+ jnc .L010nohalt
.long 2421723150
andl $3,%eax
- jnz .L009nohalt
+ jnz .L010nohalt
pushfl
popl %eax
btl $9,%eax
- jnc .L009nohalt
+ jnc .L010nohalt
.byte 0x0f,0x31
pushl %edx
pushl %eax
@@ -172,7 +184,7 @@ OPENSSL_instrument_halt:
sbbl 4(%esp),%edx
addl $8,%esp
ret
-.L009nohalt:
+.L010nohalt:
xorl %eax,%eax
xorl %edx,%edx
ret
@@ -185,21 +197,21 @@ OPENSSL_far_spin:
pushfl
popl %eax
btl $9,%eax
- jnc .L010nospin
+ jnc .L011nospin
movl 4(%esp),%eax
movl 8(%esp),%ecx
.long 2430111262
xorl %eax,%eax
movl (%ecx),%edx
- jmp .L011spin
+ jmp .L012spin
.align 16
-.L011spin:
+.L012spin:
incl %eax
cmpl (%ecx),%edx
- je .L011spin
+ je .L012spin
.long 529567888
ret
-.L010nospin:
+.L011nospin:
xorl %eax,%eax
xorl %edx,%edx
ret
@@ -214,10 +226,10 @@ OPENSSL_wipe_cpu:
leal OPENSSL_ia32cap_P,%ecx
movl (%ecx),%ecx
btl $1,(%ecx)
- jnc .L012no_x87
+ jnc .L013no_x87
andl $83886080,%ecx
cmpl $83886080,%ecx
- jne .L013no_sse2
+ jne .L014no_sse2
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
@@ -226,9 +238,9 @@ OPENSSL_wipe_cpu:
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
-.L013no_sse2:
+.L014no_sse2:
.long 4007259865,4007259865,4007259865,4007259865,2430851995
-.L012no_x87:
+.L013no_x87:
leal 4(%esp),%eax
ret
.size OPENSSL_wipe_cpu,.-.L_OPENSSL_wipe_cpu_begin
@@ -242,11 +254,11 @@ OPENSSL_atomic_add:
pushl %ebx
nop
movl (%edx),%eax
-.L014spin:
+.L015spin:
leal (%eax,%ecx,1),%ebx
nop
.long 447811568
- jne .L014spin
+ jne .L015spin
movl %ebx,%eax
popl %ebx
ret
@@ -287,32 +299,32 @@ OPENSSL_cleanse:
movl 8(%esp),%ecx
xorl %eax,%eax
cmpl $7,%ecx
- jae .L015lot
+ jae .L016lot
cmpl $0,%ecx
- je .L016ret
-.L017little:
+ je .L017ret
+.L018little:
movb %al,(%edx)
subl $1,%ecx
leal 1(%edx),%edx
- jnz .L017little
-.L016ret:
+ jnz .L018little
+.L017ret:
ret
.align 16
-.L015lot:
+.L016lot:
testl $3,%edx
- jz .L018aligned
+ jz .L019aligned
movb %al,(%edx)
leal -1(%ecx),%ecx
leal 1(%edx),%edx
- jmp .L015lot
-.L018aligned:
+ jmp .L016lot
+.L019aligned:
movl %eax,(%edx)
leal -4(%ecx),%ecx
testl $-4,%ecx
leal 4(%edx),%edx
- jnz .L018aligned
+ jnz .L019aligned
cmpl $0,%ecx
- jne .L017little
+ jne .L018little
ret
.size OPENSSL_cleanse,.-.L_OPENSSL_cleanse_begin
.globl OPENSSL_ia32_rdrand
@@ -321,15 +333,32 @@ OPENSSL_cleanse:
OPENSSL_ia32_rdrand:
.L_OPENSSL_ia32_rdrand_begin:
movl $8,%ecx
-.L019loop:
+.L020loop:
.byte 15,199,240
- jc .L020break
- loop .L019loop
-.L020break:
+ jc .L021break
+ loop .L020loop
+.L021break:
cmpl $0,%eax
cmovel %ecx,%eax
ret
.size OPENSSL_ia32_rdrand,.-.L_OPENSSL_ia32_rdrand_begin
-.comm OPENSSL_ia32cap_P,8,4
+.globl OPENSSL_ia32_rdseed
+.type OPENSSL_ia32_rdseed,@function
+.align 16
+OPENSSL_ia32_rdseed:
+.L_OPENSSL_ia32_rdseed_begin:
+ movl $8,%ecx
+.L022loop:
+.byte 15,199,248
+ jc .L023break
+ loop .L022loop
+.L023break:
+ cmpl $0,%eax
+ cmovel %ecx,%eax
+ ret
+.size OPENSSL_ia32_rdseed,.-.L_OPENSSL_ia32_rdseed_begin
+.hidden OPENSSL_cpuid_setup
+.hidden OPENSSL_ia32cap_P
+.comm OPENSSL_ia32cap_P,16,4
.section .init
call OPENSSL_cpuid_setup
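The x86cpuid.s hunk adds OPENSSL_ia32_rdseed alongside OPENSSL_ia32_rdrand
(.byte 15,199,248 encodes rdseed %eax) and widens OPENSSL_ia32cap_P to 16
bytes so the CPUID leaf-7 feature word can be stored at offset 8. Both entropy
routines share the same retry shape: up to eight attempts, with a return value
of 0 reserved for failure. A hedged C rendering of that loop (the helper name
is illustrative; the intrinsic needs a -mrdseed build):

    #include <immintrin.h>
    #include <stdint.h>

    static uint32_t rdseed32_retry(void)
    {
        unsigned int v;
        for (int i = 8; i > 0; i--)
            if (_rdseed32_step(&v))              /* CF set: v holds entropy */
                return v != 0 ? v : (uint32_t)i; /* never report 0 on success */
        return 0;                                /* retries exhausted: failure */
    }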
diff --git a/secure/lib/libcrypto/man/ASN1_OBJECT_new.3 b/secure/lib/libcrypto/man/ASN1_OBJECT_new.3
index 927b28b..2b7ff89 100644
--- a/secure/lib/libcrypto/man/ASN1_OBJECT_new.3
+++ b/secure/lib/libcrypto/man/ASN1_OBJECT_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ASN1_OBJECT_new 3"
-.TH ASN1_OBJECT_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ASN1_OBJECT_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ASN1_STRING_length.3 b/secure/lib/libcrypto/man/ASN1_STRING_length.3
index dd9bc74..fe95425 100644
--- a/secure/lib/libcrypto/man/ASN1_STRING_length.3
+++ b/secure/lib/libcrypto/man/ASN1_STRING_length.3
@@ -133,14 +133,14 @@
.\" ========================================================================
.\"
.IX Title "ASN1_STRING_length 3"
-.TH ASN1_STRING_length 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ASN1_STRING_length 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
ASN1_STRING_dup, ASN1_STRING_cmp, ASN1_STRING_set, ASN1_STRING_length,
-ASN1_STRING_length_set, ASN1_STRING_type, ASN1_STRING_data \-
+ASN1_STRING_length_set, ASN1_STRING_type, ASN1_STRING_data, ASN1_STRING_to_UTF8 \-
ASN1_STRING utility functions
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
diff --git a/secure/lib/libcrypto/man/ASN1_STRING_new.3 b/secure/lib/libcrypto/man/ASN1_STRING_new.3
index 52203a3..3d24f4a 100644
--- a/secure/lib/libcrypto/man/ASN1_STRING_new.3
+++ b/secure/lib/libcrypto/man/ASN1_STRING_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ASN1_STRING_new 3"
-.TH ASN1_STRING_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ASN1_STRING_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ASN1_STRING_print_ex.3 b/secure/lib/libcrypto/man/ASN1_STRING_print_ex.3
index b5e0c23..037a498 100644
--- a/secure/lib/libcrypto/man/ASN1_STRING_print_ex.3
+++ b/secure/lib/libcrypto/man/ASN1_STRING_print_ex.3
@@ -133,13 +133,13 @@
.\" ========================================================================
.\"
.IX Title "ASN1_STRING_print_ex 3"
-.TH ASN1_STRING_print_ex 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ASN1_STRING_print_ex 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-ASN1_STRING_print_ex, ASN1_STRING_print_ex_fp \- ASN1_STRING output routines.
+ASN1_STRING_print_ex, ASN1_STRING_print_ex_fp, ASN1_STRING_print \- ASN1_STRING output routines.
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
diff --git a/secure/lib/libcrypto/man/ASN1_TIME_set.3 b/secure/lib/libcrypto/man/ASN1_TIME_set.3
new file mode 100644
index 0000000..8ac876b
--- /dev/null
+++ b/secure/lib/libcrypto/man/ASN1_TIME_set.3
@@ -0,0 +1,264 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "ASN1_TIME_set 3"
+.TH ASN1_TIME_set 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+ASN1_TIME_set, ASN1_TIME_adj, ASN1_TIME_check, ASN1_TIME_set_string,
+ASN1_TIME_print, ASN1_TIME_diff \- ASN.1 Time functions.
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 6
+\& ASN1_TIME *ASN1_TIME_set(ASN1_TIME *s, time_t t);
+\& ASN1_TIME *ASN1_TIME_adj(ASN1_TIME *s, time_t t,
+\& int offset_day, long offset_sec);
+\& int ASN1_TIME_set_string(ASN1_TIME *s, const char *str);
+\& int ASN1_TIME_check(const ASN1_TIME *t);
+\& int ASN1_TIME_print(BIO *b, const ASN1_TIME *s);
+\&
+\& int ASN1_TIME_diff(int *pday, int *psec,
+\& const ASN1_TIME *from, const ASN1_TIME *to);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+The function \fIASN1_TIME_set()\fR sets the \s-1ASN1_TIME\s0 structure \fBs\fR to the
+time represented by the time_t value \fBt\fR. If \fBs\fR is \s-1NULL\s0 a new \s-1ASN1_TIME\s0
+structure is allocated and returned.
+.PP
+\&\fIASN1_TIME_adj()\fR sets the \s-1ASN1_TIME\s0 structure \fBs\fR to the time represented
+by the time \fBoffset_day\fR and \fBoffset_sec\fR after the time_t value \fBt\fR.
+The values of \fBoffset_day\fR or \fBoffset_sec\fR can be negative to set a
+time before \fBt\fR. The \fBoffset_sec\fR value can also exceed the number of
+seconds in a day. If \fBs\fR is \s-1NULL\s0 a new \s-1ASN1_TIME\s0 structure is allocated
+and returned.
+.PP
+\&\fIASN1_TIME_set_string()\fR sets \s-1ASN1_TIME\s0 structure \fBs\fR to the time
+represented by string \fBstr\fR which must be in appropriate \s-1ASN.1\s0 time
+format (for example \s-1YYMMDDHHMMSSZ\s0 or \s-1YYYYMMDDHHMMSSZ\s0).
+.PP
+\&\fIASN1_TIME_check()\fR checks the syntax of \s-1ASN1_TIME\s0 structure \fBs\fR.
+.PP
+\&\fIASN1_TIME_print()\fR prints out the time \fBs\fR to \s-1BIO \s0\fBb\fR in human-readable
+format. It will be of the format \s-1MMM DD HH:MM:SS YYYY\s0 [\s-1GMT\s0], for example
+\&\*(L"Feb 3 00:55:52 2015 \s-1GMT\*(R"\s0; it does not include a newline. If the time
+structure has an invalid format it prints out \*(L"Bad time value\*(R" and returns
+an error.
+.PP
+\&\fIASN1_TIME_diff()\fR sets \fB*pday\fR and \fB*psec\fR to the time difference between
+\&\fBfrom\fR and \fBto\fR. If \fBto\fR represents a time later than \fBfrom\fR then
+one or both (depending on the time difference) of \fB*pday\fR and \fB*psec\fR
+will be positive. If \fBto\fR represents a time earlier than \fBfrom\fR then
+one or both of \fB*pday\fR and \fB*psec\fR will be negative. If \fBto\fR and \fBfrom\fR
+represent the same time then \fB*pday\fR and \fB*psec\fR will both be zero.
+If both \fB*pday\fR and \fB*psec\fR are non-zero they will always have the same
+sign. The value of \fB*psec\fR will always be less than the number of seconds
+in a day. If \fBfrom\fR or \fBto\fR is \s-1NULL\s0 the current time is used.
+.SH "NOTES"
+.IX Header "NOTES"
+The \s-1ASN1_TIME\s0 structure corresponds to the \s-1ASN.1\s0 structure \fBTime\fR
+defined in \s-1RFC5280\s0 et al. The time setting functions obey the rules outlined
+in \s-1RFC5280:\s0 if the date can be represented by UTCTime it is used, else
+GeneralizedTime is used.
+.PP
+The \s-1ASN1_TIME\s0 structure is represented as an \s-1ASN1_STRING\s0 internally and can
+be freed up using \fIASN1_STRING_free()\fR.
+.PP
+The \s-1ASN1_TIME\s0 structure can represent years from 0000 to 9999 but no attempt
+is made to correct ancient calendar changes (for example from Julian to
+Gregorian calendars).
+.PP
+Some applications add offset times directly to a time_t value and pass the
+result to \fIASN1_TIME_set()\fR (or equivalent). This is problematic because the
+time_t value can overflow on some systems, leading to unexpected behaviour.
+New applications should use \fIASN1_TIME_adj()\fR instead and pass the offset value
+in the \fBoffset_sec\fR and \fBoffset_day\fR parameters instead of directly
+manipulating a time_t value.
+.SH "BUGS"
+.IX Header "BUGS"
+\&\fIASN1_TIME_print()\fR currently does not print out the time zone: it either prints
+out \*(L"\s-1GMT\*(R"\s0 or nothing. But all certificates complying with \s-1RFC5280\s0 et al use \s-1GMT\s0
+anyway.
+.SH "EXAMPLES"
+.IX Header "EXAMPLES"
+Set a time structure to one hour after the current time and print it out:
+.PP
+.Vb 11
+\& #include <time.h>
+\& #include <openssl/asn1.h>
+\& ASN1_TIME *tm;
+\& time_t t;
+\& BIO *b;
+\& t = time(NULL);
+\& tm = ASN1_TIME_adj(NULL, t, 0, 60 * 60);
+\& b = BIO_new_fp(stdout, BIO_NOCLOSE);
+\& ASN1_TIME_print(b, tm);
+\& ASN1_STRING_free(tm);
+\& BIO_free(b);
+.Ve
+.PP
+Determine if one time is later or earlier than the current time:
+.PP
+.Vb 1
+\& int day, sec;
+\&
+\& if (!ASN1_TIME_diff(&day, &sec, NULL, to))
+\& /* Invalid time format */
+\&
+\& if (day > 0 || sec > 0)
+\& printf("Later\en");
+\& else if (day < 0 || sec < 0)
+\& printf("Sooner\en");
+\& else
+\& printf("Same\en");
+.Ve
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+\&\fIASN1_TIME_set()\fR and \fIASN1_TIME_adj()\fR return a pointer to an \s-1ASN1_TIME\s0 structure
+or \s-1NULL\s0 if an error occurred.
+.PP
+\&\fIASN1_TIME_set_string()\fR returns 1 if the time value is successfully set and
+0 otherwise.
+.PP
+\&\fIASN1_TIME_check()\fR returns 1 if the structure is syntactically correct and 0
+otherwise.
+.PP
+\&\fIASN1_TIME_print()\fR returns 1 if the time is successfully printed out and 0 if
+an error occurred (I/O error or invalid time format).
+.PP
+\&\fIASN1_TIME_diff()\fR returns 1 for success and 0 for failure. It can fail if
+the passed \s-1ASN1_TIME\s0 structure has invalid syntax, for example.
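The new ASN1_TIME_set.3 page's NOTES recommend \fIASN1_TIME_adj()\fR over adding
offsets to a time_t by hand. A hedged C sketch of that pattern (the function
name is illustrative, not from the patch):

    #include <time.h>
    #include <openssl/asn1.h>

    /* expiry 30 days from now; the offset goes in offset_day/offset_sec
     * so the time_t itself is never pushed toward overflow */
    ASN1_TIME *expiry_in_30_days(void)
    {
        return ASN1_TIME_adj(NULL, time(NULL), 30, 0); /* NULL: allocate */
    }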
diff --git a/secure/lib/libcrypto/man/ASN1_generate_nconf.3 b/secure/lib/libcrypto/man/ASN1_generate_nconf.3
index af892e2..7149547 100644
--- a/secure/lib/libcrypto/man/ASN1_generate_nconf.3
+++ b/secure/lib/libcrypto/man/ASN1_generate_nconf.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ASN1_generate_nconf 3"
-.TH ASN1_generate_nconf 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ASN1_generate_nconf 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_ctrl.3 b/secure/lib/libcrypto/man/BIO_ctrl.3
index b191ed2..a4b3691 100644
--- a/secure/lib/libcrypto/man/BIO_ctrl.3
+++ b/secure/lib/libcrypto/man/BIO_ctrl.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_ctrl 3"
-.TH BIO_ctrl 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_ctrl 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_f_base64.3 b/secure/lib/libcrypto/man/BIO_f_base64.3
index db4a753..1678773 100644
--- a/secure/lib/libcrypto/man/BIO_f_base64.3
+++ b/secure/lib/libcrypto/man/BIO_f_base64.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_f_base64 3"
-.TH BIO_f_base64 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_f_base64 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_f_buffer.3 b/secure/lib/libcrypto/man/BIO_f_buffer.3
index 8a7e7a2..022f6ac 100644
--- a/secure/lib/libcrypto/man/BIO_f_buffer.3
+++ b/secure/lib/libcrypto/man/BIO_f_buffer.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_f_buffer 3"
-.TH BIO_f_buffer 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_f_buffer 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_f_cipher.3 b/secure/lib/libcrypto/man/BIO_f_cipher.3
index f751a5c..2ae0f3e 100644
--- a/secure/lib/libcrypto/man/BIO_f_cipher.3
+++ b/secure/lib/libcrypto/man/BIO_f_cipher.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_f_cipher 3"
-.TH BIO_f_cipher 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_f_cipher 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_f_md.3 b/secure/lib/libcrypto/man/BIO_f_md.3
index 0217508..ae83639 100644
--- a/secure/lib/libcrypto/man/BIO_f_md.3
+++ b/secure/lib/libcrypto/man/BIO_f_md.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_f_md 3"
-.TH BIO_f_md 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_f_md 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_f_null.3 b/secure/lib/libcrypto/man/BIO_f_null.3
index 9b4f647..f877a0a 100644
--- a/secure/lib/libcrypto/man/BIO_f_null.3
+++ b/secure/lib/libcrypto/man/BIO_f_null.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_f_null 3"
-.TH BIO_f_null 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_f_null 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_f_ssl.3 b/secure/lib/libcrypto/man/BIO_f_ssl.3
index 8ccb59a..84c8510 100644
--- a/secure/lib/libcrypto/man/BIO_f_ssl.3
+++ b/secure/lib/libcrypto/man/BIO_f_ssl.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_f_ssl 3"
-.TH BIO_f_ssl 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_f_ssl 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -244,7 +244,7 @@ already been established this call has no effect.
is non blocking they can still request a retry in exceptional
circumstances. Specifically this will happen if a session
renegotiation takes place during a \fIBIO_read()\fR operation, one
-case where this happens is when \s-1SGC\s0 or step up occurs.
+case where this happens is when step up occurs.
.PP
In OpenSSL 0.9.6 and later the \s-1SSL\s0 flag \s-1SSL_AUTO_RETRY\s0 can be
set to disable this behaviour. That is when this flag is set
diff --git a/secure/lib/libcrypto/man/BIO_find_type.3 b/secure/lib/libcrypto/man/BIO_find_type.3
index 3fdfa1e..04876c1 100644
--- a/secure/lib/libcrypto/man/BIO_find_type.3
+++ b/secure/lib/libcrypto/man/BIO_find_type.3
@@ -133,13 +133,13 @@
.\" ========================================================================
.\"
.IX Title "BIO_find_type 3"
-.TH BIO_find_type 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_find_type 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-BIO_find_type, BIO_next \- BIO chain traversal
+BIO_find_type, BIO_next, BIO_method_type \- BIO chain traversal
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
diff --git a/secure/lib/libcrypto/man/BIO_new.3 b/secure/lib/libcrypto/man/BIO_new.3
index b51eb3c..75a8a0b 100644
--- a/secure/lib/libcrypto/man/BIO_new.3
+++ b/secure/lib/libcrypto/man/BIO_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_new 3"
-.TH BIO_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_new_CMS.3 b/secure/lib/libcrypto/man/BIO_new_CMS.3
index 87fb129..e30e9ab 100644
--- a/secure/lib/libcrypto/man/BIO_new_CMS.3
+++ b/secure/lib/libcrypto/man/BIO_new_CMS.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_new_CMS 3"
-.TH BIO_new_CMS 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_new_CMS 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_push.3 b/secure/lib/libcrypto/man/BIO_push.3
index 3a45766..e7943c4 100644
--- a/secure/lib/libcrypto/man/BIO_push.3
+++ b/secure/lib/libcrypto/man/BIO_push.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_push 3"
-.TH BIO_push 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_push 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_read.3 b/secure/lib/libcrypto/man/BIO_read.3
index 495d84e..9267591 100644
--- a/secure/lib/libcrypto/man/BIO_read.3
+++ b/secure/lib/libcrypto/man/BIO_read.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_read 3"
-.TH BIO_read 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_read 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_s_accept.3 b/secure/lib/libcrypto/man/BIO_s_accept.3
index eddc678..fd084f5 100644
--- a/secure/lib/libcrypto/man/BIO_s_accept.3
+++ b/secure/lib/libcrypto/man/BIO_s_accept.3
@@ -133,13 +133,13 @@
.\" ========================================================================
.\"
.IX Title "BIO_s_accept 3"
-.TH BIO_s_accept 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_s_accept 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-BIO_s_accept, BIO_set_accept_port, BIO_get_accept_port,
+BIO_s_accept, BIO_set_accept_port, BIO_get_accept_port, BIO_new_accept,
BIO_set_nbio_accept, BIO_set_accept_bios, BIO_set_bind_mode,
BIO_get_bind_mode, BIO_do_accept \- accept BIO
.SH "SYNOPSIS"
diff --git a/secure/lib/libcrypto/man/BIO_s_bio.3 b/secure/lib/libcrypto/man/BIO_s_bio.3
index 98692ac..cb0f399 100644
--- a/secure/lib/libcrypto/man/BIO_s_bio.3
+++ b/secure/lib/libcrypto/man/BIO_s_bio.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_s_bio 3"
-.TH BIO_s_bio 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_s_bio 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_s_connect.3 b/secure/lib/libcrypto/man/BIO_s_connect.3
index e3482df..f4f5010 100644
--- a/secure/lib/libcrypto/man/BIO_s_connect.3
+++ b/secure/lib/libcrypto/man/BIO_s_connect.3
@@ -133,13 +133,13 @@
.\" ========================================================================
.\"
.IX Title "BIO_s_connect 3"
-.TH BIO_s_connect 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_s_connect 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-BIO_s_connect, BIO_set_conn_hostname, BIO_set_conn_port,
+BIO_s_connect, BIO_new_connect, BIO_set_conn_hostname, BIO_set_conn_port,
BIO_set_conn_ip, BIO_set_conn_int_port, BIO_get_conn_hostname,
BIO_get_conn_port, BIO_get_conn_ip, BIO_get_conn_int_port,
BIO_set_nbio, BIO_do_connect \- connect BIO
diff --git a/secure/lib/libcrypto/man/BIO_s_fd.3 b/secure/lib/libcrypto/man/BIO_s_fd.3
index 06a20a8..f5e6d03 100644
--- a/secure/lib/libcrypto/man/BIO_s_fd.3
+++ b/secure/lib/libcrypto/man/BIO_s_fd.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_s_fd 3"
-.TH BIO_s_fd 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_s_fd 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_s_file.3 b/secure/lib/libcrypto/man/BIO_s_file.3
index b14c86f..63d1ef5 100644
--- a/secure/lib/libcrypto/man/BIO_s_file.3
+++ b/secure/lib/libcrypto/man/BIO_s_file.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_s_file 3"
-.TH BIO_s_file 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_s_file 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_s_mem.3 b/secure/lib/libcrypto/man/BIO_s_mem.3
index ec74f15..1908fdb 100644
--- a/secure/lib/libcrypto/man/BIO_s_mem.3
+++ b/secure/lib/libcrypto/man/BIO_s_mem.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_s_mem 3"
-.TH BIO_s_mem 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_s_mem 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_s_null.3 b/secure/lib/libcrypto/man/BIO_s_null.3
index 077723e..dd2857b 100644
--- a/secure/lib/libcrypto/man/BIO_s_null.3
+++ b/secure/lib/libcrypto/man/BIO_s_null.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_s_null 3"
-.TH BIO_s_null 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_s_null 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_s_socket.3 b/secure/lib/libcrypto/man/BIO_s_socket.3
index 0afadac..01d9f44 100644
--- a/secure/lib/libcrypto/man/BIO_s_socket.3
+++ b/secure/lib/libcrypto/man/BIO_s_socket.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_s_socket 3"
-.TH BIO_s_socket 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_s_socket 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_set_callback.3 b/secure/lib/libcrypto/man/BIO_set_callback.3
index b355f65..2b0b4db 100644
--- a/secure/lib/libcrypto/man/BIO_set_callback.3
+++ b/secure/lib/libcrypto/man/BIO_set_callback.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_set_callback 3"
-.TH BIO_set_callback 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_set_callback 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BIO_should_retry.3 b/secure/lib/libcrypto/man/BIO_should_retry.3
index 980952a..f9563754 100644
--- a/secure/lib/libcrypto/man/BIO_should_retry.3
+++ b/secure/lib/libcrypto/man/BIO_should_retry.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BIO_should_retry 3"
-.TH BIO_should_retry 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BIO_should_retry 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_BLINDING_new.3 b/secure/lib/libcrypto/man/BN_BLINDING_new.3
index 3266d01..025f834 100644
--- a/secure/lib/libcrypto/man/BN_BLINDING_new.3
+++ b/secure/lib/libcrypto/man/BN_BLINDING_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_BLINDING_new 3"
-.TH BN_BLINDING_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_BLINDING_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -141,7 +141,7 @@
.SH "NAME"
BN_BLINDING_new, BN_BLINDING_free, BN_BLINDING_update, BN_BLINDING_convert,
BN_BLINDING_invert, BN_BLINDING_convert_ex, BN_BLINDING_invert_ex,
-BN_BLINDING_get_thread_id, BN_BLINDING_set_thread_id, BN_BLINDING_get_flags,
+BN_BLINDING_get_thread_id, BN_BLINDING_set_thread_id, BN_BLINDING_thread_id, BN_BLINDING_get_flags,
BN_BLINDING_set_flags, BN_BLINDING_create_param \- blinding related BIGNUM
functions.
.SH "SYNOPSIS"
@@ -220,7 +220,7 @@ or \s-1NULL\s0 in case of an error.
.PP
\&\fIBN_BLINDING_update()\fR, \fIBN_BLINDING_convert()\fR, \fIBN_BLINDING_invert()\fR,
\&\fIBN_BLINDING_convert_ex()\fR and \fIBN_BLINDING_invert_ex()\fR return 1 on
-success and 0 if an error occured.
+success and 0 if an error occurred.
.PP
\&\fIBN_BLINDING_thread_id()\fR returns a pointer to the thread id object
within a \fB\s-1BN_BLINDING\s0\fR object.
diff --git a/secure/lib/libcrypto/man/BN_CTX_new.3 b/secure/lib/libcrypto/man/BN_CTX_new.3
index 340197f..f2f61e9 100644
--- a/secure/lib/libcrypto/man/BN_CTX_new.3
+++ b/secure/lib/libcrypto/man/BN_CTX_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_CTX_new 3"
-.TH BN_CTX_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_CTX_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -147,10 +147,14 @@ BN_CTX_new, BN_CTX_init, BN_CTX_free \- allocate and free BN_CTX structures
\&
\& BN_CTX *BN_CTX_new(void);
\&
-\& void BN_CTX_init(BN_CTX *c);
-\&
\& void BN_CTX_free(BN_CTX *c);
.Ve
+.PP
+Deprecated:
+.PP
+.Vb 1
+\& void BN_CTX_init(BN_CTX *c);
+.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
A \fB\s-1BN_CTX\s0\fR is a structure that holds \fB\s-1BIGNUM\s0\fR temporary variables used by
@@ -159,14 +163,16 @@ is rather expensive when used in conjunction with repeated subroutine
calls, the \fB\s-1BN_CTX\s0\fR structure is used.
.PP
\&\fIBN_CTX_new()\fR allocates and initializes a \fB\s-1BN_CTX\s0\fR
-structure. \fIBN_CTX_init()\fR initializes an existing uninitialized
-\&\fB\s-1BN_CTX\s0\fR.
+structure.
.PP
\&\fIBN_CTX_free()\fR frees the components of the \fB\s-1BN_CTX\s0\fR, and if it was
created by \fIBN_CTX_new()\fR, also the structure itself.
If \fIBN_CTX_start\fR\|(3) has been used on the \fB\s-1BN_CTX\s0\fR,
\&\fIBN_CTX_end\fR\|(3) must be called before the \fB\s-1BN_CTX\s0\fR
may be freed by \fIBN_CTX_free()\fR.
+.PP
+\&\fIBN_CTX_init()\fR (deprecated) initializes an existing uninitialized \fB\s-1BN_CTX\s0\fR.
+This should not be used for new programs. Use \fIBN_CTX_new()\fR instead.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
\&\fIBN_CTX_new()\fR returns a pointer to the \fB\s-1BN_CTX\s0\fR. If the allocation fails,
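Since the page above re-documents the BN_CTX lifecycle around the deprecation of BN_CTX_init(), a minimal usage sketch may help; the modular addition shown is illustrative only:

```c
#include <openssl/bn.h>

/* Sketch of the lifecycle described above: allocate with BN_CTX_new()
 * (not the deprecated BN_CTX_init()), borrow temporaries between
 * BN_CTX_start()/BN_CTX_end(), then release with BN_CTX_free(). */
int mod_add_demo(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m)
{
    int ok = 0;
    BN_CTX *ctx = BN_CTX_new();
    if (ctx == NULL)
        return 0;
    BN_CTX_start(ctx);
    BIGNUM *t = BN_CTX_get(ctx);   /* temporary owned by ctx; shown for illustration */
    if (t != NULL && BN_mod_add(r, a, b, m, ctx))
        ok = 1;
    BN_CTX_end(ctx);               /* required before BN_CTX_free() after BN_CTX_start() */
    BN_CTX_free(ctx);
    return ok;
}
```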
diff --git a/secure/lib/libcrypto/man/BN_CTX_start.3 b/secure/lib/libcrypto/man/BN_CTX_start.3
index 60f0c50..f630a0f 100644
--- a/secure/lib/libcrypto/man/BN_CTX_start.3
+++ b/secure/lib/libcrypto/man/BN_CTX_start.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_CTX_start 3"
-.TH BN_CTX_start 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_CTX_start 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_add.3 b/secure/lib/libcrypto/man/BN_add.3
index 5689dc6..75a2a5c 100644
--- a/secure/lib/libcrypto/man/BN_add.3
+++ b/secure/lib/libcrypto/man/BN_add.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_add 3"
-.TH BN_add 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_add 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_add_word.3 b/secure/lib/libcrypto/man/BN_add_word.3
index f42d70e..c00d9f4 100644
--- a/secure/lib/libcrypto/man/BN_add_word.3
+++ b/secure/lib/libcrypto/man/BN_add_word.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_add_word 3"
-.TH BN_add_word 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_add_word 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_bn2bin.3 b/secure/lib/libcrypto/man/BN_bn2bin.3
index 3f7f574..4f94150 100644
--- a/secure/lib/libcrypto/man/BN_bn2bin.3
+++ b/secure/lib/libcrypto/man/BN_bn2bin.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_bn2bin 3"
-.TH BN_bn2bin 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_bn2bin 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_cmp.3 b/secure/lib/libcrypto/man/BN_cmp.3
index 57d3ae9..c9f5a68 100644
--- a/secure/lib/libcrypto/man/BN_cmp.3
+++ b/secure/lib/libcrypto/man/BN_cmp.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_cmp 3"
-.TH BN_cmp 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_cmp 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_copy.3 b/secure/lib/libcrypto/man/BN_copy.3
index a671762..4fb3a8a 100644
--- a/secure/lib/libcrypto/man/BN_copy.3
+++ b/secure/lib/libcrypto/man/BN_copy.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_copy 3"
-.TH BN_copy 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_copy 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_generate_prime.3 b/secure/lib/libcrypto/man/BN_generate_prime.3
index dd69285..f27e165 100644
--- a/secure/lib/libcrypto/man/BN_generate_prime.3
+++ b/secure/lib/libcrypto/man/BN_generate_prime.3
@@ -133,18 +133,38 @@
.\" ========================================================================
.\"
.IX Title "BN_generate_prime 3"
-.TH BN_generate_prime 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_generate_prime 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-BN_generate_prime, BN_is_prime, BN_is_prime_fasttest \- generate primes and test for primality
+BN_generate_prime_ex, BN_is_prime_ex, BN_is_prime_fasttest_ex, BN_GENCB_call,
+BN_GENCB_set_old, BN_GENCB_set, BN_generate_prime, BN_is_prime,
+BN_is_prime_fasttest \- generate primes and test for primality
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
\& #include <openssl/bn.h>
\&
+\& int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add,
+\& const BIGNUM *rem, BN_GENCB *cb);
+\&
+\& int BN_is_prime_ex(const BIGNUM *p, int nchecks, BN_CTX *ctx, BN_GENCB *cb);
+\&
+\& int BN_is_prime_fasttest_ex(const BIGNUM *p, int nchecks, BN_CTX *ctx,
+\& int do_trial_division, BN_GENCB *cb);
+\&
+\& int BN_GENCB_call(BN_GENCB *cb, int a, int b);
+\&
+\& #define BN_GENCB_set_old(gencb, callback, cb_arg) ...
+\&
+\& #define BN_GENCB_set(gencb, callback, cb_arg) ...
+.Ve
+.PP
+Deprecated:
+.PP
+.Vb 2
\& BIGNUM *BN_generate_prime(BIGNUM *ret, int num, int safe, BIGNUM *add,
\& BIGNUM *rem, void (*callback)(int, int, void *), void *cb_arg);
\&
@@ -157,19 +177,19 @@ BN_generate_prime, BN_is_prime, BN_is_prime_fasttest \- generate primes and test
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
-\&\fIBN_generate_prime()\fR generates a pseudo-random prime number of \fBnum\fR
-bits.
+\&\fIBN_generate_prime_ex()\fR generates a pseudo-random prime number of
+bit length \fBbits\fR.
If \fBret\fR is not \fB\s-1NULL\s0\fR, it will be used to store the number.
.PP
-If \fBcallback\fR is not \fB\s-1NULL\s0\fR, it is called as follows:
+If \fBcb\fR is not \fB\s-1NULL\s0\fR, it is used as follows:
.IP "\(bu" 4
-\&\fBcallback(0, i, cb_arg)\fR is called after generating the i\-th
+\&\fBBN_GENCB_call(cb, 0, i)\fR is called after generating the i\-th
potential prime number.
.IP "\(bu" 4
-While the number is being tested for primality, \fBcallback(1, j,
-cb_arg)\fR is called as described below.
+While the number is being tested for primality,
+\&\fBBN_GENCB_call(cb, 1, j)\fR is called as described below.
.IP "\(bu" 4
-When a prime has been found, \fBcallback(2, i, cb_arg)\fR is called.
+When a prime has been found, \fBBN_GENCB_call(cb, 2, i)\fR is called.
.PP
The prime may have to fulfill additional requirements for use in
Diffie-Hellman key exchange:
@@ -181,37 +201,66 @@ generator.
If \fBsafe\fR is true, it will be a safe prime (i.e. a prime p so
that (p\-1)/2 is also prime).
.PP
-The \s-1PRNG\s0 must be seeded prior to calling \fIBN_generate_prime()\fR.
+The \s-1PRNG\s0 must be seeded prior to calling \fIBN_generate_prime_ex()\fR.
The prime number generation has a negligible error probability.
.PP
-\&\fIBN_is_prime()\fR and \fIBN_is_prime_fasttest()\fR test if the number \fBa\fR is
+\&\fIBN_is_prime_ex()\fR and \fIBN_is_prime_fasttest_ex()\fR test if the number \fBp\fR is
prime. The following tests are performed until one of them shows that
-\&\fBa\fR is composite; if \fBa\fR passes all these tests, it is considered
+\&\fBp\fR is composite; if \fBp\fR passes all these tests, it is considered
prime.
.PP
-\&\fIBN_is_prime_fasttest()\fR, when called with \fBdo_trial_division == 1\fR,
+\&\fIBN_is_prime_fasttest_ex()\fR, when called with \fBdo_trial_division == 1\fR,
first attempts trial division by a number of small primes;
-if no divisors are found by this test and \fBcallback\fR is not \fB\s-1NULL\s0\fR,
-\&\fBcallback(1, \-1, cb_arg)\fR is called.
+if no divisors are found by this test and \fBcb\fR is not \fB\s-1NULL\s0\fR,
+\&\fBBN_GENCB_call(cb, 1, \-1)\fR is called.
If \fBdo_trial_division == 0\fR, this test is skipped.
.PP
-Both \fIBN_is_prime()\fR and \fIBN_is_prime_fasttest()\fR perform a Miller-Rabin
-probabilistic primality test with \fBchecks\fR iterations. If
-\&\fBchecks == BN_prime_checks\fR, a number of iterations is used that
+Both \fIBN_is_prime_ex()\fR and \fIBN_is_prime_fasttest_ex()\fR perform a Miller-Rabin
+probabilistic primality test with \fBnchecks\fR iterations. If
+\&\fBnchecks == BN_prime_checks\fR, a number of iterations is used that
yields a false positive rate of at most 2^\-80 for random input.
.PP
-If \fBcallback\fR is not \fB\s-1NULL\s0\fR, \fBcallback(1, j, cb_arg)\fR is called
+If \fBcb\fR is not \fB\s-1NULL\s0\fR, \fBBN_GENCB_call(cb, 1, j)\fR is called
after the j\-th iteration (j = 0, 1, ...). \fBctx\fR is a
pre-allocated \fB\s-1BN_CTX\s0\fR (to save the overhead of allocating and
freeing the structure in a loop), or \fB\s-1NULL\s0\fR.
+.PP
+BN_GENCB_call calls the callback function held in the \fB\s-1BN_GENCB\s0\fR structure
+and passes the ints \fBa\fR and \fBb\fR as arguments. There are two types of
+\&\fB\s-1BN_GENCB\s0\fR structure that are supported: \*(L"new\*(R" style and \*(L"old\*(R" style. New
+programs should prefer the \*(L"new\*(R" style, whilst the \*(L"old\*(R" style is provided
+for backwards compatibility purposes.
+.PP
+For \*(L"new\*(R" style callbacks a \s-1BN_GENCB\s0 structure should be initialised with a
+call to BN_GENCB_set, where \fBgencb\fR is a \fB\s-1BN_GENCB\s0 *\fR, \fBcallback\fR is of
+type \fBint (*callback)(int, int, \s-1BN_GENCB\s0 *)\fR and \fBcb_arg\fR is a \fBvoid *\fR.
+\&\*(L"Old\*(R" style callbacks are the same except they are initialised with a call
+to BN_GENCB_set_old and \fBcallback\fR is of type
+\&\fBvoid (*callback)(int, int, void *)\fR.
+.PP
+A callback is invoked through a call to \fBBN_GENCB_call\fR. This will check
+the type of the callback and will invoke \fBcallback(a, b, gencb)\fR for new
+style callbacks or \fBcallback(a, b, cb_arg)\fR for old style.
+.PP
+BN_generate_prime (deprecated) works in the same way as
+BN_generate_prime_ex but expects an old style callback function
+directly in the \fBcallback\fR parameter, and an argument to pass to it in
+the \fBcb_arg\fR parameter. Similarly BN_is_prime and BN_is_prime_fasttest are
+deprecated and correspond to BN_is_prime_ex and
+BN_is_prime_fasttest_ex respectively.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
-\&\fIBN_generate_prime()\fR returns the prime number on success, \fB\s-1NULL\s0\fR otherwise.
+\&\fIBN_generate_prime_ex()\fR returns 1 on success or 0 on error.
.PP
-\&\fIBN_is_prime()\fR returns 0 if the number is composite, 1 if it is
-prime with an error probability of less than 0.25^\fBchecks\fR, and
+\&\fIBN_is_prime_ex()\fR, \fIBN_is_prime_fasttest_ex()\fR, \fIBN_is_prime()\fR and
+\&\fIBN_is_prime_fasttest()\fR return 0 if the number is composite, 1 if it is
+prime with an error probability of less than 0.25^\fBnchecks\fR, and
\&\-1 on error.
.PP
+\&\fIBN_generate_prime()\fR returns the prime number on success, \fB\s-1NULL\s0\fR otherwise.
+.PP
+Callback functions should return 1 on success or 0 on error.
+.PP
The error codes can be obtained by \fIERR_get_error\fR\|(3).
.SH "SEE ALSO"
.IX Header "SEE ALSO"
diff --git a/secure/lib/libcrypto/man/BN_mod_inverse.3 b/secure/lib/libcrypto/man/BN_mod_inverse.3
index 47e4a73..7562fea 100644
--- a/secure/lib/libcrypto/man/BN_mod_inverse.3
+++ b/secure/lib/libcrypto/man/BN_mod_inverse.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_mod_inverse 3"
-.TH BN_mod_inverse 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_mod_inverse 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_mod_mul_montgomery.3 b/secure/lib/libcrypto/man/BN_mod_mul_montgomery.3
index 5319570..5663241 100644
--- a/secure/lib/libcrypto/man/BN_mod_mul_montgomery.3
+++ b/secure/lib/libcrypto/man/BN_mod_mul_montgomery.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_mod_mul_montgomery 3"
-.TH BN_mod_mul_montgomery 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_mod_mul_montgomery 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_mod_mul_reciprocal.3 b/secure/lib/libcrypto/man/BN_mod_mul_reciprocal.3
index 1589115..1e518cf 100644
--- a/secure/lib/libcrypto/man/BN_mod_mul_reciprocal.3
+++ b/secure/lib/libcrypto/man/BN_mod_mul_reciprocal.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_mod_mul_reciprocal 3"
-.TH BN_mod_mul_reciprocal 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_mod_mul_reciprocal 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_new.3 b/secure/lib/libcrypto/man/BN_new.3
index b150cda..553bad9 100644
--- a/secure/lib/libcrypto/man/BN_new.3
+++ b/secure/lib/libcrypto/man/BN_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_new 3"
-.TH BN_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_num_bytes.3 b/secure/lib/libcrypto/man/BN_num_bytes.3
index ce7487a..c9ea90f 100644
--- a/secure/lib/libcrypto/man/BN_num_bytes.3
+++ b/secure/lib/libcrypto/man/BN_num_bytes.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_num_bytes 3"
-.TH BN_num_bytes 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_num_bytes 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_rand.3 b/secure/lib/libcrypto/man/BN_rand.3
index 8297eac..aa365bb 100644
--- a/secure/lib/libcrypto/man/BN_rand.3
+++ b/secure/lib/libcrypto/man/BN_rand.3
@@ -133,13 +133,13 @@
.\" ========================================================================
.\"
.IX Title "BN_rand 3"
-.TH BN_rand 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_rand 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-BN_rand, BN_pseudo_rand \- generate pseudo\-random number
+BN_rand, BN_pseudo_rand, BN_rand_range, BN_pseudo_rand_range \- generate pseudo\-random number
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
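BN_rand_range() and BN_pseudo_rand_range(), newly listed in the NAME section above, generate a value in [0, range). A one-call sketch, noting that the 1.0.2 prototype takes a non-const range:

```c
#include <openssl/bn.h>

/* Sketch: draw r uniformly with 0 <= r < range.  As with the other
 * BN_rand functions, the PRNG must already be seeded. */
int rand_below_demo(BIGNUM *r, BIGNUM *range)
{
    return BN_rand_range(r, range);   /* 1 on success, 0 on error */
}
```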
diff --git a/secure/lib/libcrypto/man/BN_set_bit.3 b/secure/lib/libcrypto/man/BN_set_bit.3
index 7e20ebe..7f62476 100644
--- a/secure/lib/libcrypto/man/BN_set_bit.3
+++ b/secure/lib/libcrypto/man/BN_set_bit.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_set_bit 3"
-.TH BN_set_bit 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_set_bit 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_swap.3 b/secure/lib/libcrypto/man/BN_swap.3
index d4a4511..2a32dd8 100644
--- a/secure/lib/libcrypto/man/BN_swap.3
+++ b/secure/lib/libcrypto/man/BN_swap.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_swap 3"
-.TH BN_swap 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_swap 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/BN_zero.3 b/secure/lib/libcrypto/man/BN_zero.3
index ab6e8ea..51a71bb 100644
--- a/secure/lib/libcrypto/man/BN_zero.3
+++ b/secure/lib/libcrypto/man/BN_zero.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "BN_zero 3"
-.TH BN_zero 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH BN_zero 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CMS_add0_cert.3 b/secure/lib/libcrypto/man/CMS_add0_cert.3
index 1553be8..155e9e3 100644
--- a/secure/lib/libcrypto/man/CMS_add0_cert.3
+++ b/secure/lib/libcrypto/man/CMS_add0_cert.3
@@ -133,15 +133,13 @@
.\" ========================================================================
.\"
.IX Title "CMS_add0_cert 3"
-.TH CMS_add0_cert 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_add0_cert 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-.Vb 1
-\& CMS_add0_cert, CMS_add1_cert, CMS_get1_certs, CMS_add0_crl, CMS_get1_crls, \- CMS certificate and CRL utility functions
-.Ve
+CMS_add0_cert, CMS_add1_cert, CMS_get1_certs, CMS_add0_crl, CMS_add1_crl, CMS_get1_crls \- CMS certificate and CRL utility functions
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
diff --git a/secure/lib/libcrypto/man/CMS_add1_recipient_cert.3 b/secure/lib/libcrypto/man/CMS_add1_recipient_cert.3
index 505f4ec..9e1d3b2 100644
--- a/secure/lib/libcrypto/man/CMS_add1_recipient_cert.3
+++ b/secure/lib/libcrypto/man/CMS_add1_recipient_cert.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CMS_add1_recipient_cert 3"
-.TH CMS_add1_recipient_cert 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_add1_recipient_cert 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CMS_add1_signer.3 b/secure/lib/libcrypto/man/CMS_add1_signer.3
index ddaecfc..5729bbc 100644
--- a/secure/lib/libcrypto/man/CMS_add1_signer.3
+++ b/secure/lib/libcrypto/man/CMS_add1_signer.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CMS_add1_signer 3"
-.TH CMS_add1_signer 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_add1_signer 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CMS_compress.3 b/secure/lib/libcrypto/man/CMS_compress.3
index ecd4611..fc29407 100644
--- a/secure/lib/libcrypto/man/CMS_compress.3
+++ b/secure/lib/libcrypto/man/CMS_compress.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CMS_compress 3"
-.TH CMS_compress 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_compress 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CMS_decrypt.3 b/secure/lib/libcrypto/man/CMS_decrypt.3
index fd3fa72..36e24fe 100644
--- a/secure/lib/libcrypto/man/CMS_decrypt.3
+++ b/secure/lib/libcrypto/man/CMS_decrypt.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CMS_decrypt 3"
-.TH CMS_decrypt 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_decrypt 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CMS_encrypt.3 b/secure/lib/libcrypto/man/CMS_encrypt.3
index 32398ae..34faa80 100644
--- a/secure/lib/libcrypto/man/CMS_encrypt.3
+++ b/secure/lib/libcrypto/man/CMS_encrypt.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CMS_encrypt 3"
-.TH CMS_encrypt 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_encrypt 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CMS_final.3 b/secure/lib/libcrypto/man/CMS_final.3
index cd043ae..a2b986b 100644
--- a/secure/lib/libcrypto/man/CMS_final.3
+++ b/secure/lib/libcrypto/man/CMS_final.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CMS_final 3"
-.TH CMS_final 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_final 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CMS_get0_RecipientInfos.3 b/secure/lib/libcrypto/man/CMS_get0_RecipientInfos.3
index ff2cb31..6945fe4 100644
--- a/secure/lib/libcrypto/man/CMS_get0_RecipientInfos.3
+++ b/secure/lib/libcrypto/man/CMS_get0_RecipientInfos.3
@@ -133,15 +133,13 @@
.\" ========================================================================
.\"
.IX Title "CMS_get0_RecipientInfos 3"
-.TH CMS_get0_RecipientInfos 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_get0_RecipientInfos 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-.Vb 1
-\& CMS_get0_RecipientInfos, CMS_RecipientInfo_type, CMS_RecipientInfo_ktri_get0_signer_id,CMS_RecipientInfo_ktri_cert_cmp, CMS_RecipientInfo_set0_pkey, CMS_RecipientInfo_kekri_get0_id, CMS_RecipientInfo_kekri_id_cmp, CMS_RecipientInfo_set0_key, CMS_RecipientInfo_decrypt \- CMS envelopedData RecipientInfo routines
-.Ve
+CMS_get0_RecipientInfos, CMS_RecipientInfo_type, CMS_RecipientInfo_ktri_get0_signer_id, CMS_RecipientInfo_ktri_cert_cmp, CMS_RecipientInfo_set0_pkey, CMS_RecipientInfo_kekri_get0_id, CMS_RecipientInfo_kekri_id_cmp, CMS_RecipientInfo_set0_key, CMS_RecipientInfo_decrypt, CMS_RecipientInfo_encrypt \- CMS envelopedData RecipientInfo routines
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
@@ -159,6 +157,7 @@
\& int CMS_RecipientInfo_set0_key(CMS_RecipientInfo *ri, unsigned char *key, size_t keylen);
\&
\& int CMS_RecipientInfo_decrypt(CMS_ContentInfo *cms, CMS_RecipientInfo *ri);
+\& int CMS_RecipientInfo_encrypt(CMS_ContentInfo *cms, CMS_RecipientInfo *ri);
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
@@ -204,6 +203,11 @@ successful and non zero if not.
\&\fICMS_RecipientInfo_decrypt()\fR attempts to decrypt CMS_RecipientInfo structure
\&\fBri\fR in structure \fBcms\fR. A key must have been associated with the structure
first.
+.PP
+\&\fICMS_RecipientInfo_encrypt()\fR attempts to encrypt CMS_RecipientInfo structure
+\&\fBri\fR in structure \fBcms\fR. A key must have been associated with the structure
+first and the content encryption key must be available: for example by a
+previous call to \fICMS_RecipientInfo_decrypt()\fR.
.SH "NOTES"
.IX Header "NOTES"
The main purpose of these functions is to enable an application to lookup
@@ -218,6 +222,13 @@ function. Then if the corresponding secret or private key can be obtained by
any appropriate means it can then be associated with the structure and
\&\fICMS_RecipientInfo_decrypt()\fR called. If successful \fICMS_decrypt()\fR can be called
with a \s-1NULL\s0 key to decrypt the enveloped content.
+.PP
+\&\fICMS_RecipientInfo_encrypt()\fR can be used to add a new recipient to an
+existing enveloped data structure. Typically an application will first decrypt
+an appropriate CMS_RecipientInfo structure to make the content encryption key
+available; it will then add a new recipient using a function such as
+\&\fICMS_add1_recipient_cert()\fR and finally encrypt the content encryption key
+using \fICMS_RecipientInfo_encrypt()\fR.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
\&\fICMS_get0_RecipientInfos()\fR returns all CMS_RecipientInfo structures, or \s-1NULL\s0 if
@@ -226,6 +237,7 @@ an error occurs.
\&\fICMS_RecipientInfo_ktri_get0_signer_id()\fR, \fICMS_RecipientInfo_set0_pkey()\fR,
\&\fICMS_RecipientInfo_kekri_get0_id()\fR, \fICMS_RecipientInfo_set0_key()\fR and
\&\fICMS_RecipientInfo_decrypt()\fR return 1 for success or 0 if an error occurs.
+\&\fICMS_RecipientInfo_encrypt()\fR returns 1 for success or 0 if an error occurs.
.PP
\&\fICMS_RecipientInfo_ktri_cert_cmp()\fR and \fICMS_RecipientInfo_kekri_cmp()\fR return 0
for a successful comparison and non zero otherwise.
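The add-a-recipient flow in the NOTES above can be sketched as follows; cms and newcert stand for objects the application already holds, and the content encryption key is assumed to have been made available beforehand:

```c
#include <openssl/cms.h>

/* Sketch of the NOTES flow: with the content encryption key already
 * recovered (e.g. via a prior CMS_RecipientInfo_decrypt()), add a new
 * recipient certificate and encrypt the content key to it. */
int add_recipient_demo(CMS_ContentInfo *cms, X509 *newcert)
{
    CMS_RecipientInfo *ri = CMS_add1_recipient_cert(cms, newcert, 0);
    if (ri == NULL)
        return 0;
    return CMS_RecipientInfo_encrypt(cms, ri);   /* 1 on success, 0 on error */
}
```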
diff --git a/secure/lib/libcrypto/man/CMS_get0_SignerInfos.3 b/secure/lib/libcrypto/man/CMS_get0_SignerInfos.3
index 2b9265f..364fa87 100644
--- a/secure/lib/libcrypto/man/CMS_get0_SignerInfos.3
+++ b/secure/lib/libcrypto/man/CMS_get0_SignerInfos.3
@@ -133,15 +133,13 @@
.\" ========================================================================
.\"
.IX Title "CMS_get0_SignerInfos 3"
-.TH CMS_get0_SignerInfos 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_get0_SignerInfos 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-.Vb 1
-\& CMS_get0_SignerInfos, CMS_SignerInfo_get0_signer_id, CMS_SignerInfo_cert_cmp, CMS_set1_signer_certs \- CMS signedData signer functions.
-.Ve
+CMS_get0_SignerInfos, CMS_SignerInfo_get0_signer_id, CMS_SignerInfo_get0_signature, CMS_SignerInfo_cert_cmp, CMS_SignerInfo_set1_signer_cert \- CMS signedData signer functions
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
@@ -150,6 +148,7 @@
\& STACK_OF(CMS_SignerInfo) *CMS_get0_SignerInfos(CMS_ContentInfo *cms);
\&
\& int CMS_SignerInfo_get0_signer_id(CMS_SignerInfo *si, ASN1_OCTET_STRING **keyid, X509_NAME **issuer, ASN1_INTEGER **sno);
+\& ASN1_OCTET_STRING *CMS_SignerInfo_get0_signature(CMS_SignerInfo *si);
\& int CMS_SignerInfo_cert_cmp(CMS_SignerInfo *si, X509 *cert);
\& void CMS_SignerInfo_set1_signer_cert(CMS_SignerInfo *si, X509 *signer);
.Ve
@@ -163,6 +162,11 @@ associated with a specific CMS_SignerInfo structure \fBsi\fR. Either the
keyidentifier will be set in \fBkeyid\fR or \fBboth\fR issuer name and serial number
in \fBissuer\fR and \fBsno\fR.
.PP
+\&\fICMS_SignerInfo_get0_signature()\fR retrieves the signature associated with
+\&\fBsi\fR as a pointer to an \s-1ASN1_OCTET_STRING\s0 structure. The pointer returned
+corresponds to the internal signature value of \fBsi\fR, so it may be read or
+modified.
+.PP
\&\fICMS_SignerInfo_cert_cmp()\fR compares the certificate \fBcert\fR against the signer
identifier \fBsi\fR. It returns zero if the comparison is successful and non zero
if not.
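A short sketch of the new accessor described above; si is assumed to come from CMS_get0_SignerInfos():

```c
#include <openssl/asn1.h>
#include <openssl/cms.h>

/* Sketch: read the raw signature bytes of a SignerInfo through
 * CMS_SignerInfo_get0_signature().  The ASN1_OCTET_STRING is internal
 * to si, so it must not be freed by the caller. */
int signature_len_demo(CMS_SignerInfo *si)
{
    ASN1_OCTET_STRING *sig = CMS_SignerInfo_get0_signature(si);
    return sig != NULL ? ASN1_STRING_length(sig) : -1;
}
```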
diff --git a/secure/lib/libcrypto/man/CMS_get0_type.3 b/secure/lib/libcrypto/man/CMS_get0_type.3
index a977393..2d552c2 100644
--- a/secure/lib/libcrypto/man/CMS_get0_type.3
+++ b/secure/lib/libcrypto/man/CMS_get0_type.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CMS_get0_type 3"
-.TH CMS_get0_type 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_get0_type 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CMS_get1_ReceiptRequest.3 b/secure/lib/libcrypto/man/CMS_get1_ReceiptRequest.3
index 52ffa53..14bc08b 100644
--- a/secure/lib/libcrypto/man/CMS_get1_ReceiptRequest.3
+++ b/secure/lib/libcrypto/man/CMS_get1_ReceiptRequest.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CMS_get1_ReceiptRequest 3"
-.TH CMS_get1_ReceiptRequest 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_get1_ReceiptRequest 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CMS_sign.3 b/secure/lib/libcrypto/man/CMS_sign.3
index 1b1fdfe..133362e 100644
--- a/secure/lib/libcrypto/man/CMS_sign.3
+++ b/secure/lib/libcrypto/man/CMS_sign.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CMS_sign 3"
-.TH CMS_sign 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_sign 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CMS_sign_receipt.3 b/secure/lib/libcrypto/man/CMS_sign_receipt.3
index 98372e0..f757976 100644
--- a/secure/lib/libcrypto/man/CMS_sign_receipt.3
+++ b/secure/lib/libcrypto/man/CMS_sign_receipt.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CMS_sign_receipt 3"
-.TH CMS_sign_receipt 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_sign_receipt 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CMS_uncompress.3 b/secure/lib/libcrypto/man/CMS_uncompress.3
index 5fd9e70..4a75752 100644
--- a/secure/lib/libcrypto/man/CMS_uncompress.3
+++ b/secure/lib/libcrypto/man/CMS_uncompress.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CMS_uncompress 3"
-.TH CMS_uncompress 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_uncompress 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CMS_verify.3 b/secure/lib/libcrypto/man/CMS_verify.3
index 088ee45..7572718 100644
--- a/secure/lib/libcrypto/man/CMS_verify.3
+++ b/secure/lib/libcrypto/man/CMS_verify.3
@@ -133,15 +133,13 @@
.\" ========================================================================
.\"
.IX Title "CMS_verify 3"
-.TH CMS_verify 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_verify 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-.Vb 1
-\& CMS_verify \- verify a CMS SignedData structure
-.Ve
+CMS_verify, CMS_get0_signers \- verify a CMS SignedData structure
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
diff --git a/secure/lib/libcrypto/man/CMS_verify_receipt.3 b/secure/lib/libcrypto/man/CMS_verify_receipt.3
index d618959..f941405 100644
--- a/secure/lib/libcrypto/man/CMS_verify_receipt.3
+++ b/secure/lib/libcrypto/man/CMS_verify_receipt.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CMS_verify_receipt 3"
-.TH CMS_verify_receipt 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CMS_verify_receipt 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CONF_modules_free.3 b/secure/lib/libcrypto/man/CONF_modules_free.3
index 5e99d8e..3ed0b47 100644
--- a/secure/lib/libcrypto/man/CONF_modules_free.3
+++ b/secure/lib/libcrypto/man/CONF_modules_free.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CONF_modules_free 3"
-.TH CONF_modules_free 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CONF_modules_free 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CONF_modules_load_file.3 b/secure/lib/libcrypto/man/CONF_modules_load_file.3
index 222d832..c5be2a2 100644
--- a/secure/lib/libcrypto/man/CONF_modules_load_file.3
+++ b/secure/lib/libcrypto/man/CONF_modules_load_file.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CONF_modules_load_file 3"
-.TH CONF_modules_load_file 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CONF_modules_load_file 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/CRYPTO_set_ex_data.3 b/secure/lib/libcrypto/man/CRYPTO_set_ex_data.3
index 1abdb16..0df3708 100644
--- a/secure/lib/libcrypto/man/CRYPTO_set_ex_data.3
+++ b/secure/lib/libcrypto/man/CRYPTO_set_ex_data.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "CRYPTO_set_ex_data 3"
-.TH CRYPTO_set_ex_data 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH CRYPTO_set_ex_data 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DH_generate_key.3 b/secure/lib/libcrypto/man/DH_generate_key.3
index dd63e80..db22819 100644
--- a/secure/lib/libcrypto/man/DH_generate_key.3
+++ b/secure/lib/libcrypto/man/DH_generate_key.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DH_generate_key 3"
-.TH DH_generate_key 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DH_generate_key 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DH_generate_parameters.3 b/secure/lib/libcrypto/man/DH_generate_parameters.3
index d217bb0..f4dd262e 100644
--- a/secure/lib/libcrypto/man/DH_generate_parameters.3
+++ b/secure/lib/libcrypto/man/DH_generate_parameters.3
@@ -133,38 +133,46 @@
.\" ========================================================================
.\"
.IX Title "DH_generate_parameters 3"
-.TH DH_generate_parameters 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DH_generate_parameters 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-DH_generate_parameters, DH_check \- generate and check Diffie\-Hellman parameters
+DH_generate_parameters_ex, DH_generate_parameters,
+DH_check \- generate and check Diffie\-Hellman parameters
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
\& #include <openssl/dh.h>
\&
-\& DH *DH_generate_parameters(int prime_len, int generator,
-\& void (*callback)(int, int, void *), void *cb_arg);
+\& int DH_generate_parameters_ex(DH *dh, int prime_len, int generator, BN_GENCB *cb);
\&
\& int DH_check(DH *dh, int *codes);
.Ve
+.PP
+Deprecated:
+.PP
+.Vb 2
+\& DH *DH_generate_parameters(int prime_len, int generator,
+\& void (*callback)(int, int, void *), void *cb_arg);
+.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
-\&\fIDH_generate_parameters()\fR generates Diffie-Hellman parameters that can
-be shared among a group of users, and returns them in a newly
-allocated \fB\s-1DH\s0\fR structure. The pseudo-random number generator must be
+\&\fIDH_generate_parameters_ex()\fR generates Diffie-Hellman parameters that can
+be shared among a group of users, and stores them in the provided \fB\s-1DH\s0\fR
+structure. The pseudo-random number generator must be
seeded prior to calling \fIDH_generate_parameters_ex()\fR.
.PP
\&\fBprime_len\fR is the length in bits of the safe prime to be generated.
\&\fBgenerator\fR is a small number > 1, typically 2 or 5.
.PP
A callback function may be used to provide feedback about the progress
-of the key generation. If \fBcallback\fR is not \fB\s-1NULL\s0\fR, it will be
+of the key generation. If \fBcb\fR is not \fB\s-1NULL\s0\fR, it will be
called as described in \fIBN_generate_prime\fR\|(3) while a random prime
-number is generated, and when a prime has been found, \fBcallback(3,
-0, cb_arg)\fR is called.
+number is generated, and when a prime has been found, \fBBN_GENCB_call(cb, 3, 0)\fR
+is called. See \fIBN_generate_prime\fR\|(3) for information on
+the \fIBN_GENCB_call()\fR function.
.PP
\&\fIDH_check()\fR validates Diffie-Hellman parameters. It checks that \fBp\fR is
a safe prime, and that \fBg\fR is a suitable generator. In the case of an
@@ -174,18 +182,20 @@ error, the bit flags \s-1DH_CHECK_P_NOT_SAFE_PRIME\s0 or
checked, i.e. it does not equal 2 or 5.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
-\&\fIDH_generate_parameters()\fR returns a pointer to the \s-1DH\s0 structure, or
-\&\s-1NULL\s0 if the parameter generation fails. The error codes can be
-obtained by \fIERR_get_error\fR\|(3).
+\&\fIDH_generate_parameters_ex()\fR returns 1 on success, 0 otherwise.
+\&\fIDH_check()\fR returns 1 if the check could be performed, 0 otherwise.
+.PP
+\&\fIDH_generate_parameters()\fR (deprecated) returns a pointer to the \s-1DH\s0 structure, or
+\&\s-1NULL\s0 if the parameter generation fails.
.PP
-\&\fIDH_check()\fR returns 1 if the check could be performed, 0 otherwise.
+The error codes can be obtained by \fIERR_get_error\fR\|(3).
.SH "NOTES"
.IX Header "NOTES"
-\&\fIDH_generate_parameters()\fR may run for several hours before finding a
-suitable prime.
+\&\fIDH_generate_parameters_ex()\fR and \fIDH_generate_parameters()\fR may run for several
+hours before finding a suitable prime.
.PP
-The parameters generated by \fIDH_generate_parameters()\fR are not to be
-used in signature schemes.
+The parameters generated by \fIDH_generate_parameters_ex()\fR and \fIDH_generate_parameters()\fR
+are not to be used in signature schemes.
.SH "BUGS"
.IX Header "BUGS"
If \fBgenerator\fR is not 2 or 5, \fBdh\->g\fR=\fBgenerator\fR is not
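A compact sketch pairing DH_generate_parameters_ex() with DH_check() as documented above; the 1024-bit length and generator 2 are illustrative, and generation may run for a long time:

```c
#include <openssl/dh.h>

/* Sketch: generate parameters with the new-style API, then validate
 * them; codes == 0 means DH_check() raised no error flags. */
int dh_params_demo(void)
{
    int codes = 0, ok = 0;
    DH *dh = DH_new();
    if (dh == NULL)
        return 0;
    if (DH_generate_parameters_ex(dh, 1024, DH_GENERATOR_2, NULL) == 1
        && DH_check(dh, &codes) == 1 && codes == 0)
        ok = 1;
    DH_free(dh);
    return ok;
}
```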
diff --git a/secure/lib/libcrypto/man/DH_get_ex_new_index.3 b/secure/lib/libcrypto/man/DH_get_ex_new_index.3
index e145ff3..b1b57c7 100644
--- a/secure/lib/libcrypto/man/DH_get_ex_new_index.3
+++ b/secure/lib/libcrypto/man/DH_get_ex_new_index.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DH_get_ex_new_index 3"
-.TH DH_get_ex_new_index 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DH_get_ex_new_index 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DH_new.3 b/secure/lib/libcrypto/man/DH_new.3
index ba35c41..7d29964 100644
--- a/secure/lib/libcrypto/man/DH_new.3
+++ b/secure/lib/libcrypto/man/DH_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DH_new 3"
-.TH DH_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DH_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DH_set_method.3 b/secure/lib/libcrypto/man/DH_set_method.3
index 327ad6b..2032e91 100644
--- a/secure/lib/libcrypto/man/DH_set_method.3
+++ b/secure/lib/libcrypto/man/DH_set_method.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DH_set_method 3"
-.TH DH_set_method 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DH_set_method 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DH_size.3 b/secure/lib/libcrypto/man/DH_size.3
index 610af44..388e335 100644
--- a/secure/lib/libcrypto/man/DH_size.3
+++ b/secure/lib/libcrypto/man/DH_size.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DH_size 3"
-.TH DH_size 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DH_size 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DSA_SIG_new.3 b/secure/lib/libcrypto/man/DSA_SIG_new.3
index 707f760..7c32203 100644
--- a/secure/lib/libcrypto/man/DSA_SIG_new.3
+++ b/secure/lib/libcrypto/man/DSA_SIG_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DSA_SIG_new 3"
-.TH DSA_SIG_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DSA_SIG_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DSA_do_sign.3 b/secure/lib/libcrypto/man/DSA_do_sign.3
index bf54098..35ee6a6 100644
--- a/secure/lib/libcrypto/man/DSA_do_sign.3
+++ b/secure/lib/libcrypto/man/DSA_do_sign.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DSA_do_sign 3"
-.TH DSA_do_sign 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DSA_do_sign 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DSA_dup_DH.3 b/secure/lib/libcrypto/man/DSA_dup_DH.3
index bce717f..08e499a 100644
--- a/secure/lib/libcrypto/man/DSA_dup_DH.3
+++ b/secure/lib/libcrypto/man/DSA_dup_DH.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DSA_dup_DH 3"
-.TH DSA_dup_DH 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DSA_dup_DH 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DSA_generate_key.3 b/secure/lib/libcrypto/man/DSA_generate_key.3
index 9e2ac1c..f87545f 100644
--- a/secure/lib/libcrypto/man/DSA_generate_key.3
+++ b/secure/lib/libcrypto/man/DSA_generate_key.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DSA_generate_key 3"
-.TH DSA_generate_key 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DSA_generate_key 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DSA_generate_parameters.3 b/secure/lib/libcrypto/man/DSA_generate_parameters.3
index 6ffe950..f41c6b8 100644
--- a/secure/lib/libcrypto/man/DSA_generate_parameters.3
+++ b/secure/lib/libcrypto/man/DSA_generate_parameters.3
@@ -133,26 +133,34 @@
.\" ========================================================================
.\"
.IX Title "DSA_generate_parameters 3"
-.TH DSA_generate_parameters 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DSA_generate_parameters 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-DSA_generate_parameters \- generate DSA parameters
+DSA_generate_parameters_ex, DSA_generate_parameters \- generate DSA parameters
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
\& #include <openssl/dsa.h>
\&
+\& int DSA_generate_parameters_ex(DSA *dsa, int bits,
+\& const unsigned char *seed, int seed_len,
+\& int *counter_ret, unsigned long *h_ret, BN_GENCB *cb);
+.Ve
+.PP
+Deprecated:
+.PP
+.Vb 3
\& DSA *DSA_generate_parameters(int bits, unsigned char *seed,
\& int seed_len, int *counter_ret, unsigned long *h_ret,
\& void (*callback)(int, int, void *), void *cb_arg);
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
-\&\fIDSA_generate_parameters()\fR generates primes p and q and a generator g
-for use in the \s-1DSA.\s0
+\&\fIDSA_generate_parameters_ex()\fR generates primes p and q and a generator g
+for use in the \s-1DSA\s0 and stores the result in \fBdsa\fR.
.PP
\&\fBbits\fR is the length of the prime to be generated; the \s-1DSS\s0 allows a
maximum of 1024 bits.
@@ -162,52 +170,62 @@ generated at random. Otherwise, the seed is used to generate
them. If the given seed does not yield a prime q, a new random
seed is chosen and placed at \fBseed\fR.
.PP
-\&\fIDSA_generate_parameters()\fR places the iteration count in
+\&\fIDSA_generate_parameters_ex()\fR places the iteration count in
*\fBcounter_ret\fR and a counter used for finding a generator in
*\fBh_ret\fR, unless these are \fB\s-1NULL\s0\fR.
.PP
A callback function may be used to provide feedback about the progress
-of the key generation. If \fBcallback\fR is not \fB\s-1NULL\s0\fR, it will be
-called as follows:
+of the key generation. If \fBcb\fR is not \fB\s-1NULL\s0\fR, it will be
+called as shown below. For information on the \s-1BN_GENCB\s0 structure and the
+BN_GENCB_call function discussed below, refer to
+\&\fIBN_generate_prime\fR\|(3).
.IP "\(bu" 4
-When a candidate for q is generated, \fBcallback(0, m++, cb_arg)\fR is called
+When a candidate for q is generated, \fBBN_GENCB_call(cb, 0, m++)\fR is called
(m is 0 for the first candidate).
.IP "\(bu" 4
When a candidate for q has passed a test by trial division,
-\&\fBcallback(1, \-1, cb_arg)\fR is called.
+\&\fBBN_GENCB_call(cb, 1, \-1)\fR is called.
While a candidate for q is tested by Miller-Rabin primality tests,
-\&\fBcallback(1, i, cb_arg)\fR is called in the outer loop
+\&\fBBN_GENCB_call(cb, 1, i)\fR is called in the outer loop
(once for each witness that confirms that the candidate may be prime);
i is the loop counter (starting at 0).
.IP "\(bu" 4
-When a prime q has been found, \fBcallback(2, 0, cb_arg)\fR and
-\&\fBcallback(3, 0, cb_arg)\fR are called.
+When a prime q has been found, \fBBN_GENCB_call(cb, 2, 0)\fR and
+\&\fBBN_GENCB_call(cb, 3, 0)\fR are called.
.IP "\(bu" 4
Before a candidate for p (other than the first) is generated and tested,
-\&\fBcallback(0, counter, cb_arg)\fR is called.
+\&\fBBN_GENCB_call(cb, 0, counter)\fR is called.
.IP "\(bu" 4
When a candidate for p has passed the test by trial division,
-\&\fBcallback(1, \-1, cb_arg)\fR is called.
+\&\fBBN_GENCB_call(cb, 1, \-1)\fR is called.
While it is tested by the Miller-Rabin primality test,
-\&\fBcallback(1, i, cb_arg)\fR is called in the outer loop
+\&\fBBN_GENCB_call(cb, 1, i)\fR is called in the outer loop
(once for each witness that confirms that the candidate may be prime).
i is the loop counter (starting at 0).
.IP "\(bu" 4
-When p has been found, \fBcallback(2, 1, cb_arg)\fR is called.
+When p has been found, \fBBN_GENCB_call(cb, 2, 1)\fR is called.
.IP "\(bu" 4
-When the generator has been found, \fBcallback(3, 1, cb_arg)\fR is called.
+When the generator has been found, \fBBN_GENCB_call(cb, 3, 1)\fR is called.
+.PP
+\&\fIDSA_generate_parameters()\fR (deprecated) works in much the same way as
+\&\fIDSA_generate_parameters_ex()\fR, except that no \fBdsa\fR parameter is passed and
+instead a newly allocated \fB\s-1DSA\s0\fR structure is returned. Additionally \*(L"old
+style\*(R" callbacks are used instead of the newer \s-1BN_GENCB\s0 based approach.
+Refer to \fIBN_generate_prime\fR\|(3) for further information.
.SH "RETURN VALUE"
.IX Header "RETURN VALUE"
+\&\fIDSA_generate_parameters_ex()\fR returns 1 on success, or 0 otherwise.
+.PP
\&\fIDSA_generate_parameters()\fR returns a pointer to the \s-1DSA\s0 structure, or
-\&\fB\s-1NULL\s0\fR if the parameter generation fails. The error codes can be
-obtained by \fIERR_get_error\fR\|(3).
+\&\fB\s-1NULL\s0\fR if the parameter generation fails.
+.PP
+The error codes can be obtained by \fIERR_get_error\fR\|(3).
.SH "BUGS"
.IX Header "BUGS"
Seed lengths > 20 are not supported.
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fIdsa\fR\|(3), \fIERR_get_error\fR\|(3), \fIrand\fR\|(3),
-\&\fIDSA_free\fR\|(3)
+\&\fIDSA_free\fR\|(3), \fIBN_generate_prime\fR\|(3)
.SH "HISTORY"
.IX Header "HISTORY"
\&\fIDSA_generate_parameters()\fR appeared in SSLeay 0.8. The \fBcb_arg\fR
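Finally, a sketch of DSA_generate_parameters_ex() as documented above, using a random seed (seed == NULL), no counter/h outputs, and no progress callback; the 1024-bit size is illustrative and is the DSS maximum cited above:

```c
#include <openssl/dsa.h>

/* Sketch: generate DSA domain parameters into a caller-owned DSA
 * structure using the new-style API described above. */
int dsa_params_demo(void)
{
    int ok = 0;
    DSA *dsa = DSA_new();
    if (dsa == NULL)
        return 0;
    if (DSA_generate_parameters_ex(dsa, 1024, NULL, 0, NULL, NULL, NULL) == 1)
        ok = 1;
    DSA_free(dsa);
    return ok;
}
```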
diff --git a/secure/lib/libcrypto/man/DSA_get_ex_new_index.3 b/secure/lib/libcrypto/man/DSA_get_ex_new_index.3
index 69cc05d..6124470 100644
--- a/secure/lib/libcrypto/man/DSA_get_ex_new_index.3
+++ b/secure/lib/libcrypto/man/DSA_get_ex_new_index.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DSA_get_ex_new_index 3"
-.TH DSA_get_ex_new_index 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DSA_get_ex_new_index 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DSA_new.3 b/secure/lib/libcrypto/man/DSA_new.3
index 9d91fb2..9c29d65 100644
--- a/secure/lib/libcrypto/man/DSA_new.3
+++ b/secure/lib/libcrypto/man/DSA_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DSA_new 3"
-.TH DSA_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DSA_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DSA_set_method.3 b/secure/lib/libcrypto/man/DSA_set_method.3
index 706ecf1..280706d 100644
--- a/secure/lib/libcrypto/man/DSA_set_method.3
+++ b/secure/lib/libcrypto/man/DSA_set_method.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DSA_set_method 3"
-.TH DSA_set_method 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DSA_set_method 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DSA_sign.3 b/secure/lib/libcrypto/man/DSA_sign.3
index 636dc2e..b555ef7 100644
--- a/secure/lib/libcrypto/man/DSA_sign.3
+++ b/secure/lib/libcrypto/man/DSA_sign.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DSA_sign 3"
-.TH DSA_sign 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DSA_sign 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/DSA_size.3 b/secure/lib/libcrypto/man/DSA_size.3
index 485cc77..3ecdac7 100644
--- a/secure/lib/libcrypto/man/DSA_size.3
+++ b/secure/lib/libcrypto/man/DSA_size.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "DSA_size 3"
-.TH DSA_size 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH DSA_size 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EC_GFp_simple_method.3 b/secure/lib/libcrypto/man/EC_GFp_simple_method.3
new file mode 100644
index 0000000..703b27f
--- /dev/null
+++ b/secure/lib/libcrypto/man/EC_GFp_simple_method.3
@@ -0,0 +1,193 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "EC_GFp_simple_method 3"
+.TH EC_GFp_simple_method 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+EC_GFp_simple_method, EC_GFp_mont_method, EC_GFp_nist_method, EC_GFp_nistp224_method, EC_GFp_nistp256_method, EC_GFp_nistp521_method, EC_GF2m_simple_method, EC_METHOD_get_field_type \- Functions for obtaining EC_METHOD objects.
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ec.h>
+\&
+\& const EC_METHOD *EC_GFp_simple_method(void);
+\& const EC_METHOD *EC_GFp_mont_method(void);
+\& const EC_METHOD *EC_GFp_nist_method(void);
+\& const EC_METHOD *EC_GFp_nistp224_method(void);
+\& const EC_METHOD *EC_GFp_nistp256_method(void);
+\& const EC_METHOD *EC_GFp_nistp521_method(void);
+\&
+\& const EC_METHOD *EC_GF2m_simple_method(void);
+\&
+\& int EC_METHOD_get_field_type(const EC_METHOD *meth);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+The Elliptic Curve library provides a number of different implementations through a single common interface.
+When constructing a curve using EC_GROUP_new (see \fIEC_GROUP_new\fR\|(3)) an
+implementation method must be provided. The functions described here all return a const pointer to an
+\&\fB\s-1EC_METHOD\s0\fR structure that can be passed to EC_GROUP_new. It is important that the correct implementation
+type for the form of curve selected is used.
+.PP
+For F2^m curves there is only one implementation choice, i.e. EC_GF2m_simple_method.
+.PP
+For Fp curves the lowest common denominator implementation is the EC_GFp_simple_method implementation. All
+other implementations are based on this one. EC_GFp_mont_method builds on EC_GFp_simple_method but adds the
+use of Montgomery multiplication (see \fIBN_mod_mul_montgomery\fR\|(3)). EC_GFp_nist_method
+offers an implementation optimised for use with \s-1NIST\s0 recommended curves (\s-1NIST\s0 curves are available through
+EC_GROUP_new_by_curve_name as described in \fIEC_GROUP_new\fR\|(3)).
+.PP
+The functions EC_GFp_nistp224_method, EC_GFp_nistp256_method and EC_GFp_nistp521_method offer 64 bit
+optimised implementations for the \s-1NIST P224, P256\s0 and P521 curves respectively. Note, however, that these
+implementations are not available on all platforms.
+.PP
+EC_METHOD_get_field_type identifies what type of field the \s-1EC_METHOD\s0 structure supports, which will be either
+F2^m or Fp. If the field type is Fp then the value \fBNID_X9_62_prime_field\fR is returned. If the field type is
+F2^m then the value \fBNID_X9_62_characteristic_two_field\fR is returned. These values are defined in the
+obj_mac.h header file.
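+.PP
+For example, the following illustrative sketch constructs a group using the
+Montgomery implementation and confirms that a prime field method is in use
+(error handling is abbreviated):
+.PP
+.Vb 11
+\& #include <openssl/ec.h>
+\& #include <openssl/objects.h>
+\&
+\& const EC_METHOD *meth = EC_GFp_mont_method();
+\& EC_GROUP *group = EC_GROUP_new(meth);
+\& if (group != NULL) {
+\&     if (EC_METHOD_get_field_type(meth) == NID_X9_62_prime_field) {
+\&         /* a prime field (Fp) implementation is in use */
+\&     }
+\&     EC_GROUP_free(group);
+\& }
+.Ve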
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+All EC_GFp* functions and EC_GF2m_simple_method always return a const pointer to an \s-1EC_METHOD\s0 structure.
+.PP
+EC_METHOD_get_field_type returns an integer that identifies the type of field the \s-1EC_METHOD\s0 structure supports.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fIcrypto\fR\|(3), \fIec\fR\|(3), \fIEC_GROUP_new\fR\|(3), \fIEC_GROUP_copy\fR\|(3),
+\&\fIEC_POINT_new\fR\|(3), \fIEC_POINT_add\fR\|(3), \fIEC_KEY_new\fR\|(3),
+\&\fId2i_ECPKParameters\fR\|(3),
+\&\fIBN_mod_mul_montgomery\fR\|(3)
diff --git a/secure/lib/libcrypto/man/EC_GROUP_copy.3 b/secure/lib/libcrypto/man/EC_GROUP_copy.3
new file mode 100644
index 0000000..6bed833
--- /dev/null
+++ b/secure/lib/libcrypto/man/EC_GROUP_copy.3
@@ -0,0 +1,308 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "EC_GROUP_copy 3"
+.TH EC_GROUP_copy 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+EC_GROUP_copy, EC_GROUP_dup, EC_GROUP_method_of, EC_GROUP_set_generator, EC_GROUP_get0_generator, EC_GROUP_get_order, EC_GROUP_get_cofactor, EC_GROUP_set_curve_name, EC_GROUP_get_curve_name, EC_GROUP_set_asn1_flag, EC_GROUP_get_asn1_flag, EC_GROUP_set_point_conversion_form, EC_GROUP_get_point_conversion_form, EC_GROUP_get0_seed, EC_GROUP_get_seed_len, EC_GROUP_set_seed, EC_GROUP_get_degree, EC_GROUP_check, EC_GROUP_check_discriminant, EC_GROUP_cmp, EC_GROUP_get_basis_type, EC_GROUP_get_trinomial_basis, EC_GROUP_get_pentanomial_basis \- Functions for manipulating EC_GROUP objects.
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 2
+\& #include <openssl/ec.h>
+\& #include <openssl/bn.h>
+\&
+\& int EC_GROUP_copy(EC_GROUP *dst, const EC_GROUP *src);
+\& EC_GROUP *EC_GROUP_dup(const EC_GROUP *src);
+\&
+\& const EC_METHOD *EC_GROUP_method_of(const EC_GROUP *group);
+\&
+\& int EC_GROUP_set_generator(EC_GROUP *group, const EC_POINT *generator, const BIGNUM *order, const BIGNUM *cofactor);
+\& const EC_POINT *EC_GROUP_get0_generator(const EC_GROUP *group);
+\&
+\& int EC_GROUP_get_order(const EC_GROUP *group, BIGNUM *order, BN_CTX *ctx);
+\& int EC_GROUP_get_cofactor(const EC_GROUP *group, BIGNUM *cofactor, BN_CTX *ctx);
+\&
+\& void EC_GROUP_set_curve_name(EC_GROUP *group, int nid);
+\& int EC_GROUP_get_curve_name(const EC_GROUP *group);
+\&
+\& void EC_GROUP_set_asn1_flag(EC_GROUP *group, int flag);
+\& int EC_GROUP_get_asn1_flag(const EC_GROUP *group);
+\&
+\& void EC_GROUP_set_point_conversion_form(EC_GROUP *group, point_conversion_form_t form);
+\& point_conversion_form_t EC_GROUP_get_point_conversion_form(const EC_GROUP *);
+\&
+\& unsigned char *EC_GROUP_get0_seed(const EC_GROUP *x);
+\& size_t EC_GROUP_get_seed_len(const EC_GROUP *);
+\& size_t EC_GROUP_set_seed(EC_GROUP *, const unsigned char *, size_t len);
+\&
+\& int EC_GROUP_get_degree(const EC_GROUP *group);
+\&
+\& int EC_GROUP_check(const EC_GROUP *group, BN_CTX *ctx);
+\&
+\& int EC_GROUP_check_discriminant(const EC_GROUP *group, BN_CTX *ctx);
+\&
+\& int EC_GROUP_cmp(const EC_GROUP *a, const EC_GROUP *b, BN_CTX *ctx);
+\&
+\& int EC_GROUP_get_basis_type(const EC_GROUP *);
+\& int EC_GROUP_get_trinomial_basis(const EC_GROUP *, unsigned int *k);
+\& int EC_GROUP_get_pentanomial_basis(const EC_GROUP *, unsigned int *k1,
+\& unsigned int *k2, unsigned int *k3);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+EC_GROUP_copy copies the curve \fBsrc\fR into \fBdst\fR. Both \fBsrc\fR and \fBdst\fR must use the same \s-1EC_METHOD.\s0
+.PP
+EC_GROUP_dup creates a new \s-1EC_GROUP\s0 object and copies the content from \fBsrc\fR to the newly created
+\&\s-1EC_GROUP\s0 object.
+.PP
+EC_GROUP_method_of obtains the \s-1EC_METHOD\s0 of \fBgroup\fR.
+.PP
+EC_GROUP_set_generator sets curve parameters that must be agreed by all participants using the curve. These
+parameters include the \fBgenerator\fR, the \fBorder\fR and the \fBcofactor\fR. The \fBgenerator\fR is a well defined point on the
+curve chosen for cryptographic operations. Integers used for point multiplications will be between 0 and
+n\-1 where n is the \fBorder\fR. The \fBorder\fR multiplied by the \fBcofactor\fR gives the number of points on the curve.
+.PP
+EC_GROUP_get0_generator returns the generator for the identified \fBgroup\fR.
+.PP
+The functions EC_GROUP_get_order and EC_GROUP_get_cofactor populate the provided \fBorder\fR and \fBcofactor\fR parameters
+with the respective order and cofactors for the \fBgroup\fR.
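+.PP
+As an illustrative sketch (assuming an existing \fBgroup\fR and with most
+error handling abbreviated), the order and cofactor can be retrieved as
+follows:
+.PP
+.Vb 11
+\& BIGNUM *order = BN_new();
+\& BIGNUM *cofactor = BN_new();
+\& BN_CTX *ctx = BN_CTX_new();
+\& if (order != NULL && cofactor != NULL && ctx != NULL
+\&         && EC_GROUP_get_order(group, order, ctx)
+\&         && EC_GROUP_get_cofactor(group, cofactor, ctx)) {
+\&     /* order and cofactor now hold the group parameters */
+\& }
+\& BN_CTX_free(ctx);
+\& BN_free(cofactor);
+\& BN_free(order);
+.Ve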
+.PP
+The functions EC_GROUP_set_curve_name and EC_GROUP_get_curve_name set and get the \s-1NID\s0 for the curve respectively
+(see \fIEC_GROUP_new\fR\|(3)). If a curve does not have a \s-1NID\s0 associated with it, then EC_GROUP_get_curve_name
+will return 0.
+.PP
+The asn1_flag value on a curve is used to determine whether there is a specific \s-1ASN1 OID\s0 to describe the curve or not.
+If the asn1_flag is 1 then this is a named curve with an associated \s-1ASN1 OID.\s0 If not then asn1_flag is 0. The functions
+EC_GROUP_get_asn1_flag and EC_GROUP_set_asn1_flag get and set the status of the asn1_flag for the curve. If set then
+the curve_name must also be set.
+.PP
+The point_conversion_form for a curve controls how \s-1EC_POINT\s0 data is encoded as \s-1ASN1\s0 as defined in X9.62 (\s-1ECDSA\s0).
+point_conversion_form_t is an enum defined as follows:
+.PP
+.Vb 10
+\& typedef enum {
+\& /** the point is encoded as z||x, where the octet z specifies
+\& * which solution of the quadratic equation y is */
+\& POINT_CONVERSION_COMPRESSED = 2,
+\& /** the point is encoded as z||x||y, where z is the octet 0x04 */
+\& POINT_CONVERSION_UNCOMPRESSED = 4,
+\& /** the point is encoded as z||x||y, where the octet z specifies
+\& * which solution of the quadratic equation y is */
+\& POINT_CONVERSION_HYBRID = 6
+\& } point_conversion_form_t;
+.Ve
+.PP
+For \s-1POINT_CONVERSION_UNCOMPRESSED\s0 the point is encoded as an octet signifying the \s-1UNCOMPRESSED\s0 form has been used, followed by
+the octets for x, followed by the octets for y.
+.PP
+For any given x co-ordinate for a point on a curve it is possible to derive two possible y values. For
+\&\s-1POINT_CONVERSION_COMPRESSED\s0 the point is encoded as an octet signifying that the \s-1COMPRESSED\s0 form has been used \s-1AND\s0 which of
+the two possible solutions for y has been used, followed by the octets for x.
+.PP
+For \s-1POINT_CONVERSION_HYBRID\s0 the point is encoded as an octet signifying the \s-1HYBRID\s0 form has been used \s-1AND\s0 which of the two
+possible solutions for y has been used, followed by the octets for x, followed by the octets for y.
+.PP
+The functions EC_GROUP_set_point_conversion_form and EC_GROUP_get_point_conversion_form set and get the point_conversion_form
+for the curve respectively.
+.PP
+\&\s-1ANSI X9.62 \s0(\s-1ECDSA\s0 standard) defines a method of generating the curve parameter b from a random number. This provides advantages
+in that a parameter obtained in this way is highly unlikely to be susceptible to special purpose attacks, or have any trapdoors in it.
+If the seed is present for a curve then the b parameter was generated in a verifiable fashion using that seed. The OpenSSL \s-1EC\s0 library
+does not use this seed value but does enable you to inspect it using EC_GROUP_get0_seed. This returns a pointer to a memory block
+containing the seed that was used. The length of the memory block can be obtained using EC_GROUP_get_seed_len. A number of the
+builtin curves within the library provide seed values that can be obtained. It is also possible to set a custom seed using
+EC_GROUP_set_seed and passing a pointer to a memory block, along with the length of the seed. Again, the \s-1EC\s0 library will not use
+this seed value, although it will be preserved in any \s-1ASN1\s0 based communications.
+.PP
+EC_GROUP_get_degree gets the degree of the field. For Fp fields this will be the number of bits in p. For F2^m fields this will be
+the value m.
+.PP
+The function EC_GROUP_check_discriminant calculates the discriminant for the curve and verifies that it is valid.
+For a curve defined over Fp the discriminant is given by the formula 4*a^3 + 27*b^2 whilst for F2^m curves the discriminant is
+simply b. In either case for the curve to be valid the discriminant must be non zero.
+.PP
+The function EC_GROUP_check performs a number of checks on a curve to verify that it is valid. Checks performed include
+verifying that the discriminant is non zero; that a generator has been defined; that the generator is on the curve and has
+the correct order.
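+.PP
+A minimal illustrative sketch of validating an existing \fBgroup\fR before
+use (assuming \fBgroup\fR has already been constructed):
+.PP
+.Vb 5
+\& BN_CTX *ctx = BN_CTX_new();
+\& if (ctx != NULL && EC_GROUP_check(group, ctx) == 1) {
+\&     /* the curve parameters and generator passed the checks */
+\& }
+\& BN_CTX_free(ctx);
+.Ve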
+.PP
+EC_GROUP_cmp compares \fBa\fR and \fBb\fR to determine whether they represent the same curve or not.
+.PP
+The functions EC_GROUP_get_basis_type, EC_GROUP_get_trinomial_basis and EC_GROUP_get_pentanomial_basis should only be called for curves
+defined over an F2^m field. Addition and multiplication operations within an F2^m field are performed using an irreducible polynomial
+function f(x). This function is either a trinomial of the form:
+.PP
+f(x) = x^m + x^k + 1 with m > k >= 1
+.PP
+or a pentanomial of the form:
+.PP
+f(x) = x^m + x^k3 + x^k2 + x^k1 + 1 with m > k3 > k2 > k1 >= 1
+.PP
+The function EC_GROUP_get_basis_type returns a \s-1NID\s0 identifying whether a trinomial or pentanomial is in use for the field. The
+function EC_GROUP_get_trinomial_basis must only be called where f(x) is of the trinomial form, and returns the value of \fBk\fR. Similarly
+the function EC_GROUP_get_pentanomial_basis must only be called where f(x) is of the pentanomial form, and returns the values of \fBk1\fR,
+\&\fBk2\fR and \fBk3\fR respectively.
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+The following functions return 1 on success or 0 on error: EC_GROUP_copy, EC_GROUP_set_generator, EC_GROUP_check,
+EC_GROUP_check_discriminant, EC_GROUP_get_trinomial_basis and EC_GROUP_get_pentanomial_basis.
+.PP
+EC_GROUP_dup returns a pointer to the duplicated curve, or \s-1NULL\s0 on error.
+.PP
+EC_GROUP_method_of returns the \s-1EC_METHOD\s0 implementation in use for the given curve or \s-1NULL\s0 on error.
+.PP
+EC_GROUP_get0_generator returns the generator for the given curve or \s-1NULL\s0 on error.
+.PP
+EC_GROUP_get_order, EC_GROUP_get_cofactor, EC_GROUP_get_curve_name, EC_GROUP_get_asn1_flag, EC_GROUP_get_point_conversion_form
+and EC_GROUP_get_degree return the order, cofactor, curve name (\s-1NID\s0), \s-1ASN1\s0 flag, point_conversion_form and degree for the
+specified curve respectively. If there is no curve name associated with a curve then EC_GROUP_get_curve_name will return 0.
+.PP
+EC_GROUP_get0_seed returns a pointer to the seed that was used to generate the parameter b, or \s-1NULL\s0 if the seed is not
+specified. EC_GROUP_get_seed_len returns the length of the seed or 0 if the seed is not specified.
+.PP
+EC_GROUP_set_seed returns the length of the seed that has been set. If the supplied seed is \s-1NULL,\s0 or the supplied seed length is
+0, the return value will be 1. On error 0 is returned.
+.PP
+EC_GROUP_cmp returns 0 if the curves are equal, 1 if they are not equal, or \-1 on error.
+.PP
+EC_GROUP_get_basis_type returns the values NID_X9_62_tpBasis or NID_X9_62_ppBasis (as defined in <openssl/obj_mac.h>) for a
+trinomial or pentanomial respectively. In the event of an error 0 is returned.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fIcrypto\fR\|(3), \fIec\fR\|(3), \fIEC_GROUP_new\fR\|(3),
+\&\fIEC_POINT_new\fR\|(3), \fIEC_POINT_add\fR\|(3), \fIEC_KEY_new\fR\|(3),
+\&\fIEC_GFp_simple_method\fR\|(3), \fId2i_ECPKParameters\fR\|(3)
diff --git a/secure/lib/libcrypto/man/EC_GROUP_new.3 b/secure/lib/libcrypto/man/EC_GROUP_new.3
new file mode 100644
index 0000000..583a5d3
--- /dev/null
+++ b/secure/lib/libcrypto/man/EC_GROUP_new.3
@@ -0,0 +1,230 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "EC_GROUP_new 3"
+.TH EC_GROUP_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+EC_GROUP_new, EC_GROUP_free, EC_GROUP_clear_free, EC_GROUP_new_curve_GFp, EC_GROUP_new_curve_GF2m, EC_GROUP_new_by_curve_name, EC_GROUP_set_curve_GFp, EC_GROUP_get_curve_GFp, EC_GROUP_set_curve_GF2m, EC_GROUP_get_curve_GF2m, EC_get_builtin_curves \- Functions for creating and destroying EC_GROUP objects.
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 2
+\& #include <openssl/ec.h>
+\& #include <openssl/bn.h>
+\&
+\& EC_GROUP *EC_GROUP_new(const EC_METHOD *meth);
+\& void EC_GROUP_free(EC_GROUP *group);
+\& void EC_GROUP_clear_free(EC_GROUP *group);
+\&
+\& EC_GROUP *EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
+\& EC_GROUP *EC_GROUP_new_curve_GF2m(const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
+\& EC_GROUP *EC_GROUP_new_by_curve_name(int nid);
+\&
+\& int EC_GROUP_set_curve_GFp(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
+\& int EC_GROUP_get_curve_GFp(const EC_GROUP *group, BIGNUM *p, BIGNUM *a, BIGNUM *b, BN_CTX *ctx);
+\& int EC_GROUP_set_curve_GF2m(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
+\& int EC_GROUP_get_curve_GF2m(const EC_GROUP *group, BIGNUM *p, BIGNUM *a, BIGNUM *b, BN_CTX *ctx);
+\&
+\& size_t EC_get_builtin_curves(EC_builtin_curve *r, size_t nitems);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+Within the library there are two forms of elliptic curve that are of interest. The first form comprises curves defined over the
+prime field Fp. The elements of Fp are the integers 0 to p\-1, where p is a prime number. This gives us a revised
+elliptic curve equation as follows:
+.PP
+y^2 mod p = x^3 + ax + b mod p
+.PP
+The second form comprises curves defined over a binary field F2^m where the elements of the field are integers of length at
+most m bits. For this form the elliptic curve equation is modified to:
+.PP
+y^2 + xy = x^3 + ax^2 + b (where b != 0)
+.PP
+Operations in a binary field are performed relative to an \fBirreducible polynomial\fR. All such curves supported by OpenSSL
+use a trinomial or a pentanomial for this parameter.
+.PP
+A new curve can be constructed by calling EC_GROUP_new, using the implementation provided by \fBmeth\fR (see
+\&\fIEC_GFp_simple_method\fR\|(3)). It is then necessary to call either EC_GROUP_set_curve_GFp or
+EC_GROUP_set_curve_GF2m as appropriate to create a curve defined over Fp or over F2^m respectively.
+.PP
+EC_GROUP_set_curve_GFp sets the curve parameters \fBp\fR, \fBa\fR and \fBb\fR for a curve over Fp stored in \fBgroup\fR.
+EC_GROUP_get_curve_GFp obtains the previously set curve parameters.
+.PP
+EC_GROUP_set_curve_GF2m sets the equivalent curve parameters for a curve over F2^m. In this case \fBp\fR represents
+the irreducible polynomial \- each bit represents a term in the polynomial. Therefore there will either be three
+or five bits set dependent on whether the polynomial is a trinomial or a pentanomial.
+EC_GROUP_get_curve_GF2m obtains the previously set curve parameters.
+.PP
+The functions EC_GROUP_new_curve_GFp and EC_GROUP_new_curve_GF2m are shortcuts for calling EC_GROUP_new and the
+appropriate EC_GROUP_set_curve function. An appropriate default implementation method will be used.
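+.PP
+The fragment below is an illustrative sketch of the calling sequence using
+a toy curve y^2 = x^3 + x + 1 over the field of 23 elements; the parameters
+are far too small for real use and error handling is abbreviated:
+.PP
+.Vb 12
+\& BN_CTX *ctx = BN_CTX_new();
+\& BIGNUM *p = BN_new(), *a = BN_new(), *b = BN_new();
+\& EC_GROUP *group = NULL;
+\& if (ctx != NULL && p != NULL && a != NULL && b != NULL
+\&         && BN_set_word(p, 23) && BN_set_word(a, 1) && BN_set_word(b, 1))
+\&     group = EC_GROUP_new_curve_GFp(p, a, b, ctx);
+\& /* ... use group ... */
+\& EC_GROUP_free(group);
+\& BN_free(b);
+\& BN_free(a);
+\& BN_free(p);
+\& BN_CTX_free(ctx);
+.Ve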
+.PP
+Whilst the library can be used to create any curve using the functions described above, there are also a number of
+predefined curves that are available. In order to obtain a list of all of the predefined curves, call the function
+EC_get_builtin_curves. The parameter \fBr\fR should be an array of EC_builtin_curve structures of size \fBnitems\fR. The function
+will populate the \fBr\fR array with information about the builtin curves. If \fBnitems\fR is less than the total number of
+curves available, then the first \fBnitems\fR curves will be returned. Otherwise the total number of curves will be
+provided. The return value is the total number of curves available (whether that number has been populated in \fBr\fR or
+not). Passing a \s-1NULL \s0\fBr\fR, or setting \fBnitems\fR to 0 will do nothing other than return the total number of curves available.
+The EC_builtin_curve structure is defined as follows:
+.PP
+.Vb 4
+\& typedef struct {
+\& int nid;
+\& const char *comment;
+\& } EC_builtin_curve;
+.Ve
+.PP
+Each EC_builtin_curve item has a unique integer id (\fBnid\fR), and a human readable comment string describing the curve.
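+.PP
+For example, the following illustrative sketch enumerates the builtin
+curves (OPENSSL_malloc is declared in <openssl/crypto.h> and puts in
+<stdio.h>; error handling is abbreviated):
+.PP
+.Vb 8
+\& size_t i, n = EC_get_builtin_curves(NULL, 0);
+\& EC_builtin_curve *curves = OPENSSL_malloc(n * sizeof(EC_builtin_curve));
+\& if (curves != NULL) {
+\&     EC_get_builtin_curves(curves, n);
+\&     for (i = 0; i < n; i++)
+\&         puts(curves[i].comment); /* the human readable description */
+\&     OPENSSL_free(curves);
+\& }
+.Ve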
+.PP
+In order to construct a builtin curve use the function EC_GROUP_new_by_curve_name and provide the \fBnid\fR of the curve to
+be constructed.
+.PP
+EC_GROUP_free frees the memory associated with the \s-1EC_GROUP.\s0
+.PP
+EC_GROUP_clear_free destroys any sensitive data held within the \s-1EC_GROUP\s0 and then frees its memory.
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+All EC_GROUP_new* functions return a pointer to the newly constructed group, or \s-1NULL\s0 on error.
+.PP
+EC_get_builtin_curves returns the number of builtin curves that are available.
+.PP
+EC_GROUP_set_curve_GFp, EC_GROUP_get_curve_GFp, EC_GROUP_set_curve_GF2m, EC_GROUP_get_curve_GF2m return 1 on success or 0 on error.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fIcrypto\fR\|(3), \fIec\fR\|(3), \fIEC_GROUP_copy\fR\|(3),
+\&\fIEC_POINT_new\fR\|(3), \fIEC_POINT_add\fR\|(3), \fIEC_KEY_new\fR\|(3),
+\&\fIEC_GFp_simple_method\fR\|(3), \fId2i_ECPKParameters\fR\|(3)
diff --git a/secure/lib/libcrypto/man/EC_KEY_new.3 b/secure/lib/libcrypto/man/EC_KEY_new.3
new file mode 100644
index 0000000..cf0d471
--- /dev/null
+++ b/secure/lib/libcrypto/man/EC_KEY_new.3
@@ -0,0 +1,239 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "EC_KEY_new 3"
+.TH EC_KEY_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+EC_KEY_new, EC_KEY_get_flags, EC_KEY_set_flags, EC_KEY_clear_flags, EC_KEY_new_by_curve_name, EC_KEY_free, EC_KEY_copy, EC_KEY_dup, EC_KEY_up_ref, EC_KEY_get0_group, EC_KEY_set_group, EC_KEY_get0_private_key, EC_KEY_set_private_key, EC_KEY_get0_public_key, EC_KEY_set_public_key, EC_KEY_get_enc_flags, EC_KEY_set_enc_flags, EC_KEY_get_conv_form, EC_KEY_set_conv_form, EC_KEY_get_key_method_data, EC_KEY_insert_key_method_data, EC_KEY_set_asn1_flag, EC_KEY_precompute_mult, EC_KEY_generate_key, EC_KEY_check_key, EC_KEY_set_public_key_affine_coordinates \- Functions for creating, destroying and manipulating EC_KEY objects.
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 2
+\& #include <openssl/ec.h>
+\& #include <openssl/bn.h>
+\&
+\& EC_KEY *EC_KEY_new(void);
+\& int EC_KEY_get_flags(const EC_KEY *key);
+\& void EC_KEY_set_flags(EC_KEY *key, int flags);
+\& void EC_KEY_clear_flags(EC_KEY *key, int flags);
+\& EC_KEY *EC_KEY_new_by_curve_name(int nid);
+\& void EC_KEY_free(EC_KEY *key);
+\& EC_KEY *EC_KEY_copy(EC_KEY *dst, const EC_KEY *src);
+\& EC_KEY *EC_KEY_dup(const EC_KEY *src);
+\& int EC_KEY_up_ref(EC_KEY *key);
+\& const EC_GROUP *EC_KEY_get0_group(const EC_KEY *key);
+\& int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group);
+\& const BIGNUM *EC_KEY_get0_private_key(const EC_KEY *key);
+\& int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *prv);
+\& const EC_POINT *EC_KEY_get0_public_key(const EC_KEY *key);
+\& int EC_KEY_set_public_key(EC_KEY *key, const EC_POINT *pub);
+\& point_conversion_form_t EC_KEY_get_conv_form(const EC_KEY *key);
+\& void EC_KEY_set_conv_form(EC_KEY *eckey, point_conversion_form_t cform);
+\& void *EC_KEY_get_key_method_data(EC_KEY *key,
+\& void *(*dup_func)(void *), void (*free_func)(void *), void (*clear_free_func)(void *));
+\& void EC_KEY_insert_key_method_data(EC_KEY *key, void *data,
+\& void *(*dup_func)(void *), void (*free_func)(void *), void (*clear_free_func)(void *));
+\& void EC_KEY_set_asn1_flag(EC_KEY *eckey, int asn1_flag);
+\& int EC_KEY_precompute_mult(EC_KEY *key, BN_CTX *ctx);
+\& int EC_KEY_generate_key(EC_KEY *key);
+\& int EC_KEY_check_key(const EC_KEY *key);
+\& int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x, BIGNUM *y);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+An \s-1EC_KEY\s0 represents a public key and (optionally) an associated private key. A new \s-1EC_KEY \s0(with no associated curve) can be constructed by calling EC_KEY_new.
+The reference count for the newly created \s-1EC_KEY\s0 is initially set to 1. A curve can be associated with the \s-1EC_KEY\s0 by calling
+EC_KEY_set_group.
+.PP
+Alternatively a new \s-1EC_KEY\s0 can be constructed by calling EC_KEY_new_by_curve_name and supplying the nid of the associated curve. Refer to \fIEC_GROUP_new\fR\|(3) for a description of curve names. This function simply wraps calls to EC_KEY_new and
+EC_GROUP_new_by_curve_name.
+.PP
+Calling EC_KEY_free decrements the reference count for the \s-1EC_KEY\s0 object, and if it has dropped to zero then frees the memory associated
+with it.
+.PP
+EC_KEY_copy copies the contents of the \s-1EC_KEY\s0 in \fBsrc\fR into \fBdst\fR.
+.PP
+EC_KEY_dup creates a new \s-1EC_KEY\s0 object and copies \fBsrc\fR into it.
+.PP
+EC_KEY_up_ref increments the reference count associated with the \s-1EC_KEY\s0 object.
+.PP
+EC_KEY_generate_key generates a new public and private key for the supplied \fBkey\fR object. \fBkey\fR must have an \s-1EC_GROUP\s0 object
+associated with it before calling this function. The private key is a random integer (0 < priv_key < order, where order is the order
+of the \s-1EC_GROUP\s0 object). The public key is an \s-1EC_POINT\s0 on the curve calculated by multiplying the generator for the curve by the
+private key.
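+.PP
+A minimal illustrative sketch of generating a key on a named curve (using
+NID_X9_62_prime256v1 from obj_mac.h purely as an example; error handling
+abbreviated):
+.PP
+.Vb 7
+\& EC_KEY *key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
+\& if (key != NULL) {
+\&     if (EC_KEY_generate_key(key) && EC_KEY_check_key(key)) {
+\&         /* key now holds a valid private/public key pair */
+\&     }
+\&     EC_KEY_free(key);
+\& }
+.Ve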
+.PP
+EC_KEY_check_key performs various sanity checks on the \s-1EC_KEY\s0 object to confirm that it is valid.
+.PP
+EC_KEY_set_public_key_affine_coordinates sets the public key for \fBkey\fR based on its affine co-ordinates, i.e. it constructs an \s-1EC_POINT\s0
+object based on the supplied \fBx\fR and \fBy\fR values and sets the public key to be this \s-1EC_POINT.\s0 It also performs certain sanity checks
+on the key to confirm that it is valid.
+.PP
+The functions EC_KEY_get0_group, EC_KEY_set_group, EC_KEY_get0_private_key, EC_KEY_set_private_key, EC_KEY_get0_public_key, and EC_KEY_set_public_key get and set the \s-1EC_GROUP\s0 object, the private key and the \s-1EC_POINT\s0 public key for the \fBkey\fR respectively.
+.PP
+The functions EC_KEY_get_conv_form and EC_KEY_set_conv_form get and set the point_conversion_form for the \fBkey\fR. For a description
+of point_conversion_forms please refer to \fIEC_POINT_new\fR\|(3).
+.PP
+EC_KEY_insert_key_method_data and EC_KEY_get_key_method_data enable the caller to associate arbitrary additional data specific to the
+elliptic curve scheme being used with the \s-1EC_KEY\s0 object. This data is treated as a \*(L"black box\*(R" by the ec library. The data to be stored by EC_KEY_insert_key_method_data is provided in the \fBdata\fR parameter, which must have associated functions for duplicating, freeing and \*(L"clear_freeing\*(R" the data item. If a subsequent EC_KEY_get_key_method_data call is issued, the functions for duplicating, freeing and \*(L"clear_freeing\*(R" the data item must be provided again, and they must be the same as they were when the data item was inserted.
+.PP
+EC_KEY_set_flags sets the flags in the \fBflags\fR parameter on the \s-1EC_KEY\s0 object. Any flags that are already set are left set. The currently defined standard flags are \s-1EC_FLAG_NON_FIPS_ALLOW\s0 and \s-1EC_FLAG_FIPS_CHECKED.\s0 In addition there is the flag \s-1EC_FLAG_COFACTOR_ECDH\s0 which is specific to \s-1ECDH\s0 and is defined in ecdh.h. EC_KEY_get_flags returns the current flags that are set for this \s-1EC_KEY.\s0 EC_KEY_clear_flags clears the flags indicated by the \fBflags\fR parameter. All other flags are left in their existing state.
+.PP
+EC_KEY_set_asn1_flag sets the asn1_flag on the underlying \s-1EC_GROUP\s0 object (if set). Refer to \fIEC_GROUP_copy\fR\|(3) for further information on the asn1_flag.
+.PP
+EC_KEY_precompute_mult stores multiples of the underlying \s-1EC_GROUP\s0 generator for faster point multiplication. See also \fIEC_POINT_add\fR\|(3).
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+EC_KEY_new, EC_KEY_new_by_curve_name and EC_KEY_dup return a pointer to the newly created \s-1EC_KEY\s0 object, or \s-1NULL\s0 on error.
+.PP
+EC_KEY_get_flags returns the flags associated with the \s-1EC_KEY\s0 object as an integer.
+.PP
+EC_KEY_copy returns a pointer to the destination key, or \s-1NULL\s0 on error.
+.PP
+EC_KEY_up_ref, EC_KEY_set_group, EC_KEY_set_private_key, EC_KEY_set_public_key, EC_KEY_precompute_mult, EC_KEY_generate_key, EC_KEY_check_key and EC_KEY_set_public_key_affine_coordinates return 1 on success or 0 on error.
+.PP
+EC_KEY_get0_group returns the \s-1EC_GROUP\s0 associated with the \s-1EC_KEY.\s0
+.PP
+EC_KEY_get0_private_key returns the private key associated with the \s-1EC_KEY.\s0
+.PP
+EC_KEY_get_conv_form returns the point_conversion_form for the \s-1EC_KEY.\s0
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fIcrypto\fR\|(3), \fIec\fR\|(3), \fIEC_GROUP_new\fR\|(3),
+\&\fIEC_GROUP_copy\fR\|(3), \fIEC_POINT_new\fR\|(3),
+\&\fIEC_POINT_add\fR\|(3),
+\&\fIEC_GFp_simple_method\fR\|(3),
+\&\fId2i_ECPKParameters\fR\|(3)
diff --git a/secure/lib/libcrypto/man/EC_POINT_add.3 b/secure/lib/libcrypto/man/EC_POINT_add.3
new file mode 100644
index 0000000..36f1ce1
--- /dev/null
+++ b/secure/lib/libcrypto/man/EC_POINT_add.3
@@ -0,0 +1,203 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "EC_POINT_add 3"
+.TH EC_POINT_add 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+EC_POINT_add, EC_POINT_dbl, EC_POINT_invert, EC_POINT_is_at_infinity, EC_POINT_is_on_curve, EC_POINT_cmp, EC_POINT_make_affine, EC_POINTs_make_affine, EC_POINTs_mul, EC_POINT_mul, EC_GROUP_precompute_mult, EC_GROUP_have_precompute_mult \- Functions for performing mathematical operations and tests on EC_POINT objects.
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 2
+\& #include <openssl/ec.h>
+\& #include <openssl/bn.h>
+\&
+\& int EC_POINT_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, const EC_POINT *b, BN_CTX *ctx);
+\& int EC_POINT_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, BN_CTX *ctx);
+\& int EC_POINT_invert(const EC_GROUP *group, EC_POINT *a, BN_CTX *ctx);
+\& int EC_POINT_is_at_infinity(const EC_GROUP *group, const EC_POINT *p);
+\& int EC_POINT_is_on_curve(const EC_GROUP *group, const EC_POINT *point, BN_CTX *ctx);
+\& int EC_POINT_cmp(const EC_GROUP *group, const EC_POINT *a, const EC_POINT *b, BN_CTX *ctx);
+\& int EC_POINT_make_affine(const EC_GROUP *group, EC_POINT *point, BN_CTX *ctx);
+\& int EC_POINTs_make_affine(const EC_GROUP *group, size_t num, EC_POINT *points[], BN_CTX *ctx);
+\& int EC_POINTs_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *n, size_t num, const EC_POINT *p[], const BIGNUM *m[], BN_CTX *ctx);
+\& int EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *n, const EC_POINT *q, const BIGNUM *m, BN_CTX *ctx);
+\& int EC_GROUP_precompute_mult(EC_GROUP *group, BN_CTX *ctx);
+\& int EC_GROUP_have_precompute_mult(const EC_GROUP *group);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+EC_POINT_add adds the two points \fBa\fR and \fBb\fR and places the result in \fBr\fR. Similarly EC_POINT_dbl doubles the point \fBa\fR and places the
+result in \fBr\fR. In both cases it is valid for \fBr\fR to be one of \fBa\fR or \fBb\fR.
+.PP
+EC_POINT_invert calculates the inverse of the supplied point \fBa\fR. The result is placed back in \fBa\fR.
+.PP
+The function EC_POINT_is_at_infinity tests whether the supplied point is at infinity or not.
+.PP
+EC_POINT_is_on_curve tests whether the supplied point is on the curve or not.
+.PP
+EC_POINT_cmp compares the two supplied points and tests whether or not they are equal.
+.PP
+The functions EC_POINT_make_affine and EC_POINTs_make_affine force the internal representation of the \s-1EC_POINT\s0(s) into the affine
+co-ordinate system. In the case of EC_POINTs_make_affine the value \fBnum\fR provides the number of points in the array \fBpoints\fR to be
+forced.
+.PP
+EC_POINT_mul calculates the value generator * \fBn\fR + \fBq\fR * \fBm\fR and stores the result in \fBr\fR. The value \fBn\fR may be \s-1NULL\s0 in which case the result is just \fBq\fR * \fBm\fR.
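+.PP
+As an illustrative sketch (assuming an existing \fBgroup\fR and an existing
+private scalar \fBpriv\fR held in a \s-1BIGNUM,\s0 with error handling
+abbreviated), the public point for a private key can be derived as follows:
+.PP
+.Vb 8
+\& BN_CTX *ctx = BN_CTX_new();
+\& EC_POINT *pub = EC_POINT_new(group);
+\& if (ctx != NULL && pub != NULL
+\&         && EC_POINT_mul(group, pub, priv, NULL, NULL, ctx)) {
+\&     /* pub now holds generator * priv */
+\& }
+\& EC_POINT_free(pub);
+\& BN_CTX_free(ctx);
+.Ve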
+.PP
+EC_POINTs_mul calculates the value generator * \fBn\fR + \fBq[0]\fR * \fBm[0]\fR + ... + \fBq[num\-1]\fR * \fBm[num\-1]\fR. As for EC_POINT_mul the value
+\&\fBn\fR may be \s-1NULL.\s0
+.PP
+The function EC_GROUP_precompute_mult stores multiples of the generator for faster point multiplication, whilst
+EC_GROUP_have_precompute_mult tests whether precomputation has already been done. See \fIEC_GROUP_copy\fR\|(3) for information
+about the generator.
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+The following functions return 1 on success or 0 on error: EC_POINT_add, EC_POINT_dbl, EC_POINT_invert, EC_POINT_make_affine,
+EC_POINTs_make_affine, EC_POINT_mul, EC_POINTs_mul and EC_GROUP_precompute_mult.
+.PP
+EC_POINT_is_at_infinity returns 1 if the point is at infinity, or 0 otherwise.
+.PP
+EC_POINT_is_on_curve returns 1 if the point is on the curve, 0 if not, or \-1 on error.
+.PP
+EC_POINT_cmp returns 1 if the points are not equal, 0 if they are, or \-1 on error.
+.PP
+EC_GROUP_have_precompute_mult returns 1 if a precomputation has been done, or 0 if not.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fIcrypto\fR\|(3), \fIec\fR\|(3), \fIEC_GROUP_new\fR\|(3), \fIEC_GROUP_copy\fR\|(3),
+\&\fIEC_POINT_new\fR\|(3), \fIEC_KEY_new\fR\|(3),
+\&\fIEC_GFp_simple_method\fR\|(3), \fId2i_ECPKParameters\fR\|(3)
diff --git a/secure/lib/libcrypto/man/EC_POINT_new.3 b/secure/lib/libcrypto/man/EC_POINT_new.3
new file mode 100644
index 0000000..b28c7c1
--- /dev/null
+++ b/secure/lib/libcrypto/man/EC_POINT_new.3
@@ -0,0 +1,260 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "EC_POINT_new 3"
+.TH EC_POINT_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+EC_POINT_new, EC_POINT_free, EC_POINT_clear_free, EC_POINT_copy, EC_POINT_dup, EC_POINT_method_of, EC_POINT_set_to_infinity, EC_POINT_set_Jprojective_coordinates_GFp, EC_POINT_get_Jprojective_coordinates_GFp, EC_POINT_set_affine_coordinates_GFp, EC_POINT_get_affine_coordinates_GFp, EC_POINT_set_compressed_coordinates_GFp, EC_POINT_set_affine_coordinates_GF2m, EC_POINT_get_affine_coordinates_GF2m, EC_POINT_set_compressed_coordinates_GF2m, EC_POINT_point2oct, EC_POINT_oct2point, EC_POINT_point2bn, EC_POINT_bn2point, EC_POINT_point2hex, EC_POINT_hex2point \- Functions for creating, destroying and manipulating EC_POINT objects.
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 2
+\& #include <openssl/ec.h>
+\& #include <openssl/bn.h>
+\&
+\& EC_POINT *EC_POINT_new(const EC_GROUP *group);
+\& void EC_POINT_free(EC_POINT *point);
+\& void EC_POINT_clear_free(EC_POINT *point);
+\& int EC_POINT_copy(EC_POINT *dst, const EC_POINT *src);
+\& EC_POINT *EC_POINT_dup(const EC_POINT *src, const EC_GROUP *group);
+\& const EC_METHOD *EC_POINT_method_of(const EC_POINT *point);
+\& int EC_POINT_set_to_infinity(const EC_GROUP *group, EC_POINT *point);
+\& int EC_POINT_set_Jprojective_coordinates_GFp(const EC_GROUP *group, EC_POINT *p,
+\& const BIGNUM *x, const BIGNUM *y, const BIGNUM *z, BN_CTX *ctx);
+\& int EC_POINT_get_Jprojective_coordinates_GFp(const EC_GROUP *group,
+\& const EC_POINT *p, BIGNUM *x, BIGNUM *y, BIGNUM *z, BN_CTX *ctx);
+\& int EC_POINT_set_affine_coordinates_GFp(const EC_GROUP *group, EC_POINT *p,
+\& const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx);
+\& int EC_POINT_get_affine_coordinates_GFp(const EC_GROUP *group,
+\& const EC_POINT *p, BIGNUM *x, BIGNUM *y, BN_CTX *ctx);
+\& int EC_POINT_set_compressed_coordinates_GFp(const EC_GROUP *group, EC_POINT *p,
+\& const BIGNUM *x, int y_bit, BN_CTX *ctx);
+\& int EC_POINT_set_affine_coordinates_GF2m(const EC_GROUP *group, EC_POINT *p,
+\& const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx);
+\& int EC_POINT_get_affine_coordinates_GF2m(const EC_GROUP *group,
+\& const EC_POINT *p, BIGNUM *x, BIGNUM *y, BN_CTX *ctx);
+\& int EC_POINT_set_compressed_coordinates_GF2m(const EC_GROUP *group, EC_POINT *p,
+\& const BIGNUM *x, int y_bit, BN_CTX *ctx);
+\& size_t EC_POINT_point2oct(const EC_GROUP *group, const EC_POINT *p,
+\& point_conversion_form_t form,
+\& unsigned char *buf, size_t len, BN_CTX *ctx);
+\& int EC_POINT_oct2point(const EC_GROUP *group, EC_POINT *p,
+\& const unsigned char *buf, size_t len, BN_CTX *ctx);
+\& BIGNUM *EC_POINT_point2bn(const EC_GROUP *, const EC_POINT *,
+\& point_conversion_form_t form, BIGNUM *, BN_CTX *);
+\& EC_POINT *EC_POINT_bn2point(const EC_GROUP *, const BIGNUM *,
+\& EC_POINT *, BN_CTX *);
+\& char *EC_POINT_point2hex(const EC_GROUP *, const EC_POINT *,
+\& point_conversion_form_t form, BN_CTX *);
+\& EC_POINT *EC_POINT_hex2point(const EC_GROUP *, const char *,
+\& EC_POINT *, BN_CTX *);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+An \s-1EC_POINT\s0 represents a point on a curve. A new point is constructed by calling the function EC_POINT_new and providing the \fBgroup\fR
+object that the point relates to.
+.PP
+EC_POINT_free frees the memory associated with the \s-1EC_POINT.\s0
+.PP
+EC_POINT_clear_free destroys any sensitive data held within the \s-1EC_POINT\s0 and then frees its memory.
+.PP
+EC_POINT_copy copies the point \fBsrc\fR into \fBdst\fR. Both \fBsrc\fR and \fBdst\fR must use the same \s-1EC_METHOD.\s0
+.PP
+EC_POINT_dup creates a new \s-1EC_POINT\s0 object and copies the content from \fBsrc\fR to the newly created
+\&\s-1EC_POINT\s0 object.
+.PP
+EC_POINT_method_of obtains the \s-1EC_METHOD\s0 associated with \fBpoint\fR.
+.PP
+One valid point on a curve is the special point at infinity. A point is set to be at infinity by calling EC_POINT_set_to_infinity.
+.PP
+The affine co-ordinates for a point describe a point in terms of its x and y position. The functions
+EC_POINT_set_affine_coordinates_GFp and EC_POINT_set_affine_coordinates_GF2m set the \fBx\fR and \fBy\fR co-ordinates for the point
+\&\fBp\fR defined over the curve given in \fBgroup\fR.
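+.PP
+As an illustration, a point on a prime curve can be populated from two
+existing BIGNUMs \fBx\fR and \fBy\fR as follows (a minimal sketch; error
+checking of the allocations is omitted):
+.PP
+.Vb 9
+\& /* x and y hold the desired affine coordinates */
+\& EC_GROUP *group = EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1);
+\& EC_POINT *p = EC_POINT_new(group);
+\& BN_CTX *bnctx = BN_CTX_new();
+\&
+\& if (EC_POINT_set_affine_coordinates_GFp(group, p, x, y, bnctx) != 1) {
+\&     /* error setting coordinates */
+\& }
+.Ve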
+.PP
+As well as the affine co-ordinates, a point can alternatively be described in terms of its Jacobian
+projective co-ordinates (for Fp curves only). Jacobian projective co-ordinates are expressed as three values x, y and z. Working in
+this co-ordinate system provides more efficient point multiplication operations.
+A mapping exists between Jacobian projective co-ordinates and affine co-ordinates. A Jacobian projective co-ordinate (x, y, z) can be written as an affine co-ordinate as (x/(z^2), y/(z^3)). Conversion from affine to Jacobian projective co-ordinates is simple: the co-ordinate (x, y) is
+mapped to (x, y, 1). To set or get the projective co-ordinates use EC_POINT_set_Jprojective_coordinates_GFp and
+EC_POINT_get_Jprojective_coordinates_GFp respectively.
+.PP
+Points can also be described in terms of their compressed co-ordinates. For a point (x, y), any given value of x such that the point is
+on the curve admits only two possible values for y. Therefore a point can be set using the EC_POINT_set_compressed_coordinates_GFp
+and EC_POINT_set_compressed_coordinates_GF2m functions where \fBx\fR is the x co-ordinate and \fBy_bit\fR is a value 0 or 1 to identify which of
+the two possible values for y should be used.
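+.PP
+Using the \fBgroup\fR, \fBp\fR and \fBbnctx\fR from the sketch above, a point can
+be recovered from its compressed form like this (again only a sketch):
+.PP
+.Vb 4
+\& /* y_bit == 1 selects the candidate y whose low bit is set */
+\& if (EC_POINT_set_compressed_coordinates_GFp(group, p, x, 1, bnctx) != 1) {
+\&     /* no valid point with this x exists on the curve */
+\& }
+.Ve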
+.PP
+In addition EC_POINTs can be converted to and from various external
+representations. Supported representations are octet strings, BIGNUMs and
+hexadecimal. Octet strings are stored in a buffer along with an associated
+buffer length. A point held in a \s-1BIGNUM\s0 is calculated by converting the point to
+an octet string and then converting that octet string into a \s-1BIGNUM\s0 integer.
+Points in hexadecimal format are stored in a \s-1NULL\s0 terminated character string
+where each character is one of the printable values 0\-9 or A\-F (or a\-f).
+.PP
+The functions EC_POINT_point2oct, EC_POINT_oct2point, EC_POINT_point2bn, EC_POINT_bn2point, EC_POINT_point2hex and EC_POINT_hex2point convert
+an \s-1EC_POINT\s0 to and from the octet string, \s-1BIGNUM\s0 and hexadecimal formats respectively.
+.PP
+The function EC_POINT_point2oct must be supplied with a buffer long enough to store the octet string. The return value provides the number of
+octets stored. Calling the function with a \s-1NULL\s0 buffer will not perform the conversion but will still return the required buffer length.
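+.PP
+A common pattern is therefore to call the function twice, once to size the
+buffer and once to fill it, for example (a sketch reusing the \fBgroup\fR,
+\&\fBp\fR and \fBbnctx\fR from the earlier fragment; error checking omitted):
+.PP
+.Vb 7
+\& size_t len = EC_POINT_point2oct(group, p, POINT_CONVERSION_UNCOMPRESSED,
+\&                                 NULL, 0, bnctx);
+\& unsigned char *buf = OPENSSL_malloc(len);
+\&
+\& if (buf != NULL)
+\&     EC_POINT_point2oct(group, p, POINT_CONVERSION_UNCOMPRESSED,
+\&                        buf, len, bnctx);
+.Ve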
+.PP
+The function EC_POINT_point2hex will allocate sufficient memory to store the hexadecimal string. It is the caller's responsibility to free
+this memory with a subsequent call to \fIOPENSSL_free()\fR.
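+.PP
+For example (a sketch, with the same objects as above):
+.PP
+.Vb 6
+\& char *hex = EC_POINT_point2hex(group, p, POINT_CONVERSION_COMPRESSED, bnctx);
+\&
+\& if (hex != NULL) {
+\&     printf("point: %s\en", hex);
+\&     OPENSSL_free(hex);
+\& }
+.Ve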
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+EC_POINT_new and EC_POINT_dup return the newly allocated \s-1EC_POINT\s0 or \s-1NULL\s0 on error.
+.PP
+The following functions return 1 on success or 0 on error: EC_POINT_copy, EC_POINT_set_to_infinity, EC_POINT_set_Jprojective_coordinates_GFp,
+EC_POINT_get_Jprojective_coordinates_GFp, EC_POINT_set_affine_coordinates_GFp, EC_POINT_get_affine_coordinates_GFp,
+EC_POINT_set_compressed_coordinates_GFp, EC_POINT_set_affine_coordinates_GF2m, EC_POINT_get_affine_coordinates_GF2m,
+EC_POINT_set_compressed_coordinates_GF2m and EC_POINT_oct2point.
+.PP
+EC_POINT_method_of returns the \s-1EC_METHOD\s0 associated with the supplied \s-1EC_POINT.\s0
+.PP
+EC_POINT_point2oct returns the length of the required buffer, or 0 on error.
+.PP
+EC_POINT_point2bn returns the pointer to the \s-1BIGNUM\s0 supplied, or \s-1NULL\s0 on error.
+.PP
+EC_POINT_bn2point returns the pointer to the \s-1EC_POINT\s0 supplied, or \s-1NULL\s0 on error.
+.PP
+EC_POINT_point2hex returns a pointer to the hex string, or \s-1NULL\s0 on error.
+.PP
+EC_POINT_hex2point returns the pointer to the \s-1EC_POINT\s0 supplied, or \s-1NULL\s0 on error.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fIcrypto\fR\|(3), \fIec\fR\|(3), \fIEC_GROUP_new\fR\|(3), \fIEC_GROUP_copy\fR\|(3),
+\&\fIEC_POINT_add\fR\|(3), \fIEC_KEY_new\fR\|(3),
+\&\fIEC_GFp_simple_method\fR\|(3), \fId2i_ECPKParameters\fR\|(3)
diff --git a/secure/lib/libcrypto/man/ERR_GET_LIB.3 b/secure/lib/libcrypto/man/ERR_GET_LIB.3
index 9b5ac95..d7eb967 100644
--- a/secure/lib/libcrypto/man/ERR_GET_LIB.3
+++ b/secure/lib/libcrypto/man/ERR_GET_LIB.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ERR_GET_LIB 3"
-.TH ERR_GET_LIB 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ERR_GET_LIB 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ERR_clear_error.3 b/secure/lib/libcrypto/man/ERR_clear_error.3
index 914086d..e98d053 100644
--- a/secure/lib/libcrypto/man/ERR_clear_error.3
+++ b/secure/lib/libcrypto/man/ERR_clear_error.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ERR_clear_error 3"
-.TH ERR_clear_error 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ERR_clear_error 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ERR_error_string.3 b/secure/lib/libcrypto/man/ERR_error_string.3
index 2899929..470342a 100644
--- a/secure/lib/libcrypto/man/ERR_error_string.3
+++ b/secure/lib/libcrypto/man/ERR_error_string.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ERR_error_string 3"
-.TH ERR_error_string 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ERR_error_string 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ERR_get_error.3 b/secure/lib/libcrypto/man/ERR_get_error.3
index f6f85d7..a6ade52 100644
--- a/secure/lib/libcrypto/man/ERR_get_error.3
+++ b/secure/lib/libcrypto/man/ERR_get_error.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ERR_get_error 3"
-.TH ERR_get_error 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ERR_get_error 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ERR_load_crypto_strings.3 b/secure/lib/libcrypto/man/ERR_load_crypto_strings.3
index 4918d0c..9c42a1a 100644
--- a/secure/lib/libcrypto/man/ERR_load_crypto_strings.3
+++ b/secure/lib/libcrypto/man/ERR_load_crypto_strings.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ERR_load_crypto_strings 3"
-.TH ERR_load_crypto_strings 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ERR_load_crypto_strings 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ERR_load_strings.3 b/secure/lib/libcrypto/man/ERR_load_strings.3
index 08af931..ea5990d 100644
--- a/secure/lib/libcrypto/man/ERR_load_strings.3
+++ b/secure/lib/libcrypto/man/ERR_load_strings.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ERR_load_strings 3"
-.TH ERR_load_strings 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ERR_load_strings 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ERR_print_errors.3 b/secure/lib/libcrypto/man/ERR_print_errors.3
index d54345a..3b01d25 100644
--- a/secure/lib/libcrypto/man/ERR_print_errors.3
+++ b/secure/lib/libcrypto/man/ERR_print_errors.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ERR_print_errors 3"
-.TH ERR_print_errors 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ERR_print_errors 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ERR_put_error.3 b/secure/lib/libcrypto/man/ERR_put_error.3
index 37cc32f..694b728 100644
--- a/secure/lib/libcrypto/man/ERR_put_error.3
+++ b/secure/lib/libcrypto/man/ERR_put_error.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ERR_put_error 3"
-.TH ERR_put_error 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ERR_put_error 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ERR_remove_state.3 b/secure/lib/libcrypto/man/ERR_remove_state.3
index a0ece2b..57009b3 100644
--- a/secure/lib/libcrypto/man/ERR_remove_state.3
+++ b/secure/lib/libcrypto/man/ERR_remove_state.3
@@ -133,34 +133,47 @@
.\" ========================================================================
.\"
.IX Title "ERR_remove_state 3"
-.TH ERR_remove_state 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ERR_remove_state 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-ERR_remove_state \- free a thread's error queue
+ERR_remove_thread_state, ERR_remove_state \- free a thread's error queue
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
\& #include <openssl/err.h>
\&
+\& void ERR_remove_thread_state(const CRYPTO_THREADID *tid);
+.Ve
+.PP
+Deprecated:
+.PP
+.Vb 1
\& void ERR_remove_state(unsigned long pid);
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
-\&\fIERR_remove_state()\fR frees the error queue associated with thread \fBpid\fR.
-If \fBpid\fR == 0, the current thread will have its error queue removed.
+\&\fIERR_remove_thread_state()\fR frees the error queue associated with thread \fBtid\fR.
+If \fBtid\fR == \fB\s-1NULL\s0\fR, the current thread will have its error queue removed.
.PP
Since error queue data structures are allocated automatically for new
threads, they must be freed when threads are terminated in order to
avoid memory leaks.
+.PP
+ERR_remove_state is deprecated and has been replaced by
+ERR_remove_thread_state. Since threads in OpenSSL are no longer identified
+by unsigned long values, any argument to this function is ignored. Calling
+ERR_remove_state is equivalent to \fBERR_remove_thread_state(\s-1NULL\s0)\fR.
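+.PP
+For example, a pthreads worker might release its queue just before it
+terminates (a minimal sketch):
+.PP
+.Vb 7
+\& void *worker(void *arg)
+\& {
+\&     /* ... calls into OpenSSL that may queue errors ... */
+\&
+\&     ERR_remove_thread_state(NULL);  /* NULL means the current thread */
+\&     return NULL;
+\& }
+.Ve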
.SH "RETURN VALUE"
.IX Header "RETURN VALUE"
-\&\fIERR_remove_state()\fR returns no value.
+ERR_remove_thread_state and \fIERR_remove_state()\fR return no value.
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fIerr\fR\|(3)
.SH "HISTORY"
.IX Header "HISTORY"
-\&\fIERR_remove_state()\fR is available in all versions of SSLeay and OpenSSL.
+\&\fIERR_remove_state()\fR is available in all versions of SSLeay and OpenSSL. It
+was deprecated in OpenSSL 1.0.0, when ERR_remove_thread_state was introduced
+and thread IDs replaced 'unsigned long' values as the means of identifying
+threads.
diff --git a/secure/lib/libcrypto/man/ERR_set_mark.3 b/secure/lib/libcrypto/man/ERR_set_mark.3
index fada986..29ed839 100644
--- a/secure/lib/libcrypto/man/ERR_set_mark.3
+++ b/secure/lib/libcrypto/man/ERR_set_mark.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ERR_set_mark 3"
-.TH ERR_set_mark 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ERR_set_mark 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_BytesToKey.3 b/secure/lib/libcrypto/man/EVP_BytesToKey.3
index 20c608f..b11c92d 100644
--- a/secure/lib/libcrypto/man/EVP_BytesToKey.3
+++ b/secure/lib/libcrypto/man/EVP_BytesToKey.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_BytesToKey 3"
-.TH EVP_BytesToKey 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_BytesToKey 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -172,8 +172,8 @@ If the total key and \s-1IV\s0 length is less than the digest length and
\&\fB\s-1MD5\s0\fR is used then the derivation algorithm is compatible with PKCS#5 v1.5
otherwise a non standard extension is used to derive the extra data.
.PP
-Newer applications should use more standard algorithms such as PKCS#5
-v2.0 for key derivation.
+Newer applications should use a more modern algorithm such as \s-1PBKDF2\s0 as
+defined in PKCS#5v2.1 and provided by \s-1PKCS5_PBKDF2_HMAC.\s0
.SH "KEY DERIVATION ALGORITHM"
.IX Header "KEY DERIVATION ALGORITHM"
The key and \s-1IV\s0 is derived by concatenating D_1, D_2, etc until
@@ -191,7 +191,10 @@ The initial bytes are used for the key and the subsequent bytes for
the \s-1IV.\s0
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
-\&\fIEVP_BytesToKey()\fR returns the size of the derived key in bytes.
+If \fBdata\fR is \s-1NULL,\s0 then \fIEVP_BytesToKey()\fR returns the number of bytes
+needed to store the derived key.
+Otherwise, \fIEVP_BytesToKey()\fR returns the size of the derived key in bytes,
+or 0 on error.
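+.PP
+For example, the key size for a given cipher and digest can be queried before
+the actual derivation is performed (a sketch; \fBsalt\fR, \fBpassword\fR and
+\&\fBpasslen\fR are assumed to be supplied by the caller):
+.PP
+.Vb 11
+\& unsigned char key[EVP_MAX_KEY_LENGTH], iv[EVP_MAX_IV_LENGTH];
+\&
+\& /* data == NULL: only the key size (32 here) is reported */
+\& int keylen = EVP_BytesToKey(EVP_aes_256_cbc(), EVP_sha256(), salt,
+\&                             NULL, 0, 1, NULL, NULL);
+\&
+\& /* perform the derivation proper */
+\& if (EVP_BytesToKey(EVP_aes_256_cbc(), EVP_sha256(), salt,
+\&                    password, passlen, 1, key, iv) == 0) {
+\&     /* error */
+\& }
+.Ve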
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fIevp\fR\|(3), \fIrand\fR\|(3),
diff --git a/secure/lib/libcrypto/man/EVP_DigestInit.3 b/secure/lib/libcrypto/man/EVP_DigestInit.3
index 5df74cb..dcdff95 100644
--- a/secure/lib/libcrypto/man/EVP_DigestInit.3
+++ b/secure/lib/libcrypto/man/EVP_DigestInit.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_DigestInit 3"
-.TH EVP_DigestInit 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_DigestInit 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -141,10 +141,10 @@
.SH "NAME"
EVP_MD_CTX_init, EVP_MD_CTX_create, EVP_DigestInit_ex, EVP_DigestUpdate,
EVP_DigestFinal_ex, EVP_MD_CTX_cleanup, EVP_MD_CTX_destroy, EVP_MAX_MD_SIZE,
-EVP_MD_CTX_copy_ex, EVP_MD_CTX_copy, EVP_MD_type, EVP_MD_pkey_type, EVP_MD_size,
-EVP_MD_block_size, EVP_MD_CTX_md, EVP_MD_CTX_size, EVP_MD_CTX_block_size, EVP_MD_CTX_type,
-EVP_md_null, EVP_md2, EVP_md5, EVP_sha, EVP_sha1, EVP_sha224, EVP_sha256,
-EVP_sha384, EVP_sha512, EVP_dss, EVP_dss1, EVP_mdc2,
+EVP_MD_CTX_copy_ex, EVP_DigestInit, EVP_DigestFinal, EVP_MD_CTX_copy, EVP_MD_type,
+EVP_MD_pkey_type, EVP_MD_size, EVP_MD_block_size, EVP_MD_CTX_md, EVP_MD_CTX_size,
+EVP_MD_CTX_block_size, EVP_MD_CTX_type, EVP_md_null, EVP_md2, EVP_md5, EVP_sha, EVP_sha1,
+EVP_sha224, EVP_sha256, EVP_sha384, EVP_sha512, EVP_dss, EVP_dss1, EVP_mdc2,
EVP_ripemd160, EVP_get_digestbyname, EVP_get_digestbynid, EVP_get_digestbyobj \-
EVP digest routines
.SH "SYNOPSIS"
@@ -407,7 +407,7 @@ and \fIEVP_DigestFinal_ex()\fR were added in OpenSSL 0.9.7.
.PP
\&\fIEVP_md_null()\fR, \fIEVP_md2()\fR, \fIEVP_md5()\fR, \fIEVP_sha()\fR, \fIEVP_sha1()\fR,
\&\fIEVP_dss()\fR, \fIEVP_dss1()\fR, \fIEVP_mdc2()\fR and \fIEVP_ripemd160()\fR were
-changed to return truely const \s-1EVP_MD\s0 * in OpenSSL 0.9.7.
+changed to return truly const \s-1EVP_MD\s0 * in OpenSSL 0.9.7.
.PP
The link between digests and signing algorithms was fixed in OpenSSL 1.0 and
later, so now \fIEVP_sha1()\fR can be used with \s-1RSA\s0 and \s-1DSA\s0; there is no need to
diff --git a/secure/lib/libcrypto/man/EVP_DigestSignInit.3 b/secure/lib/libcrypto/man/EVP_DigestSignInit.3
index 4cf71d5..e607489 100644
--- a/secure/lib/libcrypto/man/EVP_DigestSignInit.3
+++ b/secure/lib/libcrypto/man/EVP_DigestSignInit.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_DigestSignInit 3"
-.TH EVP_DigestSignInit 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_DigestSignInit 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_DigestVerifyInit.3 b/secure/lib/libcrypto/man/EVP_DigestVerifyInit.3
index dfae90f..7b3cea1 100644
--- a/secure/lib/libcrypto/man/EVP_DigestVerifyInit.3
+++ b/secure/lib/libcrypto/man/EVP_DigestVerifyInit.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_DigestVerifyInit 3"
-.TH EVP_DigestVerifyInit 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_DigestVerifyInit 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -148,7 +148,7 @@ EVP_DigestVerifyInit, EVP_DigestVerifyUpdate, EVP_DigestVerifyFinal \- EVP signa
\& int EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx,
\& const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey);
\& int EVP_DigestVerifyUpdate(EVP_MD_CTX *ctx, const void *d, unsigned int cnt);
-\& int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, unsigned char *sig, size_t siglen);
+\& int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const unsigned char *sig, size_t siglen);
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
diff --git a/secure/lib/libcrypto/man/EVP_EncryptInit.3 b/secure/lib/libcrypto/man/EVP_EncryptInit.3
index e8826f5..87ad242 100644
--- a/secure/lib/libcrypto/man/EVP_EncryptInit.3
+++ b/secure/lib/libcrypto/man/EVP_EncryptInit.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_EncryptInit 3"
-.TH EVP_EncryptInit 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_EncryptInit 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -153,7 +153,17 @@ EVP_CIPHER_CTX_nid, EVP_CIPHER_CTX_block_size, EVP_CIPHER_CTX_key_length,
EVP_CIPHER_CTX_iv_length, EVP_CIPHER_CTX_get_app_data,
EVP_CIPHER_CTX_set_app_data, EVP_CIPHER_CTX_type, EVP_CIPHER_CTX_flags,
EVP_CIPHER_CTX_mode, EVP_CIPHER_param_to_asn1, EVP_CIPHER_asn1_to_param,
-EVP_CIPHER_CTX_set_padding \- EVP cipher routines
+EVP_CIPHER_CTX_set_padding, EVP_enc_null, EVP_des_cbc, EVP_des_ecb,
+EVP_des_cfb, EVP_des_ofb, EVP_des_ede_cbc, EVP_des_ede, EVP_des_ede_ofb,
+EVP_des_ede_cfb, EVP_des_ede3_cbc, EVP_des_ede3, EVP_des_ede3_ofb,
+EVP_des_ede3_cfb, EVP_desx_cbc, EVP_rc4, EVP_rc4_40, EVP_idea_cbc,
+EVP_idea_ecb, EVP_idea_cfb, EVP_idea_ofb, EVP_idea_cbc, EVP_rc2_cbc,
+EVP_rc2_ecb, EVP_rc2_cfb, EVP_rc2_ofb, EVP_rc2_40_cbc, EVP_rc2_64_cbc,
+EVP_bf_cbc, EVP_bf_ecb, EVP_bf_cfb, EVP_bf_ofb, EVP_cast5_cbc,
+EVP_cast5_ecb, EVP_cast5_cfb, EVP_cast5_ofb, EVP_rc5_32_12_16_cbc,
+EVP_rc5_32_12_16_ecb, EVP_rc5_32_12_16_cfb, EVP_rc5_32_12_16_ofb,
+EVP_aes_128_gcm, EVP_aes_192_gcm, EVP_aes_256_gcm, EVP_aes_128_ccm,
+EVP_aes_192_ccm, EVP_aes_256_ccm \- EVP cipher routines
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
@@ -368,8 +378,7 @@ or the parameters cannot be set (for example the \s-1RC2\s0 effective key length
is not supported.
.PP
\&\fIEVP_CIPHER_CTX_ctrl()\fR allows various cipher specific parameters to be determined
-and set. Currently only the \s-1RC2\s0 effective key length and the number of rounds of
-\&\s-1RC5\s0 can be set.
+and set.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
\&\fIEVP_EncryptInit_ex()\fR, \fIEVP_EncryptUpdate()\fR and \fIEVP_EncryptFinal_ex()\fR
@@ -457,6 +466,92 @@ length cipher.
\&\s-1RC5\s0 encryption algorithm in \s-1CBC, ECB, CFB\s0 and \s-1OFB\s0 modes respectively. This is a variable key length
cipher with an additional \*(L"number of rounds\*(R" parameter. By default the key length is set to 128
bits and 12 rounds.
+.IP "EVP_aes_128_gcm(void), EVP_aes_192_gcm(void), EVP_aes_256_gcm(void)" 4
+.IX Item "EVP_aes_128_gcm(void), EVP_aes_192_gcm(void), EVP_aes_256_gcm(void)"
+\&\s-1AES\s0 Galois Counter Mode (\s-1GCM\s0) for 128, 192 and 256 bit keys respectively.
+These ciphers require additional control operations to function correctly: see
+\&\*(L"\s-1GCM\s0 mode\*(R" section below for details.
+.IP "EVP_aes_128_ccm(void), EVP_aes_192_ccm(void), EVP_aes_256_ccm(void)" 4
+.IX Item "EVP_aes_128_ccm(void), EVP_aes_192_ccm(void), EVP_aes_256_ccm(void)"
+\&\s-1AES\s0 Counter with CBC-MAC Mode (\s-1CCM\s0) for 128, 192 and 256 bit keys respectively.
+These ciphers require additional control operations to function correctly: see
+the \*(L"\s-1CCM\s0 Mode\*(R" section below for details.
+.SH "GCM Mode"
+.IX Header "GCM Mode"
+For \s-1GCM\s0 mode ciphers the behaviour of the \s-1EVP\s0 interface is subtly altered and
+several \s-1GCM\s0 specific ctrl operations are supported.
+.PP
+To specify any additional authenticated data (\s-1AAD\s0) a call to \fIEVP_CipherUpdate()\fR,
+\&\fIEVP_EncryptUpdate()\fR or \fIEVP_DecryptUpdate()\fR should be made with the output
+parameter \fBout\fR set to \fB\s-1NULL\s0\fR.
+.PP
+When decrypting the return value of \fIEVP_DecryptFinal()\fR or \fIEVP_CipherFinal()\fR
+indicates if the operation was successful. If it does not indicate success
+the authentication operation has failed and any output data \fB\s-1MUST NOT\s0\fR
+be used as it is corrupted.
+.PP
+The following ctrls are supported in \s-1GCM\s0 mode:
+.PP
+.Vb 1
+\& EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL);
+.Ve
+.PP
+Sets the \s-1GCM IV\s0 length: this call can only be made before specifying an \s-1IV.\s0 If
+not called a default \s-1IV\s0 length is used (96 bits for \s-1AES\s0).
+.PP
+.Vb 1
+\& EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, taglen, tag);
+.Ve
+.PP
+Writes \fBtaglen\fR bytes of the tag value to the buffer indicated by \fBtag\fR.
+This call can only be made when encrypting data and \fBafter\fR all data has been
+processed (e.g. after an \fIEVP_EncryptFinal()\fR call).
+.PP
+.Vb 1
+\& EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, taglen, tag);
+.Ve
+.PP
+Sets the expected tag to \fBtaglen\fR bytes from \fBtag\fR. This call is only legal
+when decrypting data and must be made \fBbefore\fR any data is processed (e.g.
+before any \fIEVP_DecryptUpdate()\fR call).
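+.PP
+Taken together, the encryption side might retrieve the tag as follows (a
+sketch assuming an initialized \fBctx\fR and caller-supplied buffers; error
+checking omitted):
+.PP
+.Vb 10
+\& int outlen, tmplen;
+\& unsigned char tag[16];
+\&
+\& /* out == NULL marks this data as AAD */
+\& EVP_EncryptUpdate(ctx, NULL, &outlen, aad, aadlen);
+\& EVP_EncryptUpdate(ctx, outbuf, &outlen, plaintext, ptlen);
+\& EVP_EncryptFinal_ex(ctx, outbuf + outlen, &tmplen);
+\&
+\& /* legal only after all data has been processed */
+\& EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, sizeof(tag), tag);
+.Ve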
+.PP
+See \s-1EXAMPLES\s0 below for an example of the use of \s-1GCM\s0 mode.
+.SH "CCM Mode"
+.IX Header "CCM Mode"
+The behaviour of \s-1CCM\s0 mode ciphers is similar to \s-1GCM\s0 mode but with a few
+additional requirements and different ctrl values.
+.PP
+Like \s-1GCM\s0 mode any additional authenticated data (\s-1AAD\s0) is passed by calling
+\&\fIEVP_CipherUpdate()\fR, \fIEVP_EncryptUpdate()\fR or \fIEVP_DecryptUpdate()\fR with the output
+parameter \fBout\fR set to \fB\s-1NULL\s0\fR. Additionally the total plaintext or ciphertext
+length \fB\s-1MUST\s0\fR be passed to \fIEVP_CipherUpdate()\fR, \fIEVP_EncryptUpdate()\fR or
+\&\fIEVP_DecryptUpdate()\fR with the output and input parameters (\fBin\fR and \fBout\fR)
+set to \fB\s-1NULL\s0\fR and the length passed in the \fBinl\fR parameter.
+.PP
+The following ctrls are supported in \s-1CCM\s0 mode:
+.PP
+.Vb 1
+\& EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG, taglen, tag);
+.Ve
+.PP
+This call is made to set the expected \fB\s-1CCM\s0\fR tag value when decrypting or
+the length of the tag (with the \fBtag\fR parameter set to \s-1NULL\s0) when encrypting.
+The tag length is often referred to as \fBM\fR. If not set a default value is
+used (12 for \s-1AES\s0).
+.PP
+.Vb 1
+\& EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_L, ivlen, NULL);
+.Ve
+.PP
+Sets the \s-1CCM \s0\fBL\fR value. If not set a default is used (8 for \s-1AES\s0).
+.PP
+.Vb 1
+\& EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_IVLEN, ivlen, NULL);
+.Ve
+.PP
+Sets the \s-1CCM\s0 nonce (\s-1IV\s0) length: this call can only be made before specifying
+a nonce value. The nonce length is given by \fB15 \- L\fR, so it is 7 by default
+for \s-1AES.\s0
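+.PP
+For decryption the calls might be sequenced as follows (a sketch assuming an
+initialized \fBctx\fR, a 7 byte \fBnonce\fR and caller-supplied buffers; error
+checking omitted):
+.PP
+.Vb 12
+\& int outlen;
+\&
+\& EVP_DecryptInit_ex(ctx, EVP_aes_128_ccm(), NULL, NULL, NULL);
+\& EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_IVLEN, 7, NULL);
+\& EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG, taglen, tag);
+\& EVP_DecryptInit_ex(ctx, NULL, NULL, key, nonce);
+\&
+\& /* the total ciphertext length must be supplied first */
+\& EVP_DecryptUpdate(ctx, NULL, &outlen, NULL, ctlen);
+\& EVP_DecryptUpdate(ctx, NULL, &outlen, aad, aadlen);
+\& /* a failure return here indicates the tag did not verify */
+\& EVP_DecryptUpdate(ctx, outbuf, &outlen, ciphertext, ctlen);
+.Ve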
.SH "NOTES"
.IX Header "NOTES"
Where possible the \fB\s-1EVP\s0\fR interface to symmetric ciphers should be used in
diff --git a/secure/lib/libcrypto/man/EVP_OpenInit.3 b/secure/lib/libcrypto/man/EVP_OpenInit.3
index 1e2695e..d5acd78 100644
--- a/secure/lib/libcrypto/man/EVP_OpenInit.3
+++ b/secure/lib/libcrypto/man/EVP_OpenInit.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_OpenInit 3"
-.TH EVP_OpenInit 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_OpenInit 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_CTX_ctrl.3 b/secure/lib/libcrypto/man/EVP_PKEY_CTX_ctrl.3
index aaf6c8e..47ee71f 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_CTX_ctrl.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_CTX_ctrl.3
@@ -133,13 +133,19 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_CTX_ctrl 3"
-.TH EVP_PKEY_CTX_ctrl 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_CTX_ctrl 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-EVP_PKEY_ctrl, EVP_PKEY_ctrl_str \- algorithm specific control operations
+EVP_PKEY_CTX_ctrl, EVP_PKEY_CTX_ctrl_str, EVP_PKEY_get_default_digest_nid,
+EVP_PKEY_CTX_set_signature_md, EVP_PKEY_CTX_set_rsa_padding,
+EVP_PKEY_CTX_set_rsa_pss_saltlen, EVP_PKEY_CTX_set_rsa_keygen_bits,
+EVP_PKEY_CTX_set_rsa_keygen_pubexp, EVP_PKEY_CTX_set_dsa_paramgen_bits,
+EVP_PKEY_CTX_set_dh_paramgen_prime_len,
+EVP_PKEY_CTX_set_dh_paramgen_generator,
+EVP_PKEY_CTX_set_ec_paramgen_curve_nid \- algorithm specific control operations
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
@@ -182,7 +188,7 @@ The control command is indicated in \fBcmd\fR and any additional arguments in
Applications will not normally call \fIEVP_PKEY_CTX_ctrl()\fR directly but will
instead call one of the algorithm specific macros below.
.PP
-The function \fIEVP_PKEY_ctrl_str()\fR allows an application to send an algorithm
+The function \fIEVP_PKEY_CTX_ctrl_str()\fR allows an application to send an algorithm
specific control operation to a context \fBctx\fR in string form. This is
intended to be used for options specified on the command line or in text
files. The commands supported are documented in the openssl utility
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_CTX_new.3 b/secure/lib/libcrypto/man/EVP_PKEY_CTX_new.3
index 639ed76..e113644 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_CTX_new.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_CTX_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_CTX_new 3"
-.TH EVP_PKEY_CTX_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_CTX_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_cmp.3 b/secure/lib/libcrypto/man/EVP_PKEY_cmp.3
index 958a3d8..67c9fe2 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_cmp.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_cmp.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_cmp 3"
-.TH EVP_PKEY_cmp 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_cmp 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -160,10 +160,10 @@ doesn't use parameters.
The function \fIEVP_PKEY_copy_parameters()\fR copies the parameters from key
\&\fBfrom\fR to key \fBto\fR.
.PP
-The funcion \fIEVP_PKEY_cmp_parameters()\fR compares the parameters of keys
+The function \fIEVP_PKEY_cmp_parameters()\fR compares the parameters of keys
\&\fBa\fR and \fBb\fR.
.PP
-The funcion \fIEVP_PKEY_cmp()\fR compares the public key components and paramters
+The function \fIEVP_PKEY_cmp()\fR compares the public key components and parameters
(if present) of keys \fBa\fR and \fBb\fR.
.SH "NOTES"
.IX Header "NOTES"
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_decrypt.3 b/secure/lib/libcrypto/man/EVP_PKEY_decrypt.3
index bf3d420..c52382d 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_decrypt.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_decrypt.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_decrypt 3"
-.TH EVP_PKEY_decrypt 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_decrypt 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_derive.3 b/secure/lib/libcrypto/man/EVP_PKEY_derive.3
index 68ddbbb..7923347 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_derive.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_derive.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_derive 3"
-.TH EVP_PKEY_derive 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_derive 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_encrypt.3 b/secure/lib/libcrypto/man/EVP_PKEY_encrypt.3
index be44f10..6fd361b 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_encrypt.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_encrypt.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_encrypt 3"
-.TH EVP_PKEY_encrypt 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_encrypt 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_get_default_digest.3 b/secure/lib/libcrypto/man/EVP_PKEY_get_default_digest.3
index 33be108..084b3e2 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_get_default_digest.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_get_default_digest.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_get_default_digest 3"
-.TH EVP_PKEY_get_default_digest 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_get_default_digest 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_keygen.3 b/secure/lib/libcrypto/man/EVP_PKEY_keygen.3
index 1c4daf9..48e07f8 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_keygen.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_keygen.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_keygen 3"
-.TH EVP_PKEY_keygen 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_keygen 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_new.3 b/secure/lib/libcrypto/man/EVP_PKEY_new.3
index ab0c7d9..fd37be3 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_new.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_new 3"
-.TH EVP_PKEY_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_print_private.3 b/secure/lib/libcrypto/man/EVP_PKEY_print_private.3
index 4adde36..4924221 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_print_private.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_print_private.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_print_private 3"
-.TH EVP_PKEY_print_private 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_print_private 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_set1_RSA.3 b/secure/lib/libcrypto/man/EVP_PKEY_set1_RSA.3
index 890d124..6f32c7c 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_set1_RSA.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_set1_RSA.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_set1_RSA 3"
-.TH EVP_PKEY_set1_RSA 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_set1_RSA 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_sign.3 b/secure/lib/libcrypto/man/EVP_PKEY_sign.3
index d3611e0..ea634a3 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_sign.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_sign.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_sign 3"
-.TH EVP_PKEY_sign 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_sign 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_verify.3 b/secure/lib/libcrypto/man/EVP_PKEY_verify.3
index 09c8d6a..3ba848b 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_verify.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_verify.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_verify 3"
-.TH EVP_PKEY_verify 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_verify 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_PKEY_verify_recover.3 b/secure/lib/libcrypto/man/EVP_PKEY_verify_recover.3
index 940d0a2..617a33b 100644
--- a/secure/lib/libcrypto/man/EVP_PKEY_verify_recover.3
+++ b/secure/lib/libcrypto/man/EVP_PKEY_verify_recover.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_PKEY_verify_recover 3"
-.TH EVP_PKEY_verify_recover 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_PKEY_verify_recover 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_SealInit.3 b/secure/lib/libcrypto/man/EVP_SealInit.3
index e57d788..42ccd2a 100644
--- a/secure/lib/libcrypto/man/EVP_SealInit.3
+++ b/secure/lib/libcrypto/man/EVP_SealInit.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_SealInit 3"
-.TH EVP_SealInit 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_SealInit 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_SignInit.3 b/secure/lib/libcrypto/man/EVP_SignInit.3
index 7aed114..5cbe3f9 100644
--- a/secure/lib/libcrypto/man/EVP_SignInit.3
+++ b/secure/lib/libcrypto/man/EVP_SignInit.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_SignInit 3"
-.TH EVP_SignInit 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_SignInit 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/EVP_VerifyInit.3 b/secure/lib/libcrypto/man/EVP_VerifyInit.3
index b9ce446..5d34258 100644
--- a/secure/lib/libcrypto/man/EVP_VerifyInit.3
+++ b/secure/lib/libcrypto/man/EVP_VerifyInit.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "EVP_VerifyInit 3"
-.TH EVP_VerifyInit 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH EVP_VerifyInit 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/OBJ_nid2obj.3 b/secure/lib/libcrypto/man/OBJ_nid2obj.3
index 31fe94d..acdaff9 100644
--- a/secure/lib/libcrypto/man/OBJ_nid2obj.3
+++ b/secure/lib/libcrypto/man/OBJ_nid2obj.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "OBJ_nid2obj 3"
-.TH OBJ_nid2obj 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH OBJ_nid2obj 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/OPENSSL_Applink.3 b/secure/lib/libcrypto/man/OPENSSL_Applink.3
index 78f7f38..221e86c 100644
--- a/secure/lib/libcrypto/man/OPENSSL_Applink.3
+++ b/secure/lib/libcrypto/man/OPENSSL_Applink.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "OPENSSL_Applink 3"
-.TH OPENSSL_Applink 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH OPENSSL_Applink 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/OPENSSL_VERSION_NUMBER.3 b/secure/lib/libcrypto/man/OPENSSL_VERSION_NUMBER.3
index 7db1044..58aa212 100644
--- a/secure/lib/libcrypto/man/OPENSSL_VERSION_NUMBER.3
+++ b/secure/lib/libcrypto/man/OPENSSL_VERSION_NUMBER.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "OPENSSL_VERSION_NUMBER 3"
-.TH OPENSSL_VERSION_NUMBER 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH OPENSSL_VERSION_NUMBER 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -155,7 +155,7 @@ OPENSSL_VERSION_NUMBER, SSLeay, SSLeay_version \- get OpenSSL version number
\&\s-1OPENSSL_VERSION_NUMBER\s0 is a numeric release version identifier:
.PP
.Vb 1
-\& MMNNFFPPS: major minor fix patch status
+\& MNNFFPPS: major minor fix patch status
.Ve
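+.PP
+For example, the 1.0.2d release corresponds to:
+.PP
+.Vb 1
+\& 0x1000204fL   /* M = 1, NN = 00, FF = 02, PP = 04 (d), S = f */
+.Ve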
.PP
The status nibble has one of the values 0 for development, 1 to e for betas
diff --git a/secure/lib/libcrypto/man/OPENSSL_config.3 b/secure/lib/libcrypto/man/OPENSSL_config.3
index f34f3e8..63ef10a 100644
--- a/secure/lib/libcrypto/man/OPENSSL_config.3
+++ b/secure/lib/libcrypto/man/OPENSSL_config.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "OPENSSL_config 3"
-.TH OPENSSL_config 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH OPENSSL_config 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -183,16 +183,6 @@ configuration file.
.PP
Applications should free up configuration at application closedown by calling
\&\fICONF_modules_free()\fR.
-.SH "RESTRICTIONS"
-.IX Header "RESTRICTIONS"
-The \fIOPENSSL_config()\fR function is designed to be a very simple \*(L"call it and
-forget it\*(R" function. As a result its behaviour is somewhat limited. It ignores
-all errors silently and it can only load from the standard configuration file
-location for example.
-.PP
-It is however \fBmuch\fR better than nothing. Applications which need finer
-control over their configuration functionality should use the configuration
-functions such as \fICONF_load_modules()\fR directly.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
Neither \fIOPENSSL_config()\fR nor \fIOPENSSL_no_config()\fR return a value.
diff --git a/secure/lib/libcrypto/man/OPENSSL_ia32cap.3 b/secure/lib/libcrypto/man/OPENSSL_ia32cap.3
index 0e8972d..1acf0f7 100644
--- a/secure/lib/libcrypto/man/OPENSSL_ia32cap.3
+++ b/secure/lib/libcrypto/man/OPENSSL_ia32cap.3
@@ -133,46 +133,94 @@
.\" ========================================================================
.\"
.IX Title "OPENSSL_ia32cap 3"
-.TH OPENSSL_ia32cap 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH OPENSSL_ia32cap 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-OPENSSL_ia32cap \- finding the IA\-32 processor capabilities
+OPENSSL_ia32cap, OPENSSL_ia32cap_loc \- the IA\-32 processor capabilities vector
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 2
-\& unsigned long *OPENSSL_ia32cap_loc(void);
-\& #define OPENSSL_ia32cap (*(OPENSSL_ia32cap_loc()))
+\& unsigned int *OPENSSL_ia32cap_loc(void);
+\& #define OPENSSL_ia32cap ((OPENSSL_ia32cap_loc())[0])
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
Value returned by \fIOPENSSL_ia32cap_loc()\fR is address of a variable
-containing \s-1IA\-32\s0 processor capabilities bit vector as it appears in \s-1EDX\s0
-register after executing \s-1CPUID\s0 instruction with EAX=1 input value (see
-Intel Application Note #241618). Naturally it's meaningful on IA\-32[E]
-platforms only. The variable is normally set up automatically upon
-toolkit initialization, but can be manipulated afterwards to modify
-crypto library behaviour. For the moment of this writing six bits are
-significant, namely:
-.PP
-1. bit #28 denoting Hyperthreading, which is used to distiguish
- cores with shared cache;
-2. bit #26 denoting \s-1SSE2\s0 support;
-3. bit #25 denoting \s-1SSE\s0 support;
-4. bit #23 denoting \s-1MMX\s0 support;
-5. bit #20, reserved by Intel, is used to choose between \s-1RC4\s0 code
- pathes;
-6. bit #4 denoting presence of Time-Stamp Counter.
+containing \s-1IA\-32\s0 processor capabilities bit vector as it appears in
+\&\s-1EDX:ECX\s0 register pair after executing \s-1CPUID\s0 instruction with EAX=1
+input value (see Intel Application Note #241618). Naturally it's
+meaningful on x86 and x86_64 platforms only. The variable is normally
+set up automatically upon toolkit initialization, but can be
+manipulated afterwards to modify crypto library behaviour. At the
+moment of this writing the following bits are significant:
+.IP "bit #4 denoting presence of Time-Stamp Counter." 4
+.IX Item "bit #4 denoting presence of Time-Stamp Counter."
+.PD 0
+.IP "bit #19 denoting availability of \s-1CLFLUSH\s0 instruction;" 4
+.IX Item "bit #19 denoting availability of CLFLUSH instruction;"
+.IP "bit #20, reserved by Intel, is used to choose among \s-1RC4\s0 code paths;" 4
+.IX Item "bit #20, reserved by Intel, is used to choose among RC4 code paths;"
+.IP "bit #23 denoting \s-1MMX\s0 support;" 4
+.IX Item "bit #23 denoting MMX support;"
+.IP "bit #24, \s-1FXSR\s0 bit, denoting availability of \s-1XMM\s0 registers;" 4
+.IX Item "bit #24, FXSR bit, denoting availability of XMM registers;"
+.IP "bit #25 denoting \s-1SSE\s0 support;" 4
+.IX Item "bit #25 denoting SSE support;"
+.IP "bit #26 denoting \s-1SSE2\s0 support;" 4
+.IX Item "bit #26 denoting SSE2 support;"
+.IP "bit #28 denoting Hyperthreading, which is used to distinguish cores with shared cache;" 4
+.IX Item "bit #28 denoting Hyperthreading, which is used to distinguish cores with shared cache;"
+.IP "bit #30, reserved by Intel, denotes specifically Intel CPUs;" 4
+.IX Item "bit #30, reserved by Intel, denotes specifically Intel CPUs;"
+.IP "bit #33 denoting availability of \s-1PCLMULQDQ\s0 instruction;" 4
+.IX Item "bit #33 denoting availability of PCLMULQDQ instruction;"
+.IP "bit #41 denoting \s-1SSSE3,\s0 Supplemental \s-1SSE3,\s0 support;" 4
+.IX Item "bit #41 denoting SSSE3, Supplemental SSE3, support;"
+.IP "bit #43 denoting \s-1AMD XOP\s0 support (forced to zero on non-AMD CPUs);" 4
+.IX Item "bit #43 denoting AMD XOP support (forced to zero on non-AMD CPUs);"
+.IP "bit #57 denoting AES-NI instruction set extension;" 4
+.IX Item "bit #57 denoting AES-NI instruction set extension;"
+.IP "bit #59, \s-1OSXSAVE\s0 bit, denoting availability of \s-1YMM\s0 registers;" 4
+.IX Item "bit #59, OSXSAVE bit, denoting availability of YMM registers;"
+.IP "bit #60 denoting \s-1AVX\s0 extension;" 4
+.IX Item "bit #60 denoting AVX extension;"
+.IP "bit #62 denoting availability of \s-1RDRAND\s0 instruction;" 4
+.IX Item "bit #62 denoting availability of RDRAND instruction;"
+.PD
.PP
For example, clearing bit #26 at run-time disables high-performance
-\&\s-1SSE2\s0 code present in the crypto library. You might have to do this if
-target OpenSSL application is executed on \s-1SSE2\s0 capable \s-1CPU,\s0 but under
-control of \s-1OS\s0 which does not support \s-1SSE2\s0 extentions. Even though you
-can manipulate the value programmatically, you most likely will find it
-more appropriate to set up an environment variable with the same name
-prior starting target application, e.g. on Intel P4 processor 'env
-OPENSSL_ia32cap=0x12900010 apps/openssl', to achieve same effect
-without modifying the application source code. Alternatively you can
-reconfigure the toolkit with no\-sse2 option and recompile.
+\&\s-1SSE2\s0 code present in the crypto library, while clearing bit #24
+disables \s-1SSE2\s0 code operating on 128\-bit \s-1XMM\s0 register bank. You might
+have to do the latter if target OpenSSL application is executed on \s-1SSE2\s0
+capable \s-1CPU,\s0 but under control of \s-1OS\s0 that does not enable \s-1XMM\s0
+registers. Even though you can manipulate the value programmatically,
+you most likely will find it more appropriate to set up an environment
+variable with the same name prior to starting the target application,
+e.g. on an Intel P4 processor 'env OPENSSL_ia32cap=0x16980010
+apps/openssl', or better yet 'env OPENSSL_ia32cap=~0x1000000
+apps/openssl', to achieve the same effect without modifying the
+application source code. Alternatively you can reconfigure the toolkit
+with the no\-sse2 option and recompile.
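+.PP
+The vector can also be masked from the application itself, for example (a
+sketch; this must run before any cryptographic code executes):
+.PP
+.Vb 3
+\& unsigned int *cap = OPENSSL_ia32cap_loc();
+\&
+\& cap[0] &= ~(1U << 26);  /* clear bit #26: disable SSE2 code paths */
+.Ve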
+.PP
+Less intuitive is clearing bit #28. The truth is that it's not copied
+from \s-1CPUID\s0 output verbatim, but is adjusted to reflect whether or not
+the data cache is actually shared between logical cores. This in turn
+affects the decision on whether or not expensive countermeasures
+against cache-timing attacks are applied, most notably in \s-1AES\s0 assembler
+module.
+.PP
+The vector is further extended with the \s-1EBX\s0 value returned by \s-1CPUID\s0
+with EAX=7 and ECX=0 as input. The following bits are significant:
+.IP "bit #64+3 denoting availability of \s-1BMI1\s0 instructions, e.g. \s-1ANDN\s0;" 4
+.IX Item "bit #64+3 denoting availability of BMI1 instructions, e.g. ANDN;"
+.PD 0
+.IP "bit #64+5 denoting availability of \s-1AVX2\s0 instructions;" 4
+.IX Item "bit #64+5 denoting availability of AVX2 instructions;"
+.IP "bit #64+8 denoting availability of \s-1BMI2\s0 instructions, e.g. \s-1MUXL\s0 and \s-1RORX\s0;" 4
+.IX Item "bit #64+8 denoting availability of BMI2 instructions, e.g. MUXL and RORX;"
+.IP "bit #64+18 denoting availability of \s-1RDSEED\s0 instruction;" 4
+.IX Item "bit #64+18 denoting availability of RDSEED instruction;"
+.IP "bit #64+19 denoting availability of \s-1ADCX\s0 and \s-1ADOX\s0 instructions;" 4
+.IX Item "bit #64+19 denoting availability of ADCX and ADOX instructions;"
diff --git a/secure/lib/libcrypto/man/OPENSSL_instrument_bus.3 b/secure/lib/libcrypto/man/OPENSSL_instrument_bus.3
new file mode 100644
index 0000000..5d8fea1
--- /dev/null
+++ b/secure/lib/libcrypto/man/OPENSSL_instrument_bus.3
@@ -0,0 +1,178 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "OPENSSL_instrument_bus 3"
+.TH OPENSSL_instrument_bus 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+OPENSSL_instrument_bus, OPENSSL_instrument_bus2 \- instrument references to memory bus
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 4
+\& #ifdef OPENSSL_CPUID_OBJ
+\& size_t OPENSSL_instrument_bus (int *vector,size_t num);
+\& size_t OPENSSL_instrument_bus2(int *vector,size_t num,size_t max);
+\& #endif
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+It was empirically found that timings of references to primary memory
+are subject to irregular, apparently non-deterministic variations. The
+subroutines in question instrument these references for the purpose of
+gathering entropy for the random number generator. In order to make the
+operation bus-bound, a 'flush cache line' instruction is used between
+probes. In addition, probes are added to \fBvector\fR elements in an
+atomic or interlocked manner, which should contribute additional noise
+on multi-processor systems. This also means that \fBvector[num]\fR should
+be zeroed upon invocation (if you want to retrieve actual probe values).
+.PP
+OPENSSL_instrument_bus performs \fBnum\fR probes and records the number of
+oscillator cycles every probe took.
+.PP
+OPENSSL_instrument_bus2 on the other hand \fBaccumulates\fR consecutive
+probes with the same value, i.e. in a way it records the duration of
+periods when probe values appeared deterministic. The subroutine
+performs at most \fBmax\fR probes in an attempt to fill \fBvector[num]\fR,
+with a \fBmax\fR value of 0 meaning \*(L"as many as it takes.\*(R"
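+.PP
+A minimal invocation might look as follows (a sketch; the toolkit is assumed
+to have been built with \fBOPENSSL_CPUID_OBJ\fR defined):
+.PP
+.Vb 6
+\& int probes[64] = { 0 };  /* zeroed so probe values can be read back */
+\& size_t n = OPENSSL_instrument_bus(probes, 64);
+\&
+\& if (n == 0) {
+\&     /* the CPU cannot perform the benchmark */
+\& }
+.Ve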
+.SH "RETURN VALUE"
+.IX Header "RETURN VALUE"
+A return value of 0 indicates that the \s-1CPU\s0 is not capable of
+performing the benchmark, either because the oscillator counter or the
+\&'flush cache line' instruction is not available on the current platform.
+For reference, on x86 'flush cache line' was introduced with the \s-1SSE2\s0
+extensions.
+.PP
+Otherwise the number of recorded values is returned.
diff --git a/secure/lib/libcrypto/man/OPENSSL_load_builtin_modules.3 b/secure/lib/libcrypto/man/OPENSSL_load_builtin_modules.3
index fa0a058..06a041f 100644
--- a/secure/lib/libcrypto/man/OPENSSL_load_builtin_modules.3
+++ b/secure/lib/libcrypto/man/OPENSSL_load_builtin_modules.3
@@ -133,13 +133,13 @@
.\" ========================================================================
.\"
.IX Title "OPENSSL_load_builtin_modules 3"
-.TH OPENSSL_load_builtin_modules 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH OPENSSL_load_builtin_modules 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-OPENSSL_load_builtin_modules \- add standard configuration modules
+OPENSSL_load_builtin_modules, ASN1_add_oid_module, ENGINE_add_conf_module \- add standard configuration modules
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
diff --git a/secure/lib/libcrypto/man/OpenSSL_add_all_algorithms.3 b/secure/lib/libcrypto/man/OpenSSL_add_all_algorithms.3
index 9e88ee9..5338f19 100644
--- a/secure/lib/libcrypto/man/OpenSSL_add_all_algorithms.3
+++ b/secure/lib/libcrypto/man/OpenSSL_add_all_algorithms.3
@@ -133,13 +133,13 @@
.\" ========================================================================
.\"
.IX Title "OpenSSL_add_all_algorithms 3"
-.TH OpenSSL_add_all_algorithms 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH OpenSSL_add_all_algorithms 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-OpenSSL_add_all_algorithms, OpenSSL_add_all_ciphers, OpenSSL_add_all_digests \-
+OpenSSL_add_all_algorithms, OpenSSL_add_all_ciphers, OpenSSL_add_all_digests, EVP_cleanup \-
add algorithms to internal table
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
diff --git a/secure/lib/libcrypto/man/PEM_write_bio_CMS_stream.3 b/secure/lib/libcrypto/man/PEM_write_bio_CMS_stream.3
index abc2345..48dcd4e 100644
--- a/secure/lib/libcrypto/man/PEM_write_bio_CMS_stream.3
+++ b/secure/lib/libcrypto/man/PEM_write_bio_CMS_stream.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "PEM_write_bio_CMS_stream 3"
-.TH PEM_write_bio_CMS_stream 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH PEM_write_bio_CMS_stream 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/PEM_write_bio_PKCS7_stream.3 b/secure/lib/libcrypto/man/PEM_write_bio_PKCS7_stream.3
index f186dba..2c5026b 100644
--- a/secure/lib/libcrypto/man/PEM_write_bio_PKCS7_stream.3
+++ b/secure/lib/libcrypto/man/PEM_write_bio_PKCS7_stream.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "PEM_write_bio_PKCS7_stream 3"
-.TH PEM_write_bio_PKCS7_stream 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH PEM_write_bio_PKCS7_stream 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/PKCS12_create.3 b/secure/lib/libcrypto/man/PKCS12_create.3
index 5b45b6e..f48de873 100644
--- a/secure/lib/libcrypto/man/PKCS12_create.3
+++ b/secure/lib/libcrypto/man/PKCS12_create.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "PKCS12_create 3"
-.TH PKCS12_create 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH PKCS12_create 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/PKCS12_parse.3 b/secure/lib/libcrypto/man/PKCS12_parse.3
index 4ce2f26..3c41710 100644
--- a/secure/lib/libcrypto/man/PKCS12_parse.3
+++ b/secure/lib/libcrypto/man/PKCS12_parse.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "PKCS12_parse 3"
-.TH PKCS12_parse 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH PKCS12_parse 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/PKCS7_decrypt.3 b/secure/lib/libcrypto/man/PKCS7_decrypt.3
index 479feb3..7f991b6 100644
--- a/secure/lib/libcrypto/man/PKCS7_decrypt.3
+++ b/secure/lib/libcrypto/man/PKCS7_decrypt.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "PKCS7_decrypt 3"
-.TH PKCS7_decrypt 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH PKCS7_decrypt 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/PKCS7_encrypt.3 b/secure/lib/libcrypto/man/PKCS7_encrypt.3
index 24b0089..1dd9942 100644
--- a/secure/lib/libcrypto/man/PKCS7_encrypt.3
+++ b/secure/lib/libcrypto/man/PKCS7_encrypt.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "PKCS7_encrypt 3"
-.TH PKCS7_encrypt 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH PKCS7_encrypt 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/PKCS7_sign.3 b/secure/lib/libcrypto/man/PKCS7_sign.3
index 4a2b7ce..9d6f06d 100644
--- a/secure/lib/libcrypto/man/PKCS7_sign.3
+++ b/secure/lib/libcrypto/man/PKCS7_sign.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "PKCS7_sign 3"
-.TH PKCS7_sign 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH PKCS7_sign 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/PKCS7_sign_add_signer.3 b/secure/lib/libcrypto/man/PKCS7_sign_add_signer.3
index 80205d5..a98f6df 100644
--- a/secure/lib/libcrypto/man/PKCS7_sign_add_signer.3
+++ b/secure/lib/libcrypto/man/PKCS7_sign_add_signer.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "PKCS7_sign_add_signer 3"
-.TH PKCS7_sign_add_signer 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH PKCS7_sign_add_signer 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/PKCS7_verify.3 b/secure/lib/libcrypto/man/PKCS7_verify.3
index 1bb8146..e58a10d 100644
--- a/secure/lib/libcrypto/man/PKCS7_verify.3
+++ b/secure/lib/libcrypto/man/PKCS7_verify.3
@@ -133,13 +133,13 @@
.\" ========================================================================
.\"
.IX Title "PKCS7_verify 3"
-.TH PKCS7_verify 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH PKCS7_verify 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-PKCS7_verify \- verify a PKCS#7 signedData structure
+PKCS7_verify, PKCS7_get0_signers \- verify a PKCS#7 signedData structure
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
@@ -225,8 +225,8 @@ signer it cannot be trusted without additional evidence (such as a trusted
timestamp).
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
-\&\fIPKCS7_verify()\fR returns 1 for a successful verification and zero or a negative
-value if an error occurs.
+\&\fIPKCS7_verify()\fR returns one for a successful verification and zero
+if an error occurs.
.PP
\&\fIPKCS7_get0_signers()\fR returns all signers or \fB\s-1NULL\s0\fR if an error occurred.
.PP
diff --git a/secure/lib/libcrypto/man/RAND_add.3 b/secure/lib/libcrypto/man/RAND_add.3
index 21c5869..12f24ea 100644
--- a/secure/lib/libcrypto/man/RAND_add.3
+++ b/secure/lib/libcrypto/man/RAND_add.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RAND_add 3"
-.TH RAND_add 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RAND_add 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RAND_bytes.3 b/secure/lib/libcrypto/man/RAND_bytes.3
index de043c1..d3dab57 100644
--- a/secure/lib/libcrypto/man/RAND_bytes.3
+++ b/secure/lib/libcrypto/man/RAND_bytes.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RAND_bytes 3"
-.TH RAND_bytes 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RAND_bytes 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RAND_cleanup.3 b/secure/lib/libcrypto/man/RAND_cleanup.3
index d562b9d..5954086 100644
--- a/secure/lib/libcrypto/man/RAND_cleanup.3
+++ b/secure/lib/libcrypto/man/RAND_cleanup.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RAND_cleanup 3"
-.TH RAND_cleanup 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RAND_cleanup 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RAND_egd.3 b/secure/lib/libcrypto/man/RAND_egd.3
index b6e027c..ff644ad 100644
--- a/secure/lib/libcrypto/man/RAND_egd.3
+++ b/secure/lib/libcrypto/man/RAND_egd.3
@@ -133,13 +133,13 @@
.\" ========================================================================
.\"
.IX Title "RAND_egd 3"
-.TH RAND_egd 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RAND_egd 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-RAND_egd \- query entropy gathering daemon
+RAND_egd, RAND_egd_bytes, RAND_query_egd_bytes \- query entropy gathering daemon
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
diff --git a/secure/lib/libcrypto/man/RAND_load_file.3 b/secure/lib/libcrypto/man/RAND_load_file.3
index 6d55b96..1e1ab59 100644
--- a/secure/lib/libcrypto/man/RAND_load_file.3
+++ b/secure/lib/libcrypto/man/RAND_load_file.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RAND_load_file 3"
-.TH RAND_load_file 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RAND_load_file 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RAND_set_rand_method.3 b/secure/lib/libcrypto/man/RAND_set_rand_method.3
index df6118b..0a8ee84 100644
--- a/secure/lib/libcrypto/man/RAND_set_rand_method.3
+++ b/secure/lib/libcrypto/man/RAND_set_rand_method.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RAND_set_rand_method 3"
-.TH RAND_set_rand_method 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RAND_set_rand_method 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RSA_blinding_on.3 b/secure/lib/libcrypto/man/RSA_blinding_on.3
index 3d65308..502b30c 100644
--- a/secure/lib/libcrypto/man/RSA_blinding_on.3
+++ b/secure/lib/libcrypto/man/RSA_blinding_on.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RSA_blinding_on 3"
-.TH RSA_blinding_on 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_blinding_on 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RSA_check_key.3 b/secure/lib/libcrypto/man/RSA_check_key.3
index 2e36c74..4d7689e 100644
--- a/secure/lib/libcrypto/man/RSA_check_key.3
+++ b/secure/lib/libcrypto/man/RSA_check_key.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RSA_check_key 3"
-.TH RSA_check_key 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_check_key 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RSA_generate_key.3 b/secure/lib/libcrypto/man/RSA_generate_key.3
index 0425128..05acc68 100644
--- a/secure/lib/libcrypto/man/RSA_generate_key.3
+++ b/secure/lib/libcrypto/man/RSA_generate_key.3
@@ -133,58 +133,71 @@
.\" ========================================================================
.\"
.IX Title "RSA_generate_key 3"
-.TH RSA_generate_key 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_generate_key 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-RSA_generate_key \- generate RSA key pair
+RSA_generate_key_ex, RSA_generate_key \- generate RSA key pair
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
\& #include <openssl/rsa.h>
\&
+\& int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb);
+.Ve
+.PP
+Deprecated:
+.PP
+.Vb 2
\& RSA *RSA_generate_key(int num, unsigned long e,
\& void (*callback)(int,int,void *), void *cb_arg);
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
-\&\fIRSA_generate_key()\fR generates a key pair and returns it in a newly
-allocated \fB\s-1RSA\s0\fR structure. The pseudo-random number generator must
-be seeded prior to calling \fIRSA_generate_key()\fR.
+\&\fIRSA_generate_key_ex()\fR generates a key pair and stores it in the \fB\s-1RSA\s0\fR
+structure provided in \fBrsa\fR. The pseudo-random number generator must
+be seeded prior to calling \fIRSA_generate_key_ex()\fR.
.PP
-The modulus size will be \fBnum\fR bits, and the public exponent will be
+The modulus size will be \fBbits\fR bits, and the public exponent will be
\&\fBe\fR. Key sizes with \fBnum\fR < 1024 should be considered insecure.
The exponent is an odd number, typically 3, 17 or 65537.
.PP
A callback function may be used to provide feedback about the
-progress of the key generation. If \fBcallback\fR is not \fB\s-1NULL\s0\fR, it
-will be called as follows:
+progress of the key generation. If \fBcb\fR is not \fB\s-1NULL\s0\fR, it
+will be called as follows using the \fIBN_GENCB_call()\fR function
+described on the \fIBN_generate_prime\fR\|(3) page.
.IP "\(bu" 4
While a random prime number is generated, it is called as
described in \fIBN_generate_prime\fR\|(3).
.IP "\(bu" 4
When the n\-th randomly generated prime is rejected as not
-suitable for the key, \fBcallback(2, n, cb_arg)\fR is called.
+suitable for the key, \fBBN_GENCB_call(cb, 2, n)\fR is called.
.IP "\(bu" 4
When a random p has been found with p\-1 relatively prime to \fBe\fR,
-it is called as \fBcallback(3, 0, cb_arg)\fR.
+it is called as \fBBN_GENCB_call(cb, 3, 0)\fR.
.PP
-The process is then repeated for prime q with \fBcallback(3, 1, cb_arg)\fR.
+The process is then repeated for prime q with \fBBN_GENCB_call(cb, 3, 1)\fR.
+.PP
+RSA_generate_key is deprecated (new applications should use
+RSA_generate_key_ex instead). RSA_generate_key works in the same way as
+RSA_generate_key_ex except that it uses \*(L"old style\*(R" callbacks. See
+\&\fIBN_generate_prime\fR\|(3) for further details.
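+.PP
+For example, a 2048\-bit key with the common public exponent 65537
+(\fBRSA_F4\fR) might be generated along the following lines (a minimal
+sketch; error handling is abbreviated):
+.PP
+.Vb 9
+\& RSA *rsa = RSA_new();
+\& BIGNUM *e = BN_new();
+\&
+\& if (rsa == NULL || e == NULL || !BN_set_word(e, RSA_F4)
+\&         || !RSA_generate_key_ex(rsa, 2048, e, NULL)) {
+\&     RSA_free(rsa);  /* generation failed */
+\&     rsa = NULL;
+\& }
+\& BN_free(e);         /* the exponent is copied into rsa */
+.Ve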
.SH "RETURN VALUE"
.IX Header "RETURN VALUE"
-If key generation fails, \fIRSA_generate_key()\fR returns \fB\s-1NULL\s0\fR; the
-error codes can be obtained by \fIERR_get_error\fR\|(3).
+If key generation fails, \fIRSA_generate_key()\fR returns \fB\s-1NULL\s0\fR.
+.PP
+The error codes can be obtained by \fIERR_get_error\fR\|(3).
.SH "BUGS"
.IX Header "BUGS"
-\&\fBcallback(2, x, cb_arg)\fR is used with two different meanings.
+\&\fBBN_GENCB_call(cb, 2, x)\fR is used with two different meanings.
.PP
\&\fIRSA_generate_key()\fR goes into an infinite loop for illegal input values.
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fIERR_get_error\fR\|(3), \fIrand\fR\|(3), \fIrsa\fR\|(3),
-\&\fIRSA_free\fR\|(3)
+\&\fIRSA_free\fR\|(3), \fIBN_generate_prime\fR\|(3)
.SH "HISTORY"
.IX Header "HISTORY"
The \fBcb_arg\fR argument was added in SSLeay 0.9.0.
diff --git a/secure/lib/libcrypto/man/RSA_get_ex_new_index.3 b/secure/lib/libcrypto/man/RSA_get_ex_new_index.3
index 954791d..18983d3 100644
--- a/secure/lib/libcrypto/man/RSA_get_ex_new_index.3
+++ b/secure/lib/libcrypto/man/RSA_get_ex_new_index.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RSA_get_ex_new_index 3"
-.TH RSA_get_ex_new_index 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_get_ex_new_index 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RSA_new.3 b/secure/lib/libcrypto/man/RSA_new.3
index 550f94c..48b7d3e 100644
--- a/secure/lib/libcrypto/man/RSA_new.3
+++ b/secure/lib/libcrypto/man/RSA_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RSA_new 3"
-.TH RSA_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RSA_padding_add_PKCS1_type_1.3 b/secure/lib/libcrypto/man/RSA_padding_add_PKCS1_type_1.3
index c273008..f050930 100644
--- a/secure/lib/libcrypto/man/RSA_padding_add_PKCS1_type_1.3
+++ b/secure/lib/libcrypto/man/RSA_padding_add_PKCS1_type_1.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RSA_padding_add_PKCS1_type_1 3"
-.TH RSA_padding_add_PKCS1_type_1 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_padding_add_PKCS1_type_1 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RSA_print.3 b/secure/lib/libcrypto/man/RSA_print.3
index d6a198c..ece4a26 100644
--- a/secure/lib/libcrypto/man/RSA_print.3
+++ b/secure/lib/libcrypto/man/RSA_print.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RSA_print 3"
-.TH RSA_print 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_print 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RSA_private_encrypt.3 b/secure/lib/libcrypto/man/RSA_private_encrypt.3
index f2f2989..15ad222 100644
--- a/secure/lib/libcrypto/man/RSA_private_encrypt.3
+++ b/secure/lib/libcrypto/man/RSA_private_encrypt.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RSA_private_encrypt 3"
-.TH RSA_private_encrypt 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_private_encrypt 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RSA_public_encrypt.3 b/secure/lib/libcrypto/man/RSA_public_encrypt.3
index 864ecd5..c3e05cf 100644
--- a/secure/lib/libcrypto/man/RSA_public_encrypt.3
+++ b/secure/lib/libcrypto/man/RSA_public_encrypt.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RSA_public_encrypt 3"
-.TH RSA_public_encrypt 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_public_encrypt 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RSA_set_method.3 b/secure/lib/libcrypto/man/RSA_set_method.3
index c2e11c4..e7a53de 100644
--- a/secure/lib/libcrypto/man/RSA_set_method.3
+++ b/secure/lib/libcrypto/man/RSA_set_method.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RSA_set_method 3"
-.TH RSA_set_method 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_set_method 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RSA_sign.3 b/secure/lib/libcrypto/man/RSA_sign.3
index 2f0c11a..a7a33bc 100644
--- a/secure/lib/libcrypto/man/RSA_sign.3
+++ b/secure/lib/libcrypto/man/RSA_sign.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RSA_sign 3"
-.TH RSA_sign 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_sign 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RSA_sign_ASN1_OCTET_STRING.3 b/secure/lib/libcrypto/man/RSA_sign_ASN1_OCTET_STRING.3
index d175c0f..aa7b184 100644
--- a/secure/lib/libcrypto/man/RSA_sign_ASN1_OCTET_STRING.3
+++ b/secure/lib/libcrypto/man/RSA_sign_ASN1_OCTET_STRING.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RSA_sign_ASN1_OCTET_STRING 3"
-.TH RSA_sign_ASN1_OCTET_STRING 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_sign_ASN1_OCTET_STRING 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/RSA_size.3 b/secure/lib/libcrypto/man/RSA_size.3
index 6085268..5bdc8bf 100644
--- a/secure/lib/libcrypto/man/RSA_size.3
+++ b/secure/lib/libcrypto/man/RSA_size.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "RSA_size 3"
-.TH RSA_size 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH RSA_size 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/SMIME_read_CMS.3 b/secure/lib/libcrypto/man/SMIME_read_CMS.3
index fa5c996..e9647f9 100644
--- a/secure/lib/libcrypto/man/SMIME_read_CMS.3
+++ b/secure/lib/libcrypto/man/SMIME_read_CMS.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SMIME_read_CMS 3"
-.TH SMIME_read_CMS 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SMIME_read_CMS 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/SMIME_read_PKCS7.3 b/secure/lib/libcrypto/man/SMIME_read_PKCS7.3
index 420a504..6693560 100644
--- a/secure/lib/libcrypto/man/SMIME_read_PKCS7.3
+++ b/secure/lib/libcrypto/man/SMIME_read_PKCS7.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SMIME_read_PKCS7 3"
-.TH SMIME_read_PKCS7 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SMIME_read_PKCS7 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/SMIME_write_CMS.3 b/secure/lib/libcrypto/man/SMIME_write_CMS.3
index da2c50d..e0d738e 100644
--- a/secure/lib/libcrypto/man/SMIME_write_CMS.3
+++ b/secure/lib/libcrypto/man/SMIME_write_CMS.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SMIME_write_CMS 3"
-.TH SMIME_write_CMS 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SMIME_write_CMS 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/SMIME_write_PKCS7.3 b/secure/lib/libcrypto/man/SMIME_write_PKCS7.3
index 20b0716..c7c551a 100644
--- a/secure/lib/libcrypto/man/SMIME_write_PKCS7.3
+++ b/secure/lib/libcrypto/man/SMIME_write_PKCS7.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SMIME_write_PKCS7 3"
-.TH SMIME_write_PKCS7 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SMIME_write_PKCS7 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/SSLeay_version.3 b/secure/lib/libcrypto/man/SSLeay_version.3
new file mode 100644
index 0000000..677cc4b
--- /dev/null
+++ b/secure/lib/libcrypto/man/SSLeay_version.3
@@ -0,0 +1,192 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSLeay_version 3"
+.TH SSLeay_version 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSLeay_version \- retrieve version/build information about OpenSSL library
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/crypto.h>
+\&
+\& const char *SSLeay_version(int type);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+\&\fISSLeay_version()\fR returns a pointer to a constant string describing the
+version of the OpenSSL library or giving information about the library
+build.
+.PP
+The following \fBtype\fR values are supported:
+.IP "\s-1SSLEAY_VERSION\s0" 4
+.IX Item "SSLEAY_VERSION"
+The version of the OpenSSL library including the release date.
+.IP "\s-1SSLEAY_CFLAGS\s0" 4
+.IX Item "SSLEAY_CFLAGS"
+The compiler flags set for the compilation process in the form
+\&\*(L"compiler: ...\*(R" if available or \*(L"compiler: information not available\*(R"
+otherwise.
+.IP "\s-1SSLEAY_BUILT_ON\s0" 4
+.IX Item "SSLEAY_BUILT_ON"
+The date of the build process in the form \*(L"built on: ...\*(R" if available
+or \*(L"built on: date not available\*(R" otherwise.
+.IP "\s-1SSLEAY_PLATFORM\s0" 4
+.IX Item "SSLEAY_PLATFORM"
+The \*(L"Configure\*(R" target of the library build in the form \*(L"platform: ...\*(R"
+if available or \*(L"platform: information not available\*(R" otherwise.
+.IP "\s-1SSLEAY_DIR\s0" 4
+.IX Item "SSLEAY_DIR"
+The \*(L"\s-1OPENSSLDIR\*(R"\s0 setting of the library build in the form \*(L"\s-1OPENSSLDIR: \*(R"..."\*(L"\s0
+if available or \*(R"\s-1OPENSSLDIR: N/A"\s0 otherwise.
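+.PP
+For example, a minimal sketch printing the version and build date:
+.PP
+.Vb 8
+\& #include <stdio.h>
+\& #include <openssl/crypto.h>
+\&
+\& int main(void)
+\& {
+\&     printf("%s\en", SSLeay_version(SSLEAY_VERSION));
+\&     printf("%s\en", SSLeay_version(SSLEAY_BUILT_ON));
+\&     return 0;
+\& }
+.Ve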
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+The following return values can occur:
+.ie n .IP """not available""" 4
+.el .IP "``not available''" 4
+.IX Item "not available"
+An invalid value for \fBtype\fR was given.
+.IP "Pointer to constant string" 4
+.IX Item "Pointer to constant string"
+Textual description.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fIcrypto\fR\|(3)
+.SH "HISTORY"
+.IX Header "HISTORY"
+\&\fB\s-1SSLEAY_DIR\s0\fR was added in OpenSSL 0.9.7.
diff --git a/secure/lib/libcrypto/man/X509_NAME_ENTRY_get_object.3 b/secure/lib/libcrypto/man/X509_NAME_ENTRY_get_object.3
index 6ebeaf2..77cc58e 100644
--- a/secure/lib/libcrypto/man/X509_NAME_ENTRY_get_object.3
+++ b/secure/lib/libcrypto/man/X509_NAME_ENTRY_get_object.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "X509_NAME_ENTRY_get_object 3"
-.TH X509_NAME_ENTRY_get_object 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH X509_NAME_ENTRY_get_object 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/X509_NAME_add_entry_by_txt.3 b/secure/lib/libcrypto/man/X509_NAME_add_entry_by_txt.3
index 208b598..5dfc928 100644
--- a/secure/lib/libcrypto/man/X509_NAME_add_entry_by_txt.3
+++ b/secure/lib/libcrypto/man/X509_NAME_add_entry_by_txt.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "X509_NAME_add_entry_by_txt 3"
-.TH X509_NAME_add_entry_by_txt 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH X509_NAME_add_entry_by_txt 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -180,7 +180,7 @@ the call.
.SH "NOTES"
.IX Header "NOTES"
The use of string types such as \fB\s-1MBSTRING_ASC\s0\fR or \fB\s-1MBSTRING_UTF8\s0\fR
-is strongly recommened for the \fBtype\fR parameter. This allows the
+is strongly recommended for the \fBtype\fR parameter. This allows the
internal code to correctly determine the type of the field and to
apply length checks according to the relevant standards. This is
done using \fIASN1_STRING_set_by_NID()\fR.
diff --git a/secure/lib/libcrypto/man/X509_NAME_get_index_by_NID.3 b/secure/lib/libcrypto/man/X509_NAME_get_index_by_NID.3
index 5c598f8..9f10125 100644
--- a/secure/lib/libcrypto/man/X509_NAME_get_index_by_NID.3
+++ b/secure/lib/libcrypto/man/X509_NAME_get_index_by_NID.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "X509_NAME_get_index_by_NID 3"
-.TH X509_NAME_get_index_by_NID 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH X509_NAME_get_index_by_NID 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/X509_NAME_print_ex.3 b/secure/lib/libcrypto/man/X509_NAME_print_ex.3
index 10fdfb6..6eda0e0 100644
--- a/secure/lib/libcrypto/man/X509_NAME_print_ex.3
+++ b/secure/lib/libcrypto/man/X509_NAME_print_ex.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "X509_NAME_print_ex 3"
-.TH X509_NAME_print_ex 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH X509_NAME_print_ex 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/X509_STORE_CTX_get_error.3 b/secure/lib/libcrypto/man/X509_STORE_CTX_get_error.3
index cd437af..9154c3d 100644
--- a/secure/lib/libcrypto/man/X509_STORE_CTX_get_error.3
+++ b/secure/lib/libcrypto/man/X509_STORE_CTX_get_error.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "X509_STORE_CTX_get_error 3"
-.TH X509_STORE_CTX_get_error 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH X509_STORE_CTX_get_error 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -169,7 +169,7 @@ checks.
.PP
\&\fIX509_STORE_CTX_get_error_depth()\fR returns the \fBdepth\fR of the error. This is a
non-negative integer representing where in the certificate chain the error
-occurred. If it is zero it occured in the end entity certificate, one if
+occurred. If it is zero it occurred in the end entity certificate, one if
it is the certificate which signed the end entity certificate and so on.
.PP
\&\fIX509_STORE_CTX_get_current_cert()\fR returns the certificate in \fBctx\fR which
@@ -342,10 +342,10 @@ The only CRLs that could be found did not match the scope of the certificate.
Some feature of a certificate extension is not supported. Unused.
.IP "\fBX509_V_ERR_PERMITTED_VIOLATION: permitted subtree violation\fR" 4
.IX Item "X509_V_ERR_PERMITTED_VIOLATION: permitted subtree violation"
-A name constraint violation occured in the permitted subtrees.
+A name constraint violation occurred in the permitted subtrees.
.IP "\fBX509_V_ERR_EXCLUDED_VIOLATION: excluded subtree violation\fR" 4
.IX Item "X509_V_ERR_EXCLUDED_VIOLATION: excluded subtree violation"
-A name constraint violation occured in the excluded subtrees.
+A name constraint violation occurred in the excluded subtrees.
.IP "\fBX509_V_ERR_SUBTREE_MINMAX: name constraints minimum and maximum not supported\fR" 4
.IX Item "X509_V_ERR_SUBTREE_MINMAX: name constraints minimum and maximum not supported"
A certificate name constraints extension included a minimum or maximum field:
@@ -361,7 +361,7 @@ address format of a form not mentioned in \s-1RFC3280.\s0 This could be caused b
a garbage extension or some new feature not currently supported.
.IP "\fBX509_V_ERR_CRL_PATH_VALIDATION_ERROR: \s-1CRL\s0 path validation error\fR" 4
.IX Item "X509_V_ERR_CRL_PATH_VALIDATION_ERROR: CRL path validation error"
-An error occured when attempting to verify the \s-1CRL\s0 path. This error can only
+An error occurred when attempting to verify the \s-1CRL\s0 path. This error can only
happen if extended \s-1CRL\s0 checking is enabled.
.IP "\fBX509_V_ERR_APPLICATION_VERIFICATION: application verification failure\fR" 4
.IX Item "X509_V_ERR_APPLICATION_VERIFICATION: application verification failure"
diff --git a/secure/lib/libcrypto/man/X509_STORE_CTX_get_ex_new_index.3 b/secure/lib/libcrypto/man/X509_STORE_CTX_get_ex_new_index.3
index dbfc8f6..00a0fe8 100644
--- a/secure/lib/libcrypto/man/X509_STORE_CTX_get_ex_new_index.3
+++ b/secure/lib/libcrypto/man/X509_STORE_CTX_get_ex_new_index.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "X509_STORE_CTX_get_ex_new_index 3"
-.TH X509_STORE_CTX_get_ex_new_index 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH X509_STORE_CTX_get_ex_new_index 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/X509_STORE_CTX_new.3 b/secure/lib/libcrypto/man/X509_STORE_CTX_new.3
index 25cedf2..1f01425 100644
--- a/secure/lib/libcrypto/man/X509_STORE_CTX_new.3
+++ b/secure/lib/libcrypto/man/X509_STORE_CTX_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "X509_STORE_CTX_new 3"
-.TH X509_STORE_CTX_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH X509_STORE_CTX_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/X509_STORE_CTX_set_verify_cb.3 b/secure/lib/libcrypto/man/X509_STORE_CTX_set_verify_cb.3
index 54e9db6..234c747 100644
--- a/secure/lib/libcrypto/man/X509_STORE_CTX_set_verify_cb.3
+++ b/secure/lib/libcrypto/man/X509_STORE_CTX_set_verify_cb.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "X509_STORE_CTX_set_verify_cb 3"
-.TH X509_STORE_CTX_set_verify_cb 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH X509_STORE_CTX_set_verify_cb 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/X509_STORE_set_verify_cb_func.3 b/secure/lib/libcrypto/man/X509_STORE_set_verify_cb_func.3
index 9586a18..4ab026a 100644
--- a/secure/lib/libcrypto/man/X509_STORE_set_verify_cb_func.3
+++ b/secure/lib/libcrypto/man/X509_STORE_set_verify_cb_func.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "X509_STORE_set_verify_cb_func 3"
-.TH X509_STORE_set_verify_cb_func 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH X509_STORE_set_verify_cb_func 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/X509_VERIFY_PARAM_set_flags.3 b/secure/lib/libcrypto/man/X509_VERIFY_PARAM_set_flags.3
index f08c9bc..c8c9907 100644
--- a/secure/lib/libcrypto/man/X509_VERIFY_PARAM_set_flags.3
+++ b/secure/lib/libcrypto/man/X509_VERIFY_PARAM_set_flags.3
@@ -133,13 +133,13 @@
.\" ========================================================================
.\"
.IX Title "X509_VERIFY_PARAM_set_flags 3"
-.TH X509_VERIFY_PARAM_set_flags 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH X509_VERIFY_PARAM_set_flags 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-X509_VERIFY_PARAM_set_flags, X509_VERIFY_PARAM_clear_flags, X509_VERIFY_PARAM_get_flags, X509_VERIFY_PARAM_set_purpose, X509_VERIFY_PARAM_set_trust, X509_VERIFY_PARAM_set_depth, X509_VERIFY_PARAM_get_depth, X509_VERIFY_PARAM_set_time, X509_VERIFY_PARAM_add0_policy, X509_VERIFY_PARAM_set1_policies \- X509 verification parameters
+X509_VERIFY_PARAM_set_flags, X509_VERIFY_PARAM_clear_flags, X509_VERIFY_PARAM_get_flags, X509_VERIFY_PARAM_set_purpose, X509_VERIFY_PARAM_set_trust, X509_VERIFY_PARAM_set_depth, X509_VERIFY_PARAM_get_depth, X509_VERIFY_PARAM_set_time, X509_VERIFY_PARAM_add0_policy, X509_VERIFY_PARAM_set1_policies, X509_VERIFY_PARAM_set1_host, X509_VERIFY_PARAM_add1_host, X509_VERIFY_PARAM_set_hostflags, X509_VERIFY_PARAM_get0_peername, X509_VERIFY_PARAM_set1_email, X509_VERIFY_PARAM_set1_ip, X509_VERIFY_PARAM_set1_ip_asc \- X509 verification parameters
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
@@ -162,6 +162,19 @@ X509_VERIFY_PARAM_set_flags, X509_VERIFY_PARAM_clear_flags, X509_VERIFY_PARAM_ge
\&
\& void X509_VERIFY_PARAM_set_depth(X509_VERIFY_PARAM *param, int depth);
\& int X509_VERIFY_PARAM_get_depth(const X509_VERIFY_PARAM *param);
+\&
+\& int X509_VERIFY_PARAM_set1_host(X509_VERIFY_PARAM *param,
+\& const char *name, size_t namelen);
+\& int X509_VERIFY_PARAM_add1_host(X509_VERIFY_PARAM *param,
+\& const char *name, size_t namelen);
+\& void X509_VERIFY_PARAM_set_hostflags(X509_VERIFY_PARAM *param,
+\& unsigned int flags);
+\& char *X509_VERIFY_PARAM_get0_peername(X509_VERIFY_PARAM *param);
+\& int X509_VERIFY_PARAM_set1_email(X509_VERIFY_PARAM *param,
+\& const char *email, size_t emaillen);
+\& int X509_VERIFY_PARAM_set1_ip(X509_VERIFY_PARAM *param,
+\& const unsigned char *ip, size_t iplen);
+\& int X509_VERIFY_PARAM_set1_ip_asc(X509_VERIFY_PARAM *param, const char *ipasc);
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
@@ -197,12 +210,63 @@ an existing policy set.
\&\fIX509_VERIFY_PARAM_set_depth()\fR sets the maximum verification depth to \fBdepth\fR.
That is the maximum number of untrusted \s-1CA\s0 certificates that can appear in a
chain.
+.PP
+\&\fIX509_VERIFY_PARAM_set1_host()\fR sets the expected \s-1DNS\s0 hostname to
+\&\fBname\fR, clearing any previously specified host name or names. If
+\&\fBname\fR is \s-1NULL\s0 or empty, the list of hostnames is cleared and
+name checks are not performed on the peer certificate. If \fBname\fR
+is NUL-terminated, \fBnamelen\fR may be zero; otherwise \fBnamelen\fR
+must be set to the length of \fBname\fR. When a hostname is specified,
+certificate verification automatically invokes \fIX509_check_host\fR\|(3)
+with flags equal to the \fBflags\fR argument given to
+\&\fB\f(BIX509_VERIFY_PARAM_set_hostflags()\fB\fR (default zero). Applications
+are strongly advised to use this interface in preference to explicitly
+calling \fIX509_check_host\fR\|(3); hostname checks are out of scope
+with the \s-1\fIDANE\-EE\s0\fR\|(3) certificate usage, and the internal check will
+be suppressed as appropriate when \s-1DANE\s0 support is added to OpenSSL.
+.PP
+\&\fIX509_VERIFY_PARAM_add1_host()\fR adds \fBname\fR as an additional reference
+identifier that can match the peer's certificate. Any previous names
+set via \fIX509_VERIFY_PARAM_set1_host()\fR or \fIX509_VERIFY_PARAM_add1_host()\fR
+are retained; no change is made if \fBname\fR is \s-1NULL\s0 or empty. When
+multiple names are configured, the peer is considered verified when
+any name matches.
+.PP
+\&\fIX509_VERIFY_PARAM_get0_peername()\fR returns the \s-1DNS\s0 hostname or subject
+CommonName from the peer certificate that matched one of the reference
+identifiers. When wildcard matching is not disabled, or when a
+reference identifier specifies a parent domain (starts with \*(L".\*(R")
+rather than a hostname, the peer name may be a wildcard name or a
+sub-domain of the reference identifier respectively. The returned
+string is allocated by the library and is no longer valid once the
+associated \fBparam\fR argument is freed. Applications must not free
+the returned string.
+.PP
+\&\fIX509_VERIFY_PARAM_set1_email()\fR sets the expected \s-1RFC822\s0 email address to
+\&\fBemail\fR. If \fBemail\fR is NUL-terminated, \fBemaillen\fR may be zero; otherwise
+\&\fBemaillen\fR must be set to the length of \fBemail\fR. When an email address
+is specified, certificate verification automatically invokes
+\&\fIX509_check_email\fR\|(3).
+.PP
+\&\fIX509_VERIFY_PARAM_set1_ip()\fR sets the expected \s-1IP\s0 address to \fBip\fR.
+The \fBip\fR argument is in binary format, in network byte order, and
+\&\fBiplen\fR must be set to 4 for IPv4 and 16 for IPv6. When an \s-1IP\s0
+address is specified, certificate verification automatically invokes
+\&\fIX509_check_ip\fR\|(3).
+.PP
+\&\fIX509_VERIFY_PARAM_set1_ip_asc()\fR sets the expected \s-1IP\s0 address to
+\&\fBipasc\fR. The \fBipasc\fR argument is a NUL-terminated \s-1ASCII\s0 string:
+dotted decimal quad for IPv4 and colon-separated hexadecimal for
+IPv6. The condensed \*(L"::\*(R" notation is supported for IPv6 addresses.
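+.PP
+For example, given an application's \fBSSL_CTX\fR \fBctx\fR, a minimal
+sketch using the \fISSL_CTX_get0_param()\fR accessor (the host name is a
+placeholder, and \fBnamelen\fR is zero because it is NUL-terminated):
+.PP
+.Vb 5
+\& X509_VERIFY_PARAM *param = SSL_CTX_get0_param(ctx);
+\&
+\& X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
+\& if (!X509_VERIFY_PARAM_set1_host(param, "www.example.com", 0))
+\&     abort();  /* placeholder error handling */
+.Ve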
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
-\&\fIX509_VERIFY_PARAM_set_flags()\fR, \fIX509_VERIFY_PARAM_clear_flags()\fR,
+\&\fIX509_VERIFY_PARAM_set_flags()\fR, \fIX509_VERIFY_PARAM_clear_flags()\fR,
\&\fIX509_VERIFY_PARAM_set_purpose()\fR, \fIX509_VERIFY_PARAM_set_trust()\fR,
-\&\fIX509_VERIFY_PARAM_add0_policy()\fR and \fIX509_VERIFY_PARAM_set1_policies()\fR return 1
-for success and 0 for failure.
+\&\fIX509_VERIFY_PARAM_add0_policy()\fR, \fIX509_VERIFY_PARAM_set1_policies()\fR,
+\&\fIX509_VERIFY_PARAM_set1_host()\fR, \fIX509_VERIFY_PARAM_set_hostflags()\fR,
+\&\fIX509_VERIFY_PARAM_set1_email()\fR, \fIX509_VERIFY_PARAM_set1_ip()\fR and
+\&\fIX509_VERIFY_PARAM_set1_ip_asc()\fR return 1 for success and 0 for
+failure.
.PP
\&\fIX509_VERIFY_PARAM_get_flags()\fR returns the current verification flags.
.PP
@@ -272,7 +336,7 @@ The \fBX509_V_FLAG_NO_ALT_CHAINS\fR flag suppresses checking for alternative
chains. By default, when building a certificate chain, if the first certificate
chain found is not trusted, then OpenSSL will continue to check to see if an
alternative chain can be found that is trusted. With this flag set the behaviour
-will match that of OpenSSL versions prior to 1.0.1n and 1.0.2b.
+will match that of OpenSSL versions prior to 1.0.2b.
.SH "NOTES"
.IX Header "NOTES"
The above functions should be used to manipulate verification parameters
@@ -301,7 +365,10 @@ connections associated with an \fB\s-1SSL_CTX\s0\fR structure \fBctx\fR:
.Ve
.SH "SEE ALSO"
.IX Header "SEE ALSO"
-\&\fIX509_verify_cert\fR\|(3)
+\&\fIX509_verify_cert\fR\|(3),
+\&\fIX509_check_host\fR\|(3),
+\&\fIX509_check_email\fR\|(3),
+\&\fIX509_check_ip\fR\|(3)
.SH "HISTORY"
.IX Header "HISTORY"
-The \fBX509_V_FLAG_NO_ALT_CHAINS\fR flag was added in OpenSSL 1.0.1n and 1.0.2b
+The \fBX509_V_FLAG_NO_ALT_CHAINS\fR flag was added in OpenSSL 1.0.2b
diff --git a/secure/lib/libcrypto/man/X509_check_host.3 b/secure/lib/libcrypto/man/X509_check_host.3
new file mode 100644
index 0000000..4556924
--- /dev/null
+++ b/secure/lib/libcrypto/man/X509_check_host.3
@@ -0,0 +1,269 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "X509_check_host 3"
+.TH X509_check_host 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+X509_check_host, X509_check_email, X509_check_ip, X509_check_ip_asc \- X.509 certificate matching
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/x509.h>
+\&
+\& int X509_check_host(X509 *, const char *name, size_t namelen,
+\& unsigned int flags, char **peername);
+\& int X509_check_email(X509 *, const char *address, size_t addresslen,
+\& unsigned int flags);
+\& int X509_check_ip(X509 *, const unsigned char *address, size_t addresslen,
+\& unsigned int flags);
+\& int X509_check_ip_asc(X509 *, const char *address, unsigned int flags);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+The certificate matching functions are used to check whether a
+certificate matches a given host name, email address, or \s-1IP\s0 address.
+The validity of the certificate and its trust level have to be checked by
+other means.
+.PP
+\&\fIX509_check_host()\fR checks if the certificate Subject Alternative
+Name (\s-1SAN\s0) or Subject CommonName (\s-1CN\s0) matches the specified host
+name, which must be encoded in the preferred name syntax described
+in section 3.5 of \s-1RFC 1034. \s0 By default, wildcards are supported
+and they match only in the left-most label, but they may match
+part of that label with an explicit prefix or suffix. For example,
+by default, the host \fBname\fR \*(L"www.example.com\*(R" would match a
+certificate with a \s-1SAN\s0 or \s-1CN\s0 value of \*(L"*.example.com\*(R", \*(L"w*.example.com\*(R"
+or \*(L"*w.example.com\*(R".
+.PP
+Per section 6.4.2 of \s-1RFC 6125, \s0\fBname\fR values representing international
+domain names must be given in A\-label form. The \fBnamelen\fR argument
+must be the number of characters in the name string or zero, in which
+case the length is calculated with strlen(\fBname\fR). When \fBname\fR starts
+with a dot (e.g. \*(L".example.com\*(R"), it will be matched by a certificate
+valid for any sub-domain of \fBname\fR (see also
+\&\fBX509_CHECK_FLAG_SINGLE_LABEL_SUBDOMAINS\fR below).
+.PP
+When the certificate is matched, and \fBpeername\fR is not \s-1NULL,\s0 a
+pointer to a copy of the matching \s-1SAN\s0 or \s-1CN\s0 from the peer certificate
+is stored at the address passed in \fBpeername\fR. The application
+is responsible for freeing the peername via \fIOPENSSL_free()\fR when it
+is no longer needed.
+.PP
+\&\fIX509_check_email()\fR checks if the certificate matches the specified
+email \fBaddress\fR. Only the mailbox syntax of \s-1RFC 822\s0 is supported;
+comments are not allowed, and no attempt is made to normalize quoted
+characters. The \fBaddresslen\fR argument must be the number of
+characters in the address string or zero, in which case the length
+is calculated with strlen(\fBaddress\fR).
+.PP
+\&\fIX509_check_ip()\fR checks if the certificate matches a specified IPv4 or
+IPv6 address. The \fBaddress\fR array is in binary format, in network
+byte order. The length is either 4 (IPv4) or 16 (IPv6). Only
+explicitly marked addresses in the certificates are considered; \s-1IP\s0
+addresses stored in \s-1DNS\s0 names and Common Names are ignored.
+.PP
+\&\fIX509_check_ip_asc()\fR is similar, except that the NUL-terminated
+string \fBaddress\fR is first converted to the internal representation.
+.PP
+The \fBflags\fR argument is usually 0. It can be the bitwise \s-1OR\s0 of the
+following flags:
+.IP "\fBX509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT\fR," 4
+.IX Item "X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT,"
+.PD 0
+.IP "\fBX509_CHECK_FLAG_NO_WILDCARDS\fR," 4
+.IX Item "X509_CHECK_FLAG_NO_WILDCARDS,"
+.IP "\fBX509_CHECK_FLAG_NO_PARTIAL_WILDCARDS\fR," 4
+.IX Item "X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS,"
+.IP "\fBX509_CHECK_FLAG_MULTI_LABEL_WILDCARDS\fR." 4
+.IX Item "X509_CHECK_FLAG_MULTI_LABEL_WILDCARDS."
+.IP "\fBX509_CHECK_FLAG_SINGLE_LABEL_SUBDOMAINS\fR." 4
+.IX Item "X509_CHECK_FLAG_SINGLE_LABEL_SUBDOMAINS."
+.PD
+.PP
+The \fBX509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT\fR flag causes the function
+to consider the subject \s-1DN\s0 even if the certificate contains at least
+one subject alternative name of the right type (\s-1DNS\s0 name or email
+address as appropriate); the default is to ignore the subject \s-1DN\s0
+when at least one corresponding subject alternative name is present.
+.PP
+If set, \fBX509_CHECK_FLAG_NO_WILDCARDS\fR disables wildcard
+expansion; this only applies to \fBX509_check_host\fR.
+.PP
+If set, \fBX509_CHECK_FLAG_NO_PARTIAL_WILDCARDS\fR suppresses support
+for \*(L"*\*(R" as a wildcard pattern in labels that have a prefix or suffix,
+such as \*(L"www*\*(R" or \*(L"*www\*(R"; this only applies to \fBX509_check_host\fR.
+.PP
+If set, \fBX509_CHECK_FLAG_MULTI_LABEL_WILDCARDS\fR allows a \*(L"*\*(R" that
+constitutes the complete label of a \s-1DNS\s0 name (e.g. \*(L"*.example.com\*(R")
+to match more than one label in \fBname\fR; this flag only applies
+to \fBX509_check_host\fR.
+.PP
+If set, \fBX509_CHECK_FLAG_SINGLE_LABEL_SUBDOMAINS\fR restricts \fBname\fR
+values which start with \*(L".\*(R", that would otherwise match any sub-domain
+in the peer certificate, to only match direct child sub-domains.
+Thus, for instance, with this flag set a \fBname\fR of \*(L".example.com\*(R"
+would match a peer certificate with a \s-1DNS\s0 name of \*(L"www.example.com\*(R",
+but would not match a peer certificate with a \s-1DNS\s0 name of
+\&\*(L"www.sub.example.com\*(R"; this flag only applies to \fBX509_check_host\fR.
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+The functions return 1 for a successful match, 0 for a failed match
+and \-1 for an internal error: typically a memory allocation failure
+or an \s-1ASN.1\s0 decoding error.
+.PP
+All functions can also return \-2 if the input is malformed. For example,
+\&\fIX509_check_host()\fR returns \-2 if the provided \fBname\fR contains embedded
+NULs.
+.SH "NOTES"
+.IX Header "NOTES"
+Applications are encouraged to use \fIX509_VERIFY_PARAM_set1_host()\fR
+rather than explicitly calling \fIX509_check_host\fR\|(3). Host name
+checks are out of scope with the \s-1\fIDANE\-EE\s0\fR\|(3) certificate usage,
+and the internal checks will be suppressed as appropriate when
+\&\s-1DANE\s0 support is added to OpenSSL.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fISSL_get_verify_result\fR\|(3),
+\&\fIX509_VERIFY_PARAM_set1_host\fR\|(3),
+\&\fIX509_VERIFY_PARAM_add1_host\fR\|(3),
+\&\fIX509_VERIFY_PARAM_set1_email\fR\|(3),
+\&\fIX509_VERIFY_PARAM_set1_ip\fR\|(3),
+\&\fIX509_VERIFY_PARAM_set1_ipasc\fR\|(3)
+.SH "HISTORY"
+.IX Header "HISTORY"
+These functions were added in OpenSSL 1.1.0.
diff --git a/secure/lib/libcrypto/man/X509_new.3 b/secure/lib/libcrypto/man/X509_new.3
index 587b861..8f5fda7 100644
--- a/secure/lib/libcrypto/man/X509_new.3
+++ b/secure/lib/libcrypto/man/X509_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "X509_new 3"
-.TH X509_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH X509_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/X509_verify_cert.3 b/secure/lib/libcrypto/man/X509_verify_cert.3
index 6250339..7dc95ca 100644
--- a/secure/lib/libcrypto/man/X509_verify_cert.3
+++ b/secure/lib/libcrypto/man/X509_verify_cert.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "X509_verify_cert 3"
-.TH X509_verify_cert 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH X509_verify_cert 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/bio.3 b/secure/lib/libcrypto/man/bio.3
index 18288c4..29b4622 100644
--- a/secure/lib/libcrypto/man/bio.3
+++ b/secure/lib/libcrypto/man/bio.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "bio 3"
-.TH bio 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH bio 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/blowfish.3 b/secure/lib/libcrypto/man/blowfish.3
index 738c2b8..1375358 100644
--- a/secure/lib/libcrypto/man/blowfish.3
+++ b/secure/lib/libcrypto/man/blowfish.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "blowfish 3"
-.TH blowfish 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH blowfish 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/bn.3 b/secure/lib/libcrypto/man/bn.3
index 441b944..72a4c04 100644
--- a/secure/lib/libcrypto/man/bn.3
+++ b/secure/lib/libcrypto/man/bn.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "bn 3"
-.TH bn 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH bn 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/bn_internal.3 b/secure/lib/libcrypto/man/bn_internal.3
index 83afdda..df16fd6 100644
--- a/secure/lib/libcrypto/man/bn_internal.3
+++ b/secure/lib/libcrypto/man/bn_internal.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "bn_internal 3"
-.TH bn_internal 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH bn_internal 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/buffer.3 b/secure/lib/libcrypto/man/buffer.3
index 8cd00a8..fcb7985 100644
--- a/secure/lib/libcrypto/man/buffer.3
+++ b/secure/lib/libcrypto/man/buffer.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "buffer 3"
-.TH buffer 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH buffer 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/crypto.3 b/secure/lib/libcrypto/man/crypto.3
index 53e9f5b..3aeaa33 100644
--- a/secure/lib/libcrypto/man/crypto.3
+++ b/secure/lib/libcrypto/man/crypto.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "crypto 3"
-.TH crypto 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH crypto 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -182,7 +182,7 @@ hash functions and a cryptographic pseudo-random number generator.
\&\fIpkcs7\fR\|(3), \fIpkcs12\fR\|(3)
.IP "\s-1INTERNAL FUNCTIONS\s0" 4
.IX Item "INTERNAL FUNCTIONS"
-\&\fIbn\fR\|(3), \fIbuffer\fR\|(3), \fIlhash\fR\|(3),
+\&\fIbn\fR\|(3), \fIbuffer\fR\|(3), \fIec\fR\|(3), \fIlhash\fR\|(3),
\&\fIobjects\fR\|(3), \fIstack\fR\|(3),
\&\fItxt_db\fR\|(3)
.SH "NOTES"
diff --git a/secure/lib/libcrypto/man/d2i_ASN1_OBJECT.3 b/secure/lib/libcrypto/man/d2i_ASN1_OBJECT.3
index aaf5154..dfd106c 100644
--- a/secure/lib/libcrypto/man/d2i_ASN1_OBJECT.3
+++ b/secure/lib/libcrypto/man/d2i_ASN1_OBJECT.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "d2i_ASN1_OBJECT 3"
-.TH d2i_ASN1_OBJECT 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_ASN1_OBJECT 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/d2i_CMS_ContentInfo.3 b/secure/lib/libcrypto/man/d2i_CMS_ContentInfo.3
index d2b17cf..2c4f36e 100644
--- a/secure/lib/libcrypto/man/d2i_CMS_ContentInfo.3
+++ b/secure/lib/libcrypto/man/d2i_CMS_ContentInfo.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "d2i_CMS_ContentInfo 3"
-.TH d2i_CMS_ContentInfo 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_CMS_ContentInfo 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/d2i_DHparams.3 b/secure/lib/libcrypto/man/d2i_DHparams.3
index 67e8d7b..8edf342 100644
--- a/secure/lib/libcrypto/man/d2i_DHparams.3
+++ b/secure/lib/libcrypto/man/d2i_DHparams.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "d2i_DHparams 3"
-.TH d2i_DHparams 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_DHparams 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/d2i_DSAPublicKey.3 b/secure/lib/libcrypto/man/d2i_DSAPublicKey.3
index 0455f59..ed56f87 100644
--- a/secure/lib/libcrypto/man/d2i_DSAPublicKey.3
+++ b/secure/lib/libcrypto/man/d2i_DSAPublicKey.3
@@ -133,14 +133,14 @@
.\" ========================================================================
.\"
.IX Title "d2i_DSAPublicKey 3"
-.TH d2i_DSAPublicKey 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_DSAPublicKey 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
d2i_DSAPublicKey, i2d_DSAPublicKey, d2i_DSAPrivateKey, i2d_DSAPrivateKey,
-d2i_DSA_PUBKEY, i2d_DSA_PUBKEY, d2i_DSA_SIG, i2d_DSA_SIG \- DSA key encoding
+d2i_DSA_PUBKEY, i2d_DSA_PUBKEY, d2i_DSAparams, i2d_DSAparams, d2i_DSA_SIG, i2d_DSA_SIG \- DSA key encoding
and parsing functions.
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
diff --git a/secure/lib/libcrypto/man/d2i_ECPKParameters.3 b/secure/lib/libcrypto/man/d2i_ECPKParameters.3
new file mode 100644
index 0000000..b14d481
--- /dev/null
+++ b/secure/lib/libcrypto/man/d2i_ECPKParameters.3
@@ -0,0 +1,216 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "d2i_ECPKParameters 3"
+.TH d2i_ECPKParameters 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+d2i_ECPKParameters, i2d_ECPKParameters, d2i_ECPKParameters_bio, i2d_ECPKParameters_bio, d2i_ECPKParameters_fp, i2d_ECPKParameters_fp, ECPKParameters_print, ECPKParameters_print_fp \- Functions for decoding and encoding ASN1 representations of elliptic curve entities
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ec.h>
+\&
+\& EC_GROUP *d2i_ECPKParameters(EC_GROUP **px, const unsigned char **in, long len);
+\& int i2d_ECPKParameters(const EC_GROUP *x, unsigned char **out);
+\& #define d2i_ECPKParameters_bio(bp,x) ASN1_d2i_bio_of(EC_GROUP,NULL,d2i_ECPKParameters,bp,x)
+\& #define i2d_ECPKParameters_bio(bp,x) ASN1_i2d_bio_of_const(EC_GROUP,i2d_ECPKParameters,bp,x)
+\& #define d2i_ECPKParameters_fp(fp,x) (EC_GROUP *)ASN1_d2i_fp(NULL, \e
+\& (char *(*)())d2i_ECPKParameters,(fp),(unsigned char **)(x))
+\& #define i2d_ECPKParameters_fp(fp,x) ASN1_i2d_fp(i2d_ECPKParameters,(fp), \e
+\& (unsigned char *)(x))
+\& int ECPKParameters_print(BIO *bp, const EC_GROUP *x, int off);
+\& int ECPKParameters_print_fp(FILE *fp, const EC_GROUP *x, int off);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+The ECPKParameters routines encode and parse the public parameters of an
+\&\fB\s-1EC_GROUP\s0\fR structure, which represents a curve.
+.PP
+\&\fId2i_ECPKParameters()\fR attempts to decode \fBlen\fR bytes at \fB*in\fR. If
+successful a pointer to the \fB\s-1EC_GROUP\s0\fR structure is returned. If an error
+occurred then \fB\s-1NULL\s0\fR is returned. If \fBpx\fR is not \fB\s-1NULL\s0\fR then the
+returned structure is written to \fB*px\fR. If \fB*px\fR is not \fB\s-1NULL\s0\fR
+then it is assumed that \fB*px\fR contains a valid \fB\s-1EC_GROUP\s0\fR
+structure and an attempt is made to reuse it. If the call is
+successful, \fB*in\fR is incremented to the byte following the
+parsed data.
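+.PP
+For example, a minimal decoding sketch (assuming \fBbuf\fR holds \fBbuflen\fR
+bytes of \s-1DER\s0 encoded parameters):
+.PP
+.Vb 4
+\& const unsigned char *p = buf;      /* d2i advances this pointer */
+\& EC_GROUP *group = d2i_ECPKParameters(NULL, &p, buflen);
+\& if (group == NULL)
+\&         /* error: the data could not be parsed */ ;
+.Ve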
+.PP
+\&\fIi2d_ECPKParameters()\fR encodes the structure pointed to by \fBx\fR into \s-1DER\s0 format.
+If \fBout\fR is not \fB\s-1NULL\s0\fR it writes the \s-1DER\s0 encoded data to the buffer
+at \fB*out\fR, and increments it to point after the data just written.
+If the return value is negative an error occurred; otherwise it
+returns the length of the encoded data.
+.PP
+If \fB*out\fR is \fB\s-1NULL\s0\fR memory will be allocated for a buffer and the encoded
+data written to it. In this case \fB*out\fR is not incremented and it points to
+the start of the data just written.
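+.PP
+Conversely, a sketch of encoding to a freshly allocated buffer (assuming
+\&\fBgroup\fR is a valid \fB\s-1EC_GROUP\s0\fR):
+.PP
+.Vb 5
+\& unsigned char *der = NULL;
+\& int len = i2d_ECPKParameters(group, &der);
+\& if (len < 0)
+\&         /* error */ ;
+\& /* der holds len bytes of DER; release with OPENSSL_free(der) */
+.Ve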
+.PP
+\&\fId2i_ECPKParameters_bio()\fR is similar to \fId2i_ECPKParameters()\fR except it attempts
+to parse data from \s-1BIO \s0\fBbp\fR.
+.PP
+\&\fId2i_ECPKParameters_fp()\fR is similar to \fId2i_ECPKParameters()\fR except it attempts
+to parse data from \s-1FILE\s0 pointer \fBfp\fR.
+.PP
+\&\fIi2d_ECPKParameters_bio()\fR is similar to \fIi2d_ECPKParameters()\fR except it writes
+the encoding of the structure \fBx\fR to \s-1BIO \s0\fBbp\fR and it
+returns 1 for success and 0 for failure.
+.PP
+\&\fIi2d_ECPKParameters_fp()\fR is similar to \fIi2d_ECPKParameters()\fR except it writes
+the encoding of the structure \fBx\fR to \s-1FILE\s0 pointer \fBfp\fR and it
+returns 1 for success and 0 for failure.
+.PP
+These functions are very similar to the X509 functions described in \fId2i_X509\fR\|(3),
+where further notes and examples are available.
+.PP
+The ECPKParameters_print and ECPKParameters_print_fp functions print a human-readable
+representation of the public parameters of the \s-1EC_GROUP\s0 to \fBbp\fR or \fBfp\fR. The output lines are indented by \fBoff\fR spaces.
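+.PP
+For instance (a sketch, assuming \fBgroup\fR is valid), the parameters can be
+printed to standard output via a \s-1BIO\s0:
+.PP
+.Vb 3
+\& BIO *b = BIO_new_fp(stdout, BIO_NOCLOSE);
+\& ECPKParameters_print(b, group, 0);
+\& BIO_free(b);
+.Ve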
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+\&\fId2i_ECPKParameters()\fR, \fId2i_ECPKParameters_bio()\fR and \fId2i_ECPKParameters_fp()\fR return a valid \fB\s-1EC_GROUP\s0\fR structure
+or \fB\s-1NULL\s0\fR if an error occurs.
+.PP
+\&\fIi2d_ECPKParameters()\fR returns the number of bytes successfully encoded or a negative
+value if an error occurs.
+.PP
+\&\fIi2d_ECPKParameters_bio()\fR, \fIi2d_ECPKParameters_fp()\fR, ECPKParameters_print and ECPKParameters_print_fp
+return 1 for success and 0 if an error occurs.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fIcrypto\fR\|(3), \fIec\fR\|(3), \fIEC_GROUP_new\fR\|(3), \fIEC_GROUP_copy\fR\|(3),
+\&\fIEC_POINT_new\fR\|(3), \fIEC_POINT_add\fR\|(3), \fIEC_KEY_new\fR\|(3),
+\&\fIEC_GFp_simple_method\fR\|(3), \fId2i_X509\fR\|(3)
diff --git a/secure/lib/libcrypto/man/d2i_ECPrivateKey.3 b/secure/lib/libcrypto/man/d2i_ECPrivateKey.3
index f6161ef..07a2962 100644
--- a/secure/lib/libcrypto/man/d2i_ECPrivateKey.3
+++ b/secure/lib/libcrypto/man/d2i_ECPrivateKey.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "d2i_ECPrivateKey 3"
-.TH d2i_ECPrivateKey 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_ECPrivateKey 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/d2i_PKCS8PrivateKey.3 b/secure/lib/libcrypto/man/d2i_PKCS8PrivateKey.3
index 91eb895..a89957d 100644
--- a/secure/lib/libcrypto/man/d2i_PKCS8PrivateKey.3
+++ b/secure/lib/libcrypto/man/d2i_PKCS8PrivateKey.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "d2i_PKCS8PrivateKey 3"
-.TH d2i_PKCS8PrivateKey 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_PKCS8PrivateKey 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/d2i_RSAPublicKey.3 b/secure/lib/libcrypto/man/d2i_RSAPublicKey.3
index 839512a..4e5af7a 100644
--- a/secure/lib/libcrypto/man/d2i_RSAPublicKey.3
+++ b/secure/lib/libcrypto/man/d2i_RSAPublicKey.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "d2i_RSAPublicKey 3"
-.TH d2i_RSAPublicKey 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_RSAPublicKey 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/d2i_X509.3 b/secure/lib/libcrypto/man/d2i_X509.3
index fc2a852..fa12c9f 100644
--- a/secure/lib/libcrypto/man/d2i_X509.3
+++ b/secure/lib/libcrypto/man/d2i_X509.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "d2i_X509 3"
-.TH d2i_X509 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_X509 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -154,6 +154,8 @@ i2d_X509_fp \- X509 encode and decode functions
\&
\& int i2d_X509_bio(BIO *bp, X509 *x);
\& int i2d_X509_fp(FILE *fp, X509 *x);
+\&
+\& int i2d_re_X509_tbs(X509 *x, unsigned char **out);
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
@@ -196,11 +198,17 @@ returns 1 for success and 0 for failure.
\&\fIi2d_X509_fp()\fR is similar to \fIi2d_X509()\fR except it writes
the encoding of the structure \fBx\fR to \s-1FILE\s0 pointer \fBfp\fR and it
returns 1 for success and 0 for failure.
+.PP
+\&\fIi2d_re_X509_tbs()\fR is similar to \fIi2d_X509()\fR except it encodes
+only the TBSCertificate portion of the certificate.
.SH "NOTES"
.IX Header "NOTES"
The letters \fBi\fR and \fBd\fR in for example \fBi2d_X509\fR stand for
-\&\*(L"internal\*(R" (that is an internal C structure) and \*(L"\s-1DER\*(R".\s0 So that
-\&\fBi2d_X509\fR converts from internal to \s-1DER.\s0
+\&\*(L"internal\*(R" (that is an internal C structure) and \*(L"\s-1DER\*(R".\s0 So
+\&\fBi2d_X509\fR converts from internal to \s-1DER.\s0 The \*(L"re\*(R" in
+\&\fBi2d_re_X509_tbs\fR stands for \*(L"re-encode\*(R", and ensures that a fresh
+encoding is generated in case the object has been modified after
+creation (see the \s-1BUGS\s0 section).
.PP
The functions can also understand \fB\s-1BER\s0\fR forms.
.PP
@@ -351,6 +359,21 @@ then the encoded structure may contain invalid data or omit the
fields entirely and will not be parsed by \fId2i_X509()\fR. This may be
fixed in future so code should not assume that \fIi2d_X509()\fR will
always succeed.
+.PP
+The encoding of the TBSCertificate portion of a certificate is cached
+in the \fBX509\fR structure internally to improve encoding performance
+and to ensure certificate signatures are verified correctly in some
+certificates with broken (non-DER) encodings.
+.PP
+Any function which encodes an X509 structure such as \fIi2d_X509()\fR,
+\&\fIi2d_X509_fp()\fR or \fIi2d_X509_bio()\fR may return a stale encoding if the
+\&\fBX509\fR structure has been modified after deserialization or previous
+serialization.
+.PP
+If, after modification, the \fBX509\fR object is re-signed with \fIX509_sign()\fR,
+the encoding is automatically renewed. Otherwise, the encoding of the
+TBSCertificate portion of the \fBX509\fR can be manually renewed by calling
+\&\fIi2d_re_X509_tbs()\fR.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
\&\fId2i_X509()\fR, \fId2i_X509_bio()\fR and \fId2i_X509_fp()\fR return a valid \fBX509\fR structure
diff --git a/secure/lib/libcrypto/man/d2i_X509_ALGOR.3 b/secure/lib/libcrypto/man/d2i_X509_ALGOR.3
index d369ab3..223d300 100644
--- a/secure/lib/libcrypto/man/d2i_X509_ALGOR.3
+++ b/secure/lib/libcrypto/man/d2i_X509_ALGOR.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "d2i_X509_ALGOR 3"
-.TH d2i_X509_ALGOR 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_X509_ALGOR 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/d2i_X509_CRL.3 b/secure/lib/libcrypto/man/d2i_X509_CRL.3
index f30cdda..399cd97 100644
--- a/secure/lib/libcrypto/man/d2i_X509_CRL.3
+++ b/secure/lib/libcrypto/man/d2i_X509_CRL.3
@@ -133,13 +133,13 @@
.\" ========================================================================
.\"
.IX Title "d2i_X509_CRL 3"
-.TH d2i_X509_CRL 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_X509_CRL 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-d2i_X509_CRL, i2d_X509_CRL, d2i_X509_CRL_bio, d2i_509_CRL_fp,
+d2i_X509_CRL, i2d_X509_CRL, d2i_X509_CRL_bio, d2i_X509_CRL_fp,
i2d_X509_CRL_bio, i2d_X509_CRL_fp \- X509 CRL encoding and parsing functions.
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
diff --git a/secure/lib/libcrypto/man/d2i_X509_NAME.3 b/secure/lib/libcrypto/man/d2i_X509_NAME.3
index 87e1348..04a2489 100644
--- a/secure/lib/libcrypto/man/d2i_X509_NAME.3
+++ b/secure/lib/libcrypto/man/d2i_X509_NAME.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "d2i_X509_NAME 3"
-.TH d2i_X509_NAME 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_X509_NAME 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/d2i_X509_REQ.3 b/secure/lib/libcrypto/man/d2i_X509_REQ.3
index e2ac57f..f6adf50 100644
--- a/secure/lib/libcrypto/man/d2i_X509_REQ.3
+++ b/secure/lib/libcrypto/man/d2i_X509_REQ.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "d2i_X509_REQ 3"
-.TH d2i_X509_REQ 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_X509_REQ 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/d2i_X509_SIG.3 b/secure/lib/libcrypto/man/d2i_X509_SIG.3
index 026826e..630ede5 100644
--- a/secure/lib/libcrypto/man/d2i_X509_SIG.3
+++ b/secure/lib/libcrypto/man/d2i_X509_SIG.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "d2i_X509_SIG 3"
-.TH d2i_X509_SIG 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_X509_SIG 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/des.3 b/secure/lib/libcrypto/man/des.3
index 35d8d29..db4329c 100644
--- a/secure/lib/libcrypto/man/des.3
+++ b/secure/lib/libcrypto/man/des.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "des 3"
-.TH des 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH des 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/dh.3 b/secure/lib/libcrypto/man/dh.3
index 4d3dcbd..93e848d 100644
--- a/secure/lib/libcrypto/man/dh.3
+++ b/secure/lib/libcrypto/man/dh.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "dh 3"
-.TH dh 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH dh 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/dsa.3 b/secure/lib/libcrypto/man/dsa.3
index d7e540d..14043aa 100644
--- a/secure/lib/libcrypto/man/dsa.3
+++ b/secure/lib/libcrypto/man/dsa.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "dsa 3"
-.TH dsa 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH dsa 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ec.3 b/secure/lib/libcrypto/man/ec.3
new file mode 100644
index 0000000..cda2f21
--- /dev/null
+++ b/secure/lib/libcrypto/man/ec.3
@@ -0,0 +1,333 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "ec 3"
+.TH ec 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+ec \- Elliptic Curve functions
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 2
+\& #include <openssl/ec.h>
+\& #include <openssl/bn.h>
+\&
+\& const EC_METHOD *EC_GFp_simple_method(void);
+\& const EC_METHOD *EC_GFp_mont_method(void);
+\& const EC_METHOD *EC_GFp_nist_method(void);
+\& const EC_METHOD *EC_GFp_nistp224_method(void);
+\& const EC_METHOD *EC_GFp_nistp256_method(void);
+\& const EC_METHOD *EC_GFp_nistp521_method(void);
+\&
+\& const EC_METHOD *EC_GF2m_simple_method(void);
+\&
+\& EC_GROUP *EC_GROUP_new(const EC_METHOD *meth);
+\& void EC_GROUP_free(EC_GROUP *group);
+\& void EC_GROUP_clear_free(EC_GROUP *group);
+\& int EC_GROUP_copy(EC_GROUP *dst, const EC_GROUP *src);
+\& EC_GROUP *EC_GROUP_dup(const EC_GROUP *src);
+\& const EC_METHOD *EC_GROUP_method_of(const EC_GROUP *group);
+\& int EC_METHOD_get_field_type(const EC_METHOD *meth);
+\& int EC_GROUP_set_generator(EC_GROUP *group, const EC_POINT *generator, const BIGNUM *order, const BIGNUM *cofactor);
+\& const EC_POINT *EC_GROUP_get0_generator(const EC_GROUP *group);
+\& int EC_GROUP_get_order(const EC_GROUP *group, BIGNUM *order, BN_CTX *ctx);
+\& int EC_GROUP_get_cofactor(const EC_GROUP *group, BIGNUM *cofactor, BN_CTX *ctx);
+\& void EC_GROUP_set_curve_name(EC_GROUP *group, int nid);
+\& int EC_GROUP_get_curve_name(const EC_GROUP *group);
+\& void EC_GROUP_set_asn1_flag(EC_GROUP *group, int flag);
+\& int EC_GROUP_get_asn1_flag(const EC_GROUP *group);
+\& void EC_GROUP_set_point_conversion_form(EC_GROUP *group, point_conversion_form_t form);
+\& point_conversion_form_t EC_GROUP_get_point_conversion_form(const EC_GROUP *);
+\& unsigned char *EC_GROUP_get0_seed(const EC_GROUP *x);
+\& size_t EC_GROUP_get_seed_len(const EC_GROUP *);
+\& size_t EC_GROUP_set_seed(EC_GROUP *, const unsigned char *, size_t len);
+\& int EC_GROUP_set_curve_GFp(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
+\& int EC_GROUP_get_curve_GFp(const EC_GROUP *group, BIGNUM *p, BIGNUM *a, BIGNUM *b, BN_CTX *ctx);
+\& int EC_GROUP_set_curve_GF2m(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
+\& int EC_GROUP_get_curve_GF2m(const EC_GROUP *group, BIGNUM *p, BIGNUM *a, BIGNUM *b, BN_CTX *ctx);
+\& int EC_GROUP_get_degree(const EC_GROUP *group);
+\& int EC_GROUP_check(const EC_GROUP *group, BN_CTX *ctx);
+\& int EC_GROUP_check_discriminant(const EC_GROUP *group, BN_CTX *ctx);
+\& int EC_GROUP_cmp(const EC_GROUP *a, const EC_GROUP *b, BN_CTX *ctx);
+\& EC_GROUP *EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
+\& EC_GROUP *EC_GROUP_new_curve_GF2m(const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
+\& EC_GROUP *EC_GROUP_new_by_curve_name(int nid);
+\&
+\& size_t EC_get_builtin_curves(EC_builtin_curve *r, size_t nitems);
+\&
+\& EC_POINT *EC_POINT_new(const EC_GROUP *group);
+\& void EC_POINT_free(EC_POINT *point);
+\& void EC_POINT_clear_free(EC_POINT *point);
+\& int EC_POINT_copy(EC_POINT *dst, const EC_POINT *src);
+\& EC_POINT *EC_POINT_dup(const EC_POINT *src, const EC_GROUP *group);
+\& const EC_METHOD *EC_POINT_method_of(const EC_POINT *point);
+\& int EC_POINT_set_to_infinity(const EC_GROUP *group, EC_POINT *point);
+\& int EC_POINT_set_Jprojective_coordinates_GFp(const EC_GROUP *group, EC_POINT *p,
+\& const BIGNUM *x, const BIGNUM *y, const BIGNUM *z, BN_CTX *ctx);
+\& int EC_POINT_get_Jprojective_coordinates_GFp(const EC_GROUP *group,
+\& const EC_POINT *p, BIGNUM *x, BIGNUM *y, BIGNUM *z, BN_CTX *ctx);
+\& int EC_POINT_set_affine_coordinates_GFp(const EC_GROUP *group, EC_POINT *p,
+\& const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx);
+\& int EC_POINT_get_affine_coordinates_GFp(const EC_GROUP *group,
+\& const EC_POINT *p, BIGNUM *x, BIGNUM *y, BN_CTX *ctx);
+\& int EC_POINT_set_compressed_coordinates_GFp(const EC_GROUP *group, EC_POINT *p,
+\& const BIGNUM *x, int y_bit, BN_CTX *ctx);
+\& int EC_POINT_set_affine_coordinates_GF2m(const EC_GROUP *group, EC_POINT *p,
+\& const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx);
+\& int EC_POINT_get_affine_coordinates_GF2m(const EC_GROUP *group,
+\& const EC_POINT *p, BIGNUM *x, BIGNUM *y, BN_CTX *ctx);
+\& int EC_POINT_set_compressed_coordinates_GF2m(const EC_GROUP *group, EC_POINT *p,
+\& const BIGNUM *x, int y_bit, BN_CTX *ctx);
+\& size_t EC_POINT_point2oct(const EC_GROUP *group, const EC_POINT *p,
+\& point_conversion_form_t form,
+\& unsigned char *buf, size_t len, BN_CTX *ctx);
+\& int EC_POINT_oct2point(const EC_GROUP *group, EC_POINT *p,
+\& const unsigned char *buf, size_t len, BN_CTX *ctx);
+\& BIGNUM *EC_POINT_point2bn(const EC_GROUP *, const EC_POINT *,
+\& point_conversion_form_t form, BIGNUM *, BN_CTX *);
+\& EC_POINT *EC_POINT_bn2point(const EC_GROUP *, const BIGNUM *,
+\& EC_POINT *, BN_CTX *);
+\& char *EC_POINT_point2hex(const EC_GROUP *, const EC_POINT *,
+\& point_conversion_form_t form, BN_CTX *);
+\& EC_POINT *EC_POINT_hex2point(const EC_GROUP *, const char *,
+\& EC_POINT *, BN_CTX *);
+\&
+\& int EC_POINT_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, const EC_POINT *b, BN_CTX *ctx);
+\& int EC_POINT_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, BN_CTX *ctx);
+\& int EC_POINT_invert(const EC_GROUP *group, EC_POINT *a, BN_CTX *ctx);
+\& int EC_POINT_is_at_infinity(const EC_GROUP *group, const EC_POINT *p);
+\& int EC_POINT_is_on_curve(const EC_GROUP *group, const EC_POINT *point, BN_CTX *ctx);
+\& int EC_POINT_cmp(const EC_GROUP *group, const EC_POINT *a, const EC_POINT *b, BN_CTX *ctx);
+\& int EC_POINT_make_affine(const EC_GROUP *group, EC_POINT *point, BN_CTX *ctx);
+\& int EC_POINTs_make_affine(const EC_GROUP *group, size_t num, EC_POINT *points[], BN_CTX *ctx);
+\& int EC_POINTs_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *n, size_t num, const EC_POINT *p[], const BIGNUM *m[], BN_CTX *ctx);
+\& int EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *n, const EC_POINT *q, const BIGNUM *m, BN_CTX *ctx);
+\& int EC_GROUP_precompute_mult(EC_GROUP *group, BN_CTX *ctx);
+\& int EC_GROUP_have_precompute_mult(const EC_GROUP *group);
+\&
+\& int EC_GROUP_get_basis_type(const EC_GROUP *);
+\& int EC_GROUP_get_trinomial_basis(const EC_GROUP *, unsigned int *k);
+\& int EC_GROUP_get_pentanomial_basis(const EC_GROUP *, unsigned int *k1,
+\& unsigned int *k2, unsigned int *k3);
+\& EC_GROUP *d2i_ECPKParameters(EC_GROUP **, const unsigned char **in, long len);
+\& int i2d_ECPKParameters(const EC_GROUP *, unsigned char **out);
+\& #define d2i_ECPKParameters_bio(bp,x) ASN1_d2i_bio_of(EC_GROUP,NULL,d2i_ECPKParameters,bp,x)
+\& #define i2d_ECPKParameters_bio(bp,x) ASN1_i2d_bio_of_const(EC_GROUP,i2d_ECPKParameters,bp,x)
+\& #define d2i_ECPKParameters_fp(fp,x) (EC_GROUP *)ASN1_d2i_fp(NULL, \e
+\& (char *(*)())d2i_ECPKParameters,(fp),(unsigned char **)(x))
+\& #define i2d_ECPKParameters_fp(fp,x) ASN1_i2d_fp(i2d_ECPKParameters,(fp), \e
+\& (unsigned char *)(x))
+\& int ECPKParameters_print(BIO *bp, const EC_GROUP *x, int off);
+\& int ECPKParameters_print_fp(FILE *fp, const EC_GROUP *x, int off);
+\&
+\& EC_KEY *EC_KEY_new(void);
+\& int EC_KEY_get_flags(const EC_KEY *key);
+\& void EC_KEY_set_flags(EC_KEY *key, int flags);
+\& void EC_KEY_clear_flags(EC_KEY *key, int flags);
+\& EC_KEY *EC_KEY_new_by_curve_name(int nid);
+\& void EC_KEY_free(EC_KEY *key);
+\& EC_KEY *EC_KEY_copy(EC_KEY *dst, const EC_KEY *src);
+\& EC_KEY *EC_KEY_dup(const EC_KEY *src);
+\& int EC_KEY_up_ref(EC_KEY *key);
+\& const EC_GROUP *EC_KEY_get0_group(const EC_KEY *key);
+\& int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group);
+\& const BIGNUM *EC_KEY_get0_private_key(const EC_KEY *key);
+\& int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *prv);
+\& const EC_POINT *EC_KEY_get0_public_key(const EC_KEY *key);
+\& int EC_KEY_set_public_key(EC_KEY *key, const EC_POINT *pub);
+\& unsigned EC_KEY_get_enc_flags(const EC_KEY *key);
+\& void EC_KEY_set_enc_flags(EC_KEY *eckey, unsigned int flags);
+\& point_conversion_form_t EC_KEY_get_conv_form(const EC_KEY *key);
+\& void EC_KEY_set_conv_form(EC_KEY *eckey, point_conversion_form_t cform);
+\& void *EC_KEY_get_key_method_data(EC_KEY *key,
+\& void *(*dup_func)(void *), void (*free_func)(void *), void (*clear_free_func)(void *));
+\& void EC_KEY_insert_key_method_data(EC_KEY *key, void *data,
+\& void *(*dup_func)(void *), void (*free_func)(void *), void (*clear_free_func)(void *));
+\& void EC_KEY_set_asn1_flag(EC_KEY *eckey, int asn1_flag);
+\& int EC_KEY_precompute_mult(EC_KEY *key, BN_CTX *ctx);
+\& int EC_KEY_generate_key(EC_KEY *key);
+\& int EC_KEY_check_key(const EC_KEY *key);
+\& int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x, BIGNUM *y);
+\&
+\& EC_KEY *d2i_ECPrivateKey(EC_KEY **key, const unsigned char **in, long len);
+\& int i2d_ECPrivateKey(EC_KEY *key, unsigned char **out);
+\&
+\& EC_KEY *d2i_ECParameters(EC_KEY **key, const unsigned char **in, long len);
+\& int i2d_ECParameters(EC_KEY *key, unsigned char **out);
+\&
+\& EC_KEY *o2i_ECPublicKey(EC_KEY **key, const unsigned char **in, long len);
+\& int i2o_ECPublicKey(EC_KEY *key, unsigned char **out);
+\& int ECParameters_print(BIO *bp, const EC_KEY *key);
+\& int EC_KEY_print(BIO *bp, const EC_KEY *key, int off);
+\& int ECParameters_print_fp(FILE *fp, const EC_KEY *key);
+\& int EC_KEY_print_fp(FILE *fp, const EC_KEY *key, int off);
+\& #define ECParameters_dup(x) ASN1_dup_of(EC_KEY,i2d_ECParameters,d2i_ECParameters,x)
+\& #define EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx, nid) \e
+\& EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, EVP_PKEY_OP_PARAMGEN, \e
+\& EVP_PKEY_CTRL_EC_PARAMGEN_CURVE_NID, nid, NULL)
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+This library provides an extensive set of functions for performing operations on elliptic curves over finite fields.
+In general, an elliptic curve is one with an equation of the form:
+.PP
+y^2 = x^3 + ax + b
+.PP
+An \fB\s-1EC_GROUP\s0\fR structure is used to represent the definition of an elliptic curve. Points on a curve are stored using an
+\&\fB\s-1EC_POINT\s0\fR structure. An \fB\s-1EC_KEY\s0\fR is used to hold a private/public key pair, where a private key is simply a \s-1BIGNUM\s0 and a
+public key is a point on a curve (represented by an \fB\s-1EC_POINT\s0\fR).
+.PP
+The library contains a number of alternative implementations of the different functions. Each implementation is optimised
+for different scenarios. No matter which implementation is being used, the interface remains the same. The library
+handles calling the correct implementation when an interface function is invoked. An implementation is represented by
+an \fB\s-1EC_METHOD\s0\fR structure.
+.PP
+The creation and destruction of \fB\s-1EC_GROUP\s0\fR objects is described in \fIEC_GROUP_new\fR\|(3). Functions for
+manipulating \fB\s-1EC_GROUP\s0\fR objects are described in \fIEC_GROUP_copy\fR\|(3).
+.PP
+Functions for creating, destroying and manipulating \fB\s-1EC_POINT\s0\fR objects are explained in \fIEC_POINT_new\fR\|(3),
+whilst functions for performing mathematical operations and tests on \fBEC_POINTs\fR are covered in \fIEC_POINT_add\fR\|(3).
+.PP
+For working with private and public keys refer to \fIEC_KEY_new\fR\|(3). Implementations are covered in
+\&\fIEC_GFp_simple_method\fR\|(3).
+.PP
+For information on encoding and decoding curve parameters to and from \s-1ASN1\s0 see \fId2i_ECPKParameters\fR\|(3).
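+.PP
+As a brief illustrative sketch, creating and generating a key on a named
+curve (error handling omitted):
+.PP
+.Vb 4
+\& EC_KEY *key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
+\& if (key == NULL || !EC_KEY_generate_key(key))
+\&         /* error */ ;
+\& EC_KEY_free(key);
+.Ve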
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fIcrypto\fR\|(3), \fIEC_GROUP_new\fR\|(3), \fIEC_GROUP_copy\fR\|(3),
+\&\fIEC_POINT_new\fR\|(3), \fIEC_POINT_add\fR\|(3), \fIEC_KEY_new\fR\|(3),
+\&\fIEC_GFp_simple_method\fR\|(3), \fId2i_ECPKParameters\fR\|(3)
diff --git a/secure/lib/libcrypto/man/ecdsa.3 b/secure/lib/libcrypto/man/ecdsa.3
index 7594d5f..d3f0c70 100644
--- a/secure/lib/libcrypto/man/ecdsa.3
+++ b/secure/lib/libcrypto/man/ecdsa.3
@@ -133,13 +133,13 @@
.\" ========================================================================
.\"
.IX Title "ecdsa 3"
-.TH ecdsa 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ecdsa 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-ecdsa \- Elliptic Curve Digital Signature Algorithm
+ECDSA_SIG_new, ECDSA_SIG_free, i2d_ECDSA_SIG, d2i_ECDSA_SIG, ECDSA_size, ECDSA_sign_setup, ECDSA_sign, ECDSA_sign_ex, ECDSA_verify, ECDSA_do_sign, ECDSA_do_sign_ex, ECDSA_do_verify \- Elliptic Curve Digital Signature Algorithm
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
diff --git a/secure/lib/libcrypto/man/engine.3 b/secure/lib/libcrypto/man/engine.3
index 9896b6c..f363220 100644
--- a/secure/lib/libcrypto/man/engine.3
+++ b/secure/lib/libcrypto/man/engine.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "engine 3"
-.TH engine 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH engine 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/err.3 b/secure/lib/libcrypto/man/err.3
index 66210ae..63c025b 100644
--- a/secure/lib/libcrypto/man/err.3
+++ b/secure/lib/libcrypto/man/err.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "err 3"
-.TH err 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH err 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/evp.3 b/secure/lib/libcrypto/man/evp.3
index a7cb86a..69c9fdf 100644
--- a/secure/lib/libcrypto/man/evp.3
+++ b/secure/lib/libcrypto/man/evp.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "evp 3"
-.TH evp 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH evp 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -150,17 +150,42 @@ evp \- high\-level cryptographic functions
The \s-1EVP\s0 library provides a high-level interface to cryptographic
functions.
.PP
-\&\fBEVP_Seal\fR\fI...\fR and \fBEVP_Open\fR\fI...\fR provide public key encryption
-and decryption to implement digital \*(L"envelopes\*(R".
+\&\fBEVP_Seal\fR\fI...\fR and \fBEVP_Open\fR\fI...\fR
+provide public key encryption and decryption to implement digital \*(L"envelopes\*(R".
.PP
-The \fBEVP_Sign\fR\fI...\fR and \fBEVP_Verify\fR\fI...\fR functions implement
-digital signatures.
+The \fBEVP_DigestSign\fR\fI...\fR and
+\&\fBEVP_DigestVerify\fR\fI...\fR functions implement
+digital signatures and Message Authentication Codes (MACs). Also see the older
+\&\fBEVP_Sign\fR\fI...\fR and \fBEVP_Verify\fR\fI...\fR
+functions.
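+.PP
+For instance, a signing sketch with the \fBEVP_DigestSign\fR\fI...\fR
+functions (assuming \fBpkey\fR holds a suitable private key and
+\&\fBdata\fR/\fBdatalen\fR describe the input; error checks omitted):
+.PP
+.Vb 10
+\& EVP_MD_CTX ctx;
+\& unsigned char *sig;
+\& size_t siglen;
+\& EVP_MD_CTX_init(&ctx);
+\& EVP_DigestSignInit(&ctx, NULL, EVP_sha256(), NULL, pkey);
+\& EVP_DigestSignUpdate(&ctx, data, datalen);
+\& EVP_DigestSignFinal(&ctx, NULL, &siglen);   /* query signature length */
+\& sig = OPENSSL_malloc(siglen);
+\& EVP_DigestSignFinal(&ctx, sig, &siglen);
+\& EVP_MD_CTX_cleanup(&ctx);
+.Ve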
.PP
Symmetric encryption is available with the \fBEVP_Encrypt\fR\fI...\fR
functions. The \fBEVP_Digest\fR\fI...\fR functions provide message digests.
.PP
The \fB\s-1EVP_PKEY\s0\fR\fI...\fR functions provide a high level interface to
-asymmetric algorithms.
+asymmetric algorithms. To create a new \s-1EVP_PKEY\s0 see
+\&\fIEVP_PKEY_new\fR\|(3). EVP_PKEYs can be associated
+with a private key of a particular algorithm by using the functions
+described on the \fIEVP_PKEY_set1_RSA\fR\|(3) page, or
+new keys can be generated using \fIEVP_PKEY_keygen\fR\|(3).
+EVP_PKEYs can be compared using \fIEVP_PKEY_cmp\fR\|(3), or printed using
+\&\fIEVP_PKEY_print_private\fR\|(3).
+.PP
+The \s-1EVP_PKEY\s0 functions support the full range of asymmetric algorithm operations:
+.IP "For key agreement see \fIEVP_PKEY_derive\fR\|(3)" 4
+.IX Item "For key agreement see EVP_PKEY_derive"
+.PD 0
+.IP "For signing and verifying see \fIEVP_PKEY_sign\fR\|(3), \fIEVP_PKEY_verify\fR\|(3) and \fIEVP_PKEY_verify_recover\fR\|(3). However, note that these functions do not perform a digest of the data to be signed. Therefore normally you would use the \fBEVP_DigestSign\fR\fI...\fR functions for this purpose." 4
+.IX Item "For signing and verifying see EVP_PKEY_sign, EVP_PKEY_verify and EVP_PKEY_verify_recover. However, note that these functions do not perform a digest of the data to be signed. Therefore normally you would use the EVP_DigestSign... functions for this purpose."
+.ie n .IP "For encryption and decryption see \fIEVP_PKEY_encrypt\fR\|(3) and \fIEVP_PKEY_decrypt\fR\|(3) respectively. However, note that these functions perform encryption and decryption only. As public key encryption is an expensive operation, normally you would wrap an encrypted message in a ""digital envelope"" using the \fBEVP_Seal\fR\fI...\fR and \fBEVP_Open\fR\fI...\fR functions." 4
+.el .IP "For encryption and decryption see \fIEVP_PKEY_encrypt\fR\|(3) and \fIEVP_PKEY_decrypt\fR\|(3) respectively. However, note that these functions perform encryption and decryption only. As public key encryption is an expensive operation, normally you would wrap an encrypted message in a ``digital envelope'' using the \fBEVP_Seal\fR\fI...\fR and \fBEVP_Open\fR\fI...\fR functions." 4
+.IX Item "For encryption and decryption see EVP_PKEY_encrypt and EVP_PKEY_decrypt respectively. However, note that these functions perform encryption and decryption only. As public key encryption is an expensive operation, normally you would wrap an encrypted message in a digital envelope using the EVP_Seal... and EVP_Open... functions."
+.PD
+.PP
+The \fIEVP_BytesToKey\fR\|(3) function provides some limited support for
+password-based encryption. Careful selection of the parameters will provide a PKCS#5 \s-1PBKDF1\s0\-compatible
+implementation. However, new applications should not typically use this (preferring, for example,
+\&\s-1PBKDF2\s0 from PKCS#5).
.PP
Algorithms are loaded with \fIOpenSSL_add_all_algorithms\fR\|(3).
.PP
@@ -183,7 +208,19 @@ using the high level interface.
\&\fIEVP_EncryptInit\fR\|(3),
\&\fIEVP_OpenInit\fR\|(3),
\&\fIEVP_SealInit\fR\|(3),
+\&\fIEVP_DigestSignInit\fR\|(3),
\&\fIEVP_SignInit\fR\|(3),
\&\fIEVP_VerifyInit\fR\|(3),
+\&\fIEVP_PKEY_new\fR\|(3),
+\&\fIEVP_PKEY_set1_RSA\fR\|(3),
+\&\fIEVP_PKEY_keygen\fR\|(3),
+\&\fIEVP_PKEY_print_private\fR\|(3),
+\&\fIEVP_PKEY_decrypt\fR\|(3),
+\&\fIEVP_PKEY_encrypt\fR\|(3),
+\&\fIEVP_PKEY_sign\fR\|(3),
+\&\fIEVP_PKEY_verify\fR\|(3),
+\&\fIEVP_PKEY_verify_recover\fR\|(3),
+\&\fIEVP_PKEY_derive\fR\|(3),
+\&\fIEVP_BytesToKey\fR\|(3),
\&\fIOpenSSL_add_all_algorithms\fR\|(3),
\&\fIengine\fR\|(3)
diff --git a/secure/lib/libcrypto/man/hmac.3 b/secure/lib/libcrypto/man/hmac.3
index 8f5c458..b6fed21 100644
--- a/secure/lib/libcrypto/man/hmac.3
+++ b/secure/lib/libcrypto/man/hmac.3
@@ -133,14 +133,14 @@
.\" ========================================================================
.\"
.IX Title "hmac 3"
-.TH hmac 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH hmac 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-HMAC, HMAC_Init, HMAC_Update, HMAC_Final, HMAC_cleanup \- HMAC message
-authentication code
+HMAC, HMAC_CTX_init, HMAC_Init, HMAC_Init_ex, HMAC_Update, HMAC_Final, HMAC_CTX_cleanup,
+HMAC_cleanup \- HMAC message authentication code
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
diff --git a/secure/lib/libcrypto/man/i2d_CMS_bio_stream.3 b/secure/lib/libcrypto/man/i2d_CMS_bio_stream.3
index 93b49c1..b1eee7d 100644
--- a/secure/lib/libcrypto/man/i2d_CMS_bio_stream.3
+++ b/secure/lib/libcrypto/man/i2d_CMS_bio_stream.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "i2d_CMS_bio_stream 3"
-.TH i2d_CMS_bio_stream 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH i2d_CMS_bio_stream 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/i2d_PKCS7_bio_stream.3 b/secure/lib/libcrypto/man/i2d_PKCS7_bio_stream.3
index fd07c3b..1910be2 100644
--- a/secure/lib/libcrypto/man/i2d_PKCS7_bio_stream.3
+++ b/secure/lib/libcrypto/man/i2d_PKCS7_bio_stream.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "i2d_PKCS7_bio_stream 3"
-.TH i2d_PKCS7_bio_stream 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH i2d_PKCS7_bio_stream 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -158,7 +158,7 @@ This function is effectively a version of the \fId2i_PKCS7_bio()\fR supporting
streaming.
.SH "BUGS"
.IX Header "BUGS"
-The prefix \*(L"d2i\*(R" is arguably wrong because the function outputs \s-1BER\s0 format.
+The prefix \*(L"i2d\*(R" is arguably wrong because the function outputs \s-1BER\s0 format.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
\&\fIi2d_PKCS7_bio_stream()\fR returns 1 for success or 0 for failure.
diff --git a/secure/lib/libcrypto/man/lh_stats.3 b/secure/lib/libcrypto/man/lh_stats.3
index cf28f33..19a5146 100644
--- a/secure/lib/libcrypto/man/lh_stats.3
+++ b/secure/lib/libcrypto/man/lh_stats.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "lh_stats 3"
-.TH lh_stats 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH lh_stats 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/lhash.3 b/secure/lib/libcrypto/man/lhash.3
index 055a248..fce16ee 100644
--- a/secure/lib/libcrypto/man/lhash.3
+++ b/secure/lib/libcrypto/man/lhash.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "lhash 3"
-.TH lhash 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH lhash 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/md5.3 b/secure/lib/libcrypto/man/md5.3
index 3c43627..3970a45 100644
--- a/secure/lib/libcrypto/man/md5.3
+++ b/secure/lib/libcrypto/man/md5.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "md5 3"
-.TH md5 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH md5 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/mdc2.3 b/secure/lib/libcrypto/man/mdc2.3
index 183793a..46915da 100644
--- a/secure/lib/libcrypto/man/mdc2.3
+++ b/secure/lib/libcrypto/man/mdc2.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "mdc2 3"
-.TH mdc2 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH mdc2 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/pem.3 b/secure/lib/libcrypto/man/pem.3
index 2e03f31..c25cff4 100644
--- a/secure/lib/libcrypto/man/pem.3
+++ b/secure/lib/libcrypto/man/pem.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "pem 3"
-.TH pem 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH pem 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/rand.3 b/secure/lib/libcrypto/man/rand.3
index 9445f4b..8c45a7a 100644
--- a/secure/lib/libcrypto/man/rand.3
+++ b/secure/lib/libcrypto/man/rand.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "rand 3"
-.TH rand 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH rand 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -176,7 +176,7 @@ Since the introduction of the \s-1ENGINE API,\s0 the recommended way of controll
default implementations is by using the \s-1ENGINE API\s0 functions. The default
\&\fB\s-1RAND_METHOD\s0\fR, as set by \fIRAND_set_rand_method()\fR and returned by
\&\fIRAND_get_rand_method()\fR, is only used if no \s-1ENGINE\s0 has been set as the default
-\&\*(L"rand\*(R" implementation. Hence, these two functions are no longer the recommened
+\&\*(L"rand\*(R" implementation. Hence, these two functions are no longer the recommended
way to control defaults.
.PP
If an alternative \fB\s-1RAND_METHOD\s0\fR implementation is being used (either set
diff --git a/secure/lib/libcrypto/man/rc4.3 b/secure/lib/libcrypto/man/rc4.3
index c9987df..076898b 100644
--- a/secure/lib/libcrypto/man/rc4.3
+++ b/secure/lib/libcrypto/man/rc4.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "rc4 3"
-.TH rc4 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH rc4 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ripemd.3 b/secure/lib/libcrypto/man/ripemd.3
index b2c6443..eee8447 100644
--- a/secure/lib/libcrypto/man/ripemd.3
+++ b/secure/lib/libcrypto/man/ripemd.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ripemd 3"
-.TH ripemd 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ripemd 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/rsa.3 b/secure/lib/libcrypto/man/rsa.3
index 11a364c..d49f16c 100644
--- a/secure/lib/libcrypto/man/rsa.3
+++ b/secure/lib/libcrypto/man/rsa.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "rsa 3"
-.TH rsa 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH rsa 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/sha.3 b/secure/lib/libcrypto/man/sha.3
index dbaf4e8..a76d682 100644
--- a/secure/lib/libcrypto/man/sha.3
+++ b/secure/lib/libcrypto/man/sha.3
@@ -133,35 +133,64 @@
.\" ========================================================================
.\"
.IX Title "sha 3"
-.TH sha 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH sha 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-SHA1, SHA1_Init, SHA1_Update, SHA1_Final \- Secure Hash Algorithm
+SHA1, SHA1_Init, SHA1_Update, SHA1_Final, SHA224, SHA224_Init, SHA224_Update,
+SHA224_Final, SHA256, SHA256_Init, SHA256_Update, SHA256_Final, SHA384,
+SHA384_Init, SHA384_Update, SHA384_Final, SHA512, SHA512_Init, SHA512_Update,
+SHA512_Final \- Secure Hash Algorithm
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
\& #include <openssl/sha.h>
\&
-\& unsigned char *SHA1(const unsigned char *d, unsigned long n,
-\& unsigned char *md);
-\&
\& int SHA1_Init(SHA_CTX *c);
-\& int SHA1_Update(SHA_CTX *c, const void *data,
-\& unsigned long len);
+\& int SHA1_Update(SHA_CTX *c, const void *data, size_t len);
\& int SHA1_Final(unsigned char *md, SHA_CTX *c);
+\& unsigned char *SHA1(const unsigned char *d, size_t n,
+\& unsigned char *md);
+\&
+\& int SHA224_Init(SHA256_CTX *c);
+\& int SHA224_Update(SHA256_CTX *c, const void *data, size_t len);
+\& int SHA224_Final(unsigned char *md, SHA256_CTX *c);
+\& unsigned char *SHA224(const unsigned char *d, size_t n,
+\& unsigned char *md);
+\&
+\& int SHA256_Init(SHA256_CTX *c);
+\& int SHA256_Update(SHA256_CTX *c, const void *data, size_t len);
+\& int SHA256_Final(unsigned char *md, SHA256_CTX *c);
+\& unsigned char *SHA256(const unsigned char *d, size_t n,
+\& unsigned char *md);
+\&
+\& int SHA384_Init(SHA512_CTX *c);
+\& int SHA384_Update(SHA512_CTX *c, const void *data, size_t len);
+\& int SHA384_Final(unsigned char *md, SHA512_CTX *c);
+\& unsigned char *SHA384(const unsigned char *d, size_t n,
+\& unsigned char *md);
+\&
+\& int SHA512_Init(SHA512_CTX *c);
+\& int SHA512_Update(SHA512_CTX *c, const void *data, size_t len);
+\& int SHA512_Final(unsigned char *md, SHA512_CTX *c);
+\& unsigned char *SHA512(const unsigned char *d, size_t n,
+\& unsigned char *md);
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
+Applications should use the higher level functions
+\&\fIEVP_DigestInit\fR\|(3) etc. instead of calling the hash
+functions directly.
+.PP
\&\s-1SHA\-1 \s0(Secure Hash Algorithm) is a cryptographic hash function with a
160 bit output.
.PP
\&\s-1\fISHA1\s0()\fR computes the \s-1SHA\-1\s0 message digest of the \fBn\fR
bytes at \fBd\fR and places it in \fBmd\fR (which must have space for
\&\s-1SHA_DIGEST_LENGTH\s0 == 20 bytes of output). If \fBmd\fR is \s-1NULL,\s0 the digest
-is placed in a static array.
+is placed in a static array. Note: setting \fBmd\fR to \s-1NULL\s0 is \fBnot thread safe\fR.
.PP
The following functions may be used if the message is not completely
stored in memory:
@@ -174,22 +203,27 @@ be hashed (\fBlen\fR bytes at \fBdata\fR).
\&\fISHA1_Final()\fR places the message digest in \fBmd\fR, which must have space
for \s-1SHA_DIGEST_LENGTH\s0 == 20 bytes of output, and erases the \fB\s-1SHA_CTX\s0\fR.
.PP
-Applications should use the higher level functions
-\&\fIEVP_DigestInit\fR\|(3)
-etc. instead of calling the hash functions directly.
+The \s-1SHA224, SHA256, SHA384\s0 and \s-1SHA512\s0 families of functions operate in the
+same way as the \s-1SHA1\s0 functions. Note that \s-1SHA224\s0 and \s-1SHA256\s0 use a
+\&\fB\s-1SHA256_CTX\s0\fR object instead of \fB\s-1SHA_CTX\s0\fR. \s-1SHA384\s0 and \s-1SHA512\s0 use \fB\s-1SHA512_CTX\s0\fR.
+The buffer \fBmd\fR must have space for the output from the \s-1SHA\s0 variant being used
+(defined by \s-1SHA224_DIGEST_LENGTH, SHA256_DIGEST_LENGTH, SHA384_DIGEST_LENGTH\s0 and
+\&\s-1SHA512_DIGEST_LENGTH\s0). Also note that, as for the \s-1\fISHA1\s0()\fR function above, the
+\&\s-1\fISHA224\s0()\fR, \s-1\fISHA256\s0()\fR, \s-1\fISHA384\s0()\fR and \s-1\fISHA512\s0()\fR functions are not thread safe if
+\&\fBmd\fR is \s-1NULL.\s0
.PP
The predecessor of \s-1SHA\-1, SHA,\s0 is also implemented, but it should be
used only when backward compatibility is required.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
-\&\s-1\fISHA1\s0()\fR returns a pointer to the hash value.
+\&\s-1\fISHA1\s0()\fR, \s-1\fISHA224\s0()\fR, \s-1\fISHA256\s0()\fR, \s-1\fISHA384\s0()\fR and \s-1\fISHA512\s0()\fR return a pointer to the hash
+value.
.PP
-\&\fISHA1_Init()\fR, \fISHA1_Update()\fR and \fISHA1_Final()\fR return 1 for success, 0 otherwise.
+\&\fISHA1_Init()\fR, \fISHA1_Update()\fR and \fISHA1_Final()\fR, and the equivalent \s-1SHA224, SHA256,
+SHA384\s0 and \s-1SHA512\s0 functions, return 1 for success, 0 otherwise.
.SH "CONFORMING TO"
.IX Header "CONFORMING TO"
-\&\s-1SHA: US\s0 Federal Information Processing Standard \s-1FIPS PUB 180 \s0(Secure Hash
-Standard),
-\&\s-1SHA\-1: US\s0 Federal Information Processing Standard \s-1FIPS PUB 180\-1 \s0(Secure Hash
+\&\s-1US\s0 Federal Information Processing Standard \s-1FIPS PUB 180\-4 \s0(Secure Hash
Standard),
\&\s-1ANSI X9.30\s0
.SH "SEE ALSO"
diff --git a/secure/lib/libcrypto/man/threads.3 b/secure/lib/libcrypto/man/threads.3
index 07e763e..2ef7ebb 100644
--- a/secure/lib/libcrypto/man/threads.3
+++ b/secure/lib/libcrypto/man/threads.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "threads 3"
-.TH threads 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH threads 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ui.3 b/secure/lib/libcrypto/man/ui.3
index 5d98570..2a3e7f5 100644
--- a/secure/lib/libcrypto/man/ui.3
+++ b/secure/lib/libcrypto/man/ui.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ui 3"
-.TH ui 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ui 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/ui_compat.3 b/secure/lib/libcrypto/man/ui_compat.3
index 15894d5..e286dc6 100644
--- a/secure/lib/libcrypto/man/ui_compat.3
+++ b/secure/lib/libcrypto/man/ui_compat.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ui_compat 3"
-.TH ui_compat 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ui_compat 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/man/x509.3 b/secure/lib/libcrypto/man/x509.3
index 7cbfada..0777817 100644
--- a/secure/lib/libcrypto/man/x509.3
+++ b/secure/lib/libcrypto/man/x509.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "x509 3"
-.TH x509 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH x509 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libcrypto/opensslconf-aarch64.h b/secure/lib/libcrypto/opensslconf-aarch64.h
index adbaba9..80adf45 100644
--- a/secure/lib/libcrypto/opensslconf-aarch64.h
+++ b/secure/lib/libcrypto/opensslconf-aarch64.h
@@ -21,12 +21,18 @@ extern "C" {
#ifndef OPENSSL_NO_KRB5
# define OPENSSL_NO_KRB5
#endif
+#ifndef OPENSSL_NO_LIBUNBOUND
+# define OPENSSL_NO_LIBUNBOUND
+#endif
#ifndef OPENSSL_NO_MD2
# define OPENSSL_NO_MD2
#endif
#ifndef OPENSSL_NO_SCTP
# define OPENSSL_NO_SCTP
#endif
+#ifndef OPENSSL_NO_SSL_TRACE
+# define OPENSSL_NO_SSL_TRACE
+#endif
#ifndef OPENSSL_NO_SSL2
# define OPENSSL_NO_SSL2
#endif
@@ -66,12 +72,18 @@ extern "C" {
# if defined(OPENSSL_NO_KRB5) && !defined(NO_KRB5)
# define NO_KRB5
# endif
+# if defined(OPENSSL_NO_LIBUNBOUND) && !defined(NO_LIBUNBOUND)
+# define NO_LIBUNBOUND
+# endif
# if defined(OPENSSL_NO_MD2) && !defined(NO_MD2)
# define NO_MD2
# endif
# if defined(OPENSSL_NO_SCTP) && !defined(NO_SCTP)
# define NO_SCTP
# endif
+# if defined(OPENSSL_NO_SSL_TRACE) && !defined(NO_SSL_TRACE)
+# define NO_SSL_TRACE
+# endif
# if defined(OPENSSL_NO_SSL2) && !defined(NO_SSL2)
# define NO_SSL2
# endif
@@ -186,7 +198,7 @@ extern "C" {
#endif
#if defined(DES_RISC1) && defined(DES_RISC2)
-YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
+#error YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
#endif
/* Unroll the inner loop, this sometimes helps, sometimes hinders.
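The OPENSSL_NO_LIBUNBOUND and OPENSSL_NO_SSL_TRACE guards added here (and repeated for the other architectures below) follow the usual opensslconf.h pattern; a minimal sketch of how consuming code would honour such a guard:

    #include <openssl/opensslconf.h>

    /* Sketch: only compile feature-dependent code when the corresponding
     * OPENSSL_NO_* macro is absent from this build's opensslconf.h. */
    #ifndef OPENSSL_NO_SSL_TRACE
    /* ... SSL_trace()-based debugging helpers would go here ... */
    #else
    /* feature disabled in this build: provide a stub or omit the code */
    #endif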
diff --git a/secure/lib/libcrypto/opensslconf-arm.h b/secure/lib/libcrypto/opensslconf-arm.h
index 4ddfe82..cfa6bdc 100644
--- a/secure/lib/libcrypto/opensslconf-arm.h
+++ b/secure/lib/libcrypto/opensslconf-arm.h
@@ -21,12 +21,18 @@ extern "C" {
#ifndef OPENSSL_NO_KRB5
# define OPENSSL_NO_KRB5
#endif
+#ifndef OPENSSL_NO_LIBUNBOUND
+# define OPENSSL_NO_LIBUNBOUND
+#endif
#ifndef OPENSSL_NO_MD2
# define OPENSSL_NO_MD2
#endif
#ifndef OPENSSL_NO_SCTP
# define OPENSSL_NO_SCTP
#endif
+#ifndef OPENSSL_NO_SSL_TRACE
+# define OPENSSL_NO_SSL_TRACE
+#endif
#ifndef OPENSSL_NO_SSL2
# define OPENSSL_NO_SSL2
#endif
@@ -66,12 +72,18 @@ extern "C" {
# if defined(OPENSSL_NO_KRB5) && !defined(NO_KRB5)
# define NO_KRB5
# endif
+# if defined(OPENSSL_NO_LIBUNBOUND) && !defined(NO_LIBUNBOUND)
+# define NO_LIBUNBOUND
+# endif
# if defined(OPENSSL_NO_MD2) && !defined(NO_MD2)
# define NO_MD2
# endif
# if defined(OPENSSL_NO_SCTP) && !defined(NO_SCTP)
# define NO_SCTP
# endif
+# if defined(OPENSSL_NO_SSL_TRACE) && !defined(NO_SSL_TRACE)
+# define NO_SSL_TRACE
+# endif
# if defined(OPENSSL_NO_SSL2) && !defined(NO_SSL2)
# define NO_SSL2
# endif
@@ -186,7 +198,7 @@ extern "C" {
#endif
#if defined(DES_RISC1) && defined(DES_RISC2)
-YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
+#error YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
#endif
/* Unroll the inner loop, this sometimes helps, sometimes hinders.
diff --git a/secure/lib/libcrypto/opensslconf-mips.h b/secure/lib/libcrypto/opensslconf-mips.h
index b55557e..2c4bd24 100644
--- a/secure/lib/libcrypto/opensslconf-mips.h
+++ b/secure/lib/libcrypto/opensslconf-mips.h
@@ -21,12 +21,18 @@ extern "C" {
#ifndef OPENSSL_NO_KRB5
# define OPENSSL_NO_KRB5
#endif
+#ifndef OPENSSL_NO_LIBUNBOUND
+# define OPENSSL_NO_LIBUNBOUND
+#endif
#ifndef OPENSSL_NO_MD2
# define OPENSSL_NO_MD2
#endif
#ifndef OPENSSL_NO_SCTP
# define OPENSSL_NO_SCTP
#endif
+#ifndef OPENSSL_NO_SSL_TRACE
+# define OPENSSL_NO_SSL_TRACE
+#endif
#ifndef OPENSSL_NO_SSL2
# define OPENSSL_NO_SSL2
#endif
@@ -66,12 +72,18 @@ extern "C" {
# if defined(OPENSSL_NO_KRB5) && !defined(NO_KRB5)
# define NO_KRB5
# endif
+# if defined(OPENSSL_NO_LIBUNBOUND) && !defined(NO_LIBUNBOUND)
+# define NO_LIBUNBOUND
+# endif
# if defined(OPENSSL_NO_MD2) && !defined(NO_MD2)
# define NO_MD2
# endif
# if defined(OPENSSL_NO_SCTP) && !defined(NO_SCTP)
# define NO_SCTP
# endif
+# if defined(OPENSSL_NO_SSL_TRACE) && !defined(NO_SSL_TRACE)
+# define NO_SSL_TRACE
+# endif
# if defined(OPENSSL_NO_SSL2) && !defined(NO_SSL2)
# define NO_SSL2
# endif
@@ -200,7 +212,7 @@ extern "C" {
#endif
#if defined(DES_RISC1) && defined(DES_RISC2)
-YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
+#error YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
#endif
/* Unroll the inner loop, this sometimes helps, sometimes hinders.
diff --git a/secure/lib/libcrypto/opensslconf-powerpc.h b/secure/lib/libcrypto/opensslconf-powerpc.h
index 341daa2..45c1abd 100644
--- a/secure/lib/libcrypto/opensslconf-powerpc.h
+++ b/secure/lib/libcrypto/opensslconf-powerpc.h
@@ -21,12 +21,18 @@ extern "C" {
#ifndef OPENSSL_NO_KRB5
# define OPENSSL_NO_KRB5
#endif
+#ifndef OPENSSL_NO_LIBUNBOUND
+# define OPENSSL_NO_LIBUNBOUND
+#endif
#ifndef OPENSSL_NO_MD2
# define OPENSSL_NO_MD2
#endif
#ifndef OPENSSL_NO_SCTP
# define OPENSSL_NO_SCTP
#endif
+#ifndef OPENSSL_NO_SSL_TRACE
+# define OPENSSL_NO_SSL_TRACE
+#endif
#ifndef OPENSSL_NO_SSL2
# define OPENSSL_NO_SSL2
#endif
@@ -66,12 +72,18 @@ extern "C" {
# if defined(OPENSSL_NO_KRB5) && !defined(NO_KRB5)
# define NO_KRB5
# endif
+# if defined(OPENSSL_NO_LIBUNBOUND) && !defined(NO_LIBUNBOUND)
+# define NO_LIBUNBOUND
+# endif
# if defined(OPENSSL_NO_MD2) && !defined(NO_MD2)
# define NO_MD2
# endif
# if defined(OPENSSL_NO_SCTP) && !defined(NO_SCTP)
# define NO_SCTP
# endif
+# if defined(OPENSSL_NO_SSL_TRACE) && !defined(NO_SSL_TRACE)
+# define NO_SSL_TRACE
+# endif
# if defined(OPENSSL_NO_SSL2) && !defined(NO_SSL2)
# define NO_SSL2
# endif
@@ -195,7 +207,7 @@ extern "C" {
#endif
#if defined(DES_RISC1) && defined(DES_RISC2)
-YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
+#error YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
#endif
/* Unroll the inner loop, this sometimes helps, sometimes hinders.
diff --git a/secure/lib/libcrypto/opensslconf-sparc64.h b/secure/lib/libcrypto/opensslconf-sparc64.h
index 23abf0c..564d5e1 100644
--- a/secure/lib/libcrypto/opensslconf-sparc64.h
+++ b/secure/lib/libcrypto/opensslconf-sparc64.h
@@ -21,12 +21,18 @@ extern "C" {
#ifndef OPENSSL_NO_KRB5
# define OPENSSL_NO_KRB5
#endif
+#ifndef OPENSSL_NO_LIBUNBOUND
+# define OPENSSL_NO_LIBUNBOUND
+#endif
#ifndef OPENSSL_NO_MD2
# define OPENSSL_NO_MD2
#endif
#ifndef OPENSSL_NO_SCTP
# define OPENSSL_NO_SCTP
#endif
+#ifndef OPENSSL_NO_SSL_TRACE
+# define OPENSSL_NO_SSL_TRACE
+#endif
#ifndef OPENSSL_NO_SSL2
# define OPENSSL_NO_SSL2
#endif
@@ -66,12 +72,18 @@ extern "C" {
# if defined(OPENSSL_NO_KRB5) && !defined(NO_KRB5)
# define NO_KRB5
# endif
+# if defined(OPENSSL_NO_LIBUNBOUND) && !defined(NO_LIBUNBOUND)
+# define NO_LIBUNBOUND
+# endif
# if defined(OPENSSL_NO_MD2) && !defined(NO_MD2)
# define NO_MD2
# endif
# if defined(OPENSSL_NO_SCTP) && !defined(NO_SCTP)
# define NO_SCTP
# endif
+# if defined(OPENSSL_NO_SSL_TRACE) && !defined(NO_SSL_TRACE)
+# define NO_SSL_TRACE
+# endif
# if defined(OPENSSL_NO_SSL2) && !defined(NO_SSL2)
# define NO_SSL2
# endif
@@ -186,7 +198,7 @@ extern "C" {
#endif
#if defined(DES_RISC1) && defined(DES_RISC2)
-YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
+#error YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
#endif
/* Unroll the inner loop, this sometimes helps, sometimes hinders.
diff --git a/secure/lib/libcrypto/opensslconf-x86.h b/secure/lib/libcrypto/opensslconf-x86.h
index ed3ee20..4aab445 100644
--- a/secure/lib/libcrypto/opensslconf-x86.h
+++ b/secure/lib/libcrypto/opensslconf-x86.h
@@ -21,12 +21,18 @@ extern "C" {
#ifndef OPENSSL_NO_KRB5
# define OPENSSL_NO_KRB5
#endif
+#ifndef OPENSSL_NO_LIBUNBOUND
+# define OPENSSL_NO_LIBUNBOUND
+#endif
#ifndef OPENSSL_NO_MD2
# define OPENSSL_NO_MD2
#endif
#ifndef OPENSSL_NO_SCTP
# define OPENSSL_NO_SCTP
#endif
+#ifndef OPENSSL_NO_SSL_TRACE
+# define OPENSSL_NO_SSL_TRACE
+#endif
#ifndef OPENSSL_NO_SSL2
# define OPENSSL_NO_SSL2
#endif
@@ -63,12 +69,18 @@ extern "C" {
# if defined(OPENSSL_NO_KRB5) && !defined(NO_KRB5)
# define NO_KRB5
# endif
+# if defined(OPENSSL_NO_LIBUNBOUND) && !defined(NO_LIBUNBOUND)
+# define NO_LIBUNBOUND
+# endif
# if defined(OPENSSL_NO_MD2) && !defined(NO_MD2)
# define NO_MD2
# endif
# if defined(OPENSSL_NO_SCTP) && !defined(NO_SCTP)
# define NO_SCTP
# endif
+# if defined(OPENSSL_NO_SSL_TRACE) && !defined(NO_SSL_TRACE)
+# define NO_SSL_TRACE
+# endif
# if defined(OPENSSL_NO_SSL2) && !defined(NO_SSL2)
# define NO_SSL2
# endif
@@ -215,7 +227,7 @@ extern "C" {
#endif
#if defined(DES_RISC1) && defined(DES_RISC2)
-YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
+#error YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
#endif
/* Unroll the inner loop, this sometimes helps, sometimes hinders.
diff --git a/secure/lib/libssl/Makefile b/secure/lib/libssl/Makefile
index 271c0ce..6b2bf3a 100644
--- a/secure/lib/libssl/Makefile
+++ b/secure/lib/libssl/Makefile
@@ -1,7 +1,7 @@
# $FreeBSD$
LIB= ssl
-SHLIB_MAJOR= 7
+SHLIB_MAJOR= 8
NO_LINT=
@@ -10,13 +10,13 @@ NO_LINT=
.endif
.include "../libcrypto/Makefile.inc"
-SRCS= bio_ssl.c d1_both.c d1_clnt.c d1_enc.c d1_lib.c d1_meth.c d1_pkt.c \
- d1_srtp.c d1_srvr.c s23_clnt.c s23_lib.c s23_meth.c s23_pkt.c \
- s23_srvr.c s3_both.c s3_cbc.c s3_clnt.c s3_enc.c s3_lib.c s3_meth.c \
- s3_pkt.c s3_srvr.c ssl_algs.c ssl_asn1.c ssl_cert.c ssl_ciph.c \
+SRCS= bio_ssl.c d1_both.c d1_clnt.c d1_lib.c d1_meth.c d1_pkt.c d1_srtp.c \
+ d1_srvr.c s23_clnt.c s23_lib.c s23_meth.c s23_pkt.c s23_srvr.c \
+ s3_both.c s3_cbc.c s3_clnt.c s3_enc.c s3_lib.c s3_meth.c s3_pkt.c \
+ s3_srvr.c ssl_algs.c ssl_asn1.c ssl_cert.c ssl_ciph.c ssl_conf.c \
ssl_err.c ssl_err2.c ssl_lib.c ssl_rsa.c ssl_sess.c ssl_stat.c \
- ssl_txt.c t1_clnt.c t1_enc.c t1_lib.c t1_meth.c t1_reneg.c t1_srvr.c \
- tls_srp.c
+ ssl_txt.c t1_clnt.c t1_enc.c t1_ext.c t1_lib.c t1_meth.c t1_reneg.c \
+ t1_srvr.c tls_srp.c
INCS= dtls1.h kssl.h srtp.h ssl.h ssl2.h ssl23.h ssl3.h tls1.h
INCSDIR=${INCLUDEDIR}/openssl
diff --git a/secure/lib/libssl/Makefile.man b/secure/lib/libssl/Makefile.man
index 5302f4f..c06c27f 100644
--- a/secure/lib/libssl/Makefile.man
+++ b/secure/lib/libssl/Makefile.man
@@ -2,11 +2,19 @@
# DO NOT EDIT: generated from man-makefile-update target
MAN+= SSL_CIPHER_get_name.3
MAN+= SSL_COMP_add_compression_method.3
+MAN+= SSL_CONF_CTX_new.3
+MAN+= SSL_CONF_CTX_set1_prefix.3
+MAN+= SSL_CONF_CTX_set_flags.3
+MAN+= SSL_CONF_CTX_set_ssl_ctx.3
+MAN+= SSL_CONF_cmd.3
+MAN+= SSL_CONF_cmd_argv.3
+MAN+= SSL_CTX_add1_chain_cert.3
MAN+= SSL_CTX_add_extra_chain_cert.3
MAN+= SSL_CTX_add_session.3
MAN+= SSL_CTX_ctrl.3
MAN+= SSL_CTX_flush_sessions.3
MAN+= SSL_CTX_free.3
+MAN+= SSL_CTX_get0_param.3
MAN+= SSL_CTX_get_ex_new_index.3
MAN+= SSL_CTX_get_verify_mode.3
MAN+= SSL_CTX_load_verify_locations.3
@@ -15,11 +23,15 @@ MAN+= SSL_CTX_sess_number.3
MAN+= SSL_CTX_sess_set_cache_size.3
MAN+= SSL_CTX_sess_set_get_cb.3
MAN+= SSL_CTX_sessions.3
+MAN+= SSL_CTX_set1_curves.3
+MAN+= SSL_CTX_set1_verify_cert_store.3
+MAN+= SSL_CTX_set_cert_cb.3
MAN+= SSL_CTX_set_cert_store.3
MAN+= SSL_CTX_set_cert_verify_callback.3
MAN+= SSL_CTX_set_cipher_list.3
MAN+= SSL_CTX_set_client_CA_list.3
MAN+= SSL_CTX_set_client_cert_cb.3
+MAN+= SSL_CTX_set_custom_cli_ext.3
MAN+= SSL_CTX_set_default_passwd_cb.3
MAN+= SSL_CTX_set_generate_session_id.3
MAN+= SSL_CTX_set_info_callback.3
@@ -40,6 +52,7 @@ MAN+= SSL_CTX_set_tmp_rsa_callback.3
MAN+= SSL_CTX_set_verify.3
MAN+= SSL_CTX_use_certificate.3
MAN+= SSL_CTX_use_psk_identity_hint.3
+MAN+= SSL_CTX_use_serverinfo.3
MAN+= SSL_SESSION_free.3
MAN+= SSL_SESSION_get_ex_new_index.3
MAN+= SSL_SESSION_get_time.3
@@ -87,6 +100,27 @@ MAN+= ssl.3
MLINKS+= SSL_CIPHER_get_name.3 SSL_CIPHER_get_bits.3
MLINKS+= SSL_CIPHER_get_name.3 SSL_CIPHER_get_version.3
MLINKS+= SSL_CIPHER_get_name.3 SSL_CIPHER_description.3
+MLINKS+= SSL_COMP_add_compression_method.3 SSL_COMP_free_compression_methods.3
+MLINKS+= SSL_CONF_CTX_new.3 SSL_CONF_CTX_free.3
+MLINKS+= SSL_CONF_CTX_set_flags.3 SSL_CONF_CTX_clear_flags.3
+MLINKS+= SSL_CONF_CTX_set_ssl_ctx.3 SSL_CONF_CTX_set_ssl.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_CTX_set0_chain.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_CTX_set1_chain.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_CTX_add0_chain_cert.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_CTX_get0_chain_certs.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_CTX_clear_chain_certs.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_set0_chain.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_set1_chain.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_add0_chain_cert.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_add1_chain_cert.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_get0_chain_certs.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_clear_chain_certs.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_CTX_build_cert_chain.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_build_cert_chain.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_CTX_select_current_cert.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_select_current_cert.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_CTX_set_current_cert.3
+MLINKS+= SSL_CTX_add1_chain_cert.3 SSL_set_current_cert.3
MLINKS+= SSL_CTX_add_session.3 SSL_add_session.3
MLINKS+= SSL_CTX_add_session.3 SSL_CTX_remove_session.3
MLINKS+= SSL_CTX_add_session.3 SSL_remove_session.3
@@ -94,6 +128,9 @@ MLINKS+= SSL_CTX_ctrl.3 SSL_CTX_callback_ctrl.3
MLINKS+= SSL_CTX_ctrl.3 SSL_ctrl.3
MLINKS+= SSL_CTX_ctrl.3 SSL_callback_ctrl.3
MLINKS+= SSL_CTX_flush_sessions.3 SSL_flush_sessions.3
+MLINKS+= SSL_CTX_get0_param.3 SSL_get0_param.3
+MLINKS+= SSL_CTX_get0_param.3 SSL_CTX_set1_param.3
+MLINKS+= SSL_CTX_get0_param.3 SSL_set1_param.3
MLINKS+= SSL_CTX_get_ex_new_index.3 SSL_CTX_set_ex_data.3
MLINKS+= SSL_CTX_get_ex_new_index.3 SSL_CTX_get_ex_data.3
MLINKS+= SSL_CTX_get_verify_mode.3 SSL_get_verify_mode.3
@@ -118,12 +155,29 @@ MLINKS+= SSL_CTX_sess_set_get_cb.3 SSL_CTX_sess_set_remove_cb.3
MLINKS+= SSL_CTX_sess_set_get_cb.3 SSL_CTX_sess_get_new_cb.3
MLINKS+= SSL_CTX_sess_set_get_cb.3 SSL_CTX_sess_get_remove_cb.3
MLINKS+= SSL_CTX_sess_set_get_cb.3 SSL_CTX_sess_get_get_cb.3
+MLINKS+= SSL_CTX_set1_curves.3 SSL_CTX_set1_curves_list.3
+MLINKS+= SSL_CTX_set1_curves.3 SSL_set1_curves.3
+MLINKS+= SSL_CTX_set1_curves.3 SSL_set1_curves_list.3
+MLINKS+= SSL_CTX_set1_curves.3 SSL_get1_curves.3
+MLINKS+= SSL_CTX_set1_curves.3 SSL_get_shared_curve.3
+MLINKS+= SSL_CTX_set1_curves.3 SSL_CTX_set_ecdh_auto.3
+MLINKS+= SSL_CTX_set1_curves.3 SSL_set_ecdh_auto.3
+MLINKS+= SSL_CTX_set1_verify_cert_store.3 SSL_CTX_set0_verify_cert_store.3
+MLINKS+= SSL_CTX_set1_verify_cert_store.3 SSL_CTX_set0_chain_cert_store.3
+MLINKS+= SSL_CTX_set1_verify_cert_store.3 SSL_CTX_set1_chain_cert_store.3
+MLINKS+= SSL_CTX_set1_verify_cert_store.3 SSL_set0_verify_cert_store.3
+MLINKS+= SSL_CTX_set1_verify_cert_store.3 SSL_set1_verify_cert_store.3
+MLINKS+= SSL_CTX_set1_verify_cert_store.3 SSL_set0_chain_cert_store.3
+MLINKS+= SSL_CTX_set1_verify_cert_store.3 SSL_set1_chain_cert_store.3
+MLINKS+= SSL_CTX_set_cert_cb.3 SSL_set_cert_cb.3
MLINKS+= SSL_CTX_set_cert_store.3 SSL_CTX_get_cert_store.3
MLINKS+= SSL_CTX_set_cipher_list.3 SSL_set_cipher_list.3
MLINKS+= SSL_CTX_set_client_CA_list.3 SSL_set_client_CA_list.3
MLINKS+= SSL_CTX_set_client_CA_list.3 SSL_CTX_add_client_CA.3
MLINKS+= SSL_CTX_set_client_CA_list.3 SSL_add_client_CA.3
MLINKS+= SSL_CTX_set_client_cert_cb.3 SSL_CTX_get_client_cert_cb.3
+MLINKS+= SSL_CTX_set_custom_cli_ext.3 SSL_CTX_add_client_custom_ext.3
+MLINKS+= SSL_CTX_set_custom_cli_ext.3 SSL_CTX_add_server_custom_ext.3
MLINKS+= SSL_CTX_set_default_passwd_cb.3 SSL_CTX_set_default_passwd_cb_userdata.3
MLINKS+= SSL_CTX_set_generate_session_id.3 SSL_set_generate_session_id.3
MLINKS+= SSL_CTX_set_generate_session_id.3 SSL_has_matching_session_id.3
@@ -193,6 +247,7 @@ MLINKS+= SSL_CTX_use_certificate.3 SSL_check_private_key.3
MLINKS+= SSL_CTX_use_psk_identity_hint.3 SSL_use_psk_identity_hint.3
MLINKS+= SSL_CTX_use_psk_identity_hint.3 SSL_CTX_set_psk_server_callback.3
MLINKS+= SSL_CTX_use_psk_identity_hint.3 SSL_set_psk_server_callback.3
+MLINKS+= SSL_CTX_use_serverinfo.3 SSL_CTX_use_serverinfo_file.3
MLINKS+= SSL_SESSION_get_ex_new_index.3 SSL_SESSION_set_ex_data.3
MLINKS+= SSL_SESSION_get_ex_new_index.3 SSL_SESSION_get_ex_data.3
MLINKS+= SSL_SESSION_get_time.3 SSL_SESSION_set_time.3
diff --git a/secure/lib/libssl/man/SSL_CIPHER_get_name.3 b/secure/lib/libssl/man/SSL_CIPHER_get_name.3
index db90114..79b58fb 100644
--- a/secure/lib/libssl/man/SSL_CIPHER_get_name.3
+++ b/secure/lib/libssl/man/SSL_CIPHER_get_name.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CIPHER_get_name 3"
-.TH SSL_CIPHER_get_name 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CIPHER_get_name 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -235,10 +235,21 @@ library crashes.
If \fISSL_CIPHER_description()\fR cannot handle a built-in cipher, the according
description of the cipher property is \fBunknown\fR. This case should not
occur.
+.PP
+The standard terminology for ephemeral Diffie-Hellman schemes is \s-1DHE
+\&\s0(finite field) or \s-1ECDHE \s0(elliptic curve). This version of OpenSSL
+idiosyncratically reports these schemes as \s-1EDH\s0 and \s-1EECDH,\s0 even though
+it also accepts the standard terminology.
+.PP
+It is recommended to use the standard terminology (\s-1DHE\s0 and \s-1ECDHE\s0)
+during configuration (e.g. via SSL_CTX_set_cipher_list) for clarity.
+OpenSSL versions after 1.0.2 will report the standard
+terms via SSL_CIPHER_get_name and SSL_CIPHER_description.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
See \s-1DESCRIPTION\s0
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fIssl\fR\|(3), \fISSL_get_current_cipher\fR\|(3),
-\&\fISSL_get_ciphers\fR\|(3), \fIciphers\fR\|(1)
+\&\fISSL_get_ciphers\fR\|(3), \fIciphers\fR\|(1),
+\&\fISSL_CTX_set_cipher_list\fR\|(3)
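A usage sketch of the naming note above; the helper name and cipher string are illustrative assumptions, not from the page. Configure with the standard aliases, then inspect what this version reports.

    #include <stdio.h>
    #include <openssl/ssl.h>

    /* Sketch: configure using standard DHE/ECDHE terminology; this OpenSSL
     * version then reports the negotiated suite with the EDH/EECDH spelling. */
    void report_cipher(SSL_CTX *ctx, const SSL *ssl)
    {
        char buf[128];
        const SSL_CIPHER *cipher;

        if (!SSL_CTX_set_cipher_list(ctx, "ECDHE:DHE:!aNULL:!eNULL"))
            return;                            /* no matching ciphers */
        cipher = SSL_get_current_cipher(ssl);  /* NULL until a handshake runs */
        if (cipher != NULL)
            printf("%s", SSL_CIPHER_description(cipher, buf, sizeof(buf)));
    }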
diff --git a/secure/lib/libssl/man/SSL_COMP_add_compression_method.3 b/secure/lib/libssl/man/SSL_COMP_add_compression_method.3
index d8b4a76..6b2da97 100644
--- a/secure/lib/libssl/man/SSL_COMP_add_compression_method.3
+++ b/secure/lib/libssl/man/SSL_COMP_add_compression_method.3
@@ -133,19 +133,21 @@
.\" ========================================================================
.\"
.IX Title "SSL_COMP_add_compression_method 3"
-.TH SSL_COMP_add_compression_method 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_COMP_add_compression_method 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-SSL_COMP_add_compression_method \- handle SSL/TLS integrated compression methods
+SSL_COMP_add_compression_method, SSL_COMP_free_compression_methods \- handle SSL/TLS integrated compression methods
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Vb 1
\& #include <openssl/ssl.h>
\&
\& int SSL_COMP_add_compression_method(int id, COMP_METHOD *cm);
+\&
+\& void SSL_COMP_free_compression_methods(void);
.Ve
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
@@ -153,6 +155,10 @@ SSL_COMP_add_compression_method \- handle SSL/TLS integrated compression methods
the identifier \fBid\fR to the list of available compression methods. This
list is globally maintained for all \s-1SSL\s0 operations within this application.
It cannot be set for specific \s-1SSL_CTX\s0 or \s-1SSL\s0 objects.
+.PP
+\&\fISSL_COMP_free_compression_methods()\fR frees the internal table of
+compression methods, including any methods that were added with
+\fISSL_COMP_add_compression_method()\fR.
.SH "NOTES"
.IX Header "NOTES"
The \s-1TLS\s0 standard (or SSLv3) allows the integration of compression methods
@@ -174,8 +180,8 @@ its own compression methods and will unconditionally activate compression
when a matching identifier is found. There is no way to restrict the list
of compression methods supported on a per connection basis.
.PP
-The OpenSSL library has the compression methods \fB\f(BICOMP_rle()\fB\fR and (when
-especially enabled during compilation) \fB\f(BICOMP_zlib()\fB\fR available.
+If enabled during compilation, the OpenSSL library will have the
+\&\fICOMP_zlib()\fR compression method available.
.SH "WARNINGS"
.IX Header "WARNINGS"
Once the identities of the compression methods for the \s-1TLS\s0 protocol have
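A minimal sketch of the pair of calls documented above; the id 0xe0 and the helper names are assumptions, and error handling is abbreviated.

    #include <openssl/ssl.h>
    #include <openssl/comp.h>

    /* Sketch: register zlib compression (only meaningful when OpenSSL was
     * built with zlib support) under a private-range id, then free the
     * global method table at shutdown. */
    int setup_compression(void)
    {
        COMP_METHOD *cm = COMP_zlib();

        /* SSL_COMP_add_compression_method() returns 0 on success */
        return cm != NULL && SSL_COMP_add_compression_method(0xe0, cm) == 0;
    }

    void teardown_compression(void)
    {
        SSL_COMP_free_compression_methods();
    }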
diff --git a/secure/lib/libssl/man/SSL_CONF_CTX_new.3 b/secure/lib/libssl/man/SSL_CONF_CTX_new.3
new file mode 100644
index 0000000..d72e8c6
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CONF_CTX_new.3
@@ -0,0 +1,172 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CONF_CTX_new 3"
+.TH SSL_CONF_CTX_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CONF_CTX_new, SSL_CONF_CTX_free \- SSL configuration allocation functions
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& SSL_CONF_CTX *SSL_CONF_CTX_new(void);
+\& void SSL_CONF_CTX_free(SSL_CONF_CTX *cctx);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+The function \fISSL_CONF_CTX_new()\fR allocates and initialises an \fB\s-1SSL_CONF_CTX\s0\fR
+structure for use with the \s-1SSL_CONF\s0 functions.
+.PP
+The function \fISSL_CONF_CTX_free()\fR frees up the context \fBcctx\fR.
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+\&\fISSL_CONF_CTX_new()\fR returns either the newly allocated \fB\s-1SSL_CONF_CTX\s0\fR structure
+or \fB\s-1NULL\s0\fR if an error occurs.
+.PP
+\&\fISSL_CONF_CTX_free()\fR does not return a value.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fISSL_CONF_CTX_set_flags\fR\|(3),
+\&\fISSL_CONF_CTX_set_ssl_ctx\fR\|(3),
+\&\fISSL_CONF_CTX_set1_prefix\fR\|(3),
+\&\fISSL_CONF_cmd\fR\|(3),
+\&\fISSL_CONF_cmd_argv\fR\|(3)
+.SH "HISTORY"
+.IX Header "HISTORY"
+These functions were first added to OpenSSL 1.0.2.
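A minimal lifetime sketch for the two functions above; the surrounding configuration calls are placeholders and the helper name is an assumption.

    #include <openssl/ssl.h>

    int conf_ctx_lifetime(void)
    {
        SSL_CONF_CTX *cctx = SSL_CONF_CTX_new();

        if (cctx == NULL)
            return 0;               /* allocation failed */
        /* ... SSL_CONF_CTX_set_flags(), SSL_CONF_cmd(), SSL_CONF_finish() ... */
        SSL_CONF_CTX_free(cctx);
        return 1;
    }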
diff --git a/secure/lib/libssl/man/SSL_CONF_CTX_set1_prefix.3 b/secure/lib/libssl/man/SSL_CONF_CTX_set1_prefix.3
new file mode 100644
index 0000000..99b6f95
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CONF_CTX_set1_prefix.3
@@ -0,0 +1,180 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CONF_CTX_set1_prefix 3"
+.TH SSL_CONF_CTX_set1_prefix 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CONF_CTX_set1_prefix \- Set configuration context command prefix
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& unsigned int SSL_CONF_CTX_set1_prefix(SSL_CONF_CTX *cctx, const char *prefix);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+The function \fISSL_CONF_CTX_set1_prefix()\fR sets the command prefix of \fBcctx\fR
+to \fBprefix\fR. If \fBprefix\fR is \fB\s-1NULL\s0\fR it is restored to the default value.
+.SH "NOTES"
+.IX Header "NOTES"
+Command prefixes alter the commands recognised by subsequent \fISSL_CONF_cmd()\fR
+calls. For example, for files, if the prefix \*(L"\s-1SSL\*(R"\s0 is set then command names
+such as \*(L"SSLProtocol\*(R", \*(L"SSLOptions\*(R" etc. are recognised instead of \*(L"Protocol\*(R"
+and \*(L"Options\*(R". Similarly for command lines if the prefix is \*(L"\-\-ssl\-\*(R" then
+\&\*(L"\-\-ssl\-no_tls1_2\*(R" is recognised instead of \*(L"\-no_tls1_2\*(R".
+.PP
+If the \fB\s-1SSL_CONF_FLAG_CMDLINE\s0\fR flag is set then prefix checks are case
+sensitive and \*(L"\-\*(R" is the default. In the unlikely event that an application
+explicitly wants no prefix, it must be explicitly set to "".
+.PP
+If the \fB\s-1SSL_CONF_FLAG_FILE\s0\fR flag is set then prefix checks are case
+insensitive and no prefix is the default.
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+\&\fISSL_CONF_CTX_set1_prefix()\fR returns 1 for success and 0 for failure.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fISSL_CONF_CTX_new\fR\|(3),
+\&\fISSL_CONF_CTX_set_flags\fR\|(3),
+\&\fISSL_CONF_CTX_set_ssl_ctx\fR\|(3),
+\&\fISSL_CONF_cmd\fR\|(3),
+\&\fISSL_CONF_cmd_argv\fR\|(3)
+.SH "HISTORY"
+.IX Header "HISTORY"
+This function was first added to OpenSSL 1.0.2.
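A minimal sketch of the command-line prefix behaviour described above; the helper name is an assumption.

    #include <openssl/ssl.h>

    /* Sketch: with the "--ssl-" prefix set for command-line processing,
     * "--ssl-no_tls1_2" is recognised where "-no_tls1_2" would be by default. */
    int use_ssl_prefix(SSL_CONF_CTX *cctx)
    {
        SSL_CONF_CTX_set_flags(cctx, SSL_CONF_FLAG_CMDLINE);
        return SSL_CONF_CTX_set1_prefix(cctx, "--ssl-");
    }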
diff --git a/secure/lib/libssl/man/SSL_CONF_CTX_set_flags.3 b/secure/lib/libssl/man/SSL_CONF_CTX_set_flags.3
new file mode 100644
index 0000000..0412b59
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CONF_CTX_set_flags.3
@@ -0,0 +1,191 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CONF_CTX_set_flags 3"
+.TH SSL_CONF_CTX_set_flags 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CONF_CTX_set_flags, SSL_CONF_CTX_clear_flags \- Set or clear SSL configuration context flags
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& unsigned int SSL_CONF_CTX_set_flags(SSL_CONF_CTX *cctx, unsigned int flags);
+\& unsigned int SSL_CONF_CTX_clear_flags(SSL_CONF_CTX *cctx, unsigned int flags);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+The function \fISSL_CONF_CTX_set_flags()\fR sets \fBflags\fR in the context \fBcctx\fR.
+.PP
+The function \fISSL_CONF_CTX_clear_flags()\fR clears \fBflags\fR in the context \fBcctx\fR.
+.SH "NOTES"
+.IX Header "NOTES"
+The flags set affect how subsequent calls to \fISSL_CONF_cmd()\fR or
+\&\fISSL_CONF_cmd_argv()\fR behave.
+.PP
+Currently the following \fBflags\fR values are recognised:
+.IP "\s-1SSL_CONF_FLAG_CMDLINE, SSL_CONF_FLAG_FILE\s0" 4
+.IX Item "SSL_CONF_FLAG_CMDLINE, SSL_CONF_FLAG_FILE"
+recognise options intended for command line or configuration file use. At
+least one of these flags must be set.
+.IP "\s-1SSL_CONF_FLAG_CLIENT, SSL_CONF_FLAG_SERVER\s0" 4
+.IX Item "SSL_CONF_FLAG_CLIENT, SSL_CONF_FLAG_SERVER"
+recognise options intended for use in \s-1SSL/TLS\s0 clients or servers. One or
+both of these flags must be set.
+.IP "\s-1SSL_CONF_FLAG_CERTIFICATE\s0" 4
+.IX Item "SSL_CONF_FLAG_CERTIFICATE"
+recognise certificate and private key options.
+.IP "\s-1SSL_CONF_FLAG_SHOW_ERRORS\s0" 4
+.IX Item "SSL_CONF_FLAG_SHOW_ERRORS"
+indicate errors relating to unrecognised options or missing arguments in
+the error queue. If this option isn't set, such errors are only reflected
+in the return values of \fISSL_CONF_cmd()\fR or \fISSL_CONF_cmd_argv()\fR.
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+\&\fISSL_CONF_CTX_set_flags()\fR and \fISSL_CONF_CTX_clear_flags()\fR return the new flags
+value after setting or clearing flags.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fISSL_CONF_CTX_new\fR\|(3),
+\&\fISSL_CONF_CTX_set_ssl_ctx\fR\|(3),
+\&\fISSL_CONF_CTX_set1_prefix\fR\|(3),
+\&\fISSL_CONF_cmd\fR\|(3),
+\&\fISSL_CONF_cmd_argv\fR\|(3)
+.SH "HISTORY"
+.IX Header "HISTORY"
+These functions were first added to OpenSSL 1.0.2.
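A minimal sketch combining the flags above for a typical server reading a configuration file; the helper name is an assumption.

    #include <openssl/ssl.h>

    void configure_flags(SSL_CONF_CTX *cctx)
    {
        /* file-style commands, server interpretation, certificate commands,
         * and errors reported on the error queue */
        SSL_CONF_CTX_set_flags(cctx, SSL_CONF_FLAG_FILE
                                     | SSL_CONF_FLAG_SERVER
                                     | SSL_CONF_FLAG_CERTIFICATE
                                     | SSL_CONF_FLAG_SHOW_ERRORS);
    }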
diff --git a/secure/lib/libssl/man/SSL_CONF_CTX_set_ssl_ctx.3 b/secure/lib/libssl/man/SSL_CONF_CTX_set_ssl_ctx.3
new file mode 100644
index 0000000..c07a3eb
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CONF_CTX_set_ssl_ctx.3
@@ -0,0 +1,178 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CONF_CTX_set_ssl_ctx 3"
+.TH SSL_CONF_CTX_set_ssl_ctx 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CONF_CTX_set_ssl_ctx, SSL_CONF_CTX_set_ssl \- set context to configure
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& void SSL_CONF_CTX_set_ssl_ctx(SSL_CONF_CTX *cctx, SSL_CTX *ctx);
+\& void SSL_CONF_CTX_set_ssl(SSL_CONF_CTX *cctx, SSL *ssl);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+\&\fISSL_CONF_CTX_set_ssl_ctx()\fR sets the context associated with \fBcctx\fR to the
+\&\fB\s-1SSL_CTX\s0\fR structure \fBctx\fR. Any previous \fB\s-1SSL\s0\fR or \fB\s-1SSL_CTX\s0\fR associated with
+\&\fBcctx\fR is cleared. Subsequent calls to \fISSL_CONF_cmd()\fR will be sent to
+\&\fBctx\fR.
+.PP
+\&\fISSL_CONF_CTX_set_ssl()\fR sets the context associated with \fBcctx\fR to the
+\&\fB\s-1SSL\s0\fR structure \fBssl\fR. Any previous \fB\s-1SSL\s0\fR or \fB\s-1SSL_CTX\s0\fR associated with
+\&\fBcctx\fR is cleared. Subsequent calls to \fISSL_CONF_cmd()\fR will be sent to
+\&\fBssl\fR.
+.SH "NOTES"
+.IX Header "NOTES"
+The context need not be set, or it can be set to \fB\s-1NULL\s0\fR, in which case only
+syntax checking of commands is performed, where possible.
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+\&\fISSL_CONF_CTX_set_ssl_ctx()\fR and \fISSL_CONF_CTX_set_ssl()\fR do not return a value.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fISSL_CONF_CTX_new\fR\|(3),
+\&\fISSL_CONF_CTX_set_flags\fR\|(3),
+\&\fISSL_CONF_CTX_set1_prefix\fR\|(3),
+\&\fISSL_CONF_cmd\fR\|(3),
+\&\fISSL_CONF_cmd_argv\fR\|(3)
+.SH "HISTORY"
+.IX Header "HISTORY"
+These functions were first added to OpenSSL 1.0.2.
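A minimal sketch of attaching a context, per the description above; passing NULL instead would leave only syntax checking. The helper name is an assumption.

    #include <openssl/ssl.h>

    void attach_ssl_ctx(SSL_CONF_CTX *cctx, SSL_CTX *ctx)
    {
        /* subsequent SSL_CONF_cmd() calls now act on ctx */
        SSL_CONF_CTX_set_ssl_ctx(cctx, ctx);
    }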
diff --git a/secure/lib/libssl/man/SSL_CONF_cmd.3 b/secure/lib/libssl/man/SSL_CONF_cmd.3
new file mode 100644
index 0000000..0d38c4c
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CONF_cmd.3
@@ -0,0 +1,536 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CONF_cmd 3"
+.TH SSL_CONF_cmd 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CONF_cmd \- send configuration command
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& int SSL_CONF_cmd(SSL_CONF_CTX *cctx, const char *cmd, const char *value);
+\& int SSL_CONF_cmd_value_type(SSL_CONF_CTX *cctx, const char *cmd);
+\& int SSL_CONF_finish(SSL_CONF_CTX *cctx);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+The function \fISSL_CONF_cmd()\fR performs configuration operation \fBcmd\fR with
+optional parameter \fBvalue\fR on \fBcctx\fR. Its purpose is to simplify application
+configuration of \fB\s-1SSL_CTX\s0\fR or \fB\s-1SSL\s0\fR structures by providing a common
+framework for command line options or configuration files.
+.PP
+\&\fISSL_CONF_cmd_value_type()\fR returns the type of value that \fBcmd\fR refers to.
+.PP
+The function \fISSL_CONF_finish()\fR must be called after all configuration
+operations have been completed. It is used to finalise any operations
+or to process defaults.
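A minimal sketch of the flow just described, using command-line style commands on an SSL_CTX; the command names are examples from the list below and the helper name is an assumption.

    #include <stddef.h>
    #include <openssl/ssl.h>

    int configure_server(SSL_CONF_CTX *cctx, SSL_CTX *ctx)
    {
        SSL_CONF_CTX_set_flags(cctx, SSL_CONF_FLAG_CMDLINE | SSL_CONF_FLAG_SERVER);
        SSL_CONF_CTX_set_ssl_ctx(cctx, ctx);
        /* SSL_CONF_cmd() returns <= 0 on unrecognised commands or bad values */
        if (SSL_CONF_cmd(cctx, "-no_ssl2", NULL) <= 0)
            return 0;
        if (SSL_CONF_cmd(cctx, "-cipher", "ECDHE:DHE:!aNULL") <= 0)
            return 0;
        return SSL_CONF_finish(cctx);   /* process any deferred defaults */
    }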
+.SH "SUPPORTED COMMAND LINE COMMANDS"
+.IX Header "SUPPORTED COMMAND LINE COMMANDS"
+Currently supported \fBcmd\fR names for command lines (i.e. when the
+flag \fB\s-1SSL_CONF_FLAG_CMDLINE\s0\fR is set) are listed below. Note: all \fBcmd\fR names
+are case sensitive. Unless otherwise stated, commands can be used by
+both clients and servers, and the \fBvalue\fR parameter is not used. The default
+prefix for command line commands is \fB\-\fR and that is reflected below.
+.IP "\fB\-sigalgs\fR" 4
+.IX Item "-sigalgs"
+This sets the supported signature algorithms for \s-1TLS\s0 v1.2. For clients this
+value is used directly for the supported signature algorithms extension. For
+servers it is used to determine which signature algorithms to support.
+.Sp
+The \fBvalue\fR argument should be a colon separated list of signature algorithms
+in order of decreasing preference of the form \fBalgorithm+hash\fR. \fBalgorithm\fR
+is one of \fB\s-1RSA\s0\fR, \fB\s-1DSA\s0\fR or \fB\s-1ECDSA\s0\fR and \fBhash\fR is a supported algorithm
+\&\s-1OID\s0 short name such as \fB\s-1SHA1\s0\fR, \fB\s-1SHA224\s0\fR, \fB\s-1SHA256\s0\fR, \fB\s-1SHA384\s0\fR or \fB\s-1SHA512\s0\fR.
+Note: algorithm and hash names are case sensitive.
+.Sp
+If this option is not set then all signature algorithms supported by the
+OpenSSL library are permissible.
+.IP "\fB\-client_sigalgs\fR" 4
+.IX Item "-client_sigalgs"
+This sets the supported signature algorithms associated with client
+authentication for \s-1TLS\s0 v1.2. For servers the value is used in the supported
+signature algorithms field of a certificate request. For clients it is
+used to determine which signature algorithm to use with the client certificate.
+If a server does not request a certificate this option has no effect.
+.Sp
+The syntax of \fBvalue\fR is identical to \fB\-sigalgs\fR. If not set then
+the value set for \fB\-sigalgs\fR will be used instead.
+.IP "\fB\-curves\fR" 4
+.IX Item "-curves"
+This sets the supported elliptic curves. For clients the curves are
+sent using the supported curves extension. For servers it is used
+to determine which curve to use. This setting affects curves used for both
+signatures and key exchange, if applicable.
+.Sp
+The \fBvalue\fR argument is a colon separated list of curves. The curve can be
+either the \fB\s-1NIST\s0\fR name (e.g. \fBP\-256\fR) or an OpenSSL \s-1OID\s0 name (e.g.
+\&\fBprime256v1\fR). Curve names are case sensitive.
+.IP "\fB\-named_curve\fR" 4
+.IX Item "-named_curve"
+This sets the temporary curve used for ephemeral \s-1ECDH\s0 modes. Only used by
+servers.
+.Sp
+The \fBvalue\fR argument is a curve name or the special value \fBauto\fR which
+picks an appropriate curve based on client and server preferences. The curve
+can be either the \fB\s-1NIST\s0\fR name (e.g. \fBP\-256\fR) or an OpenSSL \s-1OID\s0 name
+(e.g. \fBprime256v1\fR). Curve names are case sensitive.
+.IP "\fB\-cipher\fR" 4
+.IX Item "-cipher"
+Sets the cipher suite list to \fBvalue\fR. Note: syntax checking of \fBvalue\fR is
+currently not performed unless a \fB\s-1SSL\s0\fR or \fB\s-1SSL_CTX\s0\fR structure is
+associated with \fBcctx\fR.
+.IP "\fB\-cert\fR" 4
+.IX Item "-cert"
+Attempts to use the file \fBvalue\fR as the certificate for the appropriate
+context. It currently uses \fISSL_CTX_use_certificate_chain_file()\fR if an \fB\s-1SSL_CTX\s0\fR
+structure is set or \fISSL_use_certificate_file()\fR with filetype \s-1PEM\s0 if an \fB\s-1SSL\s0\fR
+structure is set. This option is only supported if certificate operations
+are permitted.
+.IP "\fB\-key\fR" 4
+.IX Item "-key"
+Attempts to use the file \fBvalue\fR as the private key for the appropriate
+context. This option is only supported if certificate operations
+are permitted. Note: if no \fB\-key\fR option is set then a private key is
+not loaded: it does not currently use the \fB\-cert\fR file.
+.IP "\fB\-dhparam\fR" 4
+.IX Item "-dhparam"
+Attempts to use the file \fBvalue\fR as the set of temporary \s-1DH\s0 parameters for
+the appropriate context. This option is only supported if certificate
+operations are permitted.
+.IP "\fB\-no_ssl2\fR, \fB\-no_ssl3\fR, \fB\-no_tls1\fR, \fB\-no_tls1_1\fR, \fB\-no_tls1_2\fR" 4
+.IX Item "-no_ssl2, -no_ssl3, -no_tls1, -no_tls1_1, -no_tls1_2"
+Disables protocol support for SSLv2, SSLv3, \s-1TLS 1.0, TLS 1.1\s0 or \s-1TLS 1.2 \s0
+by setting the corresponding options \fB\s-1SSL_OP_NO_SSL2\s0\fR, \fB\s-1SSL_OP_NO_SSL3\s0\fR,
+\&\fB\s-1SSL_OP_NO_TLS1\s0\fR, \fB\s-1SSL_OP_NO_TLS1_1\s0\fR and \fB\s-1SSL_OP_NO_TLS1_2\s0\fR respectively.
+.IP "\fB\-bugs\fR" 4
+.IX Item "-bugs"
+Various bug workarounds are set, same as setting \fB\s-1SSL_OP_ALL\s0\fR.
+.IP "\fB\-no_comp\fR" 4
+.IX Item "-no_comp"
+Disables support for \s-1SSL/TLS\s0 compression, same as setting \fB\s-1SSL_OP_NO_COMPRESSION\s0\fR.
+.IP "\fB\-no_ticket\fR" 4
+.IX Item "-no_ticket"
+Disables support for session tickets, same as setting \fB\s-1SSL_OP_NO_TICKET\s0\fR.
+.IP "\fB\-serverpref\fR" 4
+.IX Item "-serverpref"
+Use server and not client preference order when determining which cipher suite,
+signature algorithm or elliptic curve to use for an incoming connection.
+Equivalent to \fB\s-1SSL_OP_CIPHER_SERVER_PREFERENCE\s0\fR. Only used by servers.
+.IP "\fB\-no_resumption_on_reneg\fR" 4
+.IX Item "-no_resumption_on_reneg"
+set \s-1SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION\s0 flag. Only used by servers.
+.IP "\fB\-legacyrenegotiation\fR" 4
+.IX Item "-legacyrenegotiation"
+permits the use of unsafe legacy renegotiation. Equivalent to setting
+\&\fB\s-1SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION\s0\fR.
+.IP "\fB\-legacy_server_connect\fR, \fB\-no_legacy_server_connect\fR" 4
+.IX Item "-legacy_server_connect, -no_legacy_server_connect"
+permits or prohibits the use of unsafe legacy renegotiation for OpenSSL
+clients only. Equivalent to setting or clearing \fB\s-1SSL_OP_LEGACY_SERVER_CONNECT\s0\fR.
+Set by default.
+.IP "\fB\-strict\fR" 4
+.IX Item "-strict"
+enables strict mode protocol handling. Equivalent to setting
+\&\fB\s-1SSL_CERT_FLAG_TLS_STRICT\s0\fR.
+.IP "\fB\-debug_broken_protocol\fR" 4
+.IX Item "-debug_broken_protocol"
+disables various checks and permits several kinds of broken protocol behaviour
+for testing purposes: it should \fB\s-1NEVER\s0\fR be used in anything other than a test
+environment. Only supported if OpenSSL is configured with
+\&\fB\-DOPENSSL_SSL_DEBUG_BROKEN_PROTOCOL\fR.
+.SH "SUPPORTED CONFIGURATION FILE COMMANDS"
+.IX Header "SUPPORTED CONFIGURATION FILE COMMANDS"
+Currently supported \fBcmd\fR names for configuration files (i.e. when the
+flag \fB\s-1SSL_CONF_FLAG_FILE\s0\fR is set) are listed below. All configuration file
+\&\fBcmd\fR names are case insensitive, so \fBsignaturealgorithms\fR is recognised
+as well as \fBSignatureAlgorithms\fR. Unless otherwise stated the \fBvalue\fR names
+are also case insensitive.
+.PP
+Note: the command prefix (if set) alters the recognised \fBcmd\fR values.
+.IP "\fBCipherString\fR" 4
+.IX Item "CipherString"
+Sets the cipher suite list to \fBvalue\fR. Note: syntax checking of \fBvalue\fR is
+currently not performed unless an \fB\s-1SSL\s0\fR or \fB\s-1SSL_CTX\s0\fR structure is
+associated with \fBcctx\fR.
+.IP "\fBCertificate\fR" 4
+.IX Item "Certificate"
+Attempts to use the file \fBvalue\fR as the certificate for the appropriate
+context. It currently uses \fISSL_CTX_use_certificate_chain_file()\fR if an \fB\s-1SSL_CTX\s0\fR
+structure is set or \fISSL_use_certificate_file()\fR with filetype \s-1PEM\s0 if an \fB\s-1SSL\s0\fR
+structure is set. This option is only supported if certificate operations
+are permitted.
+.IP "\fBPrivateKey\fR" 4
+.IX Item "PrivateKey"
+Attempts to use the file \fBvalue\fR as the private key for the appropriate
+context. This option is only supported if certificate operations
+are permitted. Note: if no \fB\-key\fR option is set then a private key is
+not loaded: it does not currently use the \fBCertificate\fR file.
+.IP "\fBServerInfoFile\fR" 4
+.IX Item "ServerInfoFile"
+Attempts to use the file \fBvalue\fR in the \*(L"serverinfo\*(R" extension using the
+function \fISSL_CTX_use_serverinfo_file()\fR.
+.IP "\fBDHParameters\fR" 4
+.IX Item "DHParameters"
+Attempts to use the file \fBvalue\fR as the set of temporary \s-1DH\s0 parameters for
+the appropriate context. This option is only supported if certificate
+operations are permitted.
+.IP "\fBSignatureAlgorithms\fR" 4
+.IX Item "SignatureAlgorithms"
+This sets the supported signature algorithms for \s-1TLS\s0 v1.2. For clients this
+value is used directly for the supported signature algorithms extension. For
+servers it is used to determine which signature algorithms to support.
+.Sp
+The \fBvalue\fR argument should be a colon separated list of signature algorithms
+in order of decreasing preference of the form \fBalgorithm+hash\fR. \fBalgorithm\fR
+is one of \fB\s-1RSA\s0\fR, \fB\s-1DSA\s0\fR or \fB\s-1ECDSA\s0\fR and \fBhash\fR is a supported algorithm
+\&\s-1OID\s0 short name such as \fB\s-1SHA1\s0\fR, \fB\s-1SHA224\s0\fR, \fB\s-1SHA256\s0\fR, \fB\s-1SHA384\s0\fR or \fB\s-1SHA512\s0\fR.
+Note: algorithm and hash names are case sensitive.
+.Sp
+If this option is not set then all signature algorithms supported by the
+OpenSSL library are permissible.
+.IP "\fBClientSignatureAlgorithms\fR" 4
+.IX Item "ClientSignatureAlgorithms"
+This sets the supported signature algorithms associated with client
+authentication for \s-1TLS\s0 v1.2. For servers the value is used in the supported
+signature algorithms field of a certificate request. For clients it is
+used to determine which signature algorithm to use with the client certificate.
+.Sp
+The syntax of \fBvalue\fR is identical to \fBSignatureAlgorithms\fR. If not set then
+the value set for \fBSignatureAlgorithms\fR will be used instead.
+.IP "\fBCurves\fR" 4
+.IX Item "Curves"
+This sets the supported elliptic curves. For clients the curves are
+sent using the supported curves extension. For servers it is used
+to determine which curve to use. This setting affects curves used for both
+signatures and key exchange, if applicable.
+.Sp
+The \fBvalue\fR argument is a colon separated list of curves. The curve can be
+either the \fB\s-1NIST\s0\fR name (e.g. \fBP\-256\fR) or an OpenSSL \s-1OID\s0 name (e.g.
+\&\fBprime256v1\fR). Curve names are case sensitive.
+.IP "\fBECDHParameters\fR" 4
+.IX Item "ECDHParameters"
+This sets the temporary curve used for ephemeral \s-1ECDH\s0 modes. Only used by
+servers.
+.Sp
+The \fBvalue\fR argument is a curve name or the special value \fBAutomatic\fR which
+picks an appropriate curve based on client and server preferences. The curve
+can be either the \fB\s-1NIST\s0\fR name (e.g. \fBP\-256\fR) or an OpenSSL \s-1OID\s0 name
+(e.g. \fBprime256v1\fR). Curve names are case sensitive.
+.IP "\fBProtocol\fR" 4
+.IX Item "Protocol"
+The supported versions of the \s-1SSL\s0 or \s-1TLS\s0 protocol.
+.Sp
+The \fBvalue\fR argument is a comma separated list of supported protocols to
+enable or disable. If a protocol is preceded by \fB\-\fR that version is disabled.
+All versions are enabled by default, though applications may choose to
+explicitly disable some. Currently supported protocol values are \fBSSLv2\fR,
+\&\fBSSLv3\fR, \fBTLSv1\fR, \fBTLSv1.1\fR and \fBTLSv1.2\fR. The special value \fB\s-1ALL\s0\fR refers
+to all supported versions.
+.IP "\fBOptions\fR" 4
+.IX Item "Options"
+The \fBvalue\fR argument is a comma separated list of various flags to set.
+If a flag string is preceded by \fB\-\fR it is disabled. See the
+\&\fBSSL_CTX_set_options\fR function for more details of individual options.
+.Sp
+Each option is listed below. Where an operation is enabled by default
+the \fB\-flag\fR syntax is needed to disable it.
+.Sp
+\&\fBSessionTicket\fR: session ticket support, enabled by default. Inverse of
+\&\fB\s-1SSL_OP_NO_TICKET\s0\fR: that is \fB\-SessionTicket\fR is the same as setting
+\&\fB\s-1SSL_OP_NO_TICKET\s0\fR.
+.Sp
+\&\fBCompression\fR: \s-1SSL/TLS\s0 compression support, enabled by default. Inverse
+of \fB\s-1SSL_OP_NO_COMPRESSION\s0\fR.
+.Sp
+\&\fBEmptyFragments\fR: use empty fragments as a countermeasure against an
+\&\s-1SSL 3.0/TLS 1.0\s0 protocol vulnerability affecting \s-1CBC\s0 ciphers. It
+is set by default. Inverse of \fB\s-1SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS\s0\fR.
+.Sp
+\&\fBBugs\fR: enable various bug workarounds. Same as \fB\s-1SSL_OP_ALL\s0\fR.
+.Sp
+\&\fBDHSingle\fR: enable single use \s-1DH\s0 keys, set by default. Inverse of
+\&\fB\s-1SSL_OP_DH_SINGLE\s0\fR. Only used by servers.
+.Sp
+\&\fBECDHSingle\fR: enable single use \s-1ECDH\s0 keys, set by default. Inverse of
+\&\fB\s-1SSL_OP_ECDH_SINGLE\s0\fR. Only used by servers.
+.Sp
+\&\fBServerPreference\fR: use server and not client preference order when
+determining which cipher suite, signature algorithm or elliptic curve
+to use for an incoming connection. Equivalent to
+\&\fB\s-1SSL_OP_CIPHER_SERVER_PREFERENCE\s0\fR. Only used by servers.
+.Sp
+\&\fBNoResumptionOnRenegotiation\fR: set
+\&\fB\s-1SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION\s0\fR flag. Only used by servers.
+.Sp
+\&\fBUnsafeLegacyRenegotiation\fR: permits the use of unsafe legacy renegotiation.
+Equivalent to \fB\s-1SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION\s0\fR.
+.Sp
+\&\fBUnsafeLegacyServerConnect\fR: permits the use of unsafe legacy renegotiation
+for OpenSSL clients only. Equivalent to \fB\s-1SSL_OP_LEGACY_SERVER_CONNECT\s0\fR.
+Set by default.
+.SH "SUPPORTED COMMAND TYPES"
+.IX Header "SUPPORTED COMMAND TYPES"
+The function \fISSL_CONF_cmd_value_type()\fR currently returns one of the following
+types:
+.IP "\fB\s-1SSL_CONF_TYPE_UNKNOWN\s0\fR" 4
+.IX Item "SSL_CONF_TYPE_UNKNOWN"
+The \fBcmd\fR string is unrecognised; this return value can be used to flag
+syntax errors.
+.IP "\fB\s-1SSL_CONF_TYPE_STRING\s0\fR" 4
+.IX Item "SSL_CONF_TYPE_STRING"
+The value is a string without any specific structure.
+.IP "\fB\s-1SSL_CONF_TYPE_FILE\s0\fR" 4
+.IX Item "SSL_CONF_TYPE_FILE"
+The value is a file name.
+.IP "\fB\s-1SSL_CONF_TYPE_DIR\s0\fR" 4
+.IX Item "SSL_CONF_TYPE_DIR"
+The value is a directory name.
+.SH "NOTES"
+.IX Header "NOTES"
+The order of operations is significant. This can be used to set either defaults
+or values which cannot be overridden. For example if an application calls:
+.PP
+.Vb 2
+\& SSL_CONF_cmd(ctx, "Protocol", "\-SSLv2");
+\& SSL_CONF_cmd(ctx, userparam, uservalue);
+.Ve
+.PP
+it will disable SSLv2 support by default but the user can override it. If
+however the call sequence is:
+.PP
+.Vb 2
+\& SSL_CONF_cmd(ctx, userparam, uservalue);
+\& SSL_CONF_cmd(ctx, "Protocol", "\-SSLv2");
+.Ve
+.PP
+SSLv2 is \fBalways\fR disabled and attempts by the user to override this are
+ignored.
+.PP
+By checking the return code of \fISSL_CONF_cmd()\fR it is possible to query if a
+given \fBcmd\fR is recognised; this is useful if \fISSL_CONF_cmd()\fR values are
+mixed with additional application specific operations.
+.PP
+For example an application might call \fISSL_CONF_cmd()\fR and if it returns
+\&\-2 (unrecognised command) continue with processing of application specific
+commands.
+.PP
+Applications can also use \fISSL_CONF_cmd()\fR to process command lines, though the
+utility function \fISSL_CONF_cmd_argv()\fR is normally used instead. One way
+to do this is to set the prefix to an appropriate value using
+\&\fISSL_CONF_CTX_set1_prefix()\fR, pass the current argument to \fBcmd\fR and the
+following argument to \fBvalue\fR (which may be \s-1NULL\s0).
+.PP
+In this case if the return value is positive then it is used to skip that
+number of arguments as they have been processed by \fISSL_CONF_cmd()\fR. If \-2 is
+returned then \fBcmd\fR is not recognised and application specific arguments
+can be checked instead. If \-3 is returned a required argument is missing
+and an error is indicated. If 0 is returned some other error occurred and
+this can be reported back to the user.
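+.PP
+A sketch of such a loop (assuming the prefix has been set and \fBargc\fR and
+\&\fBargv\fR hold the remaining command line arguments; error reporting omitted):
+.PP
+.Vb 13
+\& while (argc > 0) {
+\&     const char *value = argc > 1 ? argv[1] : NULL;
+\&     int rv = SSL_CONF_cmd(cctx, argv[0], value);
+\&     if (rv > 0) {              /* rv arguments were consumed */
+\&         argc \-= rv;
+\&         argv += rv;
+\&     } else if (rv == \-2) {     /* unrecognised: application specific option */
+\&         argc\-\-;
+\&         argv++;
+\&     } else {                   /* \-3 missing argument, 0 other error */
+\&         break;
+\&     }
+\& }
+.Ve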
+.PP
+The function \fISSL_CONF_cmd_value_type()\fR can be used by applications to
+check for the existence of a command or to perform additional syntax
+checking or translation of the command value. For example if the return
+value is \fB\s-1SSL_CONF_TYPE_FILE\s0\fR an application could translate a relative
+pathname to an absolute pathname.
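+.PP
+For example (a sketch, assuming \fBcmd\fR holds the command name currently
+being processed):
+.PP
+.Vb 3
+\& if (SSL_CONF_cmd_value_type(cctx, cmd) == SSL_CONF_TYPE_FILE) {
+\&     /* e.g. convert a relative pathname to an absolute one here */
+\& }
+.Ve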
+.SH "EXAMPLES"
+.IX Header "EXAMPLES"
+Set supported signature algorithms:
+.PP
+.Vb 1
+\& SSL_CONF_cmd(ctx, "SignatureAlgorithms", "ECDSA+SHA256:RSA+SHA256:DSA+SHA256");
+.Ve
+.PP
+Enable all protocols except SSLv3 and SSLv2:
+.PP
+.Vb 1
+\& SSL_CONF_cmd(ctx, "Protocol", "ALL,\-SSLv3,\-SSLv2");
+.Ve
+.PP
+Only enable TLSv1.2:
+.PP
+.Vb 1
+\& SSL_CONF_cmd(ctx, "Protocol", "\-ALL,TLSv1.2");
+.Ve
+.PP
+Disable \s-1TLS\s0 session tickets:
+.PP
+.Vb 1
+\& SSL_CONF_cmd(ctx, "Options", "\-SessionTicket");
+.Ve
+.PP
+Set supported curves to P\-256, P\-384:
+.PP
+.Vb 1
+\& SSL_CONF_cmd(ctx, "Curves", "P\-256:P\-384");
+.Ve
+.PP
+Set automatic support for any elliptic curve for key exchange:
+.PP
+.Vb 1
+\& SSL_CONF_cmd(ctx, "ECDHParameters", "Automatic");
+.Ve
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+\&\fISSL_CONF_cmd()\fR returns 1 if the value of \fBcmd\fR is recognised and \fBvalue\fR is
+\&\fB\s-1NOT\s0\fR used and 2 if both \fBcmd\fR and \fBvalue\fR are used. In other words it
+returns the number of arguments processed. This is useful when processing
+command lines.
+.PP
+A return value of \-2 means \fBcmd\fR is not recognised.
+.PP
+A return value of \-3 means \fBcmd\fR is recognised and the command requires a
+value but \fBvalue\fR is \s-1NULL.\s0
+.PP
+A return code of 0 indicates that both \fBcmd\fR and \fBvalue\fR are valid but an
+error occurred attempting to perform the operation: for example due to an
+error in the syntax of \fBvalue\fR. In this case the error queue may provide
+additional information.
+.PP
+\&\fISSL_CONF_finish()\fR returns 1 for success and 0 for failure.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fISSL_CONF_CTX_new\fR\|(3),
+\&\fISSL_CONF_CTX_set_flags\fR\|(3),
+\&\fISSL_CONF_CTX_set1_prefix\fR\|(3),
+\&\fISSL_CONF_CTX_set_ssl_ctx\fR\|(3),
+\&\fISSL_CONF_cmd_argv\fR\|(3)
+.SH "HISTORY"
+.IX Header "HISTORY"
+\&\fISSL_CONF_cmd()\fR was first added to OpenSSL 1.0.2.
diff --git a/secure/lib/libssl/man/SSL_CONF_cmd_argv.3 b/secure/lib/libssl/man/SSL_CONF_cmd_argv.3
new file mode 100644
index 0000000..377eaac
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CONF_cmd_argv.3
@@ -0,0 +1,174 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CONF_cmd_argv 3"
+.TH SSL_CONF_cmd_argv 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CONF_cmd_argv \- SSL configuration command line processing
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& int SSL_CONF_cmd_argv(SSL_CONF_CTX *cctx, int *pargc, char ***pargv);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+The function \fISSL_CONF_cmd_argv()\fR processes at most two command line
+arguments from \fBpargv\fR and \fBpargc\fR. The values of \fBpargv\fR and \fBpargc\fR
+are updated to reflect the number of command options processed. The \fBpargc\fR
+argument can be set to \fB\s-1NULL\s0\fR if it is not used.
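+.PP
+A sketch of a typical processing loop (assuming a configured \fBcctx\fR and the
+\&\fBargc\fR and \fBargv\fR values from \fImain()\fR; error reporting omitted):
+.PP
+.Vb 9
+\& while (argc > 0) {
+\&     int rv = SSL_CONF_cmd_argv(cctx, &argc, &argv);
+\&     if (rv < 0)           /* \-1 processing error, \-2 missing argument */
+\&         break;
+\&     if (rv == 0) {        /* not an SSL_CONF option: handle it here */
+\&         argc\-\-;
+\&         argv++;
+\&     }
+\& }
+.Ve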
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+\&\fISSL_CONF_cmd_argv()\fR returns the number of command arguments processed: 0, 1, 2
+or a negative error code.
+.PP
+If \-2 is returned then an argument for a command is missing.
+.PP
+If \-1 is returned the command is recognised but couldn't be processed due
+to an error: for example a syntax error in the argument.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fISSL_CONF_CTX_new\fR\|(3),
+\&\fISSL_CONF_CTX_set_flags\fR\|(3),
+\&\fISSL_CONF_CTX_set1_prefix\fR\|(3),
+\&\fISSL_CONF_CTX_set_ssl_ctx\fR\|(3),
+\&\fISSL_CONF_cmd\fR\|(3)
+.SH "HISTORY"
+.IX Header "HISTORY"
+\&\fISSL_CONF_cmd_argv()\fR was first added to OpenSSL 1.0.2.
diff --git a/secure/lib/libssl/man/SSL_CTX_add1_chain_cert.3 b/secure/lib/libssl/man/SSL_CTX_add1_chain_cert.3
new file mode 100644
index 0000000..c4fb8ea
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CTX_add1_chain_cert.3
@@ -0,0 +1,280 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CTX_add1_chain_cert 3"
+.TH SSL_CTX_add1_chain_cert 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CTX_set0_chain, SSL_CTX_set1_chain, SSL_CTX_add0_chain_cert,
+SSL_CTX_add1_chain_cert, SSL_CTX_get0_chain_certs, SSL_CTX_clear_chain_certs,
+SSL_set0_chain, SSL_set1_chain, SSL_add0_chain_cert, SSL_add1_chain_cert,
+SSL_get0_chain_certs, SSL_clear_chain_certs, SSL_CTX_build_cert_chain,
+SSL_build_cert_chain, SSL_CTX_select_current_cert,
+SSL_select_current_cert, SSL_CTX_set_current_cert, SSL_set_current_cert \- extra
+chain certificate processing
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& int SSL_CTX_set0_chain(SSL_CTX *ctx, STACK_OF(X509) *sk);
+\& int SSL_CTX_set1_chain(SSL_CTX *ctx, STACK_OF(X509) *sk);
+\& int SSL_CTX_add0_chain_cert(SSL_CTX *ctx, X509 *x509);
+\& int SSL_CTX_add1_chain_cert(SSL_CTX *ctx, X509 *x509);
+\& int SSL_CTX_get0_chain_certs(SSL_CTX *ctx, STACK_OF(X509) **sk);
+\& int SSL_CTX_clear_chain_certs(SSL_CTX *ctx);
+\&
+\& int SSL_set0_chain(SSL *ssl, STACK_OF(X509) *sk);
+\& int SSL_set1_chain(SSL *ssl, STACK_OF(X509) *sk);
+\& int SSL_add0_chain_cert(SSL *ssl, X509 *x509);
+\& int SSL_add1_chain_cert(SSL *ssl, X509 *x509);
+\& int SSL_get0_chain_certs(SSL *ssl, STACK_OF(X509) **sk);
+\& int SSL_clear_chain_certs(SSL *ssl);
+\&
+\& int SSL_CTX_build_cert_chain(SSL_CTX *ctx, int flags);
+\& int SSL_build_cert_chain(SSL *ssl, int flags);
+\&
+\& int SSL_CTX_select_current_cert(SSL_CTX *ctx, X509 *x509);
+\& int SSL_select_current_cert(SSL *ssl, X509 *x509);
+\& int SSL_CTX_set_current_cert(SSL_CTX *ctx, long op);
+\& int SSL_set_current_cert(SSL *ssl, long op);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+\&\fISSL_CTX_set0_chain()\fR and \fISSL_CTX_set1_chain()\fR set the certificate chain
+associated with the current certificate of \fBctx\fR to \fBsk\fR.
+.PP
+\&\fISSL_CTX_add0_chain_cert()\fR and \fISSL_CTX_add1_chain_cert()\fR append the single
+certificate \fBx509\fR to the chain associated with the current certificate of
+\&\fBctx\fR.
+.PP
+\&\fISSL_CTX_get0_chain_certs()\fR retrieves the chain associated with the current
+certificate of \fBctx\fR.
+.PP
+\&\fISSL_CTX_clear_chain_certs()\fR clears any existing chain associated with the
+current certificate of \fBctx\fR. (This is implemented by calling
+\&\fISSL_CTX_set0_chain()\fR with \fBsk\fR set to \fB\s-1NULL\s0\fR).
+.PP
+\&\fISSL_CTX_build_cert_chain()\fR builds the certificate chain for \fBctx\fR.
+Normally this uses the chain store, or the verify store if the chain store
+is not set.
+If the function is successful the built chain will replace any existing chain.
+The \fBflags\fR parameter can be set to \fB\s-1SSL_BUILD_CHAIN_FLAG_UNTRUSTED\s0\fR to use
+existing chain certificates as untrusted CAs, \fB\s-1SSL_BUILD_CHAIN_FLAG_NO_ROOT\s0\fR
+to omit the root \s-1CA\s0 from the built chain, \fB\s-1SSL_BUILD_CHAIN_FLAG_CHECK\s0\fR to
+use all existing chain certificates only to build the chain (effectively
+sanity checking and rearranging them if necessary), the flag
+\&\fB\s-1SSL_BUILD_CHAIN_FLAG_IGNORE_ERROR\s0\fR ignores any errors during verification:
+if flag \fB\s-1SSL_BUILD_CHAIN_FLAG_CLEAR_ERROR\s0\fR is also set verification errors
+are cleared from the error queue.
+.PP
+Each of these functions operates on the \fIcurrent\fR end entity
+(i.e. server or client) certificate. This is the last certificate loaded or
+selected on the corresponding \fBctx\fR structure.
+.PP
+\&\fISSL_CTX_select_current_cert()\fR selects \fBx509\fR as the current end entity
+certificate, but only if \fBx509\fR has already been loaded into \fBctx\fR using a
+function such as \fISSL_CTX_use_certificate()\fR.
+.PP
+\&\fISSL_set0_chain()\fR, \fISSL_set1_chain()\fR, \fISSL_add0_chain_cert()\fR,
+\&\fISSL_add1_chain_cert()\fR, \fISSL_get0_chain_certs()\fR, \fISSL_clear_chain_certs()\fR,
+\&\fISSL_build_cert_chain()\fR, \fISSL_select_current_cert()\fR and \fISSL_set_current_cert()\fR
+are similar except they apply to \s-1SSL\s0 structure \fBssl\fR.
+.PP
+\&\fISSL_CTX_set_current_cert()\fR changes the current certificate to a value based
+on the \fBop\fR argument. Currently \fBop\fR can be \fB\s-1SSL_CERT_SET_FIRST\s0\fR to use
+the first valid certificate or \fB\s-1SSL_CERT_SET_NEXT\s0\fR to set the next valid
+certificate after the current certificate. These two operations can be
+used to iterate over all certificates in an \fB\s-1SSL_CTX\s0\fR structure.
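+.PP
+For example (a sketch, error handling omitted), every certificate in an
+\&\fB\s-1SSL_CTX\s0\fR can be visited with:
+.PP
+.Vb 5
+\& if (SSL_CTX_set_current_cert(ctx, SSL_CERT_SET_FIRST)) {
+\&     do {
+\&         /* operate on the current certificate, e.g. build its chain */
+\&     } while (SSL_CTX_set_current_cert(ctx, SSL_CERT_SET_NEXT));
+\& }
+.Ve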
+.PP
+\&\fISSL_set_current_cert()\fR also supports the option \fB\s-1SSL_CERT_SET_SERVER\s0\fR.
+If \fBssl\fR is a server and has sent a certificate to a connected client
+this option sets that certificate to the current certificate and returns 1.
+If the negotiated ciphersuite is anonymous (and thus no certificate will
+be sent) 2 is returned and the current certificate is unchanged. If \fBssl\fR
+is not a server or a certificate has not been sent 0 is returned and
+the current certificate is unchanged.
+.PP
+All these functions are implemented as macros. Those containing a \fB1\fR
+increment the reference count of the supplied certificate or chain so it must
+be freed at some point after the operation. Those containing a \fB0\fR do
+not increment reference counts and the supplied certificate or chain
+\&\fB\s-1MUST NOT\s0\fR be freed after the operation.
+.SH "NOTES"
+.IX Header "NOTES"
+The chains associated with an \s-1SSL_CTX\s0 structure are copied to any \s-1SSL\s0
+structures when \fISSL_new()\fR is called. \s-1SSL\s0 structures will not be affected
+by any chains subsequently changed in the parent \s-1SSL_CTX.\s0
+.PP
+One chain can be set for each key type supported by a server. So, for example,
+an \s-1RSA\s0 and a \s-1DSA\s0 certificate can (and often will) have different chains.
+.PP
+The functions \fISSL_CTX_build_cert_chain()\fR and \fISSL_build_cert_chain()\fR can
+be used to check application configuration and to ensure any necessary
+subordinate CAs are sent in the correct order. Misconfigured applications
+sending incorrect certificate chains often cause problems with peers.
+.PP
+For example an application can add any set of certificates using
+\&\fISSL_CTX_use_certificate_chain_file()\fR then call \fISSL_CTX_build_cert_chain()\fR
+with the option \fB\s-1SSL_BUILD_CHAIN_FLAG_CHECK\s0\fR to check and reorder them.
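+.PP
+As an illustrative sketch (assuming the hypothetical file \fBchain.pem\fR holds
+the end entity certificate followed by its intermediates):
+.PP
+.Vb 4
+\& if (SSL_CTX_use_certificate_chain_file(ctx, "chain.pem") != 1)
+\&     /* handle the load error */;
+\& if (SSL_CTX_build_cert_chain(ctx, SSL_BUILD_CHAIN_FLAG_CHECK) != 1)
+\&     /* the chain is incomplete or mis\-ordered: report an error */;
+.Ve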
+.PP
+Applications can issue non-fatal warnings when checking chains by setting
+the flag \fB\s-1SSL_BUILD_CHAIN_FLAG_IGNORE_ERROR\s0\fR and checking the return
+value.
+.PP
+Calling \fISSL_CTX_build_cert_chain()\fR or \fISSL_build_cert_chain()\fR is more
+efficient than the automatic chain building as it is only performed once.
+Automatic chain building is performed on each new session.
+.PP
+If any certificates are added using these functions, no certificates added
+using \fISSL_CTX_add_extra_chain_cert()\fR will be used.
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+\&\fISSL_set_current_cert()\fR with \fB\s-1SSL_CERT_SET_SERVER\s0\fR returns 1 for success, 2 if
+no server certificate is used because the ciphersuite is anonymous, and 0
+for failure.
+.PP
+\&\fISSL_CTX_build_cert_chain()\fR and \fISSL_build_cert_chain()\fR return 1 for success
+and 0 for failure. If the flag \fB\s-1SSL_BUILD_CHAIN_FLAG_IGNORE_ERROR\s0\fR is set and
+a verification error occurs then 2 is returned.
+.PP
+All other functions return 1 for success and 0 for failure.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fISSL_CTX_add_extra_chain_cert\fR\|(3)
+.SH "HISTORY"
+.IX Header "HISTORY"
+These functions were first added to OpenSSL 1.0.2.
diff --git a/secure/lib/libssl/man/SSL_CTX_add_extra_chain_cert.3 b/secure/lib/libssl/man/SSL_CTX_add_extra_chain_cert.3
index aba4756..fcaf811 100644
--- a/secure/lib/libssl/man/SSL_CTX_add_extra_chain_cert.3
+++ b/secure/lib/libssl/man/SSL_CTX_add_extra_chain_cert.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_add_extra_chain_cert 3"
-.TH SSL_CTX_add_extra_chain_cert 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_add_extra_chain_cert 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -167,7 +167,8 @@ Only one set of extra chain certificates can be specified per \s-1SSL_CTX\s0
structure. Different chains for different certificates (for example if both
\&\s-1RSA\s0 and \s-1DSA\s0 certificates are specified by the same server) or different \s-1SSL\s0
structures with the same parent \s-1SSL_CTX\s0 cannot be specified using this
-function.
+function. For more flexibility functions such as \fISSL_add1_chain_cert()\fR should
+be used instead.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
\&\fISSL_CTX_add_extra_chain_cert()\fR returns 1 on success. Check out the
@@ -178,3 +179,13 @@ error stack to find out the reason for failure otherwise.
\&\fISSL_CTX_use_certificate\fR\|(3),
\&\fISSL_CTX_set_client_cert_cb\fR\|(3),
\&\fISSL_CTX_load_verify_locations\fR\|(3)
+\&\fISSL_CTX_set0_chain\fR\|(3),
+\&\fISSL_CTX_set1_chain\fR\|(3),
+\&\fISSL_CTX_add0_chain_cert\fR\|(3),
+\&\fISSL_CTX_add1_chain_cert\fR\|(3),
+\&\fISSL_set0_chain\fR\|(3),
+\&\fISSL_set1_chain\fR\|(3),
+\&\fISSL_add0_chain_cert\fR\|(3),
+\&\fISSL_add1_chain_cert\fR\|(3),
+\&\fISSL_CTX_build_cert_chain\fR\|(3),
+\&\fISSL_build_cert_chain\fR\|(3)
diff --git a/secure/lib/libssl/man/SSL_CTX_add_session.3 b/secure/lib/libssl/man/SSL_CTX_add_session.3
index 5215c74..4a3b113 100644
--- a/secure/lib/libssl/man/SSL_CTX_add_session.3
+++ b/secure/lib/libssl/man/SSL_CTX_add_session.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_add_session 3"
-.TH SSL_CTX_add_session 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_add_session 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_ctrl.3 b/secure/lib/libssl/man/SSL_CTX_ctrl.3
index b74fd36..74d9683 100644
--- a/secure/lib/libssl/man/SSL_CTX_ctrl.3
+++ b/secure/lib/libssl/man/SSL_CTX_ctrl.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_ctrl 3"
-.TH SSL_CTX_ctrl 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_ctrl 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_flush_sessions.3 b/secure/lib/libssl/man/SSL_CTX_flush_sessions.3
index 671ad58..1bc8b52 100644
--- a/secure/lib/libssl/man/SSL_CTX_flush_sessions.3
+++ b/secure/lib/libssl/man/SSL_CTX_flush_sessions.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_flush_sessions 3"
-.TH SSL_CTX_flush_sessions 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_flush_sessions 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_free.3 b/secure/lib/libssl/man/SSL_CTX_free.3
index b627fc9..7df44e9 100644
--- a/secure/lib/libssl/man/SSL_CTX_free.3
+++ b/secure/lib/libssl/man/SSL_CTX_free.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_free 3"
-.TH SSL_CTX_free 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_free 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_get0_param.3 b/secure/lib/libssl/man/SSL_CTX_get0_param.3
new file mode 100644
index 0000000..6e1eb27
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CTX_get0_param.3
@@ -0,0 +1,187 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CTX_get0_param 3"
+.TH SSL_CTX_get0_param 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CTX_get0_param, SSL_get0_param, SSL_CTX_set1_param, SSL_set1_param \-
+get and set verification parameters
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& X509_VERIFY_PARAM *SSL_CTX_get0_param(SSL_CTX *ctx)
+\& X509_VERIFY_PARAM *SSL_get0_param(SSL *ssl)
+\& int SSL_CTX_set1_param(SSL_CTX *ctx, X509_VERIFY_PARAM *vpm)
+\& int SSL_set1_param(SSL *ssl, X509_VERIFY_PARAM *vpm)
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+\&\fISSL_CTX_get0_param()\fR and \fISSL_get0_param()\fR retrieve an internal pointer to
+the verification parameters for \fBctx\fR or \fBssl\fR respectively. The returned
+pointer must not be freed by the calling application.
+.PP
+\&\fISSL_CTX_set1_param()\fR and \fISSL_set1_param()\fR set the verification parameters
+to \fBvpm\fR for \fBctx\fR or \fBssl\fR.
+.SH "NOTES"
+.IX Header "NOTES"
+Typically parameters are retrieved from an \fB\s-1SSL_CTX\s0\fR or \fB\s-1SSL\s0\fR structure
+using \fISSL_CTX_get0_param()\fR or \fISSL_get0_param()\fR and an application modifies
+them to suit its needs: for example to add a hostname check.
+.SH "EXAMPLE"
+.IX Header "EXAMPLE"
+Check that the peer certificate matches the hostname \*(L"www.foo.com\*(R":
+.PP
+.Vb 2
+\& X509_VERIFY_PARAM *vpm = SSL_get0_param(ssl);
+\& X509_VERIFY_PARAM_set1_host(vpm, "www.foo.com");
+.Ve
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+\&\fISSL_CTX_get0_param()\fR and \fISSL_get0_param()\fR return a pointer to an
+\&\fBX509_VERIFY_PARAM\fR structure.
+.PP
+\&\fISSL_CTX_set1_param()\fR and \fISSL_set1_param()\fR return 1 for success and 0
+for failure.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fIX509_VERIFY_PARAM_set_flags\fR\|(3)
+.SH "HISTORY"
+.IX Header "HISTORY"
+These functions were first added to OpenSSL 1.0.2.
diff --git a/secure/lib/libssl/man/SSL_CTX_get_ex_new_index.3 b/secure/lib/libssl/man/SSL_CTX_get_ex_new_index.3
index f8ebb59..5dd1820 100644
--- a/secure/lib/libssl/man/SSL_CTX_get_ex_new_index.3
+++ b/secure/lib/libssl/man/SSL_CTX_get_ex_new_index.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_get_ex_new_index 3"
-.TH SSL_CTX_get_ex_new_index 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_get_ex_new_index 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_get_verify_mode.3 b/secure/lib/libssl/man/SSL_CTX_get_verify_mode.3
index 0169da7..be53503 100644
--- a/secure/lib/libssl/man/SSL_CTX_get_verify_mode.3
+++ b/secure/lib/libssl/man/SSL_CTX_get_verify_mode.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_get_verify_mode 3"
-.TH SSL_CTX_get_verify_mode 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_get_verify_mode 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_load_verify_locations.3 b/secure/lib/libssl/man/SSL_CTX_load_verify_locations.3
index ac1ebcf..5942be4 100644
--- a/secure/lib/libssl/man/SSL_CTX_load_verify_locations.3
+++ b/secure/lib/libssl/man/SSL_CTX_load_verify_locations.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_load_verify_locations 3"
-.TH SSL_CTX_load_verify_locations 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_load_verify_locations 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_new.3 b/secure/lib/libssl/man/SSL_CTX_new.3
index 1395429..f81b32f 100644
--- a/secure/lib/libssl/man/SSL_CTX_new.3
+++ b/secure/lib/libssl/man/SSL_CTX_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_new 3"
-.TH SSL_CTX_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_sess_number.3 b/secure/lib/libssl/man/SSL_CTX_sess_number.3
index d980b8b..2f8194b 100644
--- a/secure/lib/libssl/man/SSL_CTX_sess_number.3
+++ b/secure/lib/libssl/man/SSL_CTX_sess_number.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_sess_number 3"
-.TH SSL_CTX_sess_number 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_sess_number 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_sess_set_cache_size.3 b/secure/lib/libssl/man/SSL_CTX_sess_set_cache_size.3
index 8bf5a0f..32a5ecc 100644
--- a/secure/lib/libssl/man/SSL_CTX_sess_set_cache_size.3
+++ b/secure/lib/libssl/man/SSL_CTX_sess_set_cache_size.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_sess_set_cache_size 3"
-.TH SSL_CTX_sess_set_cache_size 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_sess_set_cache_size 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -152,6 +152,7 @@ SSL_CTX_sess_set_cache_size, SSL_CTX_sess_get_cache_size \- manipulate session c
.IX Header "DESCRIPTION"
\&\fISSL_CTX_sess_set_cache_size()\fR sets the size of the internal session cache
of context \fBctx\fR to \fBt\fR.
+This value is a hint and not an absolute; see the notes below.
.PP
\&\fISSL_CTX_sess_get_cache_size()\fR returns the currently valid session cache size.
.SH "NOTES"
@@ -161,8 +162,9 @@ currently 1024*20, so that up to 20000 sessions can be held. This size
can be modified using the \fISSL_CTX_sess_set_cache_size()\fR call. A special
case is the size 0, which is used for unlimited size.
.PP
-When the maximum number of sessions is reached, no more new sessions are
-added to the cache. New space may be added by calling
+If adding the session makes the cache exceed its size, then unused
+sessions are dropped from the end of the cache.
+Cache space may also be reclaimed by calling
\&\fISSL_CTX_flush_sessions\fR\|(3) to remove
expired sessions.
.PP
diff --git a/secure/lib/libssl/man/SSL_CTX_sess_set_get_cb.3 b/secure/lib/libssl/man/SSL_CTX_sess_set_get_cb.3
index 370c2e4..140755e 100644
--- a/secure/lib/libssl/man/SSL_CTX_sess_set_get_cb.3
+++ b/secure/lib/libssl/man/SSL_CTX_sess_set_get_cb.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_sess_set_get_cb 3"
-.TH SSL_CTX_sess_set_get_cb 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_sess_set_get_cb 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_sessions.3 b/secure/lib/libssl/man/SSL_CTX_sessions.3
index 36dbf68..9dbd080 100644
--- a/secure/lib/libssl/man/SSL_CTX_sessions.3
+++ b/secure/lib/libssl/man/SSL_CTX_sessions.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_sessions 3"
-.TH SSL_CTX_sessions 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_sessions 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set1_curves.3 b/secure/lib/libssl/man/SSL_CTX_set1_curves.3
new file mode 100644
index 0000000..0e5fe2d
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CTX_set1_curves.3
@@ -0,0 +1,236 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CTX_set1_curves 3"
+.TH SSL_CTX_set1_curves 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CTX_set1_curves, SSL_CTX_set1_curves_list, SSL_set1_curves,
+SSL_set1_curves_list, SSL_get1_curves, SSL_get_shared_curve,
+SSL_CTX_set_ecdh_auto, SSL_set_ecdh_auto \- EC supported curve functions
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& int SSL_CTX_set1_curves(SSL_CTX *ctx, int *clist, int clistlen);
+\& int SSL_CTX_set1_curves_list(SSL_CTX *ctx, char *list);
+\&
+\& int SSL_set1_curves(SSL *ssl, int *clist, int clistlen);
+\& int SSL_set1_curves_list(SSL *ssl, char *list);
+\&
+\& int SSL_get1_curves(SSL *ssl, int *curves);
+\& int SSL_get_shared_curve(SSL *s, int n);
+\&
+\& int SSL_CTX_set_ecdh_auto(SSL_CTX *ctx, int onoff);
+\& int SSL_set_ecdh_auto(SSL *s, int onoff);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+\&\fISSL_CTX_set1_curves()\fR sets the supported curves for \fBctx\fR to \fBclistlen\fR
+curves in the array \fBclist\fR. The array consists of all NIDs of curves in
+preference order. For a \s-1TLS\s0 client the curves are used directly in the
+supported curves extension. For a \s-1TLS\s0 server the curves are used to
+determine the set of shared curves.
+.PP
+\&\fISSL_CTX_set1_curves_list()\fR sets the supported curves for \fBctx\fR to
+string \fBlist\fR. The string is a colon separated list of curve NIDs or
+names, for example \*(L"P\-521:P\-384:P\-256\*(R".
+.PP
+\&\fISSL_set1_curves()\fR and \fISSL_set1_curves_list()\fR are similar except they set
+supported curves for the \s-1SSL\s0 structure \fBssl\fR.
+.PP
+\&\fISSL_get1_curves()\fR returns the set of supported curves sent by a client
+in the supported curves extension. It returns the total number of
+supported curves. The \fBcurves\fR parameter can be \fB\s-1NULL\s0\fR to simply
+return the number of curves for memory allocation purposes. The
+\&\fBcurves\fR array is in the form of a set of curve NIDs in preference
+order. It can return zero if the client did not send a supported curves
+extension.
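+.PP
+A sketch of the usual two call pattern (error handling omitted):
+.PP
+.Vb 8
+\& int ncurves = SSL_get1_curves(ssl, NULL);   /* first call: count only */
+\&
+\& if (ncurves > 0) {
+\&     int *curves = OPENSSL_malloc(ncurves * sizeof(int));
+\&     SSL_get1_curves(ssl, curves);           /* second call: fill in NIDs */
+\&     /* ... examine the curve NIDs ... */
+\&     OPENSSL_free(curves);
+\& }
+.Ve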
+.PP
+\&\fISSL_get_shared_curve()\fR returns shared curve \fBn\fR for a server-side
+\&\s-1SSL \s0\fBssl\fR. If \fBn\fR is \-1 then the total number of shared curves is
+returned, which may be zero. Other than for diagnostic purposes,
+most applications will only be interested in the first shared curve
+so \fBn\fR is normally set to zero. If the value \fBn\fR is out of range,
+NID_undef is returned.
+.PP
+\&\fISSL_CTX_set_ecdh_auto()\fR and \fISSL_set_ecdh_auto()\fR set automatic curve
+selection for server \fBctx\fR or \fBssl\fR to \fBonoff\fR. If \fBonoff\fR is 1 then
+the highest preference curve is automatically used for \s-1ECDH\s0 temporary
+keys used during key exchange.
+.PP
+All these functions are implemented as macros.
+.SH "NOTES"
+.IX Header "NOTES"
+If an application wishes to make use of several of these functions for
+configuration purposes, either on a command line or in a file, it should
+consider using the \s-1SSL_CONF\s0 interface instead of manually parsing options.
+.PP
+The functions \fISSL_CTX_set_ecdh_auto()\fR and \fISSL_set_ecdh_auto()\fR can be used to
+make a server always choose the most appropriate curve for a client. If set,
+it will override any temporary \s-1ECDH\s0 parameters set by a server. Previous
+versions of OpenSSL could effectively only use a single \s-1ECDH\s0 curve set
+using a function such as \fISSL_CTX_set_ecdh_tmp()\fR. Newer applications should
+just call:
+.PP
+.Vb 1
+\& SSL_CTX_set_ecdh_auto(ctx, 1);
+.Ve
+.PP
+and they will automatically support \s-1ECDH\s0 using the most appropriate shared
+curve.
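+.PP
+As an illustrative sketch (not part of the original interface description;
+\&\fBctx\fR is assumed to be a server \s-1SSL_CTX\s0 created elsewhere), the
+preferred curves and automatic selection could be configured together:
+.PP
+.Vb 6
+\& /* sketch: ctx is an SSL_CTX created elsewhere */
+\& char curves[] = "P\-521:P\-384:P\-256"; /* the 1.0.2 prototype takes char * */
+\& if (SSL_CTX_set1_curves_list(ctx, curves) != 1)
+\&     /* handle the configuration error */;
+\& if (SSL_CTX_set_ecdh_auto(ctx, 1) != 1)
+\&     /* handle the configuration error */;
+.Ve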
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+\&\fISSL_CTX_set1_curves()\fR, \fISSL_CTX_set1_curves_list()\fR, \fISSL_set1_curves()\fR,
+\&\fISSL_set1_curves_list()\fR, \fISSL_CTX_set_ecdh_auto()\fR and \fISSL_set_ecdh_auto()\fR
+return 1 for success and 0 for failure.
+.PP
+\&\fISSL_get1_curves()\fR returns the number of curves, which may be zero.
+.PP
+\&\fISSL_get_shared_curve()\fR returns the \s-1NID\s0 of shared curve \fBn\fR or NID_undef if there
+is no shared curve \fBn\fR; or the total number of shared curves if \fBn\fR
+is \-1.
+.PP
+When called on a client \fBssl\fR, \fISSL_get_shared_curve()\fR has no meaning and
+returns \-1.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fISSL_CTX_add_extra_chain_cert\fR\|(3)
+.SH "HISTORY"
+.IX Header "HISTORY"
+These functions were first added to OpenSSL 1.0.2.
diff --git a/secure/lib/libssl/man/SSL_CTX_set1_verify_cert_store.3 b/secure/lib/libssl/man/SSL_CTX_set1_verify_cert_store.3
new file mode 100644
index 0000000..8709286
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CTX_set1_verify_cert_store.3
@@ -0,0 +1,222 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CTX_set1_verify_cert_store 3"
+.TH SSL_CTX_set1_verify_cert_store 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CTX_set0_verify_cert_store, SSL_CTX_set1_verify_cert_store,
+SSL_CTX_set0_chain_cert_store, SSL_CTX_set1_chain_cert_store,
+SSL_set0_verify_cert_store, SSL_set1_verify_cert_store,
+SSL_set0_chain_cert_store, SSL_set1_chain_cert_store \- set certificate
+verification or chain store
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& int SSL_CTX_set0_verify_cert_store(SSL_CTX *ctx, X509_STORE *st);
+\& int SSL_CTX_set1_verify_cert_store(SSL_CTX *ctx, X509_STORE *st);
+\& int SSL_CTX_set0_chain_cert_store(SSL_CTX *ctx, X509_STORE *st);
+\& int SSL_CTX_set1_chain_cert_store(SSL_CTX *ctx, X509_STORE *st);
+\&
+\& int SSL_set0_verify_cert_store(SSL *ssl, X509_STORE *st);
+\& int SSL_set1_verify_cert_store(SSL *ssl, X509_STORE *st);
+\& int SSL_set0_chain_cert_store(SSL *ssl, X509_STORE *st);
+\& int SSL_set1_chain_cert_store(SSL *ssl, X509_STORE *st);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+\&\fISSL_CTX_set0_verify_cert_store()\fR and \fISSL_CTX_set1_verify_cert_store()\fR
+set the certificate store used for certificate verification to \fBst\fR.
+.PP
+\&\fISSL_CTX_set0_chain_cert_store()\fR and \fISSL_CTX_set1_chain_cert_store()\fR
+set the certificate store used for certificate chain building to \fBst\fR.
+.PP
+\&\fISSL_set0_verify_cert_store()\fR, \fISSL_set1_verify_cert_store()\fR,
+\&\fISSL_set0_chain_cert_store()\fR and \fISSL_set1_chain_cert_store()\fR are similar
+except they apply to the \s-1SSL\s0 structure \fBssl\fR.
+.PP
+All these functions are implemented as macros. Those containing a \fB1\fR
+increment the reference count of the supplied store so it must
+be freed at some point after the operation. Those containing a \fB0\fR do
+not increment reference counts and the supplied store \fB\s-1MUST NOT\s0\fR be freed
+after the operation.
+.SH "NOTES"
+.IX Header "NOTES"
+The store pointers associated with an \s-1SSL_CTX\s0 structure are copied to any \s-1SSL\s0
+structures when \fISSL_new()\fR is called. As a result \s-1SSL\s0 structures will not be
+affected if the parent \s-1SSL_CTX\s0 store pointer is set to a new value.
+.PP
+The verification store is used to verify the certificate chain sent by the
+peer: that is, an \s-1SSL/TLS\s0 client will use the verification store to verify
+the server's certificate chain and an \s-1SSL/TLS\s0 server will use it to verify
+any client certificate chain.
+.PP
+The chain store is used to build the certificate chain.
+.PP
+If the mode \fB\s-1SSL_MODE_NO_AUTO_CHAIN\s0\fR is set, or a certificate chain is
+already configured (for example using functions such as
+\&\fISSL_CTX_add1_chain_cert\fR\|(3) or
+\&\fISSL_CTX_add_extra_chain_cert\fR\|(3)), then
+automatic chain building is disabled.
+.PP
+If the chain or the verification store is not set then the store associated
+with the parent \s-1SSL_CTX\s0 is used instead to retain compatibility with previous
+versions of OpenSSL.
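+.PP
+As a minimal sketch (the file name is purely illustrative and \fBctx\fR is
+assumed to exist), a dedicated verification store could be installed and
+the local reference released afterwards:
+.PP
+.Vb 7
+\& /* sketch: build a store, hand it to ctx, drop our reference */
+\& X509_STORE *st = X509_STORE_new();
+\& if (st == NULL
+\&     || X509_STORE_load_locations(st, "/path/to/ca.pem", NULL) != 1
+\&     || SSL_CTX_set1_verify_cert_store(ctx, st) != 1)
+\&     /* handle the error */;
+\& X509_STORE_free(st); /* the set1 variant took its own reference */
+.Ve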
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+All these functions return 1 for success and 0 for failure.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fISSL_CTX_add_extra_chain_cert\fR\|(3)
+\&\fISSL_CTX_set0_chain\fR\|(3)
+\&\fISSL_CTX_set1_chain\fR\|(3)
+\&\fISSL_CTX_add0_chain_cert\fR\|(3)
+\&\fISSL_CTX_add1_chain_cert\fR\|(3)
+\&\fISSL_set0_chain\fR\|(3)
+\&\fISSL_set1_chain\fR\|(3)
+\&\fISSL_add0_chain_cert\fR\|(3)
+\&\fISSL_add1_chain_cert\fR\|(3)
+\&\fISSL_CTX_build_cert_chain\fR\|(3)
+\&\fISSL_build_cert_chain\fR\|(3)
+.SH "HISTORY"
+.IX Header "HISTORY"
+These functions were first added to OpenSSL 1.0.2.
diff --git a/secure/lib/libssl/man/SSL_CTX_set_cert_cb.3 b/secure/lib/libssl/man/SSL_CTX_set_cert_cb.3
new file mode 100644
index 0000000..b858902
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CTX_set_cert_cb.3
@@ -0,0 +1,201 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CTX_set_cert_cb 3"
+.TH SSL_CTX_set_cert_cb 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CTX_set_cert_cb, SSL_set_cert_cb \- handle certificate callback function
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& void SSL_CTX_set_cert_cb(SSL_CTX *c, int (*cert_cb)(SSL *ssl, void *arg), void *arg);
+\& void SSL_set_cert_cb(SSL *s, int (*cert_cb)(SSL *ssl, void *arg), void *arg);
+\&
+\& int (*cert_cb)(SSL *ssl, void *arg);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+\&\fISSL_CTX_set_cert_cb()\fR and \fISSL_set_cert_cb()\fR set the \fB\f(BIcert_cb()\fB\fR callback.
+The \fBarg\fR value is a pointer which is passed to the application callback.
+.PP
+When \fB\f(BIcert_cb()\fB\fR is \s-1NULL,\s0 no callback function is used.
+.PP
+\&\fIcert_cb()\fR is the application-defined callback. It is called before a
+certificate is used by a client or server. The callback can then inspect
+the passed \fBssl\fR structure and set or clear any appropriate certificates. If
+the callback is successful it \fB\s-1MUST\s0\fR return 1 even if no certificates have
+been set. A return value of zero indicates an error and will abort the
+handshake with a fatal internal error alert. A negative return value will
+suspend the handshake, and the handshake function will return immediately.
+\&\fISSL_get_error\fR\|(3) will return \s-1SSL_ERROR_WANT_X509_LOOKUP\s0 to
+indicate that the handshake was suspended. The next call to the handshake
+function will again lead to the call of \fIcert_cb()\fR. It is the job of
+\&\fIcert_cb()\fR to store information about the state of the last call
+if that is required to continue.
+.SH "NOTES"
+.IX Header "NOTES"
+An application will typically call \fISSL_use_certificate()\fR and
+\&\fISSL_use_PrivateKey()\fR to set the end-entity certificate and private key.
+It can add intermediate and optionally the root \s-1CA\s0 certificates using
+\&\fISSL_add1_chain_cert()\fR.
+.PP
+It might also call \fISSL_certs_clear()\fR to delete any certificates associated
+with the \fB\s-1SSL\s0\fR object.
+.PP
+The certificate callback functionality supersedes the (largely broken)
+functionality provided by the old client certificate callback interface.
+It is \fBalways\fR called, even if a certificate is already set, so the callback
+can modify or delete the existing certificate.
+.PP
+A more advanced callback might examine the handshake parameters and set
+whatever chain is appropriate. For example a legacy client supporting only
+\&\s-1TLS\s0 v1.0 might receive a certificate chain signed using \s-1SHA1\s0 whereas a
+\&\s-1TLS\s0 v1.2 client which advertises support for \s-1SHA256\s0 could receive a chain
+using \s-1SHA256.\s0
+.PP
+Normal server sanity checks are performed on any certificates set
+by the callback, so if an \s-1EC\s0 chain is set for a curve the client does not
+support, it will \fBnot\fR be used.
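+.PP
+A minimal sketch of such a callback (the name \fIswitch_cert_cb()\fR and the
+registration site are hypothetical; a real callback would inspect \fBssl\fR
+and install certificates as described above):
+.PP
+.Vb 9
+\& /* sketch: trivially accept whatever certificates are already set */
+\& static int switch_cert_cb(SSL *ssl, void *arg)
+\& {
+\&     /* inspect ssl and call SSL_use_certificate() etc. here if desired */
+\&     return 1; /* success: continue the handshake */
+\& }
+\&
+\& /* registration, for example during server setup */
+\& SSL_CTX_set_cert_cb(ctx, switch_cert_cb, NULL);
+.Ve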
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fIssl\fR\|(3), \fISSL_use_certificate\fR\|(3),
+\&\fISSL_add1_chain_cert\fR\|(3),
+\&\fISSL_get_client_CA_list\fR\|(3),
+\&\fISSL_clear\fR\|(3), \fISSL_free\fR\|(3)
diff --git a/secure/lib/libssl/man/SSL_CTX_set_cert_store.3 b/secure/lib/libssl/man/SSL_CTX_set_cert_store.3
index 5d17a91..4253a53 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_cert_store.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_cert_store.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_cert_store 3"
-.TH SSL_CTX_set_cert_store 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_cert_store 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -177,6 +177,12 @@ overridden with the \fIverify_callback()\fR set via the
\&\fISSL_CTX_set_verify\fR\|(3) family of functions.
This document must therefore be updated when documentation about the
X509_STORE object and its handling becomes available.
+.SH "RESTRICTIONS"
+.IX Header "RESTRICTIONS"
+The X509_STORE structure used by an \s-1SSL_CTX\s0 is used for verifying peer
+certificates and building certificate chains; it is also shared by
+every child \s-1SSL\s0 structure. Applications wanting finer control can use
+functions such as \fISSL_CTX_set1_verify_cert_store()\fR instead.
.SH "RETURN VALUES"
.IX Header "RETURN VALUES"
\&\fISSL_CTX_set_cert_store()\fR does not return diagnostic output.
diff --git a/secure/lib/libssl/man/SSL_CTX_set_cert_verify_callback.3 b/secure/lib/libssl/man/SSL_CTX_set_cert_verify_callback.3
index 5d86dc3..a6a590e 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_cert_verify_callback.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_cert_verify_callback.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_cert_verify_callback 3"
-.TH SSL_CTX_set_cert_verify_callback 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_cert_verify_callback 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_cipher_list.3 b/secure/lib/libssl/man/SSL_CTX_set_cipher_list.3
index e0243d6..3d70ced 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_cipher_list.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_cipher_list.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_cipher_list 3"
-.TH SSL_CTX_set_cipher_list 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_cipher_list 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -177,7 +177,7 @@ A \s-1RSA\s0 cipher can only be chosen, when a \s-1RSA\s0 certificate is availab
a temporary 512 bit \s-1RSA\s0 key, as typically the supplied key has a length
of 1024 bit (see
\&\fISSL_CTX_set_tmp_rsa_callback\fR\|(3)).
-\&\s-1RSA\s0 ciphers using \s-1EDH\s0 need a certificate and key and additional DH-parameters
+\&\s-1RSA\s0 ciphers using \s-1DHE\s0 need a certificate and key and additional DH-parameters
(see \fISSL_CTX_set_tmp_dh_callback\fR\|(3)).
.PP
A \s-1DSA\s0 cipher can only be chosen, when a \s-1DSA\s0 certificate is available.
diff --git a/secure/lib/libssl/man/SSL_CTX_set_client_CA_list.3 b/secure/lib/libssl/man/SSL_CTX_set_client_CA_list.3
index 3746b3b..203378c 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_client_CA_list.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_client_CA_list.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_client_CA_list 3"
-.TH SSL_CTX_set_client_CA_list 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_client_CA_list 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_client_cert_cb.3 b/secure/lib/libssl/man/SSL_CTX_set_client_cert_cb.3
index ef24866..d9a4b7e 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_client_cert_cb.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_client_cert_cb.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_client_cert_cb 3"
-.TH SSL_CTX_set_client_cert_cb 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_client_cert_cb 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_custom_cli_ext.3 b/secure/lib/libssl/man/SSL_CTX_set_custom_cli_ext.3
new file mode 100644
index 0000000..4bec817
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CTX_set_custom_cli_ext.3
@@ -0,0 +1,264 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CTX_set_custom_cli_ext 3"
+.TH SSL_CTX_set_custom_cli_ext 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CTX_add_client_custom_ext, SSL_CTX_add_server_custom_ext,
+SSL_extension_supported \- custom TLS extension handling
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& int SSL_CTX_add_client_custom_ext(SSL_CTX *ctx, unsigned int ext_type,
+\& custom_ext_add_cb add_cb,
+\& custom_ext_free_cb free_cb, void *add_arg,
+\& custom_ext_parse_cb parse_cb,
+\& void *parse_arg);
+\&
+\& int SSL_CTX_add_server_custom_ext(SSL_CTX *ctx, unsigned int ext_type,
+\& custom_ext_add_cb add_cb,
+\& custom_ext_free_cb free_cb, void *add_arg,
+\& custom_ext_parse_cb parse_cb,
+\& void *parse_arg);
+\&
+\& int SSL_extension_supported(unsigned int ext_type);
+\&
+\& typedef int (*custom_ext_add_cb)(SSL *s, unsigned int ext_type,
+\& const unsigned char **out,
+\& size_t *outlen, int *al,
+\& void *add_arg);
+\&
+\& typedef void (*custom_ext_free_cb)(SSL *s, unsigned int ext_type,
+\& const unsigned char *out,
+\& void *add_arg);
+\&
+\& typedef int (*custom_ext_parse_cb)(SSL *s, unsigned int ext_type,
+\& const unsigned char *in,
+\& size_t inlen, int *al,
+\& void *parse_arg);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+\&\fISSL_CTX_add_client_custom_ext()\fR adds a custom extension for a \s-1TLS\s0 client
+with extension type \fBext_type\fR and callbacks \fBadd_cb\fR, \fBfree_cb\fR and
+\&\fBparse_cb\fR.
+.PP
+\&\fISSL_CTX_add_server_custom_ext()\fR adds a custom extension for a \s-1TLS\s0 server
+with extension type \fBext_type\fR and callbacks \fBadd_cb\fR, \fBfree_cb\fR and
+\&\fBparse_cb\fR.
+.PP
+In both cases the extension type must not be one handled internally by
+OpenSSL, or an error occurs.
+.PP
+\&\fISSL_extension_supported()\fR returns 1 if the extension \fBext_type\fR is handled
+internally by OpenSSL and 0 otherwise.
+.SH "EXTENSION CALLBACKS"
+.IX Header "EXTENSION CALLBACKS"
+The callback \fBadd_cb\fR is called to send custom extension data to be
+included in ClientHello for \s-1TLS\s0 clients or ServerHello for servers. The
+\&\fBext_type\fR parameter is set to the extension type which will be added and
+\&\fBadd_arg\fR to the value set when the extension handler was added.
+.PP
+If the application wishes to include the extension \fBext_type\fR it should
+set \fB*out\fR to the extension data, set \fB*outlen\fR to the length of the
+extension data and return 1.
+.PP
+If the \fBadd_cb\fR does not wish to include the extension it must return 0.
+.PP
+If \fBadd_cb\fR returns \-1 a fatal handshake error occurs using the \s-1TLS\s0
+alert value specified in \fB*al\fR.
+.PP
+For clients (but not servers), if \fBadd_cb\fR is set to \s-1NULL\s0 a zero-length
+extension is added for \fBext_type\fR.
+.PP
+For clients every registered \fBadd_cb\fR is always called to see if the
+application wishes to add an extension to ClientHello.
+.PP
+For servers every registered \fBadd_cb\fR is called once if and only if the
+corresponding extension was received in ClientHello to see if the application
+wishes to add the extension to ServerHello. That is, if no corresponding extension
+was received in ClientHello then \fBadd_cb\fR will not be called.
+.PP
+If an extension is added (that is \fBadd_cb\fR returns 1) \fBfree_cb\fR is called
+(if it is set) with the value of \fBout\fR set by the add callback. It can be
+used to free up any dynamic extension data set by \fBadd_cb\fR. Since \fBout\fR is
+constant (to permit use of constant data in \fBadd_cb\fR) applications may need to
+cast away const to free the data.
+.PP
+The callback \fBparse_cb\fR receives data for \s-1TLS\s0 extensions. For \s-1TLS\s0 clients
+the extension data will come from ServerHello and for \s-1TLS\s0 servers it will
+come from ClientHello.
+.PP
+The extension data consists of \fBinlen\fR bytes in the buffer \fBin\fR for the
+extension \fBext_type\fR.
+.PP
+If the \fBparse_cb\fR considers the extension data acceptable it must return
+1. If it returns 0 or a negative value a fatal handshake error occurs
+using the \s-1TLS\s0 alert value specified in \fB*al\fR.
+.PP
+The buffer \fBin\fR is a temporary internal buffer which will not be valid after
+the callback returns.
+.SH "NOTES"
+.IX Header "NOTES"
+The \fBadd_arg\fR and \fBparse_arg\fR parameters can be set to arbitrary values
+which will be passed to the corresponding callbacks. They can, for example,
+be used to store the extension data received in a convenient structure or
+pass the extension data to be added or freed when adding extensions.
+.PP
+The \fBext_type\fR parameter corresponds to the \fBextension_type\fR field of
+\&\s-1RFC5246\s0 et al. It is \fBnot\fR a \s-1NID.\s0
+.PP
+If the same custom extension type is received multiple times a fatal
+\&\fBdecode_error\fR alert is sent and the handshake aborts. If a custom extension
+is received in ServerHello which was not sent in ClientHello a fatal
+\&\fBunsupported_extension\fR alert is sent and the handshake is aborted. The
+ServerHello \fBadd_cb\fR callback is only called if the corresponding extension
+was received in ClientHello. This is compliant with the \s-1TLS\s0 specifications.
+This behaviour ensures that each callback is called at most once and that
+an application can never send unsolicited extensions.
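+.PP
+A compact sketch of a client-side registration (the extension type \fB1000\fR,
+the callback names and the one-byte payload are all hypothetical):
+.PP
+.Vb 22
+\& /* sketch: advertise a one-byte custom extension, accept any reply */
+\& static int my_add_cb(SSL *s, unsigned int ext_type,
+\&                      const unsigned char **out, size_t *outlen,
+\&                      int *al, void *add_arg)
+\& {
+\&     static const unsigned char payload[] = { 0x01 };
+\&     *out = payload;
+\&     *outlen = sizeof(payload);
+\&     return 1; /* include the extension */
+\& }
+\&
+\& static int my_parse_cb(SSL *s, unsigned int ext_type,
+\&                        const unsigned char *in, size_t inlen,
+\&                        int *al, void *parse_arg)
+\& {
+\&     return 1; /* the peer's extension data is acceptable */
+\& }
+\&
+\& /* no free_cb is needed here because the payload is static */
+\& if (SSL_CTX_add_client_custom_ext(ctx, 1000, my_add_cb, NULL, NULL,
+\&                                   my_parse_cb, NULL) != 1)
+\&     /* handle the registration error */;
+.Ve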
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+\&\fISSL_CTX_add_client_custom_ext()\fR and \fISSL_CTX_add_server_custom_ext()\fR return 1 for
+success and 0 for failure. A failure can occur if an attempt is made to
+add the same \fBext_type\fR more than once, if an attempt is made to use an
+extension type handled internally by OpenSSL or if an internal error occurs
+(for example a memory allocation failure).
+.PP
+\&\fISSL_extension_supported()\fR returns 1 if the extension \fBext_type\fR is handled
+internally by OpenSSL and 0 otherwise.
diff --git a/secure/lib/libssl/man/SSL_CTX_set_default_passwd_cb.3 b/secure/lib/libssl/man/SSL_CTX_set_default_passwd_cb.3
index 9dab343..7388cb1 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_default_passwd_cb.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_default_passwd_cb.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_default_passwd_cb 3"
-.TH SSL_CTX_set_default_passwd_cb 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_default_passwd_cb 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_generate_session_id.3 b/secure/lib/libssl/man/SSL_CTX_set_generate_session_id.3
index cd51338..602d00c 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_generate_session_id.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_generate_session_id.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_generate_session_id 3"
-.TH SSL_CTX_set_generate_session_id 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_generate_session_id 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_info_callback.3 b/secure/lib/libssl/man/SSL_CTX_set_info_callback.3
index 58ee7a2..d8bcc3e 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_info_callback.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_info_callback.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_info_callback 3"
-.TH SSL_CTX_set_info_callback 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_info_callback 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_max_cert_list.3 b/secure/lib/libssl/man/SSL_CTX_set_max_cert_list.3
index 265cc55..c018083 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_max_cert_list.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_max_cert_list.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_max_cert_list 3"
-.TH SSL_CTX_set_max_cert_list 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_max_cert_list 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_mode.3 b/secure/lib/libssl/man/SSL_CTX_set_mode.3
index b1502ff..d0e0442 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_mode.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_mode.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_mode 3"
-.TH SSL_CTX_set_mode 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_mode 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_msg_callback.3 b/secure/lib/libssl/man/SSL_CTX_set_msg_callback.3
index 3bfa3d5..8c5cc1d 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_msg_callback.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_msg_callback.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_msg_callback 3"
-.TH SSL_CTX_set_msg_callback 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_msg_callback 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_options.3 b/secure/lib/libssl/man/SSL_CTX_set_options.3
index 3747de6..2be16ce 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_options.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_options.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_options 3"
-.TH SSL_CTX_set_options 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_options 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_psk_client_callback.3 b/secure/lib/libssl/man/SSL_CTX_set_psk_client_callback.3
index 54bbe43..0596370 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_psk_client_callback.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_psk_client_callback.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_psk_client_callback 3"
-.TH SSL_CTX_set_psk_client_callback 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_psk_client_callback 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_quiet_shutdown.3 b/secure/lib/libssl/man/SSL_CTX_set_quiet_shutdown.3
index 9520211..1184c4a 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_quiet_shutdown.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_quiet_shutdown.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_quiet_shutdown 3"
-.TH SSL_CTX_set_quiet_shutdown 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_quiet_shutdown 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_read_ahead.3 b/secure/lib/libssl/man/SSL_CTX_set_read_ahead.3
index d72eb46..c5fbc2c 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_read_ahead.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_read_ahead.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_read_ahead 3"
-.TH SSL_CTX_set_read_ahead 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_read_ahead 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_session_cache_mode.3 b/secure/lib/libssl/man/SSL_CTX_set_session_cache_mode.3
index dac4b7d..40585e6 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_session_cache_mode.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_session_cache_mode.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_session_cache_mode 3"
-.TH SSL_CTX_set_session_cache_mode 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_session_cache_mode 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_session_id_context.3 b/secure/lib/libssl/man/SSL_CTX_set_session_id_context.3
index 31dea8b..1ade776 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_session_id_context.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_session_id_context.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_session_id_context 3"
-.TH SSL_CTX_set_session_id_context 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_session_id_context 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_ssl_version.3 b/secure/lib/libssl/man/SSL_CTX_set_ssl_version.3
index ebce586..c3037c8 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_ssl_version.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_ssl_version.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_ssl_version 3"
-.TH SSL_CTX_set_ssl_version 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_ssl_version 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_timeout.3 b/secure/lib/libssl/man/SSL_CTX_set_timeout.3
index e9471a0..6b085c8 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_timeout.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_timeout.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_timeout 3"
-.TH SSL_CTX_set_timeout 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_timeout 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_tlsext_ticket_key_cb.3 b/secure/lib/libssl/man/SSL_CTX_set_tlsext_ticket_key_cb.3
index 0e442ea..ca7e7aa 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_tlsext_ticket_key_cb.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_tlsext_ticket_key_cb.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_tlsext_ticket_key_cb 3"
-.TH SSL_CTX_set_tlsext_ticket_key_cb 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_tlsext_ticket_key_cb 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_tmp_dh_callback.3 b/secure/lib/libssl/man/SSL_CTX_set_tmp_dh_callback.3
index d58f518..22e2e6b 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_tmp_dh_callback.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_tmp_dh_callback.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_tmp_dh_callback 3"
-.TH SSL_CTX_set_tmp_dh_callback 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_tmp_dh_callback 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_set_tmp_rsa_callback.3 b/secure/lib/libssl/man/SSL_CTX_set_tmp_rsa_callback.3
index 2338021..efd8ee0 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_tmp_rsa_callback.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_tmp_rsa_callback.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_tmp_rsa_callback 3"
-.TH SSL_CTX_set_tmp_rsa_callback 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_tmp_rsa_callback 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -206,7 +206,7 @@ the \s-1TLS\s0 standard, when the \s-1RSA\s0 key can be used for signing only, t
for export ciphers. Using ephemeral \s-1RSA\s0 key exchange for other purposes
violates the standard and can break interoperability with clients.
It is therefore strongly recommended to not use ephemeral \s-1RSA\s0 key
-exchange and use \s-1EDH \s0(Ephemeral Diffie-Hellman) key exchange instead
+exchange and use \s-1DHE \s0(Ephemeral Diffie-Hellman) key exchange instead
in order to achieve forward secrecy (see
\&\fISSL_CTX_set_tmp_dh_callback\fR\|(3)).
.PP
diff --git a/secure/lib/libssl/man/SSL_CTX_set_verify.3 b/secure/lib/libssl/man/SSL_CTX_set_verify.3
index 1e4b37d..6a5961e 100644
--- a/secure/lib/libssl/man/SSL_CTX_set_verify.3
+++ b/secure/lib/libssl/man/SSL_CTX_set_verify.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_set_verify 3"
-.TH SSL_CTX_set_verify 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_set_verify 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_CTX_use_certificate.3 b/secure/lib/libssl/man/SSL_CTX_use_certificate.3
index 293b584..c5c6eea 100644
--- a/secure/lib/libssl/man/SSL_CTX_use_certificate.3
+++ b/secure/lib/libssl/man/SSL_CTX_use_certificate.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_use_certificate 3"
-.TH SSL_CTX_use_certificate 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_use_certificate 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -244,10 +244,9 @@ the same check for \fBssl\fR. If no key/certificate was explicitly added for
this \fBssl\fR, the last item added into \fBctx\fR will be checked.
.SH "NOTES"
.IX Header "NOTES"
-The internal certificate store of OpenSSL can hold two private key/certificate
-pairs at a time: one key/certificate of type \s-1RSA\s0 and one key/certificate
-of type \s-1DSA.\s0 The certificate used depends on the cipher select, see
-also \fISSL_CTX_set_cipher_list\fR\|(3).
+The internal certificate store of OpenSSL can hold several private
+key/certificate pairs at a time. The certificate used depends on the
+cipher selected, see also \fISSL_CTX_set_cipher_list\fR\|(3).
.PP
When reading certificates and private keys from file, files of type
\&\s-1SSL_FILETYPE_ASN1 \s0(also known as \fB\s-1DER\s0\fR, binary encoding) can only contain
@@ -257,16 +256,13 @@ Files of type \s-1SSL_FILETYPE_PEM\s0 can contain more than one item.
.PP
\&\fISSL_CTX_use_certificate_chain_file()\fR adds the first certificate found
in the file to the certificate store. The other certificates are added
-to the store of chain certificates using
-\&\fISSL_CTX_add_extra_chain_cert\fR\|(3).
-There exists only one extra chain store, so that the same chain is appended
-to both types of certificates, \s-1RSA\s0 and \s-1DSA\s0! If it is not intended to use
-both type of certificate at the same time, it is recommended to use the
-\&\fISSL_CTX_use_certificate_chain_file()\fR instead of the
-\&\fISSL_CTX_use_certificate_file()\fR function in order to allow the use of
-complete certificate chains even when no trusted \s-1CA\s0 storage is used or
-when the \s-1CA\s0 issuing the certificate shall not be added to the trusted
-\&\s-1CA\s0 storage.
+to the store of chain certificates using
+\&\fISSL_CTX_add1_chain_cert\fR\|(3). Note: versions of OpenSSL before
+1.0.2 only had a single certificate chain store for all certificate
+types; OpenSSL 1.0.2 and later
+have a separate chain store for each type. \fISSL_CTX_use_certificate_chain_file()\fR
+should be used instead of the \fISSL_CTX_use_certificate_file()\fR function in order
+to allow the use of complete certificate chains even when no trusted \s-1CA\s0
+storage is used or when the \s-1CA\s0 issuing the certificate shall not be added to
+the trusted \s-1CA\s0 storage.
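+.PP
+As a brief sketch (the file names are purely illustrative), a server would
+typically load a complete \s-1PEM\s0 chain together with its private key:
+.PP
+.Vb 5
+\& /* sketch: load leaf plus intermediates from one PEM file */
+\& if (SSL_CTX_use_certificate_chain_file(ctx, "/path/to/chain.pem") != 1)
+\&     /* handle the error */;
+\& if (SSL_CTX_use_PrivateKey_file(ctx, "/path/to/key.pem", SSL_FILETYPE_PEM) != 1)
+\&     /* handle the error */;
+.Ve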
.PP
If additional certificates are needed to complete the chain during the
\&\s-1TLS\s0 negotiation, \s-1CA\s0 certificates are additionally looked up in the
diff --git a/secure/lib/libssl/man/SSL_CTX_use_psk_identity_hint.3 b/secure/lib/libssl/man/SSL_CTX_use_psk_identity_hint.3
index 04ea12a..3061fbc 100644
--- a/secure/lib/libssl/man/SSL_CTX_use_psk_identity_hint.3
+++ b/secure/lib/libssl/man/SSL_CTX_use_psk_identity_hint.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_CTX_use_psk_identity_hint 3"
-.TH SSL_CTX_use_psk_identity_hint 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_CTX_use_psk_identity_hint 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -184,8 +184,11 @@ by the client in parameter \fBidentity\fR, and a buffer \fBpsk\fR of length
1 on success, 0 otherwise.
.PP
Return values from the server callback are interpreted as follows:
-.IP "> 0" 4
-.IX Item "> 0"
+.IP "0" 4
+\&\s-1PSK\s0 identity was not found. An \*(L"unknown_psk_identity\*(R" alert message
+will be sent and the connection setup fails.
+.IP ">0" 4
+.IX Item ">0"
\&\s-1PSK\s0 identity was found and the server callback has provided the \s-1PSK\s0
successfully in parameter \fBpsk\fR. Return value is the length of
\&\fBpsk\fR in bytes. It is an error to return a value greater than
@@ -196,6 +199,3 @@ protocol to continue anyway, the callback must provide some random
data to \fBpsk\fR and return the length of the random data, so the
connection will fail with decryption_error before it will be finished
completely.
-.IP "0" 4
-\&\s-1PSK\s0 identity was not found. An \*(L"unknown_psk_identity\*(R" alert message
-will be sent and the connection setup fails.
diff --git a/secure/lib/libssl/man/SSL_CTX_use_serverinfo.3 b/secure/lib/libssl/man/SSL_CTX_use_serverinfo.3
new file mode 100644
index 0000000..124d00a
--- /dev/null
+++ b/secure/lib/libssl/man/SSL_CTX_use_serverinfo.3
@@ -0,0 +1,179 @@
+.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.30)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+. ds C`
+. ds C'
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.\"
+.\" Avoid warning from groff about undefined register 'F'.
+.de IX
+..
+.nr rF 0
+.if \n(.g .if rF .nr rF 1
+.if (\n(rF:(\n(.g==0)) \{
+. if \nF \{
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. if !\nF==2 \{
+. nr % 0
+. nr F 2
+. \}
+. \}
+.\}
+.rr rF
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "SSL_CTX_use_serverinfo 3"
+.TH SSL_CTX_use_serverinfo 3 "2015-07-09" "1.0.2d" "OpenSSL"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+SSL_CTX_use_serverinfo, SSL_CTX_use_serverinfo_file \- use serverinfo extension
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+.Vb 1
+\& #include <openssl/ssl.h>
+\&
+\& int SSL_CTX_use_serverinfo(SSL_CTX *ctx, const unsigned char *serverinfo,
+\& size_t serverinfo_length);
+\&
+\& int SSL_CTX_use_serverinfo_file(SSL_CTX *ctx, const char *file);
+.Ve
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+These functions load \*(L"serverinfo\*(R" \s-1TLS\s0 ServerHello Extensions into the \s-1SSL_CTX.
+A \s0\*(L"serverinfo\*(R" extension is returned in response to an empty ClientHello
+Extension.
+.PP
+\&\fISSL_CTX_use_serverinfo()\fR loads one or more serverinfo extensions from
+a byte array into \fBctx\fR. The extensions must be concatenated into a
+sequence of bytes. Each extension must consist of a 2\-byte Extension Type,
+a 2\-byte length, and then length bytes of extension_data.
+.PP
+\&\fISSL_CTX_use_serverinfo_file()\fR loads one or more serverinfo extensions from
+\&\fBfile\fR into \fBctx\fR. The extensions must be in \s-1PEM\s0 format. Each extension
+must consist of a 2\-byte Extension Type, a 2\-byte length, and then length
+bytes of extension_data. Each \s-1PEM\s0 extension name must begin with the phrase
+\&\*(L"\s-1BEGIN SERVERINFO FOR \*(R".\s0
+.SH "NOTES"
+.IX Header "NOTES"
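+A small sketch of the wire layout (the extension type \fB0x1234\fR and the
+payload bytes are hypothetical; a real type must not clash with extensions
+handled internally by OpenSSL):
+.PP
+.Vb 9
+\& /* sketch: one serverinfo extension carrying a three-byte payload */
+\& static const unsigned char serverinfo[] = {
+\&     0x12, 0x34,        /* 2\-byte extension type */
+\&     0x00, 0x03,        /* 2\-byte length */
+\&     0x01, 0x02, 0x03   /* extension_data */
+\& };
+\&
+\& if (SSL_CTX_use_serverinfo(ctx, serverinfo, sizeof(serverinfo)) != 1)
+\&     /* handle the error */;
+.Ve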
+.SH "RETURN VALUES"
+.IX Header "RETURN VALUES"
+On success, the functions return 1.
+On failure, the functions return 0. Consult the error stack to find out
+the reason.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+.SH "HISTORY"
+.IX Header "HISTORY"
+These functions were first added to OpenSSL 1.0.2.
diff --git a/secure/lib/libssl/man/SSL_SESSION_free.3 b/secure/lib/libssl/man/SSL_SESSION_free.3
index 52a5490..31dc630 100644
--- a/secure/lib/libssl/man/SSL_SESSION_free.3
+++ b/secure/lib/libssl/man/SSL_SESSION_free.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_SESSION_free 3"
-.TH SSL_SESSION_free 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_SESSION_free 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_SESSION_get_ex_new_index.3 b/secure/lib/libssl/man/SSL_SESSION_get_ex_new_index.3
index b861fe3..f84542d 100644
--- a/secure/lib/libssl/man/SSL_SESSION_get_ex_new_index.3
+++ b/secure/lib/libssl/man/SSL_SESSION_get_ex_new_index.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_SESSION_get_ex_new_index 3"
-.TH SSL_SESSION_get_ex_new_index 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_SESSION_get_ex_new_index 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_SESSION_get_time.3 b/secure/lib/libssl/man/SSL_SESSION_get_time.3
index 669901e..5dc18e6 100644
--- a/secure/lib/libssl/man/SSL_SESSION_get_time.3
+++ b/secure/lib/libssl/man/SSL_SESSION_get_time.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_SESSION_get_time 3"
-.TH SSL_SESSION_get_time 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_SESSION_get_time 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_accept.3 b/secure/lib/libssl/man/SSL_accept.3
index 0636013..9c9608a 100644
--- a/secure/lib/libssl/man/SSL_accept.3
+++ b/secure/lib/libssl/man/SSL_accept.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_accept 3"
-.TH SSL_accept 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_accept 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -157,10 +157,7 @@ The communication channel must already have been set and assigned to the
The behaviour of \fISSL_accept()\fR depends on the underlying \s-1BIO\s0.
.PP
If the underlying \s-1BIO\s0 is \fBblocking\fR, \fISSL_accept()\fR will only return once the
-handshake has been finished or an error occurred, except for \s-1SGC \s0(Server
-Gated Cryptography). For \s-1SGC,\s0 \fISSL_accept()\fR may return with \-1, but
-\&\fISSL_get_error()\fR will yield \fB\s-1SSL_ERROR_WANT_READ/WRITE\s0\fR and \fISSL_accept()\fR
-should be called again.
+handshake has been finished or an error occurred.
.PP
If the underlying \s-1BIO\s0 is \fBnon-blocking\fR, \fISSL_accept()\fR will also return
when the underlying \s-1BIO\s0 could not satisfy the needs of \fISSL_accept()\fR
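.PP
The following minimal sketch (an illustration, not taken from the upstream
manual) drives \fISSL_accept()\fR over a non-blocking \s-1BIO,\s0 retrying
while \fISSL_get_error()\fR reports that the handshake is still in progress:
.PP
.Vb 18
\& /* Returns 1 once the handshake is finished, 0 on fatal error. */
\& static int accept_nonblocking(SSL *ssl)
\& {
\&     for (;;) {
\&         int ret = SSL_accept(ssl);
\&         if (ret == 1)
\&             return 1;              /* handshake finished */
\&         switch (SSL_get_error(ssl, ret)) {
\&         case SSL_ERROR_WANT_READ:
\&         case SSL_ERROR_WANT_WRITE:
\&             /* Wait for the underlying socket with select(2) or
\&                poll(2), then retry the call. */
\&             continue;
\&         default:
\&             return 0;              /* consult the error stack */
\&         }
\&     }
\& }
.Ve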
diff --git a/secure/lib/libssl/man/SSL_alert_type_string.3 b/secure/lib/libssl/man/SSL_alert_type_string.3
index 476983d..f4d2f4e 100644
--- a/secure/lib/libssl/man/SSL_alert_type_string.3
+++ b/secure/lib/libssl/man/SSL_alert_type_string.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_alert_type_string 3"
-.TH SSL_alert_type_string 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_alert_type_string 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_clear.3 b/secure/lib/libssl/man/SSL_clear.3
index 1226faa..26bca44 100644
--- a/secure/lib/libssl/man/SSL_clear.3
+++ b/secure/lib/libssl/man/SSL_clear.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_clear 3"
-.TH SSL_clear 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_clear 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_connect.3 b/secure/lib/libssl/man/SSL_connect.3
index b82e103..a1039ba 100644
--- a/secure/lib/libssl/man/SSL_connect.3
+++ b/secure/lib/libssl/man/SSL_connect.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_connect 3"
-.TH SSL_connect 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_connect 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_do_handshake.3 b/secure/lib/libssl/man/SSL_do_handshake.3
index 3d62274..5471b70 100644
--- a/secure/lib/libssl/man/SSL_do_handshake.3
+++ b/secure/lib/libssl/man/SSL_do_handshake.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_do_handshake 3"
-.TH SSL_do_handshake 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_do_handshake 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -159,10 +159,7 @@ routines may have to be explicitly set in advance using either
The behaviour of \fISSL_do_handshake()\fR depends on the underlying \s-1BIO\s0.
.PP
If the underlying \s-1BIO\s0 is \fBblocking\fR, \fISSL_do_handshake()\fR will only return
-once the handshake has been finished or an error occurred, except for \s-1SGC
-\&\s0(Server Gated Cryptography). For \s-1SGC,\s0 \fISSL_do_handshake()\fR may return with \-1,
-but \fISSL_get_error()\fR will yield \fB\s-1SSL_ERROR_WANT_READ/WRITE\s0\fR and
-\&\fISSL_do_handshake()\fR should be called again.
+once the handshake has been finished or an error occurred.
.PP
If the underlying \s-1BIO\s0 is \fBnon-blocking\fR, \fISSL_do_handshake()\fR will also return
when the underlying \s-1BIO\s0 could not satisfy the needs of \fISSL_do_handshake()\fR
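.PP
As a minimal sketch (an illustration, not taken from the upstream manual),
a non-blocking handshake can be multiplexed with \fIpoll\fR\|(2); here
\&\fBfd\fR is assumed to be the socket underlying the \s-1SSL\s0's \s-1BIO:\s0
.PP
.Vb 25
\& #include <poll.h>
\&
\& /* Returns 1 once the handshake is finished, 0 on fatal error. */
\& static int handshake_nonblocking(SSL *ssl, int fd)
\& {
\&     struct pollfd pfd = { fd, 0, 0 };
\&
\&     for (;;) {
\&         int ret = SSL_do_handshake(ssl);
\&         if (ret == 1)
\&             return 1;
\&         switch (SSL_get_error(ssl, ret)) {
\&         case SSL_ERROR_WANT_READ:
\&             pfd.events = POLLIN;
\&             break;
\&         case SSL_ERROR_WANT_WRITE:
\&             pfd.events = POLLOUT;
\&             break;
\&         default:
\&             return 0;              /* consult the error stack */
\&         }
\&         if (poll(&pfd, 1, \-1) < 0)
\&             return 0;
\&     }
\& }
.Ve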
diff --git a/secure/lib/libssl/man/SSL_free.3 b/secure/lib/libssl/man/SSL_free.3
index f2dc9d8..1c87807 100644
--- a/secure/lib/libssl/man/SSL_free.3
+++ b/secure/lib/libssl/man/SSL_free.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_free 3"
-.TH SSL_free 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_free 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_SSL_CTX.3 b/secure/lib/libssl/man/SSL_get_SSL_CTX.3
index e33e11e..2960a43 100644
--- a/secure/lib/libssl/man/SSL_get_SSL_CTX.3
+++ b/secure/lib/libssl/man/SSL_get_SSL_CTX.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_SSL_CTX 3"
-.TH SSL_get_SSL_CTX 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_SSL_CTX 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_ciphers.3 b/secure/lib/libssl/man/SSL_get_ciphers.3
index e31ba68..37fa048 100644
--- a/secure/lib/libssl/man/SSL_get_ciphers.3
+++ b/secure/lib/libssl/man/SSL_get_ciphers.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_ciphers 3"
-.TH SSL_get_ciphers 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_ciphers 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_client_CA_list.3 b/secure/lib/libssl/man/SSL_get_client_CA_list.3
index 47a8ba5..370f86f 100644
--- a/secure/lib/libssl/man/SSL_get_client_CA_list.3
+++ b/secure/lib/libssl/man/SSL_get_client_CA_list.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_client_CA_list 3"
-.TH SSL_get_client_CA_list 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_client_CA_list 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_current_cipher.3 b/secure/lib/libssl/man/SSL_get_current_cipher.3
index 041ec39..1c27046 100644
--- a/secure/lib/libssl/man/SSL_get_current_cipher.3
+++ b/secure/lib/libssl/man/SSL_get_current_cipher.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_current_cipher 3"
-.TH SSL_get_current_cipher 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_current_cipher 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_default_timeout.3 b/secure/lib/libssl/man/SSL_get_default_timeout.3
index fd3298a..bfa05ef 100644
--- a/secure/lib/libssl/man/SSL_get_default_timeout.3
+++ b/secure/lib/libssl/man/SSL_get_default_timeout.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_default_timeout 3"
-.TH SSL_get_default_timeout 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_default_timeout 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_error.3 b/secure/lib/libssl/man/SSL_get_error.3
index ce9d766..68201a0 100644
--- a/secure/lib/libssl/man/SSL_get_error.3
+++ b/secure/lib/libssl/man/SSL_get_error.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_error 3"
-.TH SSL_get_error 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_error 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_ex_data_X509_STORE_CTX_idx.3 b/secure/lib/libssl/man/SSL_get_ex_data_X509_STORE_CTX_idx.3
index 0758afa..0f067ed 100644
--- a/secure/lib/libssl/man/SSL_get_ex_data_X509_STORE_CTX_idx.3
+++ b/secure/lib/libssl/man/SSL_get_ex_data_X509_STORE_CTX_idx.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_ex_data_X509_STORE_CTX_idx 3"
-.TH SSL_get_ex_data_X509_STORE_CTX_idx 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_ex_data_X509_STORE_CTX_idx 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_ex_new_index.3 b/secure/lib/libssl/man/SSL_get_ex_new_index.3
index 8343610..3a7177c 100644
--- a/secure/lib/libssl/man/SSL_get_ex_new_index.3
+++ b/secure/lib/libssl/man/SSL_get_ex_new_index.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_ex_new_index 3"
-.TH SSL_get_ex_new_index 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_ex_new_index 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_fd.3 b/secure/lib/libssl/man/SSL_get_fd.3
index 610e409..1660074 100644
--- a/secure/lib/libssl/man/SSL_get_fd.3
+++ b/secure/lib/libssl/man/SSL_get_fd.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_fd 3"
-.TH SSL_get_fd 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_fd 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_peer_cert_chain.3 b/secure/lib/libssl/man/SSL_get_peer_cert_chain.3
index e63045b..3659142 100644
--- a/secure/lib/libssl/man/SSL_get_peer_cert_chain.3
+++ b/secure/lib/libssl/man/SSL_get_peer_cert_chain.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_peer_cert_chain 3"
-.TH SSL_get_peer_cert_chain 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_peer_cert_chain 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_peer_certificate.3 b/secure/lib/libssl/man/SSL_get_peer_certificate.3
index b908dd7..2199634 100644
--- a/secure/lib/libssl/man/SSL_get_peer_certificate.3
+++ b/secure/lib/libssl/man/SSL_get_peer_certificate.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_peer_certificate 3"
-.TH SSL_get_peer_certificate 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_peer_certificate 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_psk_identity.3 b/secure/lib/libssl/man/SSL_get_psk_identity.3
index 7849428..a67c8a1 100644
--- a/secure/lib/libssl/man/SSL_get_psk_identity.3
+++ b/secure/lib/libssl/man/SSL_get_psk_identity.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_psk_identity 3"
-.TH SSL_get_psk_identity 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_psk_identity 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_rbio.3 b/secure/lib/libssl/man/SSL_get_rbio.3
index b4f6682..8e2aca5 100644
--- a/secure/lib/libssl/man/SSL_get_rbio.3
+++ b/secure/lib/libssl/man/SSL_get_rbio.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_rbio 3"
-.TH SSL_get_rbio 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_rbio 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_session.3 b/secure/lib/libssl/man/SSL_get_session.3
index 58ccee0..8071642 100644
--- a/secure/lib/libssl/man/SSL_get_session.3
+++ b/secure/lib/libssl/man/SSL_get_session.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_session 3"
-.TH SSL_get_session 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_session 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_verify_result.3 b/secure/lib/libssl/man/SSL_get_verify_result.3
index 898c0a0..85bd8a7 100644
--- a/secure/lib/libssl/man/SSL_get_verify_result.3
+++ b/secure/lib/libssl/man/SSL_get_verify_result.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_verify_result 3"
-.TH SSL_get_verify_result 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_verify_result 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_get_version.3 b/secure/lib/libssl/man/SSL_get_version.3
index 672e51f..faf1c43 100644
--- a/secure/lib/libssl/man/SSL_get_version.3
+++ b/secure/lib/libssl/man/SSL_get_version.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_get_version 3"
-.TH SSL_get_version 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_get_version 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_library_init.3 b/secure/lib/libssl/man/SSL_library_init.3
index 41c9a78..bc49f6c 100644
--- a/secure/lib/libssl/man/SSL_library_init.3
+++ b/secure/lib/libssl/man/SSL_library_init.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_library_init 3"
-.TH SSL_library_init 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_library_init 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_load_client_CA_file.3 b/secure/lib/libssl/man/SSL_load_client_CA_file.3
index 2fadca2..4382fea 100644
--- a/secure/lib/libssl/man/SSL_load_client_CA_file.3
+++ b/secure/lib/libssl/man/SSL_load_client_CA_file.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_load_client_CA_file 3"
-.TH SSL_load_client_CA_file 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_load_client_CA_file 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_new.3 b/secure/lib/libssl/man/SSL_new.3
index dd76ac5..a6a5319 100644
--- a/secure/lib/libssl/man/SSL_new.3
+++ b/secure/lib/libssl/man/SSL_new.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_new 3"
-.TH SSL_new 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_new 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_pending.3 b/secure/lib/libssl/man/SSL_pending.3
index 975f6e1..1f25900 100644
--- a/secure/lib/libssl/man/SSL_pending.3
+++ b/secure/lib/libssl/man/SSL_pending.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_pending 3"
-.TH SSL_pending 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_pending 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_read.3 b/secure/lib/libssl/man/SSL_read.3
index 5cb35f8..9f899c5 100644
--- a/secure/lib/libssl/man/SSL_read.3
+++ b/secure/lib/libssl/man/SSL_read.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_read 3"
-.TH SSL_read 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_read 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_rstate_string.3 b/secure/lib/libssl/man/SSL_rstate_string.3
index 6c83879..ba560a5 100644
--- a/secure/lib/libssl/man/SSL_rstate_string.3
+++ b/secure/lib/libssl/man/SSL_rstate_string.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_rstate_string 3"
-.TH SSL_rstate_string 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_rstate_string 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_session_reused.3 b/secure/lib/libssl/man/SSL_session_reused.3
index 278e81a..c977ef6 100644
--- a/secure/lib/libssl/man/SSL_session_reused.3
+++ b/secure/lib/libssl/man/SSL_session_reused.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_session_reused 3"
-.TH SSL_session_reused 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_session_reused 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_set_bio.3 b/secure/lib/libssl/man/SSL_set_bio.3
index c7c3bf9..d097281 100644
--- a/secure/lib/libssl/man/SSL_set_bio.3
+++ b/secure/lib/libssl/man/SSL_set_bio.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_set_bio 3"
-.TH SSL_set_bio 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_set_bio 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_set_connect_state.3 b/secure/lib/libssl/man/SSL_set_connect_state.3
index b937dc1..0fe480a 100644
--- a/secure/lib/libssl/man/SSL_set_connect_state.3
+++ b/secure/lib/libssl/man/SSL_set_connect_state.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_set_connect_state 3"
-.TH SSL_set_connect_state 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_set_connect_state 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_set_fd.3 b/secure/lib/libssl/man/SSL_set_fd.3
index 2cb4fdc..56bde1d 100644
--- a/secure/lib/libssl/man/SSL_set_fd.3
+++ b/secure/lib/libssl/man/SSL_set_fd.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_set_fd 3"
-.TH SSL_set_fd 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_set_fd 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_set_session.3 b/secure/lib/libssl/man/SSL_set_session.3
index 42c6eb0..63eecd5 100644
--- a/secure/lib/libssl/man/SSL_set_session.3
+++ b/secure/lib/libssl/man/SSL_set_session.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_set_session 3"
-.TH SSL_set_session 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_set_session 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_set_shutdown.3 b/secure/lib/libssl/man/SSL_set_shutdown.3
index ec6b0ba..6c401f4 100644
--- a/secure/lib/libssl/man/SSL_set_shutdown.3
+++ b/secure/lib/libssl/man/SSL_set_shutdown.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_set_shutdown 3"
-.TH SSL_set_shutdown 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_set_shutdown 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_set_verify_result.3 b/secure/lib/libssl/man/SSL_set_verify_result.3
index 3e00707..f0ce4c3 100644
--- a/secure/lib/libssl/man/SSL_set_verify_result.3
+++ b/secure/lib/libssl/man/SSL_set_verify_result.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_set_verify_result 3"
-.TH SSL_set_verify_result 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_set_verify_result 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_shutdown.3 b/secure/lib/libssl/man/SSL_shutdown.3
index 24b81bc..cef5bfb 100644
--- a/secure/lib/libssl/man/SSL_shutdown.3
+++ b/secure/lib/libssl/man/SSL_shutdown.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_shutdown 3"
-.TH SSL_shutdown 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_shutdown 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -218,8 +218,8 @@ erroneous \s-1SSL_ERROR_SYSCALL\s0 may be flagged even though no error occurred.
.IX Item "1"
The shutdown was successfully completed. The \*(L"close notify\*(R" alert was sent
and the peer's \*(L"close notify\*(R" alert was received.
-.IP "\-1" 4
-.IX Item "-1"
+.IP "<0" 4
+.IX Item "<0"
The shutdown was not successful because a fatal error occurred either
at the protocol level or because of a connection failure. It can also occur
when action is needed to continue the operation for non-blocking BIOs.
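.PP
A minimal sketch (illustrative, not taken from the upstream manual) of a
bidirectional shutdown on a blocking connection:
.PP
.Vb 7
\& int ret = SSL_shutdown(ssl);     /* send our close notify alert */
\& if (ret == 0)
\&     ret = SSL_shutdown(ssl);     /* wait for the peer's close notify */
\& if (ret < 0) {
\&     /* Fatal error, or (for non-blocking BIOs) further action is
\&        needed: check SSL_get_error() before treating it as failure. */
\& }
.Ve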
diff --git a/secure/lib/libssl/man/SSL_state_string.3 b/secure/lib/libssl/man/SSL_state_string.3
index 5c44d91..f9ecb1e 100644
--- a/secure/lib/libssl/man/SSL_state_string.3
+++ b/secure/lib/libssl/man/SSL_state_string.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_state_string 3"
-.TH SSL_state_string 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_state_string 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_want.3 b/secure/lib/libssl/man/SSL_want.3
index 87b19d8..b45588f 100644
--- a/secure/lib/libssl/man/SSL_want.3
+++ b/secure/lib/libssl/man/SSL_want.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_want 3"
-.TH SSL_want 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_want 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/SSL_write.3 b/secure/lib/libssl/man/SSL_write.3
index c66c99d..8b258da 100644
--- a/secure/lib/libssl/man/SSL_write.3
+++ b/secure/lib/libssl/man/SSL_write.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "SSL_write 3"
-.TH SSL_write 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH SSL_write 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/d2i_SSL_SESSION.3 b/secure/lib/libssl/man/d2i_SSL_SESSION.3
index fc6269a..02f691e 100644
--- a/secure/lib/libssl/man/d2i_SSL_SESSION.3
+++ b/secure/lib/libssl/man/d2i_SSL_SESSION.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "d2i_SSL_SESSION 3"
-.TH d2i_SSL_SESSION 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH d2i_SSL_SESSION 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/secure/lib/libssl/man/ssl.3 b/secure/lib/libssl/man/ssl.3
index b4c2649..c155431 100644
--- a/secure/lib/libssl/man/ssl.3
+++ b/secure/lib/libssl/man/ssl.3
@@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "ssl 3"
-.TH ssl 3 "2015-07-09" "1.0.1p" "OpenSSL"
+.TH ssl 3 "2015-07-09" "1.0.2d" "OpenSSL"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -467,6 +467,10 @@ session instead of a context.
.IX Item "int SSL_CTX_use_certificate_ASN1(SSL_CTX *ctx, int len, unsigned char *d);"
.IP "int \fBSSL_CTX_use_certificate_file\fR(\s-1SSL_CTX\s0 *ctx, char *file, int type);" 4
.IX Item "int SSL_CTX_use_certificate_file(SSL_CTX *ctx, char *file, int type);"
+.IP "X509 *\fBSSL_CTX_get0_certificate\fR(const \s-1SSL_CTX\s0 *ctx);" 4
+.IX Item "X509 *SSL_CTX_get0_certificate(const SSL_CTX *ctx);"
+.IP "\s-1EVP_PKEY\s0 *\fBSSL_CTX_get0_privatekey\fR(const \s-1SSL_CTX\s0 *ctx);" 4
+.IX Item "EVP_PKEY *SSL_CTX_get0_privatekey(const SSL_CTX *ctx);"
.IP "void \fBSSL_CTX_set_psk_client_callback\fR(\s-1SSL_CTX\s0 *ctx, unsigned int (*callback)(\s-1SSL\s0 *ssl, const char *hint, char *identity, unsigned int max_identity_len, unsigned char *psk, unsigned int max_psk_len));" 4
.IX Item "void SSL_CTX_set_psk_client_callback(SSL_CTX *ctx, unsigned int (*callback)(SSL *ssl, const char *hint, char *identity, unsigned int max_identity_len, unsigned char *psk, unsigned int max_psk_len));"
.IP "int \fBSSL_CTX_use_psk_identity_hint\fR(\s-1SSL_CTX\s0 *ctx, const char *hint);" 4
@@ -591,8 +595,8 @@ connection defined in the \fB\s-1SSL\s0\fR structure.
.IX Item "STACK *SSL_get_peer_cert_chain(const SSL *ssl);"
.IP "X509 *\fBSSL_get_peer_certificate\fR(const \s-1SSL\s0 *ssl);" 4
.IX Item "X509 *SSL_get_peer_certificate(const SSL *ssl);"
-.IP "\s-1EVP_PKEY\s0 *\fBSSL_get_privatekey\fR(\s-1SSL\s0 *ssl);" 4
-.IX Item "EVP_PKEY *SSL_get_privatekey(SSL *ssl);"
+.IP "\s-1EVP_PKEY\s0 *\fBSSL_get_privatekey\fR(const \s-1SSL\s0 *ssl);" 4
+.IX Item "EVP_PKEY *SSL_get_privatekey(const SSL *ssl);"
.IP "int \fBSSL_get_quiet_shutdown\fR(const \s-1SSL\s0 *ssl);" 4
.IX Item "int SSL_get_quiet_shutdown(const SSL *ssl);"
.IP "\s-1BIO\s0 *\fBSSL_get_rbio\fR(const \s-1SSL\s0 *ssl);" 4