Diffstat (limited to 'crypto/openssl/crypto/md5')
-rw-r--r--  crypto/openssl/crypto/md5/Makefile             30
-rw-r--r--  crypto/openssl/crypto/md5/asm/md5-586.pl        2
-rw-r--r--  crypto/openssl/crypto/md5/asm/md5-sparcv9.S  1031
-rwxr-xr-x  crypto/openssl/crypto/md5/asm/md5-x86_64.pl     8
-rw-r--r--  crypto/openssl/crypto/md5/md5.h                 3
-rw-r--r--  crypto/openssl/crypto/md5/md5_dgst.c          113
-rw-r--r--  crypto/openssl/crypto/md5/md5_locl.h           54
-rw-r--r--  crypto/openssl/crypto/md5/md5test.c             6
8 files changed, 28 insertions, 1219 deletions
diff --git a/crypto/openssl/crypto/md5/Makefile b/crypto/openssl/crypto/md5/Makefile
index 849a0a5..3c450fc 100644
--- a/crypto/openssl/crypto/md5/Makefile
+++ b/crypto/openssl/crypto/md5/Makefile
@@ -38,7 +38,7 @@ top:
all: lib
lib: $(LIBOBJ)
- $(AR) $(LIB) $(LIBOBJ)
+ $(ARX) $(LIB) $(LIBOBJ)
$(RANLIB) $(LIB) || echo Never mind.
@touch lib
@@ -52,24 +52,6 @@ mx86-cof.s: asm/md5-586.pl ../perlasm/x86asm.pl
mx86-out.s: asm/md5-586.pl ../perlasm/x86asm.pl
(cd asm; $(PERL) md5-586.pl a.out $(CFLAGS) > ../$@)
-md5-sparcv8plus.o: asm/md5-sparcv9.S
- $(CC) $(ASFLAGS) -DMD5_BLOCK_DATA_ORDER -c \
- -o md5-sparcv8plus.o asm/md5-sparcv9.S
-
-# Old GNU assembler doesn't understand V9 instructions, so we
-# hire /usr/ccs/bin/as to do the job. Note that option is called
-# *-gcc27, but even gcc 2>=8 users may experience similar problem
-# if they didn't bother to upgrade GNU assembler. Such users should
-# not choose this option, but be adviced to *remove* GNU assembler
-# or upgrade it.
-md5-sparcv8plus-gcc27.o: asm/md5-sparcv9.S
- $(CC) $(ASFLAGS) -DMD5_BLOCK_DATA_ORDER -E asm/md5-sparcv9.S | \
- /usr/ccs/bin/as -xarch=v8plus - -o md5-sparcv8plus-gcc27.o
-
-md5-sparcv9.o: asm/md5-sparcv9.S
- $(CC) $(ASFLAGS) -DMD5_BLOCK_DATA_ORDER -c \
- -o md5-sparcv9.o asm/md5-sparcv9.S
-
md5-x86_64.s: asm/md5-x86_64.pl; $(PERL) asm/md5-x86_64.pl $@
files:
@@ -109,9 +91,13 @@ clean:
# DO NOT DELETE THIS LINE -- make depend depends on it.
-md5_dgst.o: ../../include/openssl/e_os2.h ../../include/openssl/md5.h
-md5_dgst.o: ../../include/openssl/opensslconf.h
-md5_dgst.o: ../../include/openssl/opensslv.h ../md32_common.h md5_dgst.c
+md5_dgst.o: ../../include/openssl/bio.h ../../include/openssl/crypto.h
+md5_dgst.o: ../../include/openssl/e_os2.h ../../include/openssl/err.h
+md5_dgst.o: ../../include/openssl/fips.h ../../include/openssl/lhash.h
+md5_dgst.o: ../../include/openssl/md5.h ../../include/openssl/opensslconf.h
+md5_dgst.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h
+md5_dgst.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h
+md5_dgst.o: ../../include/openssl/symhacks.h ../md32_common.h md5_dgst.c
md5_dgst.o: md5_locl.h
md5_one.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h
md5_one.o: ../../include/openssl/md5.h ../../include/openssl/opensslconf.h
diff --git a/crypto/openssl/crypto/md5/asm/md5-586.pl b/crypto/openssl/crypto/md5/asm/md5-586.pl
index fa3fa3b..76ac235 100644
--- a/crypto/openssl/crypto/md5/asm/md5-586.pl
+++ b/crypto/openssl/crypto/md5/asm/md5-586.pl
@@ -29,7 +29,7 @@ $X="esi";
0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9, # R3
);
-&md5_block("md5_block_asm_host_order");
+&md5_block("md5_block_asm_data_order");
&asm_finish();
sub Np
diff --git a/crypto/openssl/crypto/md5/asm/md5-sparcv9.S b/crypto/openssl/crypto/md5/asm/md5-sparcv9.S
deleted file mode 100644
index db45aa4..0000000
--- a/crypto/openssl/crypto/md5/asm/md5-sparcv9.S
+++ /dev/null
@@ -1,1031 +0,0 @@
-.ident "md5-sparcv9.S, Version 1.0"
-.ident "SPARC V9 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
-.file "md5-sparcv9.S"
-
-/*
- * ====================================================================
- * Copyright (c) 1999 Andy Polyakov <appro@fy.chalmers.se>.
- *
- * Rights for redistribution and usage in source and binary forms are
- * granted as long as above copyright notices are retained. Warranty
- * of any kind is (of course:-) disclaimed.
- * ====================================================================
- */
-
-/*
- * This is my modest contribution to OpenSSL project (see
- * http://www.openssl.org/ for more information about it) and is an
- * assembler implementation of MD5 block hash function. I've hand-coded
- * this for the sole reason to reach UltraSPARC-specific "load in
- * little-endian byte order" instruction. This gives up to 15%
- * performance improvement for cases when input message is aligned at
- * 32 bits boundary. The module was tested under both 32 *and* 64 bit
- * kernels. For updates see http://fy.chalmers.se/~appro/hpe/.
- *
- * To compile with SC4.x/SC5.x:
- *
- * cc -xarch=v[9|8plus] -DOPENSSL_SYSNAME_ULTRASPARC -DMD5_BLOCK_DATA_ORDER \
- * -c md5-sparcv9.S
- *
- * and with gcc:
- *
- * gcc -mcpu=ultrasparc -DOPENSSL_SYSNAME_ULTRASPARC -DMD5_BLOCK_DATA_ORDER \
- * -c md5-sparcv9.S
- *
- * or if above fails (it does if you have gas):
- *
- * gcc -E -DOPENSSL_SYSNAMEULTRASPARC -DMD5_BLOCK_DATA_ORDER md5_block.sparc.S | \
- * as -xarch=v8plus /dev/fd/0 -o md5-sparcv9.o
- */
-
-#include <openssl/e_os2.h>
-
-#define A %o0
-#define B %o1
-#define C %o2
-#define D %o3
-#define T1 %o4
-#define T2 %o5
-
-#define R0 %l0
-#define R1 %l1
-#define R2 %l2
-#define R3 %l3
-#define R4 %l4
-#define R5 %l5
-#define R6 %l6
-#define R7 %l7
-#define R8 %i3
-#define R9 %i4
-#define R10 %i5
-#define R11 %g1
-#define R12 %g2
-#define R13 %g3
-#define RX %g4
-
-#define Aptr %i0+0
-#define Bptr %i0+4
-#define Cptr %i0+8
-#define Dptr %i0+12
-
-#define Aval R5 /* those not used at the end of the last round */
-#define Bval R6
-#define Cval R7
-#define Dval R8
-
-#if defined(MD5_BLOCK_DATA_ORDER)
-# if defined(OPENSSL_SYSNAME_ULTRASPARC)
-# define LOAD lda
-# define X(i) [%i1+i*4]%asi
-# define md5_block md5_block_asm_data_order_aligned
-# define ASI_PRIMARY_LITTLE 0x88
-# else
-# error "MD5_BLOCK_DATA_ORDER is supported only on UltraSPARC!"
-# endif
-#else
-# define LOAD ld
-# define X(i) [%i1+i*4]
-# define md5_block md5_block_asm_host_order
-#endif
-
-.section ".text",#alloc,#execinstr
-
-#if defined(__SUNPRO_C) && defined(__sparcv9)
- /* They've said -xarch=v9 at command line */
- .register %g2,#scratch
- .register %g3,#scratch
-# define FRAME -192
-#elif defined(__GNUC__) && defined(__arch64__)
- /* They've said -m64 at command line */
- .register %g2,#scratch
- .register %g3,#scratch
-# define FRAME -192
-#else
-# define FRAME -96
-#endif
-
-.align 32
-
-.global md5_block
-md5_block:
- save %sp,FRAME,%sp
-
- ld [Dptr],D
- ld [Cptr],C
- ld [Bptr],B
- ld [Aptr],A
-#ifdef ASI_PRIMARY_LITTLE
- rd %asi,%o7 ! How dare I? Well, I just do:-)
- wr %g0,ASI_PRIMARY_LITTLE,%asi
-#endif
- LOAD X(0),R0
-
-.Lmd5_block_loop:
-
-!!!!!!!!Round 0
-
- xor C,D,T1
- sethi %hi(0xd76aa478),T2
- and T1,B,T1
- or T2,%lo(0xd76aa478),T2 !=
- xor T1,D,T1
- add T1,R0,T1
- LOAD X(1),R1
- add T1,T2,T1 !=
- add A,T1,A
- sll A,7,T2
- srl A,32-7,A
- or A,T2,A !=
- xor B,C,T1
- add A,B,A
-
- sethi %hi(0xe8c7b756),T2
- and T1,A,T1 !=
- or T2,%lo(0xe8c7b756),T2
- xor T1,C,T1
- LOAD X(2),R2
- add T1,R1,T1 !=
- add T1,T2,T1
- add D,T1,D
- sll D,12,T2
- srl D,32-12,D !=
- or D,T2,D
- xor A,B,T1
- add D,A,D
-
- sethi %hi(0x242070db),T2 !=
- and T1,D,T1
- or T2,%lo(0x242070db),T2
- xor T1,B,T1
- add T1,R2,T1 !=
- LOAD X(3),R3
- add T1,T2,T1
- add C,T1,C
- sll C,17,T2 !=
- srl C,32-17,C
- or C,T2,C
- xor D,A,T1
- add C,D,C !=
-
- sethi %hi(0xc1bdceee),T2
- and T1,C,T1
- or T2,%lo(0xc1bdceee),T2
- xor T1,A,T1 !=
- add T1,R3,T1
- LOAD X(4),R4
- add T1,T2,T1
- add B,T1,B !=
- sll B,22,T2
- srl B,32-22,B
- or B,T2,B
- xor C,D,T1 !=
- add B,C,B
-
- sethi %hi(0xf57c0faf),T2
- and T1,B,T1
- or T2,%lo(0xf57c0faf),T2 !=
- xor T1,D,T1
- add T1,R4,T1
- LOAD X(5),R5
- add T1,T2,T1 !=
- add A,T1,A
- sll A,7,T2
- srl A,32-7,A
- or A,T2,A !=
- xor B,C,T1
- add A,B,A
-
- sethi %hi(0x4787c62a),T2
- and T1,A,T1 !=
- or T2,%lo(0x4787c62a),T2
- xor T1,C,T1
- LOAD X(6),R6
- add T1,R5,T1 !=
- add T1,T2,T1
- add D,T1,D
- sll D,12,T2
- srl D,32-12,D !=
- or D,T2,D
- xor A,B,T1
- add D,A,D
-
- sethi %hi(0xa8304613),T2 !=
- and T1,D,T1
- or T2,%lo(0xa8304613),T2
- xor T1,B,T1
- add T1,R6,T1 !=
- LOAD X(7),R7
- add T1,T2,T1
- add C,T1,C
- sll C,17,T2 !=
- srl C,32-17,C
- or C,T2,C
- xor D,A,T1
- add C,D,C !=
-
- sethi %hi(0xfd469501),T2
- and T1,C,T1
- or T2,%lo(0xfd469501),T2
- xor T1,A,T1 !=
- add T1,R7,T1
- LOAD X(8),R8
- add T1,T2,T1
- add B,T1,B !=
- sll B,22,T2
- srl B,32-22,B
- or B,T2,B
- xor C,D,T1 !=
- add B,C,B
-
- sethi %hi(0x698098d8),T2
- and T1,B,T1
- or T2,%lo(0x698098d8),T2 !=
- xor T1,D,T1
- add T1,R8,T1
- LOAD X(9),R9
- add T1,T2,T1 !=
- add A,T1,A
- sll A,7,T2
- srl A,32-7,A
- or A,T2,A !=
- xor B,C,T1
- add A,B,A
-
- sethi %hi(0x8b44f7af),T2
- and T1,A,T1 !=
- or T2,%lo(0x8b44f7af),T2
- xor T1,C,T1
- LOAD X(10),R10
- add T1,R9,T1 !=
- add T1,T2,T1
- add D,T1,D
- sll D,12,T2
- srl D,32-12,D !=
- or D,T2,D
- xor A,B,T1
- add D,A,D
-
- sethi %hi(0xffff5bb1),T2 !=
- and T1,D,T1
- or T2,%lo(0xffff5bb1),T2
- xor T1,B,T1
- add T1,R10,T1 !=
- LOAD X(11),R11
- add T1,T2,T1
- add C,T1,C
- sll C,17,T2 !=
- srl C,32-17,C
- or C,T2,C
- xor D,A,T1
- add C,D,C !=
-
- sethi %hi(0x895cd7be),T2
- and T1,C,T1
- or T2,%lo(0x895cd7be),T2
- xor T1,A,T1 !=
- add T1,R11,T1
- LOAD X(12),R12
- add T1,T2,T1
- add B,T1,B !=
- sll B,22,T2
- srl B,32-22,B
- or B,T2,B
- xor C,D,T1 !=
- add B,C,B
-
- sethi %hi(0x6b901122),T2
- and T1,B,T1
- or T2,%lo(0x6b901122),T2 !=
- xor T1,D,T1
- add T1,R12,T1
- LOAD X(13),R13
- add T1,T2,T1 !=
- add A,T1,A
- sll A,7,T2
- srl A,32-7,A
- or A,T2,A !=
- xor B,C,T1
- add A,B,A
-
- sethi %hi(0xfd987193),T2
- and T1,A,T1 !=
- or T2,%lo(0xfd987193),T2
- xor T1,C,T1
- LOAD X(14),RX
- add T1,R13,T1 !=
- add T1,T2,T1
- add D,T1,D
- sll D,12,T2
- srl D,32-12,D !=
- or D,T2,D
- xor A,B,T1
- add D,A,D
-
- sethi %hi(0xa679438e),T2 !=
- and T1,D,T1
- or T2,%lo(0xa679438e),T2
- xor T1,B,T1
- add T1,RX,T1 !=
- LOAD X(15),RX
- add T1,T2,T1
- add C,T1,C
- sll C,17,T2 !=
- srl C,32-17,C
- or C,T2,C
- xor D,A,T1
- add C,D,C !=
-
- sethi %hi(0x49b40821),T2
- and T1,C,T1
- or T2,%lo(0x49b40821),T2
- xor T1,A,T1 !=
- add T1,RX,T1
- !pre-LOADed X(1),R1
- add T1,T2,T1
- add B,T1,B
- sll B,22,T2 !=
- srl B,32-22,B
- or B,T2,B
- add B,C,B
-
-!!!!!!!!Round 1
-
- xor B,C,T1 !=
- sethi %hi(0xf61e2562),T2
- and T1,D,T1
- or T2,%lo(0xf61e2562),T2
- xor T1,C,T1 !=
- add T1,R1,T1
- !pre-LOADed X(6),R6
- add T1,T2,T1
- add A,T1,A
- sll A,5,T2 !=
- srl A,32-5,A
- or A,T2,A
- add A,B,A
-
- xor A,B,T1 !=
- sethi %hi(0xc040b340),T2
- and T1,C,T1
- or T2,%lo(0xc040b340),T2
- xor T1,B,T1 !=
- add T1,R6,T1
- !pre-LOADed X(11),R11
- add T1,T2,T1
- add D,T1,D
- sll D,9,T2 !=
- srl D,32-9,D
- or D,T2,D
- add D,A,D
-
- xor D,A,T1 !=
- sethi %hi(0x265e5a51),T2
- and T1,B,T1
- or T2,%lo(0x265e5a51),T2
- xor T1,A,T1 !=
- add T1,R11,T1
- !pre-LOADed X(0),R0
- add T1,T2,T1
- add C,T1,C
- sll C,14,T2 !=
- srl C,32-14,C
- or C,T2,C
- add C,D,C
-
- xor C,D,T1 !=
- sethi %hi(0xe9b6c7aa),T2
- and T1,A,T1
- or T2,%lo(0xe9b6c7aa),T2
- xor T1,D,T1 !=
- add T1,R0,T1
- !pre-LOADed X(5),R5
- add T1,T2,T1
- add B,T1,B
- sll B,20,T2 !=
- srl B,32-20,B
- or B,T2,B
- add B,C,B
-
- xor B,C,T1 !=
- sethi %hi(0xd62f105d),T2
- and T1,D,T1
- or T2,%lo(0xd62f105d),T2
- xor T1,C,T1 !=
- add T1,R5,T1
- !pre-LOADed X(10),R10
- add T1,T2,T1
- add A,T1,A
- sll A,5,T2 !=
- srl A,32-5,A
- or A,T2,A
- add A,B,A
-
- xor A,B,T1 !=
- sethi %hi(0x02441453),T2
- and T1,C,T1
- or T2,%lo(0x02441453),T2
- xor T1,B,T1 !=
- add T1,R10,T1
- LOAD X(15),RX
- add T1,T2,T1
- add D,T1,D !=
- sll D,9,T2
- srl D,32-9,D
- or D,T2,D
- add D,A,D !=
-
- xor D,A,T1
- sethi %hi(0xd8a1e681),T2
- and T1,B,T1
- or T2,%lo(0xd8a1e681),T2 !=
- xor T1,A,T1
- add T1,RX,T1
- !pre-LOADed X(4),R4
- add T1,T2,T1
- add C,T1,C !=
- sll C,14,T2
- srl C,32-14,C
- or C,T2,C
- add C,D,C !=
-
- xor C,D,T1
- sethi %hi(0xe7d3fbc8),T2
- and T1,A,T1
- or T2,%lo(0xe7d3fbc8),T2 !=
- xor T1,D,T1
- add T1,R4,T1
- !pre-LOADed X(9),R9
- add T1,T2,T1
- add B,T1,B !=
- sll B,20,T2
- srl B,32-20,B
- or B,T2,B
- add B,C,B !=
-
- xor B,C,T1
- sethi %hi(0x21e1cde6),T2
- and T1,D,T1
- or T2,%lo(0x21e1cde6),T2 !=
- xor T1,C,T1
- add T1,R9,T1
- LOAD X(14),RX
- add T1,T2,T1 !=
- add A,T1,A
- sll A,5,T2
- srl A,32-5,A
- or A,T2,A !=
- add A,B,A
-
- xor A,B,T1
- sethi %hi(0xc33707d6),T2
- and T1,C,T1 !=
- or T2,%lo(0xc33707d6),T2
- xor T1,B,T1
- add T1,RX,T1
- !pre-LOADed X(3),R3
- add T1,T2,T1 !=
- add D,T1,D
- sll D,9,T2
- srl D,32-9,D
- or D,T2,D !=
- add D,A,D
-
- xor D,A,T1
- sethi %hi(0xf4d50d87),T2
- and T1,B,T1 !=
- or T2,%lo(0xf4d50d87),T2
- xor T1,A,T1
- add T1,R3,T1
- !pre-LOADed X(8),R8
- add T1,T2,T1 !=
- add C,T1,C
- sll C,14,T2
- srl C,32-14,C
- or C,T2,C !=
- add C,D,C
-
- xor C,D,T1
- sethi %hi(0x455a14ed),T2
- and T1,A,T1 !=
- or T2,%lo(0x455a14ed),T2
- xor T1,D,T1
- add T1,R8,T1
- !pre-LOADed X(13),R13
- add T1,T2,T1 !=
- add B,T1,B
- sll B,20,T2
- srl B,32-20,B
- or B,T2,B !=
- add B,C,B
-
- xor B,C,T1
- sethi %hi(0xa9e3e905),T2
- and T1,D,T1 !=
- or T2,%lo(0xa9e3e905),T2
- xor T1,C,T1
- add T1,R13,T1
- !pre-LOADed X(2),R2
- add T1,T2,T1 !=
- add A,T1,A
- sll A,5,T2
- srl A,32-5,A
- or A,T2,A !=
- add A,B,A
-
- xor A,B,T1
- sethi %hi(0xfcefa3f8),T2
- and T1,C,T1 !=
- or T2,%lo(0xfcefa3f8),T2
- xor T1,B,T1
- add T1,R2,T1
- !pre-LOADed X(7),R7
- add T1,T2,T1 !=
- add D,T1,D
- sll D,9,T2
- srl D,32-9,D
- or D,T2,D !=
- add D,A,D
-
- xor D,A,T1
- sethi %hi(0x676f02d9),T2
- and T1,B,T1 !=
- or T2,%lo(0x676f02d9),T2
- xor T1,A,T1
- add T1,R7,T1
- !pre-LOADed X(12),R12
- add T1,T2,T1 !=
- add C,T1,C
- sll C,14,T2
- srl C,32-14,C
- or C,T2,C !=
- add C,D,C
-
- xor C,D,T1
- sethi %hi(0x8d2a4c8a),T2
- and T1,A,T1 !=
- or T2,%lo(0x8d2a4c8a),T2
- xor T1,D,T1
- add T1,R12,T1
- !pre-LOADed X(5),R5
- add T1,T2,T1 !=
- add B,T1,B
- sll B,20,T2
- srl B,32-20,B
- or B,T2,B !=
- add B,C,B
-
-!!!!!!!!Round 2
-
- xor B,C,T1
- sethi %hi(0xfffa3942),T2
- xor T1,D,T1 !=
- or T2,%lo(0xfffa3942),T2
- add T1,R5,T1
- !pre-LOADed X(8),R8
- add T1,T2,T1
- add A,T1,A !=
- sll A,4,T2
- srl A,32-4,A
- or A,T2,A
- add A,B,A !=
-
- xor A,B,T1
- sethi %hi(0x8771f681),T2
- xor T1,C,T1
- or T2,%lo(0x8771f681),T2 !=
- add T1,R8,T1
- !pre-LOADed X(11),R11
- add T1,T2,T1
- add D,T1,D
- sll D,11,T2 !=
- srl D,32-11,D
- or D,T2,D
- add D,A,D
-
- xor D,A,T1 !=
- sethi %hi(0x6d9d6122),T2
- xor T1,B,T1
- or T2,%lo(0x6d9d6122),T2
- add T1,R11,T1 !=
- LOAD X(14),RX
- add T1,T2,T1
- add C,T1,C
- sll C,16,T2 !=
- srl C,32-16,C
- or C,T2,C
- add C,D,C
-
- xor C,D,T1 !=
- sethi %hi(0xfde5380c),T2
- xor T1,A,T1
- or T2,%lo(0xfde5380c),T2
- add T1,RX,T1 !=
- !pre-LOADed X(1),R1
- add T1,T2,T1
- add B,T1,B
- sll B,23,T2
- srl B,32-23,B !=
- or B,T2,B
- add B,C,B
-
- xor B,C,T1
- sethi %hi(0xa4beea44),T2 !=
- xor T1,D,T1
- or T2,%lo(0xa4beea44),T2
- add T1,R1,T1
- !pre-LOADed X(4),R4
- add T1,T2,T1 !=
- add A,T1,A
- sll A,4,T2
- srl A,32-4,A
- or A,T2,A !=
- add A,B,A
-
- xor A,B,T1
- sethi %hi(0x4bdecfa9),T2
- xor T1,C,T1 !=
- or T2,%lo(0x4bdecfa9),T2
- add T1,R4,T1
- !pre-LOADed X(7),R7
- add T1,T2,T1
- add D,T1,D !=
- sll D,11,T2
- srl D,32-11,D
- or D,T2,D
- add D,A,D !=
-
- xor D,A,T1
- sethi %hi(0xf6bb4b60),T2
- xor T1,B,T1
- or T2,%lo(0xf6bb4b60),T2 !=
- add T1,R7,T1
- !pre-LOADed X(10),R10
- add T1,T2,T1
- add C,T1,C
- sll C,16,T2 !=
- srl C,32-16,C
- or C,T2,C
- add C,D,C
-
- xor C,D,T1 !=
- sethi %hi(0xbebfbc70),T2
- xor T1,A,T1
- or T2,%lo(0xbebfbc70),T2
- add T1,R10,T1 !=
- !pre-LOADed X(13),R13
- add T1,T2,T1
- add B,T1,B
- sll B,23,T2
- srl B,32-23,B !=
- or B,T2,B
- add B,C,B
-
- xor B,C,T1
- sethi %hi(0x289b7ec6),T2 !=
- xor T1,D,T1
- or T2,%lo(0x289b7ec6),T2
- add T1,R13,T1
- !pre-LOADed X(0),R0
- add T1,T2,T1 !=
- add A,T1,A
- sll A,4,T2
- srl A,32-4,A
- or A,T2,A !=
- add A,B,A
-
- xor A,B,T1
- sethi %hi(0xeaa127fa),T2
- xor T1,C,T1 !=
- or T2,%lo(0xeaa127fa),T2
- add T1,R0,T1
- !pre-LOADed X(3),R3
- add T1,T2,T1
- add D,T1,D !=
- sll D,11,T2
- srl D,32-11,D
- or D,T2,D
- add D,A,D !=
-
- xor D,A,T1
- sethi %hi(0xd4ef3085),T2
- xor T1,B,T1
- or T2,%lo(0xd4ef3085),T2 !=
- add T1,R3,T1
- !pre-LOADed X(6),R6
- add T1,T2,T1
- add C,T1,C
- sll C,16,T2 !=
- srl C,32-16,C
- or C,T2,C
- add C,D,C
-
- xor C,D,T1 !=
- sethi %hi(0x04881d05),T2
- xor T1,A,T1
- or T2,%lo(0x04881d05),T2
- add T1,R6,T1 !=
- !pre-LOADed X(9),R9
- add T1,T2,T1
- add B,T1,B
- sll B,23,T2
- srl B,32-23,B !=
- or B,T2,B
- add B,C,B
-
- xor B,C,T1
- sethi %hi(0xd9d4d039),T2 !=
- xor T1,D,T1
- or T2,%lo(0xd9d4d039),T2
- add T1,R9,T1
- !pre-LOADed X(12),R12
- add T1,T2,T1 !=
- add A,T1,A
- sll A,4,T2
- srl A,32-4,A
- or A,T2,A !=
- add A,B,A
-
- xor A,B,T1
- sethi %hi(0xe6db99e5),T2
- xor T1,C,T1 !=
- or T2,%lo(0xe6db99e5),T2
- add T1,R12,T1
- LOAD X(15),RX
- add T1,T2,T1 !=
- add D,T1,D
- sll D,11,T2
- srl D,32-11,D
- or D,T2,D !=
- add D,A,D
-
- xor D,A,T1
- sethi %hi(0x1fa27cf8),T2
- xor T1,B,T1 !=
- or T2,%lo(0x1fa27cf8),T2
- add T1,RX,T1
- !pre-LOADed X(2),R2
- add T1,T2,T1
- add C,T1,C !=
- sll C,16,T2
- srl C,32-16,C
- or C,T2,C
- add C,D,C !=
-
- xor C,D,T1
- sethi %hi(0xc4ac5665),T2
- xor T1,A,T1
- or T2,%lo(0xc4ac5665),T2 !=
- add T1,R2,T1
- !pre-LOADed X(0),R0
- add T1,T2,T1
- add B,T1,B
- sll B,23,T2 !=
- srl B,32-23,B
- or B,T2,B
- add B,C,B
-
-!!!!!!!!Round 3
-
- orn B,D,T1 !=
- sethi %hi(0xf4292244),T2
- xor T1,C,T1
- or T2,%lo(0xf4292244),T2
- add T1,R0,T1 !=
- !pre-LOADed X(7),R7
- add T1,T2,T1
- add A,T1,A
- sll A,6,T2
- srl A,32-6,A !=
- or A,T2,A
- add A,B,A
-
- orn A,C,T1
- sethi %hi(0x432aff97),T2 !=
- xor T1,B,T1
- or T2,%lo(0x432aff97),T2
- LOAD X(14),RX
- add T1,R7,T1 !=
- add T1,T2,T1
- add D,T1,D
- sll D,10,T2
- srl D,32-10,D !=
- or D,T2,D
- add D,A,D
-
- orn D,B,T1
- sethi %hi(0xab9423a7),T2 !=
- xor T1,A,T1
- or T2,%lo(0xab9423a7),T2
- add T1,RX,T1
- !pre-LOADed X(5),R5
- add T1,T2,T1 !=
- add C,T1,C
- sll C,15,T2
- srl C,32-15,C
- or C,T2,C !=
- add C,D,C
-
- orn C,A,T1
- sethi %hi(0xfc93a039),T2
- xor T1,D,T1 !=
- or T2,%lo(0xfc93a039),T2
- add T1,R5,T1
- !pre-LOADed X(12),R12
- add T1,T2,T1
- add B,T1,B !=
- sll B,21,T2
- srl B,32-21,B
- or B,T2,B
- add B,C,B !=
-
- orn B,D,T1
- sethi %hi(0x655b59c3),T2
- xor T1,C,T1
- or T2,%lo(0x655b59c3),T2 !=
- add T1,R12,T1
- !pre-LOADed X(3),R3
- add T1,T2,T1
- add A,T1,A
- sll A,6,T2 !=
- srl A,32-6,A
- or A,T2,A
- add A,B,A
-
- orn A,C,T1 !=
- sethi %hi(0x8f0ccc92),T2
- xor T1,B,T1
- or T2,%lo(0x8f0ccc92),T2
- add T1,R3,T1 !=
- !pre-LOADed X(10),R10
- add T1,T2,T1
- add D,T1,D
- sll D,10,T2
- srl D,32-10,D !=
- or D,T2,D
- add D,A,D
-
- orn D,B,T1
- sethi %hi(0xffeff47d),T2 !=
- xor T1,A,T1
- or T2,%lo(0xffeff47d),T2
- add T1,R10,T1
- !pre-LOADed X(1),R1
- add T1,T2,T1 !=
- add C,T1,C
- sll C,15,T2
- srl C,32-15,C
- or C,T2,C !=
- add C,D,C
-
- orn C,A,T1
- sethi %hi(0x85845dd1),T2
- xor T1,D,T1 !=
- or T2,%lo(0x85845dd1),T2
- add T1,R1,T1
- !pre-LOADed X(8),R8
- add T1,T2,T1
- add B,T1,B !=
- sll B,21,T2
- srl B,32-21,B
- or B,T2,B
- add B,C,B !=
-
- orn B,D,T1
- sethi %hi(0x6fa87e4f),T2
- xor T1,C,T1
- or T2,%lo(0x6fa87e4f),T2 !=
- add T1,R8,T1
- LOAD X(15),RX
- add T1,T2,T1
- add A,T1,A !=
- sll A,6,T2
- srl A,32-6,A
- or A,T2,A
- add A,B,A !=
-
- orn A,C,T1
- sethi %hi(0xfe2ce6e0),T2
- xor T1,B,T1
- or T2,%lo(0xfe2ce6e0),T2 !=
- add T1,RX,T1
- !pre-LOADed X(6),R6
- add T1,T2,T1
- add D,T1,D
- sll D,10,T2 !=
- srl D,32-10,D
- or D,T2,D
- add D,A,D
-
- orn D,B,T1 !=
- sethi %hi(0xa3014314),T2
- xor T1,A,T1
- or T2,%lo(0xa3014314),T2
- add T1,R6,T1 !=
- !pre-LOADed X(13),R13
- add T1,T2,T1
- add C,T1,C
- sll C,15,T2
- srl C,32-15,C !=
- or C,T2,C
- add C,D,C
-
- orn C,A,T1
- sethi %hi(0x4e0811a1),T2 !=
- xor T1,D,T1
- or T2,%lo(0x4e0811a1),T2
- !pre-LOADed X(4),R4
- ld [Aptr],Aval
- add T1,R13,T1 !=
- add T1,T2,T1
- add B,T1,B
- sll B,21,T2
- srl B,32-21,B !=
- or B,T2,B
- add B,C,B
-
- orn B,D,T1
- sethi %hi(0xf7537e82),T2 !=
- xor T1,C,T1
- or T2,%lo(0xf7537e82),T2
- !pre-LOADed X(11),R11
- ld [Dptr],Dval
- add T1,R4,T1 !=
- add T1,T2,T1
- add A,T1,A
- sll A,6,T2
- srl A,32-6,A !=
- or A,T2,A
- add A,B,A
-
- orn A,C,T1
- sethi %hi(0xbd3af235),T2 !=
- xor T1,B,T1
- or T2,%lo(0xbd3af235),T2
- !pre-LOADed X(2),R2
- ld [Cptr],Cval
- add T1,R11,T1 !=
- add T1,T2,T1
- add D,T1,D
- sll D,10,T2
- srl D,32-10,D !=
- or D,T2,D
- add D,A,D
-
- orn D,B,T1
- sethi %hi(0x2ad7d2bb),T2 !=
- xor T1,A,T1
- or T2,%lo(0x2ad7d2bb),T2
- !pre-LOADed X(9),R9
- ld [Bptr],Bval
- add T1,R2,T1 !=
- add Aval,A,Aval
- add T1,T2,T1
- st Aval,[Aptr]
- add C,T1,C !=
- sll C,15,T2
- add Dval,D,Dval
- srl C,32-15,C
- or C,T2,C !=
- st Dval,[Dptr]
- add C,D,C
-
- orn C,A,T1
- sethi %hi(0xeb86d391),T2 !=
- xor T1,D,T1
- or T2,%lo(0xeb86d391),T2
- add T1,R9,T1
- !pre-LOADed X(0),R0
- mov Aval,A !=
- add T1,T2,T1
- mov Dval,D
- add B,T1,B
- sll B,21,T2 !=
- add Cval,C,Cval
- srl B,32-21,B
- st Cval,[Cptr]
- or B,T2,B !=
- add B,C,B
-
- deccc %i2
- mov Cval,C
- add B,Bval,B !=
- inc 64,%i1
- nop
- st B,[Bptr]
- nop !=
-
-#ifdef OPENSSL_SYSNAME_ULTRASPARC
- bg,a,pt %icc,.Lmd5_block_loop
-#else
- bg,a .Lmd5_block_loop
-#endif
- LOAD X(0),R0
-
-#ifdef ASI_PRIMARY_LITTLE
- wr %g0,%o7,%asi
-#endif
- ret
- restore %g0,0,%o0
-
-.type md5_block,#function
-.size md5_block,(.-md5_block)
diff --git a/crypto/openssl/crypto/md5/asm/md5-x86_64.pl b/crypto/openssl/crypto/md5/asm/md5-x86_64.pl
index c36a7fe..9a6fa67 100755
--- a/crypto/openssl/crypto/md5/asm/md5-x86_64.pl
+++ b/crypto/openssl/crypto/md5/asm/md5-x86_64.pl
@@ -111,9 +111,9 @@ $code .= <<EOF;
.text
.align 16
-.globl md5_block_asm_host_order
-.type md5_block_asm_host_order,\@function,3
-md5_block_asm_host_order:
+.globl md5_block_asm_data_order
+.type md5_block_asm_data_order,\@function,3
+md5_block_asm_data_order:
push %rbp
push %rbx
push %r14
@@ -237,7 +237,7 @@ $code .= <<EOF;
pop %rbx
pop %rbp
ret
-.size md5_block_asm_host_order,.-md5_block_asm_host_order
+.size md5_block_asm_data_order,.-md5_block_asm_data_order
EOF
print $code;
diff --git a/crypto/openssl/crypto/md5/md5.h b/crypto/openssl/crypto/md5/md5.h
index dbdc0e1..0761f84 100644
--- a/crypto/openssl/crypto/md5/md5.h
+++ b/crypto/openssl/crypto/md5/md5.h
@@ -105,6 +105,9 @@ typedef struct MD5state_st
unsigned int num;
} MD5_CTX;
+#ifdef OPENSSL_FIPS
+int private_MD5_Init(MD5_CTX *c);
+#endif
int MD5_Init(MD5_CTX *c);
int MD5_Update(MD5_CTX *c, const void *data, size_t len);
int MD5_Final(unsigned char *md, MD5_CTX *c);
diff --git a/crypto/openssl/crypto/md5/md5_dgst.c b/crypto/openssl/crypto/md5/md5_dgst.c
index 953f049..47bb902 100644
--- a/crypto/openssl/crypto/md5/md5_dgst.c
+++ b/crypto/openssl/crypto/md5/md5_dgst.c
@@ -59,6 +59,11 @@
#include <stdio.h>
#include "md5_locl.h"
#include <openssl/opensslv.h>
+#include <openssl/err.h>
+#ifdef OPENSSL_FIPS
+#include <openssl/fips.h>
+#endif
+
const char MD5_version[]="MD5" OPENSSL_VERSION_PTEXT;
@@ -70,7 +75,7 @@ const char MD5_version[]="MD5" OPENSSL_VERSION_PTEXT;
#define INIT_DATA_C (unsigned long)0x98badcfeL
#define INIT_DATA_D (unsigned long)0x10325476L
-int MD5_Init(MD5_CTX *c)
+FIPS_NON_FIPS_MD_Init(MD5)
{
c->A=INIT_DATA_A;
c->B=INIT_DATA_B;
@@ -82,96 +87,6 @@ int MD5_Init(MD5_CTX *c)
return 1;
}
-#ifndef md5_block_host_order
-void md5_block_host_order (MD5_CTX *c, const void *data, size_t num)
- {
- const MD5_LONG *X=data;
- register unsigned MD32_REG_T A,B,C,D;
-
- A=c->A;
- B=c->B;
- C=c->C;
- D=c->D;
-
- for (;num--;X+=HASH_LBLOCK)
- {
- /* Round 0 */
- R0(A,B,C,D,X[ 0], 7,0xd76aa478L);
- R0(D,A,B,C,X[ 1],12,0xe8c7b756L);
- R0(C,D,A,B,X[ 2],17,0x242070dbL);
- R0(B,C,D,A,X[ 3],22,0xc1bdceeeL);
- R0(A,B,C,D,X[ 4], 7,0xf57c0fafL);
- R0(D,A,B,C,X[ 5],12,0x4787c62aL);
- R0(C,D,A,B,X[ 6],17,0xa8304613L);
- R0(B,C,D,A,X[ 7],22,0xfd469501L);
- R0(A,B,C,D,X[ 8], 7,0x698098d8L);
- R0(D,A,B,C,X[ 9],12,0x8b44f7afL);
- R0(C,D,A,B,X[10],17,0xffff5bb1L);
- R0(B,C,D,A,X[11],22,0x895cd7beL);
- R0(A,B,C,D,X[12], 7,0x6b901122L);
- R0(D,A,B,C,X[13],12,0xfd987193L);
- R0(C,D,A,B,X[14],17,0xa679438eL);
- R0(B,C,D,A,X[15],22,0x49b40821L);
- /* Round 1 */
- R1(A,B,C,D,X[ 1], 5,0xf61e2562L);
- R1(D,A,B,C,X[ 6], 9,0xc040b340L);
- R1(C,D,A,B,X[11],14,0x265e5a51L);
- R1(B,C,D,A,X[ 0],20,0xe9b6c7aaL);
- R1(A,B,C,D,X[ 5], 5,0xd62f105dL);
- R1(D,A,B,C,X[10], 9,0x02441453L);
- R1(C,D,A,B,X[15],14,0xd8a1e681L);
- R1(B,C,D,A,X[ 4],20,0xe7d3fbc8L);
- R1(A,B,C,D,X[ 9], 5,0x21e1cde6L);
- R1(D,A,B,C,X[14], 9,0xc33707d6L);
- R1(C,D,A,B,X[ 3],14,0xf4d50d87L);
- R1(B,C,D,A,X[ 8],20,0x455a14edL);
- R1(A,B,C,D,X[13], 5,0xa9e3e905L);
- R1(D,A,B,C,X[ 2], 9,0xfcefa3f8L);
- R1(C,D,A,B,X[ 7],14,0x676f02d9L);
- R1(B,C,D,A,X[12],20,0x8d2a4c8aL);
- /* Round 2 */
- R2(A,B,C,D,X[ 5], 4,0xfffa3942L);
- R2(D,A,B,C,X[ 8],11,0x8771f681L);
- R2(C,D,A,B,X[11],16,0x6d9d6122L);
- R2(B,C,D,A,X[14],23,0xfde5380cL);
- R2(A,B,C,D,X[ 1], 4,0xa4beea44L);
- R2(D,A,B,C,X[ 4],11,0x4bdecfa9L);
- R2(C,D,A,B,X[ 7],16,0xf6bb4b60L);
- R2(B,C,D,A,X[10],23,0xbebfbc70L);
- R2(A,B,C,D,X[13], 4,0x289b7ec6L);
- R2(D,A,B,C,X[ 0],11,0xeaa127faL);
- R2(C,D,A,B,X[ 3],16,0xd4ef3085L);
- R2(B,C,D,A,X[ 6],23,0x04881d05L);
- R2(A,B,C,D,X[ 9], 4,0xd9d4d039L);
- R2(D,A,B,C,X[12],11,0xe6db99e5L);
- R2(C,D,A,B,X[15],16,0x1fa27cf8L);
- R2(B,C,D,A,X[ 2],23,0xc4ac5665L);
- /* Round 3 */
- R3(A,B,C,D,X[ 0], 6,0xf4292244L);
- R3(D,A,B,C,X[ 7],10,0x432aff97L);
- R3(C,D,A,B,X[14],15,0xab9423a7L);
- R3(B,C,D,A,X[ 5],21,0xfc93a039L);
- R3(A,B,C,D,X[12], 6,0x655b59c3L);
- R3(D,A,B,C,X[ 3],10,0x8f0ccc92L);
- R3(C,D,A,B,X[10],15,0xffeff47dL);
- R3(B,C,D,A,X[ 1],21,0x85845dd1L);
- R3(A,B,C,D,X[ 8], 6,0x6fa87e4fL);
- R3(D,A,B,C,X[15],10,0xfe2ce6e0L);
- R3(C,D,A,B,X[ 6],15,0xa3014314L);
- R3(B,C,D,A,X[13],21,0x4e0811a1L);
- R3(A,B,C,D,X[ 4], 6,0xf7537e82L);
- R3(D,A,B,C,X[11],10,0xbd3af235L);
- R3(C,D,A,B,X[ 2],15,0x2ad7d2bbL);
- R3(B,C,D,A,X[ 9],21,0xeb86d391L);
-
- A = c->A += A;
- B = c->B += B;
- C = c->C += C;
- D = c->D += D;
- }
- }
-#endif
-
#ifndef md5_block_data_order
#ifdef X
#undef X
@@ -274,19 +189,3 @@ void md5_block_data_order (MD5_CTX *c, const void *data_, size_t num)
}
}
#endif
-
-#ifdef undef
-int printit(unsigned long *l)
- {
- int i,ii;
-
- for (i=0; i<2; i++)
- {
- for (ii=0; ii<8; ii++)
- {
- fprintf(stderr,"%08lx ",l[i*8+ii]);
- }
- fprintf(stderr,"\n");
- }
- }
-#endif
diff --git a/crypto/openssl/crypto/md5/md5_locl.h b/crypto/openssl/crypto/md5/md5_locl.h
index 94f395f..84e81b9 100644
--- a/crypto/openssl/crypto/md5/md5_locl.h
+++ b/crypto/openssl/crypto/md5/md5_locl.h
@@ -66,53 +66,19 @@
#endif
#ifdef MD5_ASM
-# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || defined(__x86_64) || defined(__x86_64__)
-# if !defined(B_ENDIAN)
-# define md5_block_host_order md5_block_asm_host_order
-# endif
-# elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
- void md5_block_asm_data_order_aligned (MD5_CTX *c, const MD5_LONG *p,size_t num);
-# define HASH_BLOCK_DATA_ORDER_ALIGNED md5_block_asm_data_order_aligned
+# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || \
+ defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
+# define md5_block_data_order md5_block_asm_data_order
# endif
#endif
-void md5_block_host_order (MD5_CTX *c, const void *p,size_t num);
void md5_block_data_order (MD5_CTX *c, const void *p,size_t num);
-#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || defined(__x86_64) || defined(__x86_64__)
-# if !defined(B_ENDIAN)
-/*
- * *_block_host_order is expected to handle aligned data while
- * *_block_data_order - unaligned. As algorithm and host (x86)
- * are in this case of the same "endianness" these two are
- * otherwise indistinguishable. But normally you don't want to
- * call the same function because unaligned access in places
- * where alignment is expected is usually a "Bad Thing". Indeed,
- * on RISCs you get punished with BUS ERROR signal or *severe*
- * performance degradation. Intel CPUs are in turn perfectly
- * capable of loading unaligned data without such drastic side
- * effect. Yes, they say it's slower than aligned load, but no
- * exception is generated and therefore performance degradation
- * is *incomparable* with RISCs. What we should weight here is
- * costs of unaligned access against costs of aligning data.
- * According to my measurements allowing unaligned access results
- * in ~9% performance improvement on Pentium II operating at
- * 266MHz. I won't be surprised if the difference will be higher
- * on faster systems:-)
- *
- * <appro@fy.chalmers.se>
- */
-# define md5_block_data_order md5_block_host_order
-# endif
-#endif
-
#define DATA_ORDER_IS_LITTLE_ENDIAN
#define HASH_LONG MD5_LONG
-#define HASH_LONG_LOG2 MD5_LONG_LOG2
#define HASH_CTX MD5_CTX
#define HASH_CBLOCK MD5_CBLOCK
-#define HASH_LBLOCK MD5_LBLOCK
#define HASH_UPDATE MD5_Update
#define HASH_TRANSFORM MD5_Transform
#define HASH_FINAL MD5_Final
@@ -123,21 +89,7 @@ void md5_block_data_order (MD5_CTX *c, const void *p,size_t num);
ll=(c)->C; HOST_l2c(ll,(s)); \
ll=(c)->D; HOST_l2c(ll,(s)); \
} while (0)
-#define HASH_BLOCK_HOST_ORDER md5_block_host_order
-#if !defined(L_ENDIAN) || defined(md5_block_data_order)
#define HASH_BLOCK_DATA_ORDER md5_block_data_order
-/*
- * Little-endians (Intel and Alpha) feel better without this.
- * It looks like memcpy does better job than generic
- * md5_block_data_order on copying-n-aligning input data.
- * But frankly speaking I didn't expect such result on Alpha.
- * On the other hand I've got this with egcs-1.0.2 and if
- * program is compiled with another (better?) compiler it
- * might turn out other way around.
- *
- * <appro@fy.chalmers.se>
- */
-#endif
#include "md32_common.h"
diff --git a/crypto/openssl/crypto/md5/md5test.c b/crypto/openssl/crypto/md5/md5test.c
index 0628053..2b37190 100644
--- a/crypto/openssl/crypto/md5/md5test.c
+++ b/crypto/openssl/crypto/md5/md5test.c
@@ -97,12 +97,12 @@ static char *pt(unsigned char *md);
int main(int argc, char *argv[])
{
int i,err=0;
- unsigned char **P,**R;
+ char **P,**R;
char *p;
unsigned char md[MD5_DIGEST_LENGTH];
- P=(unsigned char **)test;
- R=(unsigned char **)ret;
+ P=test;
+ R=ret;
i=1;
while (*P != NULL)
{