path: root/crypto/md5
author     simon <simon@FreeBSD.org>  2008-08-23 10:51:00 +0000
committer  simon <simon@FreeBSD.org>  2008-08-23 10:51:00 +0000
commit     64fcbc70db16b1c94d818662746bb0e9c4fdb705 (patch)
tree       596c39f00d5968b1519e8cd7f0546412b14c20f0 /crypto/md5
parent     8f21bfc1756ff75fb4caf97e5c9612a4d7106243 (diff)
download   FreeBSD-src-64fcbc70db16b1c94d818662746bb0e9c4fdb705.zip
           FreeBSD-src-64fcbc70db16b1c94d818662746bb0e9c4fdb705.tar.gz
Flatten OpenSSL vendor tree.
Diffstat (limited to 'crypto/md5')
-rw-r--r--   crypto/md5/Makefile            120
-rw-r--r--   crypto/md5/asm/md5-586.pl      306
-rw-r--r--   crypto/md5/asm/md5-sparcv9.S   1031
-rwxr-xr-x   crypto/md5/asm/md5-x86_64.pl   245
-rw-r--r--   crypto/md5/md5.c               127
-rw-r--r--   crypto/md5/md5.h               117
-rw-r--r--   crypto/md5/md5_dgst.c          292
-rw-r--r--   crypto/md5/md5_locl.h          176
-rw-r--r--   crypto/md5/md5_one.c           97
-rw-r--r--   crypto/md5/md5s.cpp            78
-rw-r--r--   crypto/md5/md5test.c           140
11 files changed, 2729 insertions, 0 deletions
diff --git a/crypto/md5/Makefile b/crypto/md5/Makefile
new file mode 100644
index 0000000..849a0a5
--- /dev/null
+++ b/crypto/md5/Makefile
@@ -0,0 +1,120 @@
+#
+# OpenSSL/crypto/md5/Makefile
+#
+
+DIR= md5
+TOP= ../..
+CC= cc
+CPP= $(CC) -E
+INCLUDES=-I.. -I$(TOP) -I../../include
+CFLAG=-g
+MAKEFILE= Makefile
+AR= ar r
+
+MD5_ASM_OBJ=
+
+CFLAGS= $(INCLUDES) $(CFLAG)
+ASFLAGS= $(INCLUDES) $(ASFLAG)
+AFLAGS= $(ASFLAGS)
+
+GENERAL=Makefile
+TEST=md5test.c
+APPS=
+
+LIB=$(TOP)/libcrypto.a
+LIBSRC=md5_dgst.c md5_one.c
+LIBOBJ=md5_dgst.o md5_one.o $(MD5_ASM_OBJ)
+
+SRC= $(LIBSRC)
+
+EXHEADER= md5.h
+HEADER= md5_locl.h $(EXHEADER)
+
+ALL= $(GENERAL) $(SRC) $(HEADER)
+
+top:
+ (cd ../..; $(MAKE) DIRS=crypto SDIRS=$(DIR) sub_all)
+
+all: lib
+
+lib: $(LIBOBJ)
+ $(AR) $(LIB) $(LIBOBJ)
+ $(RANLIB) $(LIB) || echo Never mind.
+ @touch lib
+
+# ELF
+mx86-elf.s: asm/md5-586.pl ../perlasm/x86asm.pl
+ (cd asm; $(PERL) md5-586.pl elf $(CFLAGS) > ../$@)
+# COFF
+mx86-cof.s: asm/md5-586.pl ../perlasm/x86asm.pl
+ (cd asm; $(PERL) md5-586.pl coff $(CFLAGS) > ../$@)
+# a.out
+mx86-out.s: asm/md5-586.pl ../perlasm/x86asm.pl
+ (cd asm; $(PERL) md5-586.pl a.out $(CFLAGS) > ../$@)
+
+md5-sparcv8plus.o: asm/md5-sparcv9.S
+ $(CC) $(ASFLAGS) -DMD5_BLOCK_DATA_ORDER -c \
+ -o md5-sparcv8plus.o asm/md5-sparcv9.S
+
+# The old GNU assembler doesn't understand V9 instructions, so we
+# hire /usr/ccs/bin/as to do the job. Note that the option is called
+# *-gcc27, but even gcc 2.8 and later users may experience a similar
+# problem if they didn't bother to upgrade the GNU assembler. Such
+# users should not choose this option, but are advised to *remove*
+# the GNU assembler or upgrade it.
+md5-sparcv8plus-gcc27.o: asm/md5-sparcv9.S
+ $(CC) $(ASFLAGS) -DMD5_BLOCK_DATA_ORDER -E asm/md5-sparcv9.S | \
+ /usr/ccs/bin/as -xarch=v8plus - -o md5-sparcv8plus-gcc27.o
+
+md5-sparcv9.o: asm/md5-sparcv9.S
+ $(CC) $(ASFLAGS) -DMD5_BLOCK_DATA_ORDER -c \
+ -o md5-sparcv9.o asm/md5-sparcv9.S
+
+md5-x86_64.s: asm/md5-x86_64.pl; $(PERL) asm/md5-x86_64.pl $@
+
+files:
+ $(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO
+
+links:
+ @$(PERL) $(TOP)/util/mklink.pl ../../include/openssl $(EXHEADER)
+ @$(PERL) $(TOP)/util/mklink.pl ../../test $(TEST)
+ @$(PERL) $(TOP)/util/mklink.pl ../../apps $(APPS)
+
+install:
+ @[ -n "$(INSTALLTOP)" ] # should be set by top Makefile...
+ @headerlist="$(EXHEADER)"; for i in $$headerlist ; \
+ do \
+ (cp $$i $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i; \
+ chmod 644 $(INSTALL_PREFIX)$(INSTALLTOP)/include/openssl/$$i ); \
+ done;
+
+tags:
+ ctags $(SRC)
+
+tests:
+
+lint:
+ lint -DLINT $(INCLUDES) $(SRC)>fluff
+
+depend:
+ @[ -n "$(MAKEDEPEND)" ] # should be set by upper Makefile...
+ $(MAKEDEPEND) -- $(CFLAG) $(INCLUDES) $(DEPFLAG) -- $(PROGS) $(LIBSRC)
+
+dclean:
+ $(PERL) -pe 'if (/^# DO NOT DELETE THIS LINE/) {print; exit(0);}' $(MAKEFILE) >Makefile.new
+ mv -f Makefile.new $(MAKEFILE)
+
+clean:
+ rm -f *.s *.o *.obj lib tags core .pure .nfs* *.old *.bak fluff
+
+# DO NOT DELETE THIS LINE -- make depend depends on it.
+
+md5_dgst.o: ../../include/openssl/e_os2.h ../../include/openssl/md5.h
+md5_dgst.o: ../../include/openssl/opensslconf.h
+md5_dgst.o: ../../include/openssl/opensslv.h ../md32_common.h md5_dgst.c
+md5_dgst.o: md5_locl.h
+md5_one.o: ../../include/openssl/crypto.h ../../include/openssl/e_os2.h
+md5_one.o: ../../include/openssl/md5.h ../../include/openssl/opensslconf.h
+md5_one.o: ../../include/openssl/opensslv.h ../../include/openssl/ossl_typ.h
+md5_one.o: ../../include/openssl/safestack.h ../../include/openssl/stack.h
+md5_one.o: ../../include/openssl/symhacks.h md5_one.c
diff --git a/crypto/md5/asm/md5-586.pl b/crypto/md5/asm/md5-586.pl
new file mode 100644
index 0000000..fa3fa3b
--- /dev/null
+++ b/crypto/md5/asm/md5-586.pl
@@ -0,0 +1,306 @@
+#!/usr/local/bin/perl
+
+# Normal is the
+# md5_block_x86(MD5_CTX *c, ULONG *X);
+# version, non-normal is the
+# md5_block_x86(MD5_CTX *c, ULONG *X,int blocks);
+
+$normal=0;
+
+push(@INC,"perlasm","../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0);
+
+$A="eax";
+$B="ebx";
+$C="ecx";
+$D="edx";
+$tmp1="edi";
+$tmp2="ebp";
+$X="esi";
+
+# What we need to load into $tmp for the next round
+%Ltmp1=("R0",&Np($C), "R1",&Np($C), "R2",&Np($C), "R3",&Np($D));
+@xo=(
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, # R0
+ 1, 6, 11, 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, # R1
+ 5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2, # R2
+ 0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9, # R3
+ );
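
The @xo table above is simply RFC 1321's per-round message-word order, flattened into one list. As a cross-check, the four rows can be regenerated from the standard index formulas: round 0 uses k, round 1 uses (1 + 5k) mod 16, round 2 uses (5 + 3k) mod 16, and round 3 uses (7k) mod 16. The small C program below is my own illustration (not part of the vendor source) and only prints those indices:

    #include <stdio.h>

    int main(void)
    {
        int k;

        for (k = 0; k < 16; k++) printf("%3d,", k);                /* R0 */
        putchar('\n');
        for (k = 0; k < 16; k++) printf("%3d,", (1 + 5 * k) % 16); /* R1 */
        putchar('\n');
        for (k = 0; k < 16; k++) printf("%3d,", (5 + 3 * k) % 16); /* R2 */
        putchar('\n');
        for (k = 0; k < 16; k++) printf("%3d,", (7 * k) % 16);     /* R3 */
        putchar('\n');
        return 0;
    }
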
+
+&md5_block("md5_block_asm_host_order");
+&asm_finish();
+
+sub Np
+ {
+ local($p)=@_;
+ local(%n)=($A,$D,$B,$A,$C,$B,$D,$C);
+ return($n{$p});
+ }
+
+sub R0
+ {
+ local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+
+ &mov($tmp1,$C) if $pos < 0;
+ &mov($tmp2,&DWP($xo[$ki]*4,$K,"",0)) if $pos < 0; # very first one
+
+ # body proper
+
+ &comment("R0 $ki");
+ &xor($tmp1,$d); # F function - part 2
+
+ &and($tmp1,$b); # F function - part 3
+ &lea($a,&DWP($t,$a,$tmp2,1));
+
+ &xor($tmp1,$d); # F function - part 4
+
+ &add($a,$tmp1);
+ &mov($tmp1,&Np($c)) if $pos < 1; # next tmp1 for R0
+ &mov($tmp1,&Np($c)) if $pos == 1; # next tmp1 for R1
+
+ &rotl($a,$s);
+
+ &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2);
+
+ &add($a,$b);
+ }
+
+sub R1
+ {
+ local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+
+ &comment("R1 $ki");
+
+ &lea($a,&DWP($t,$a,$tmp2,1));
+
+ &xor($tmp1,$b); # G function - part 2
+ &and($tmp1,$d); # G function - part 3
+
+ &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2);
+ &xor($tmp1,$c); # G function - part 4
+
+ &add($a,$tmp1);
+ &mov($tmp1,&Np($c)) if $pos < 1; # G function - part 1
+ &mov($tmp1,&Np($c)) if $pos == 1; # G function - part 1
+
+ &rotl($a,$s);
+
+ &add($a,$b);
+ }
+
+sub R2
+ {
+ local($n,$pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+ # This one is different, only 3 logical operations
+
+if (($n & 1) == 0)
+ {
+ &comment("R2 $ki");
+ # make sure to do 'D' first, not 'B', else we clash with
+ # the last add from the previous round.
+
+ &xor($tmp1,$d); # H function - part 2
+
+ &xor($tmp1,$b); # H function - part 3
+ &lea($a,&DWP($t,$a,$tmp2,1));
+
+ &add($a,$tmp1);
+
+ &rotl($a,$s);
+
+ &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0));
+ &mov($tmp1,&Np($c));
+ }
+else
+ {
+ &comment("R2 $ki");
+ # make sure to do 'D' first, not 'B', else we clash with
+ # the last add from the previous round.
+
+ &lea($a,&DWP($t,$a,$tmp2,1));
+
+ &add($b,$c); # MOVED FORWARD
+ &xor($tmp1,$d); # H function - part 2
+
+ &xor($tmp1,$b); # H function - part 3
+ &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2);
+
+ &add($a,$tmp1);
+ &mov($tmp1,&Np($c)) if $pos < 1; # H function - part 1
+ &mov($tmp1,-1) if $pos == 1; # I function - part 1
+
+ &rotl($a,$s);
+
+ &add($a,$b);
+ }
+ }
+
+sub R3
+ {
+ local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+
+ &comment("R3 $ki");
+
+ # &not($tmp1)
+ &xor($tmp1,$d) if $pos < 0; # I function - part 2
+
+ &or($tmp1,$b); # I function - part 3
+ &lea($a,&DWP($t,$a,$tmp2,1));
+
+ &xor($tmp1,$c); # I function - part 4
+ &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if $pos != 2; # load X/k value
+ &mov($tmp2,&wparam(0)) if $pos == 2;
+
+ &add($a,$tmp1);
+ &mov($tmp1,-1) if $pos < 1; # H function - part 1
+ &add($K,64) if $pos >=1 && !$normal;
+
+ &rotl($a,$s);
+
+ &xor($tmp1,&Np($d)) if $pos <= 0; # I function - part = first time
+ &mov($tmp1,&DWP( 0,$tmp2,"",0)) if $pos > 0;
+ &add($a,$b);
+ }
+
+
+sub md5_block
+ {
+ local($name)=@_;
+
+ &function_begin_B($name,"",3);
+
+ # parameter 1 is the MD5_CTX structure.
+ # A 0
+ # B 4
+ # C 8
+ # D 12
+
+ &push("esi");
+ &push("edi");
+ &mov($tmp1, &wparam(0)); # edi
+ &mov($X, &wparam(1)); # esi
+ &mov($C, &wparam(2));
+ &push("ebp");
+ &shl($C, 6);
+ &push("ebx");
+ &add($C, $X); # offset we end at
+ &sub($C, 64);
+ &mov($A, &DWP( 0,$tmp1,"",0));
+ &push($C); # Put on the TOS
+ &mov($B, &DWP( 4,$tmp1,"",0));
+ &mov($C, &DWP( 8,$tmp1,"",0));
+ &mov($D, &DWP(12,$tmp1,"",0));
+
+ &set_label("start") unless $normal;
+ &comment("");
+ &comment("R0 section");
+
+ &R0(-2,$A,$B,$C,$D,$X, 0, 7,0xd76aa478);
+ &R0( 0,$D,$A,$B,$C,$X, 1,12,0xe8c7b756);
+ &R0( 0,$C,$D,$A,$B,$X, 2,17,0x242070db);
+ &R0( 0,$B,$C,$D,$A,$X, 3,22,0xc1bdceee);
+ &R0( 0,$A,$B,$C,$D,$X, 4, 7,0xf57c0faf);
+ &R0( 0,$D,$A,$B,$C,$X, 5,12,0x4787c62a);
+ &R0( 0,$C,$D,$A,$B,$X, 6,17,0xa8304613);
+ &R0( 0,$B,$C,$D,$A,$X, 7,22,0xfd469501);
+ &R0( 0,$A,$B,$C,$D,$X, 8, 7,0x698098d8);
+ &R0( 0,$D,$A,$B,$C,$X, 9,12,0x8b44f7af);
+ &R0( 0,$C,$D,$A,$B,$X,10,17,0xffff5bb1);
+ &R0( 0,$B,$C,$D,$A,$X,11,22,0x895cd7be);
+ &R0( 0,$A,$B,$C,$D,$X,12, 7,0x6b901122);
+ &R0( 0,$D,$A,$B,$C,$X,13,12,0xfd987193);
+ &R0( 0,$C,$D,$A,$B,$X,14,17,0xa679438e);
+ &R0( 1,$B,$C,$D,$A,$X,15,22,0x49b40821);
+
+ &comment("");
+ &comment("R1 section");
+ &R1(-1,$A,$B,$C,$D,$X,16, 5,0xf61e2562);
+ &R1( 0,$D,$A,$B,$C,$X,17, 9,0xc040b340);
+ &R1( 0,$C,$D,$A,$B,$X,18,14,0x265e5a51);
+ &R1( 0,$B,$C,$D,$A,$X,19,20,0xe9b6c7aa);
+ &R1( 0,$A,$B,$C,$D,$X,20, 5,0xd62f105d);
+ &R1( 0,$D,$A,$B,$C,$X,21, 9,0x02441453);
+ &R1( 0,$C,$D,$A,$B,$X,22,14,0xd8a1e681);
+ &R1( 0,$B,$C,$D,$A,$X,23,20,0xe7d3fbc8);
+ &R1( 0,$A,$B,$C,$D,$X,24, 5,0x21e1cde6);
+ &R1( 0,$D,$A,$B,$C,$X,25, 9,0xc33707d6);
+ &R1( 0,$C,$D,$A,$B,$X,26,14,0xf4d50d87);
+ &R1( 0,$B,$C,$D,$A,$X,27,20,0x455a14ed);
+ &R1( 0,$A,$B,$C,$D,$X,28, 5,0xa9e3e905);
+ &R1( 0,$D,$A,$B,$C,$X,29, 9,0xfcefa3f8);
+ &R1( 0,$C,$D,$A,$B,$X,30,14,0x676f02d9);
+ &R1( 1,$B,$C,$D,$A,$X,31,20,0x8d2a4c8a);
+
+ &comment("");
+ &comment("R2 section");
+ &R2( 0,-1,$A,$B,$C,$D,$X,32, 4,0xfffa3942);
+ &R2( 1, 0,$D,$A,$B,$C,$X,33,11,0x8771f681);
+ &R2( 2, 0,$C,$D,$A,$B,$X,34,16,0x6d9d6122);
+ &R2( 3, 0,$B,$C,$D,$A,$X,35,23,0xfde5380c);
+ &R2( 4, 0,$A,$B,$C,$D,$X,36, 4,0xa4beea44);
+ &R2( 5, 0,$D,$A,$B,$C,$X,37,11,0x4bdecfa9);
+ &R2( 6, 0,$C,$D,$A,$B,$X,38,16,0xf6bb4b60);
+ &R2( 7, 0,$B,$C,$D,$A,$X,39,23,0xbebfbc70);
+ &R2( 8, 0,$A,$B,$C,$D,$X,40, 4,0x289b7ec6);
+ &R2( 9, 0,$D,$A,$B,$C,$X,41,11,0xeaa127fa);
+ &R2(10, 0,$C,$D,$A,$B,$X,42,16,0xd4ef3085);
+ &R2(11, 0,$B,$C,$D,$A,$X,43,23,0x04881d05);
+ &R2(12, 0,$A,$B,$C,$D,$X,44, 4,0xd9d4d039);
+ &R2(13, 0,$D,$A,$B,$C,$X,45,11,0xe6db99e5);
+ &R2(14, 0,$C,$D,$A,$B,$X,46,16,0x1fa27cf8);
+ &R2(15, 1,$B,$C,$D,$A,$X,47,23,0xc4ac5665);
+
+ &comment("");
+ &comment("R3 section");
+ &R3(-1,$A,$B,$C,$D,$X,48, 6,0xf4292244);
+ &R3( 0,$D,$A,$B,$C,$X,49,10,0x432aff97);
+ &R3( 0,$C,$D,$A,$B,$X,50,15,0xab9423a7);
+ &R3( 0,$B,$C,$D,$A,$X,51,21,0xfc93a039);
+ &R3( 0,$A,$B,$C,$D,$X,52, 6,0x655b59c3);
+ &R3( 0,$D,$A,$B,$C,$X,53,10,0x8f0ccc92);
+ &R3( 0,$C,$D,$A,$B,$X,54,15,0xffeff47d);
+ &R3( 0,$B,$C,$D,$A,$X,55,21,0x85845dd1);
+ &R3( 0,$A,$B,$C,$D,$X,56, 6,0x6fa87e4f);
+ &R3( 0,$D,$A,$B,$C,$X,57,10,0xfe2ce6e0);
+ &R3( 0,$C,$D,$A,$B,$X,58,15,0xa3014314);
+ &R3( 0,$B,$C,$D,$A,$X,59,21,0x4e0811a1);
+ &R3( 0,$A,$B,$C,$D,$X,60, 6,0xf7537e82);
+ &R3( 0,$D,$A,$B,$C,$X,61,10,0xbd3af235);
+ &R3( 0,$C,$D,$A,$B,$X,62,15,0x2ad7d2bb);
+ &R3( 2,$B,$C,$D,$A,$X,63,21,0xeb86d391);
+
+ # &mov($tmp2,&wparam(0)); # done in the last R3
+ # &mov($tmp1, &DWP( 0,$tmp2,"",0)); # done in the last R3
+
+ &add($A,$tmp1);
+ &mov($tmp1, &DWP( 4,$tmp2,"",0));
+
+ &add($B,$tmp1);
+ &mov($tmp1, &DWP( 8,$tmp2,"",0));
+
+ &add($C,$tmp1);
+ &mov($tmp1, &DWP(12,$tmp2,"",0));
+
+ &add($D,$tmp1);
+ &mov(&DWP( 0,$tmp2,"",0),$A);
+
+ &mov(&DWP( 4,$tmp2,"",0),$B);
+ &mov($tmp1,&swtmp(0)) unless $normal;
+
+ &mov(&DWP( 8,$tmp2,"",0),$C);
+ &mov(&DWP(12,$tmp2,"",0),$D);
+
+ &cmp($tmp1,$X) unless $normal; # check count
+ &jae(&label("start")) unless $normal;
+
+ &pop("eax"); # pop the temp variable off the stack
+ &pop("ebx");
+ &pop("ebp");
+ &pop("edi");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
+
diff --git a/crypto/md5/asm/md5-sparcv9.S b/crypto/md5/asm/md5-sparcv9.S
new file mode 100644
index 0000000..db45aa4
--- /dev/null
+++ b/crypto/md5/asm/md5-sparcv9.S
@@ -0,0 +1,1031 @@
+.ident "md5-sparcv9.S, Version 1.0"
+.ident "SPARC V9 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+.file "md5-sparcv9.S"
+
+/*
+ * ====================================================================
+ * Copyright (c) 1999 Andy Polyakov <appro@fy.chalmers.se>.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted as long as above copyright notices are retained. Warranty
+ * of any kind is (of course:-) disclaimed.
+ * ====================================================================
+ */
+
+/*
+ * This is my modest contribution to OpenSSL project (see
+ * http://www.openssl.org/ for more information about it) and is an
+ * assembler implementation of the MD5 block hash function. I've hand-coded
+ * this for the sole reason of reaching the UltraSPARC-specific "load in
+ * little-endian byte order" instruction. This gives up to a 15%
+ * performance improvement when the input message is aligned at a
+ * 32-bit boundary. The module was tested under both 32 *and* 64 bit
+ * kernels. For updates see http://fy.chalmers.se/~appro/hpe/.
+ *
+ * To compile with SC4.x/SC5.x:
+ *
+ * cc -xarch=v[9|8plus] -DOPENSSL_SYSNAME_ULTRASPARC -DMD5_BLOCK_DATA_ORDER \
+ * -c md5-sparcv9.S
+ *
+ * and with gcc:
+ *
+ * gcc -mcpu=ultrasparc -DOPENSSL_SYSNAME_ULTRASPARC -DMD5_BLOCK_DATA_ORDER \
+ * -c md5-sparcv9.S
+ *
+ * or if the above fails (it does if you have gas):
+ *
+ * gcc -E -DOPENSSL_SYSNAME_ULTRASPARC -DMD5_BLOCK_DATA_ORDER md5-sparcv9.S | \
+ * as -xarch=v8plus /dev/fd/0 -o md5-sparcv9.o
+ */
+
+#include <openssl/e_os2.h>
+
+#define A %o0
+#define B %o1
+#define C %o2
+#define D %o3
+#define T1 %o4
+#define T2 %o5
+
+#define R0 %l0
+#define R1 %l1
+#define R2 %l2
+#define R3 %l3
+#define R4 %l4
+#define R5 %l5
+#define R6 %l6
+#define R7 %l7
+#define R8 %i3
+#define R9 %i4
+#define R10 %i5
+#define R11 %g1
+#define R12 %g2
+#define R13 %g3
+#define RX %g4
+
+#define Aptr %i0+0
+#define Bptr %i0+4
+#define Cptr %i0+8
+#define Dptr %i0+12
+
+#define Aval R5 /* those not used at the end of the last round */
+#define Bval R6
+#define Cval R7
+#define Dval R8
+
+#if defined(MD5_BLOCK_DATA_ORDER)
+# if defined(OPENSSL_SYSNAME_ULTRASPARC)
+# define LOAD lda
+# define X(i) [%i1+i*4]%asi
+# define md5_block md5_block_asm_data_order_aligned
+# define ASI_PRIMARY_LITTLE 0x88
+# else
+# error "MD5_BLOCK_DATA_ORDER is supported only on UltraSPARC!"
+# endif
+#else
+# define LOAD ld
+# define X(i) [%i1+i*4]
+# define md5_block md5_block_asm_host_order
+#endif
+
+.section ".text",#alloc,#execinstr
+
+#if defined(__SUNPRO_C) && defined(__sparcv9)
+ /* They've said -xarch=v9 at command line */
+ .register %g2,#scratch
+ .register %g3,#scratch
+# define FRAME -192
+#elif defined(__GNUC__) && defined(__arch64__)
+ /* They've said -m64 at command line */
+ .register %g2,#scratch
+ .register %g3,#scratch
+# define FRAME -192
+#else
+# define FRAME -96
+#endif
+
+.align 32
+
+.global md5_block
+md5_block:
+ save %sp,FRAME,%sp
+
+ ld [Dptr],D
+ ld [Cptr],C
+ ld [Bptr],B
+ ld [Aptr],A
+#ifdef ASI_PRIMARY_LITTLE
+ rd %asi,%o7 ! How dare I? Well, I just do:-)
+ wr %g0,ASI_PRIMARY_LITTLE,%asi
+#endif
+ LOAD X(0),R0
+
+.Lmd5_block_loop:
+
+!!!!!!!!Round 0
+
+ xor C,D,T1
+ sethi %hi(0xd76aa478),T2
+ and T1,B,T1
+ or T2,%lo(0xd76aa478),T2 !=
+ xor T1,D,T1
+ add T1,R0,T1
+ LOAD X(1),R1
+ add T1,T2,T1 !=
+ add A,T1,A
+ sll A,7,T2
+ srl A,32-7,A
+ or A,T2,A !=
+ xor B,C,T1
+ add A,B,A
+
+ sethi %hi(0xe8c7b756),T2
+ and T1,A,T1 !=
+ or T2,%lo(0xe8c7b756),T2
+ xor T1,C,T1
+ LOAD X(2),R2
+ add T1,R1,T1 !=
+ add T1,T2,T1
+ add D,T1,D
+ sll D,12,T2
+ srl D,32-12,D !=
+ or D,T2,D
+ xor A,B,T1
+ add D,A,D
+
+ sethi %hi(0x242070db),T2 !=
+ and T1,D,T1
+ or T2,%lo(0x242070db),T2
+ xor T1,B,T1
+ add T1,R2,T1 !=
+ LOAD X(3),R3
+ add T1,T2,T1
+ add C,T1,C
+ sll C,17,T2 !=
+ srl C,32-17,C
+ or C,T2,C
+ xor D,A,T1
+ add C,D,C !=
+
+ sethi %hi(0xc1bdceee),T2
+ and T1,C,T1
+ or T2,%lo(0xc1bdceee),T2
+ xor T1,A,T1 !=
+ add T1,R3,T1
+ LOAD X(4),R4
+ add T1,T2,T1
+ add B,T1,B !=
+ sll B,22,T2
+ srl B,32-22,B
+ or B,T2,B
+ xor C,D,T1 !=
+ add B,C,B
+
+ sethi %hi(0xf57c0faf),T2
+ and T1,B,T1
+ or T2,%lo(0xf57c0faf),T2 !=
+ xor T1,D,T1
+ add T1,R4,T1
+ LOAD X(5),R5
+ add T1,T2,T1 !=
+ add A,T1,A
+ sll A,7,T2
+ srl A,32-7,A
+ or A,T2,A !=
+ xor B,C,T1
+ add A,B,A
+
+ sethi %hi(0x4787c62a),T2
+ and T1,A,T1 !=
+ or T2,%lo(0x4787c62a),T2
+ xor T1,C,T1
+ LOAD X(6),R6
+ add T1,R5,T1 !=
+ add T1,T2,T1
+ add D,T1,D
+ sll D,12,T2
+ srl D,32-12,D !=
+ or D,T2,D
+ xor A,B,T1
+ add D,A,D
+
+ sethi %hi(0xa8304613),T2 !=
+ and T1,D,T1
+ or T2,%lo(0xa8304613),T2
+ xor T1,B,T1
+ add T1,R6,T1 !=
+ LOAD X(7),R7
+ add T1,T2,T1
+ add C,T1,C
+ sll C,17,T2 !=
+ srl C,32-17,C
+ or C,T2,C
+ xor D,A,T1
+ add C,D,C !=
+
+ sethi %hi(0xfd469501),T2
+ and T1,C,T1
+ or T2,%lo(0xfd469501),T2
+ xor T1,A,T1 !=
+ add T1,R7,T1
+ LOAD X(8),R8
+ add T1,T2,T1
+ add B,T1,B !=
+ sll B,22,T2
+ srl B,32-22,B
+ or B,T2,B
+ xor C,D,T1 !=
+ add B,C,B
+
+ sethi %hi(0x698098d8),T2
+ and T1,B,T1
+ or T2,%lo(0x698098d8),T2 !=
+ xor T1,D,T1
+ add T1,R8,T1
+ LOAD X(9),R9
+ add T1,T2,T1 !=
+ add A,T1,A
+ sll A,7,T2
+ srl A,32-7,A
+ or A,T2,A !=
+ xor B,C,T1
+ add A,B,A
+
+ sethi %hi(0x8b44f7af),T2
+ and T1,A,T1 !=
+ or T2,%lo(0x8b44f7af),T2
+ xor T1,C,T1
+ LOAD X(10),R10
+ add T1,R9,T1 !=
+ add T1,T2,T1
+ add D,T1,D
+ sll D,12,T2
+ srl D,32-12,D !=
+ or D,T2,D
+ xor A,B,T1
+ add D,A,D
+
+ sethi %hi(0xffff5bb1),T2 !=
+ and T1,D,T1
+ or T2,%lo(0xffff5bb1),T2
+ xor T1,B,T1
+ add T1,R10,T1 !=
+ LOAD X(11),R11
+ add T1,T2,T1
+ add C,T1,C
+ sll C,17,T2 !=
+ srl C,32-17,C
+ or C,T2,C
+ xor D,A,T1
+ add C,D,C !=
+
+ sethi %hi(0x895cd7be),T2
+ and T1,C,T1
+ or T2,%lo(0x895cd7be),T2
+ xor T1,A,T1 !=
+ add T1,R11,T1
+ LOAD X(12),R12
+ add T1,T2,T1
+ add B,T1,B !=
+ sll B,22,T2
+ srl B,32-22,B
+ or B,T2,B
+ xor C,D,T1 !=
+ add B,C,B
+
+ sethi %hi(0x6b901122),T2
+ and T1,B,T1
+ or T2,%lo(0x6b901122),T2 !=
+ xor T1,D,T1
+ add T1,R12,T1
+ LOAD X(13),R13
+ add T1,T2,T1 !=
+ add A,T1,A
+ sll A,7,T2
+ srl A,32-7,A
+ or A,T2,A !=
+ xor B,C,T1
+ add A,B,A
+
+ sethi %hi(0xfd987193),T2
+ and T1,A,T1 !=
+ or T2,%lo(0xfd987193),T2
+ xor T1,C,T1
+ LOAD X(14),RX
+ add T1,R13,T1 !=
+ add T1,T2,T1
+ add D,T1,D
+ sll D,12,T2
+ srl D,32-12,D !=
+ or D,T2,D
+ xor A,B,T1
+ add D,A,D
+
+ sethi %hi(0xa679438e),T2 !=
+ and T1,D,T1
+ or T2,%lo(0xa679438e),T2
+ xor T1,B,T1
+ add T1,RX,T1 !=
+ LOAD X(15),RX
+ add T1,T2,T1
+ add C,T1,C
+ sll C,17,T2 !=
+ srl C,32-17,C
+ or C,T2,C
+ xor D,A,T1
+ add C,D,C !=
+
+ sethi %hi(0x49b40821),T2
+ and T1,C,T1
+ or T2,%lo(0x49b40821),T2
+ xor T1,A,T1 !=
+ add T1,RX,T1
+ !pre-LOADed X(1),R1
+ add T1,T2,T1
+ add B,T1,B
+ sll B,22,T2 !=
+ srl B,32-22,B
+ or B,T2,B
+ add B,C,B
+
+!!!!!!!!Round 1
+
+ xor B,C,T1 !=
+ sethi %hi(0xf61e2562),T2
+ and T1,D,T1
+ or T2,%lo(0xf61e2562),T2
+ xor T1,C,T1 !=
+ add T1,R1,T1
+ !pre-LOADed X(6),R6
+ add T1,T2,T1
+ add A,T1,A
+ sll A,5,T2 !=
+ srl A,32-5,A
+ or A,T2,A
+ add A,B,A
+
+ xor A,B,T1 !=
+ sethi %hi(0xc040b340),T2
+ and T1,C,T1
+ or T2,%lo(0xc040b340),T2
+ xor T1,B,T1 !=
+ add T1,R6,T1
+ !pre-LOADed X(11),R11
+ add T1,T2,T1
+ add D,T1,D
+ sll D,9,T2 !=
+ srl D,32-9,D
+ or D,T2,D
+ add D,A,D
+
+ xor D,A,T1 !=
+ sethi %hi(0x265e5a51),T2
+ and T1,B,T1
+ or T2,%lo(0x265e5a51),T2
+ xor T1,A,T1 !=
+ add T1,R11,T1
+ !pre-LOADed X(0),R0
+ add T1,T2,T1
+ add C,T1,C
+ sll C,14,T2 !=
+ srl C,32-14,C
+ or C,T2,C
+ add C,D,C
+
+ xor C,D,T1 !=
+ sethi %hi(0xe9b6c7aa),T2
+ and T1,A,T1
+ or T2,%lo(0xe9b6c7aa),T2
+ xor T1,D,T1 !=
+ add T1,R0,T1
+ !pre-LOADed X(5),R5
+ add T1,T2,T1
+ add B,T1,B
+ sll B,20,T2 !=
+ srl B,32-20,B
+ or B,T2,B
+ add B,C,B
+
+ xor B,C,T1 !=
+ sethi %hi(0xd62f105d),T2
+ and T1,D,T1
+ or T2,%lo(0xd62f105d),T2
+ xor T1,C,T1 !=
+ add T1,R5,T1
+ !pre-LOADed X(10),R10
+ add T1,T2,T1
+ add A,T1,A
+ sll A,5,T2 !=
+ srl A,32-5,A
+ or A,T2,A
+ add A,B,A
+
+ xor A,B,T1 !=
+ sethi %hi(0x02441453),T2
+ and T1,C,T1
+ or T2,%lo(0x02441453),T2
+ xor T1,B,T1 !=
+ add T1,R10,T1
+ LOAD X(15),RX
+ add T1,T2,T1
+ add D,T1,D !=
+ sll D,9,T2
+ srl D,32-9,D
+ or D,T2,D
+ add D,A,D !=
+
+ xor D,A,T1
+ sethi %hi(0xd8a1e681),T2
+ and T1,B,T1
+ or T2,%lo(0xd8a1e681),T2 !=
+ xor T1,A,T1
+ add T1,RX,T1
+ !pre-LOADed X(4),R4
+ add T1,T2,T1
+ add C,T1,C !=
+ sll C,14,T2
+ srl C,32-14,C
+ or C,T2,C
+ add C,D,C !=
+
+ xor C,D,T1
+ sethi %hi(0xe7d3fbc8),T2
+ and T1,A,T1
+ or T2,%lo(0xe7d3fbc8),T2 !=
+ xor T1,D,T1
+ add T1,R4,T1
+ !pre-LOADed X(9),R9
+ add T1,T2,T1
+ add B,T1,B !=
+ sll B,20,T2
+ srl B,32-20,B
+ or B,T2,B
+ add B,C,B !=
+
+ xor B,C,T1
+ sethi %hi(0x21e1cde6),T2
+ and T1,D,T1
+ or T2,%lo(0x21e1cde6),T2 !=
+ xor T1,C,T1
+ add T1,R9,T1
+ LOAD X(14),RX
+ add T1,T2,T1 !=
+ add A,T1,A
+ sll A,5,T2
+ srl A,32-5,A
+ or A,T2,A !=
+ add A,B,A
+
+ xor A,B,T1
+ sethi %hi(0xc33707d6),T2
+ and T1,C,T1 !=
+ or T2,%lo(0xc33707d6),T2
+ xor T1,B,T1
+ add T1,RX,T1
+ !pre-LOADed X(3),R3
+ add T1,T2,T1 !=
+ add D,T1,D
+ sll D,9,T2
+ srl D,32-9,D
+ or D,T2,D !=
+ add D,A,D
+
+ xor D,A,T1
+ sethi %hi(0xf4d50d87),T2
+ and T1,B,T1 !=
+ or T2,%lo(0xf4d50d87),T2
+ xor T1,A,T1
+ add T1,R3,T1
+ !pre-LOADed X(8),R8
+ add T1,T2,T1 !=
+ add C,T1,C
+ sll C,14,T2
+ srl C,32-14,C
+ or C,T2,C !=
+ add C,D,C
+
+ xor C,D,T1
+ sethi %hi(0x455a14ed),T2
+ and T1,A,T1 !=
+ or T2,%lo(0x455a14ed),T2
+ xor T1,D,T1
+ add T1,R8,T1
+ !pre-LOADed X(13),R13
+ add T1,T2,T1 !=
+ add B,T1,B
+ sll B,20,T2
+ srl B,32-20,B
+ or B,T2,B !=
+ add B,C,B
+
+ xor B,C,T1
+ sethi %hi(0xa9e3e905),T2
+ and T1,D,T1 !=
+ or T2,%lo(0xa9e3e905),T2
+ xor T1,C,T1
+ add T1,R13,T1
+ !pre-LOADed X(2),R2
+ add T1,T2,T1 !=
+ add A,T1,A
+ sll A,5,T2
+ srl A,32-5,A
+ or A,T2,A !=
+ add A,B,A
+
+ xor A,B,T1
+ sethi %hi(0xfcefa3f8),T2
+ and T1,C,T1 !=
+ or T2,%lo(0xfcefa3f8),T2
+ xor T1,B,T1
+ add T1,R2,T1
+ !pre-LOADed X(7),R7
+ add T1,T2,T1 !=
+ add D,T1,D
+ sll D,9,T2
+ srl D,32-9,D
+ or D,T2,D !=
+ add D,A,D
+
+ xor D,A,T1
+ sethi %hi(0x676f02d9),T2
+ and T1,B,T1 !=
+ or T2,%lo(0x676f02d9),T2
+ xor T1,A,T1
+ add T1,R7,T1
+ !pre-LOADed X(12),R12
+ add T1,T2,T1 !=
+ add C,T1,C
+ sll C,14,T2
+ srl C,32-14,C
+ or C,T2,C !=
+ add C,D,C
+
+ xor C,D,T1
+ sethi %hi(0x8d2a4c8a),T2
+ and T1,A,T1 !=
+ or T2,%lo(0x8d2a4c8a),T2
+ xor T1,D,T1
+ add T1,R12,T1
+ !pre-LOADed X(5),R5
+ add T1,T2,T1 !=
+ add B,T1,B
+ sll B,20,T2
+ srl B,32-20,B
+ or B,T2,B !=
+ add B,C,B
+
+!!!!!!!!Round 2
+
+ xor B,C,T1
+ sethi %hi(0xfffa3942),T2
+ xor T1,D,T1 !=
+ or T2,%lo(0xfffa3942),T2
+ add T1,R5,T1
+ !pre-LOADed X(8),R8
+ add T1,T2,T1
+ add A,T1,A !=
+ sll A,4,T2
+ srl A,32-4,A
+ or A,T2,A
+ add A,B,A !=
+
+ xor A,B,T1
+ sethi %hi(0x8771f681),T2
+ xor T1,C,T1
+ or T2,%lo(0x8771f681),T2 !=
+ add T1,R8,T1
+ !pre-LOADed X(11),R11
+ add T1,T2,T1
+ add D,T1,D
+ sll D,11,T2 !=
+ srl D,32-11,D
+ or D,T2,D
+ add D,A,D
+
+ xor D,A,T1 !=
+ sethi %hi(0x6d9d6122),T2
+ xor T1,B,T1
+ or T2,%lo(0x6d9d6122),T2
+ add T1,R11,T1 !=
+ LOAD X(14),RX
+ add T1,T2,T1
+ add C,T1,C
+ sll C,16,T2 !=
+ srl C,32-16,C
+ or C,T2,C
+ add C,D,C
+
+ xor C,D,T1 !=
+ sethi %hi(0xfde5380c),T2
+ xor T1,A,T1
+ or T2,%lo(0xfde5380c),T2
+ add T1,RX,T1 !=
+ !pre-LOADed X(1),R1
+ add T1,T2,T1
+ add B,T1,B
+ sll B,23,T2
+ srl B,32-23,B !=
+ or B,T2,B
+ add B,C,B
+
+ xor B,C,T1
+ sethi %hi(0xa4beea44),T2 !=
+ xor T1,D,T1
+ or T2,%lo(0xa4beea44),T2
+ add T1,R1,T1
+ !pre-LOADed X(4),R4
+ add T1,T2,T1 !=
+ add A,T1,A
+ sll A,4,T2
+ srl A,32-4,A
+ or A,T2,A !=
+ add A,B,A
+
+ xor A,B,T1
+ sethi %hi(0x4bdecfa9),T2
+ xor T1,C,T1 !=
+ or T2,%lo(0x4bdecfa9),T2
+ add T1,R4,T1
+ !pre-LOADed X(7),R7
+ add T1,T2,T1
+ add D,T1,D !=
+ sll D,11,T2
+ srl D,32-11,D
+ or D,T2,D
+ add D,A,D !=
+
+ xor D,A,T1
+ sethi %hi(0xf6bb4b60),T2
+ xor T1,B,T1
+ or T2,%lo(0xf6bb4b60),T2 !=
+ add T1,R7,T1
+ !pre-LOADed X(10),R10
+ add T1,T2,T1
+ add C,T1,C
+ sll C,16,T2 !=
+ srl C,32-16,C
+ or C,T2,C
+ add C,D,C
+
+ xor C,D,T1 !=
+ sethi %hi(0xbebfbc70),T2
+ xor T1,A,T1
+ or T2,%lo(0xbebfbc70),T2
+ add T1,R10,T1 !=
+ !pre-LOADed X(13),R13
+ add T1,T2,T1
+ add B,T1,B
+ sll B,23,T2
+ srl B,32-23,B !=
+ or B,T2,B
+ add B,C,B
+
+ xor B,C,T1
+ sethi %hi(0x289b7ec6),T2 !=
+ xor T1,D,T1
+ or T2,%lo(0x289b7ec6),T2
+ add T1,R13,T1
+ !pre-LOADed X(0),R0
+ add T1,T2,T1 !=
+ add A,T1,A
+ sll A,4,T2
+ srl A,32-4,A
+ or A,T2,A !=
+ add A,B,A
+
+ xor A,B,T1
+ sethi %hi(0xeaa127fa),T2
+ xor T1,C,T1 !=
+ or T2,%lo(0xeaa127fa),T2
+ add T1,R0,T1
+ !pre-LOADed X(3),R3
+ add T1,T2,T1
+ add D,T1,D !=
+ sll D,11,T2
+ srl D,32-11,D
+ or D,T2,D
+ add D,A,D !=
+
+ xor D,A,T1
+ sethi %hi(0xd4ef3085),T2
+ xor T1,B,T1
+ or T2,%lo(0xd4ef3085),T2 !=
+ add T1,R3,T1
+ !pre-LOADed X(6),R6
+ add T1,T2,T1
+ add C,T1,C
+ sll C,16,T2 !=
+ srl C,32-16,C
+ or C,T2,C
+ add C,D,C
+
+ xor C,D,T1 !=
+ sethi %hi(0x04881d05),T2
+ xor T1,A,T1
+ or T2,%lo(0x04881d05),T2
+ add T1,R6,T1 !=
+ !pre-LOADed X(9),R9
+ add T1,T2,T1
+ add B,T1,B
+ sll B,23,T2
+ srl B,32-23,B !=
+ or B,T2,B
+ add B,C,B
+
+ xor B,C,T1
+ sethi %hi(0xd9d4d039),T2 !=
+ xor T1,D,T1
+ or T2,%lo(0xd9d4d039),T2
+ add T1,R9,T1
+ !pre-LOADed X(12),R12
+ add T1,T2,T1 !=
+ add A,T1,A
+ sll A,4,T2
+ srl A,32-4,A
+ or A,T2,A !=
+ add A,B,A
+
+ xor A,B,T1
+ sethi %hi(0xe6db99e5),T2
+ xor T1,C,T1 !=
+ or T2,%lo(0xe6db99e5),T2
+ add T1,R12,T1
+ LOAD X(15),RX
+ add T1,T2,T1 !=
+ add D,T1,D
+ sll D,11,T2
+ srl D,32-11,D
+ or D,T2,D !=
+ add D,A,D
+
+ xor D,A,T1
+ sethi %hi(0x1fa27cf8),T2
+ xor T1,B,T1 !=
+ or T2,%lo(0x1fa27cf8),T2
+ add T1,RX,T1
+ !pre-LOADed X(2),R2
+ add T1,T2,T1
+ add C,T1,C !=
+ sll C,16,T2
+ srl C,32-16,C
+ or C,T2,C
+ add C,D,C !=
+
+ xor C,D,T1
+ sethi %hi(0xc4ac5665),T2
+ xor T1,A,T1
+ or T2,%lo(0xc4ac5665),T2 !=
+ add T1,R2,T1
+ !pre-LOADed X(0),R0
+ add T1,T2,T1
+ add B,T1,B
+ sll B,23,T2 !=
+ srl B,32-23,B
+ or B,T2,B
+ add B,C,B
+
+!!!!!!!!Round 3
+
+ orn B,D,T1 !=
+ sethi %hi(0xf4292244),T2
+ xor T1,C,T1
+ or T2,%lo(0xf4292244),T2
+ add T1,R0,T1 !=
+ !pre-LOADed X(7),R7
+ add T1,T2,T1
+ add A,T1,A
+ sll A,6,T2
+ srl A,32-6,A !=
+ or A,T2,A
+ add A,B,A
+
+ orn A,C,T1
+ sethi %hi(0x432aff97),T2 !=
+ xor T1,B,T1
+ or T2,%lo(0x432aff97),T2
+ LOAD X(14),RX
+ add T1,R7,T1 !=
+ add T1,T2,T1
+ add D,T1,D
+ sll D,10,T2
+ srl D,32-10,D !=
+ or D,T2,D
+ add D,A,D
+
+ orn D,B,T1
+ sethi %hi(0xab9423a7),T2 !=
+ xor T1,A,T1
+ or T2,%lo(0xab9423a7),T2
+ add T1,RX,T1
+ !pre-LOADed X(5),R5
+ add T1,T2,T1 !=
+ add C,T1,C
+ sll C,15,T2
+ srl C,32-15,C
+ or C,T2,C !=
+ add C,D,C
+
+ orn C,A,T1
+ sethi %hi(0xfc93a039),T2
+ xor T1,D,T1 !=
+ or T2,%lo(0xfc93a039),T2
+ add T1,R5,T1
+ !pre-LOADed X(12),R12
+ add T1,T2,T1
+ add B,T1,B !=
+ sll B,21,T2
+ srl B,32-21,B
+ or B,T2,B
+ add B,C,B !=
+
+ orn B,D,T1
+ sethi %hi(0x655b59c3),T2
+ xor T1,C,T1
+ or T2,%lo(0x655b59c3),T2 !=
+ add T1,R12,T1
+ !pre-LOADed X(3),R3
+ add T1,T2,T1
+ add A,T1,A
+ sll A,6,T2 !=
+ srl A,32-6,A
+ or A,T2,A
+ add A,B,A
+
+ orn A,C,T1 !=
+ sethi %hi(0x8f0ccc92),T2
+ xor T1,B,T1
+ or T2,%lo(0x8f0ccc92),T2
+ add T1,R3,T1 !=
+ !pre-LOADed X(10),R10
+ add T1,T2,T1
+ add D,T1,D
+ sll D,10,T2
+ srl D,32-10,D !=
+ or D,T2,D
+ add D,A,D
+
+ orn D,B,T1
+ sethi %hi(0xffeff47d),T2 !=
+ xor T1,A,T1
+ or T2,%lo(0xffeff47d),T2
+ add T1,R10,T1
+ !pre-LOADed X(1),R1
+ add T1,T2,T1 !=
+ add C,T1,C
+ sll C,15,T2
+ srl C,32-15,C
+ or C,T2,C !=
+ add C,D,C
+
+ orn C,A,T1
+ sethi %hi(0x85845dd1),T2
+ xor T1,D,T1 !=
+ or T2,%lo(0x85845dd1),T2
+ add T1,R1,T1
+ !pre-LOADed X(8),R8
+ add T1,T2,T1
+ add B,T1,B !=
+ sll B,21,T2
+ srl B,32-21,B
+ or B,T2,B
+ add B,C,B !=
+
+ orn B,D,T1
+ sethi %hi(0x6fa87e4f),T2
+ xor T1,C,T1
+ or T2,%lo(0x6fa87e4f),T2 !=
+ add T1,R8,T1
+ LOAD X(15),RX
+ add T1,T2,T1
+ add A,T1,A !=
+ sll A,6,T2
+ srl A,32-6,A
+ or A,T2,A
+ add A,B,A !=
+
+ orn A,C,T1
+ sethi %hi(0xfe2ce6e0),T2
+ xor T1,B,T1
+ or T2,%lo(0xfe2ce6e0),T2 !=
+ add T1,RX,T1
+ !pre-LOADed X(6),R6
+ add T1,T2,T1
+ add D,T1,D
+ sll D,10,T2 !=
+ srl D,32-10,D
+ or D,T2,D
+ add D,A,D
+
+ orn D,B,T1 !=
+ sethi %hi(0xa3014314),T2
+ xor T1,A,T1
+ or T2,%lo(0xa3014314),T2
+ add T1,R6,T1 !=
+ !pre-LOADed X(13),R13
+ add T1,T2,T1
+ add C,T1,C
+ sll C,15,T2
+ srl C,32-15,C !=
+ or C,T2,C
+ add C,D,C
+
+ orn C,A,T1
+ sethi %hi(0x4e0811a1),T2 !=
+ xor T1,D,T1
+ or T2,%lo(0x4e0811a1),T2
+ !pre-LOADed X(4),R4
+ ld [Aptr],Aval
+ add T1,R13,T1 !=
+ add T1,T2,T1
+ add B,T1,B
+ sll B,21,T2
+ srl B,32-21,B !=
+ or B,T2,B
+ add B,C,B
+
+ orn B,D,T1
+ sethi %hi(0xf7537e82),T2 !=
+ xor T1,C,T1
+ or T2,%lo(0xf7537e82),T2
+ !pre-LOADed X(11),R11
+ ld [Dptr],Dval
+ add T1,R4,T1 !=
+ add T1,T2,T1
+ add A,T1,A
+ sll A,6,T2
+ srl A,32-6,A !=
+ or A,T2,A
+ add A,B,A
+
+ orn A,C,T1
+ sethi %hi(0xbd3af235),T2 !=
+ xor T1,B,T1
+ or T2,%lo(0xbd3af235),T2
+ !pre-LOADed X(2),R2
+ ld [Cptr],Cval
+ add T1,R11,T1 !=
+ add T1,T2,T1
+ add D,T1,D
+ sll D,10,T2
+ srl D,32-10,D !=
+ or D,T2,D
+ add D,A,D
+
+ orn D,B,T1
+ sethi %hi(0x2ad7d2bb),T2 !=
+ xor T1,A,T1
+ or T2,%lo(0x2ad7d2bb),T2
+ !pre-LOADed X(9),R9
+ ld [Bptr],Bval
+ add T1,R2,T1 !=
+ add Aval,A,Aval
+ add T1,T2,T1
+ st Aval,[Aptr]
+ add C,T1,C !=
+ sll C,15,T2
+ add Dval,D,Dval
+ srl C,32-15,C
+ or C,T2,C !=
+ st Dval,[Dptr]
+ add C,D,C
+
+ orn C,A,T1
+ sethi %hi(0xeb86d391),T2 !=
+ xor T1,D,T1
+ or T2,%lo(0xeb86d391),T2
+ add T1,R9,T1
+ !pre-LOADed X(0),R0
+ mov Aval,A !=
+ add T1,T2,T1
+ mov Dval,D
+ add B,T1,B
+ sll B,21,T2 !=
+ add Cval,C,Cval
+ srl B,32-21,B
+ st Cval,[Cptr]
+ or B,T2,B !=
+ add B,C,B
+
+ deccc %i2
+ mov Cval,C
+ add B,Bval,B !=
+ inc 64,%i1
+ nop
+ st B,[Bptr]
+ nop !=
+
+#ifdef OPENSSL_SYSNAME_ULTRASPARC
+ bg,a,pt %icc,.Lmd5_block_loop
+#else
+ bg,a .Lmd5_block_loop
+#endif
+ LOAD X(0),R0
+
+#ifdef ASI_PRIMARY_LITTLE
+ wr %g0,%o7,%asi
+#endif
+ ret
+ restore %g0,0,%o0
+
+.type md5_block,#function
+.size md5_block,(.-md5_block)
diff --git a/crypto/md5/asm/md5-x86_64.pl b/crypto/md5/asm/md5-x86_64.pl
new file mode 100755
index 0000000..c36a7fe
--- /dev/null
+++ b/crypto/md5/asm/md5-x86_64.pl
@@ -0,0 +1,245 @@
+#!/usr/bin/perl -w
+#
+# MD5 optimized for AMD64.
+#
+# Author: Marc Bevand <bevand_m (at) epita.fr>
+# Licence: I hereby disclaim the copyright on this code and place it
+# in the public domain.
+#
+
+use strict;
+
+my $code;
+
+# round1_step() does:
+# dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
+# %r10d = X[k_next]
+# %r11d = z' (copy of z for the next step)
+# Each round1_step() takes about 5.71 clocks (9 instructions, 1.58 IPC)
+sub round1_step
+{
+ my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+ $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
+ $code .= " mov %edx, %r11d /* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
+ $code .= <<EOF;
+ xor $y, %r11d /* y ^ ... */
+ lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
+ and $x, %r11d /* x & ... */
+ xor $z, %r11d /* z ^ ... */
+ mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
+ add %r11d, $dst /* dst += ... */
+ rol \$$s, $dst /* dst <<< s */
+ mov $y, %r11d /* (NEXT STEP) z' = $y */
+ add $x, $dst /* dst += x */
+EOF
+}
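
Note that the xor/and/xor sequence above never materialises ~x: it relies on the identity F(x,y,z) = (x & y) | (~x & z) = z ^ (x & (y ^ z)). The C fragment below is my own sketch of that equivalence (the names F_ref and F_asm are mine, not from this file); it checks the single-bit cases, which is sufficient for a bitwise function:

    #include <assert.h>
    #include <stdint.h>

    /* MD5 round-1 boolean function, textbook form from RFC 1321. */
    static uint32_t F_ref(uint32_t x, uint32_t y, uint32_t z)
    {
        return (x & y) | (~x & z);
    }

    /* Same function as computed by the xor/and/xor sequence above. */
    static uint32_t F_asm(uint32_t x, uint32_t y, uint32_t z)
    {
        return z ^ (x & (y ^ z));
    }

    int main(void)
    {
        uint32_t x, y, z;

        for (x = 0; x < 2; x++)
            for (y = 0; y < 2; y++)
                for (z = 0; z < 2; z++)
                    assert(F_ref(x, y, z) == F_asm(x, y, z));
        return 0;
    }
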
+
+# round2_step() does:
+# dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
+# %r10d = X[k_next]
+# %r11d = y' (copy of y for the next step)
+# Each round2_step() takes about 6.22 clocks (9 instructions, 1.45 IPC)
+sub round2_step
+{
+ my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+ $code .= " mov 1*4(%rsi), %r10d /* (NEXT STEP) X[1] */\n" if ($pos == -1);
+ $code .= " mov %ecx, %r11d /* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);
+ $code .= <<EOF;
+ xor $x, %r11d /* x ^ ... */
+ lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
+ and $z, %r11d /* z & ... */
+ xor $y, %r11d /* y ^ ... */
+ mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
+ add %r11d, $dst /* dst += ... */
+ rol \$$s, $dst /* dst <<< s */
+ mov $x, %r11d /* (NEXT STEP) y' = $x */
+ add $x, $dst /* dst += x */
+EOF
+}
+
+# round3_step() does:
+# dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
+# %r10d = X[k_next]
+# %r11d = y' (copy of y for the next step)
+# Each round3_step() takes about 4.26 clocks (8 instructions, 1.88 IPC)
+sub round3_step
+{
+ my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+ $code .= " mov 5*4(%rsi), %r10d /* (NEXT STEP) X[5] */\n" if ($pos == -1);
+ $code .= " mov %ecx, %r11d /* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);
+ $code .= <<EOF;
+ lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
+ mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
+ xor $z, %r11d /* z ^ ... */
+ xor $x, %r11d /* x ^ ... */
+ add %r11d, $dst /* dst += ... */
+ rol \$$s, $dst /* dst <<< s */
+ mov $x, %r11d /* (NEXT STEP) y' = $x */
+ add $x, $dst /* dst += x */
+EOF
+}
+
+# round4_step() does:
+# dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
+# %r10d = X[k_next]
+# %r11d = not z' (copy of not z for the next step)
+# Each round4_step() takes about 5.27 clocks (9 instructions, 1.71 IPC)
+sub round4_step
+{
+ my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+ $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
+ $code .= " mov \$0xffffffff, %r11d\n" if ($pos == -1);
+ $code .= " xor %edx, %r11d /* (NEXT STEP) not z' = not %edx*/\n"
+ if ($pos == -1);
+ $code .= <<EOF;
+ lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
+ or $x, %r11d /* x | ... */
+ xor $y, %r11d /* y ^ ... */
+ add %r11d, $dst /* dst += ... */
+ mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
+ mov \$0xffffffff, %r11d
+ rol \$$s, $dst /* dst <<< s */
+ xor $y, %r11d /* (NEXT STEP) not z' = not $y */
+ add $x, $dst /* dst += x */
+EOF
+}
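
For reference, the boolean functions named in the round2_step()/round3_step()/round4_step() comments, and the per-step update they all share, written out in C. This is a hedged restatement of the formulas quoted above, not code taken from this file; all identifiers below are my own:

    #include <stdint.h>

    static uint32_t G(uint32_t x, uint32_t y, uint32_t z) { return y ^ (z & (x ^ y)); } /* (x&z)|(y&~z) */
    static uint32_t H(uint32_t x, uint32_t y, uint32_t z) { return x ^ y ^ z; }
    static uint32_t I(uint32_t x, uint32_t y, uint32_t z) { return y ^ (x | ~z); }

    static uint32_t rotl32(uint32_t v, int s) { return (v << s) | (v >> (32 - s)); }

    /* dst = x + ((dst + f(x,y,z) + X[k] + T_i) <<< s), identical in every round. */
    static uint32_t md5_step(uint32_t dst, uint32_t x, uint32_t f,
                             uint32_t Xk, uint32_t Ti, int s)
    {
        return x + rotl32(dst + f + Xk + Ti, s);
    }
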
+
+my $output = shift;
+open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output";
+
+$code .= <<EOF;
+.text
+.align 16
+
+.globl md5_block_asm_host_order
+.type md5_block_asm_host_order,\@function,3
+md5_block_asm_host_order:
+ push %rbp
+ push %rbx
+ push %r14
+ push %r15
+
+ # rdi = arg #1 (ctx, MD5_CTX pointer)
+ # rsi = arg #2 (ptr, data pointer)
+ # rdx = arg #3 (nbr, number of 16-word blocks to process)
+ mov %rdi, %rbp # rbp = ctx
+ shl \$6, %rdx # rdx = nbr in bytes
+ lea (%rsi,%rdx), %rdi # rdi = end
+ mov 0*4(%rbp), %eax # eax = ctx->A
+ mov 1*4(%rbp), %ebx # ebx = ctx->B
+ mov 2*4(%rbp), %ecx # ecx = ctx->C
+ mov 3*4(%rbp), %edx # edx = ctx->D
+ # end is 'rdi'
+ # ptr is 'rsi'
+ # A is 'eax'
+ # B is 'ebx'
+ # C is 'ecx'
+ # D is 'edx'
+
+ cmp %rdi, %rsi # cmp end with ptr
+ je .Lend # jmp if ptr == end
+
+ # BEGIN of loop over 16-word blocks
+.Lloop: # save old values of A, B, C, D
+ mov %eax, %r8d
+ mov %ebx, %r9d
+ mov %ecx, %r14d
+ mov %edx, %r15d
+EOF
+round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
+round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
+round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
+round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
+round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
+round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
+round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
+round1_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x49b40821','22');
+
+round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
+round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
+round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
+round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
+round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
+round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
+round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
+round2_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x8d2a4c8a','20');
+
+round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
+round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
+round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
+round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
+round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
+round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
+round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
+round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');
+
+round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
+round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
+round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
+round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
+round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
+round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
+round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
+round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');
+$code .= <<EOF;
+ # add old values of A, B, C, D
+ add %r8d, %eax
+ add %r9d, %ebx
+ add %r14d, %ecx
+ add %r15d, %edx
+
+ # loop control
+ add \$64, %rsi # ptr += 64
+ cmp %rdi, %rsi # cmp end with ptr
+ jb .Lloop # jmp if ptr < end
+ # END of loop over 16-word blocks
+
+.Lend:
+ mov %eax, 0*4(%rbp) # ctx->A = A
+ mov %ebx, 1*4(%rbp) # ctx->B = B
+ mov %ecx, 2*4(%rbp) # ctx->C = C
+ mov %edx, 3*4(%rbp) # ctx->D = D
+
+ pop %r15
+ pop %r14
+ pop %rbx
+ pop %rbp
+ ret
+.size md5_block_asm_host_order,.-md5_block_asm_host_order
+EOF
+
+print $code;
+
+close STDOUT;
diff --git a/crypto/md5/md5.c b/crypto/md5/md5.c
new file mode 100644
index 0000000..563733a
--- /dev/null
+++ b/crypto/md5/md5.c
@@ -0,0 +1,127 @@
+/* crypto/md5/md5.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <openssl/md5.h>
+
+#define BUFSIZE 1024*16
+
+void do_fp(FILE *f);
+void pt(unsigned char *md);
+#if !defined(_OSD_POSIX) && !defined(__DJGPP__)
+int read(int, void *, unsigned int);
+#endif
+
+int main(int argc, char **argv)
+ {
+ int i,err=0;
+ FILE *IN;
+
+ if (argc == 1)
+ {
+ do_fp(stdin);
+ }
+ else
+ {
+ for (i=1; i<argc; i++)
+ {
+ IN=fopen(argv[i],"r");
+ if (IN == NULL)
+ {
+ perror(argv[i]);
+ err++;
+ continue;
+ }
+ printf("MD5(%s)= ",argv[i]);
+ do_fp(IN);
+ fclose(IN);
+ }
+ }
+ exit(err);
+ }
+
+void do_fp(FILE *f)
+ {
+ MD5_CTX c;
+ unsigned char md[MD5_DIGEST_LENGTH];
+ int fd;
+ int i;
+ static unsigned char buf[BUFSIZE];
+
+ fd=fileno(f);
+ MD5_Init(&c);
+ for (;;)
+ {
+ i=read(fd,buf,BUFSIZE);
+ if (i <= 0) break;
+ MD5_Update(&c,buf,(unsigned long)i);
+ }
+ MD5_Final(&(md[0]),&c);
+ pt(md);
+ }
+
+void pt(unsigned char *md)
+ {
+ int i;
+
+ for (i=0; i<MD5_DIGEST_LENGTH; i++)
+ printf("%02x",md[i]);
+ printf("\n");
+ }
+
diff --git a/crypto/md5/md5.h b/crypto/md5/md5.h
new file mode 100644
index 0000000..dbdc0e1
--- /dev/null
+++ b/crypto/md5/md5.h
@@ -0,0 +1,117 @@
+/* crypto/md5/md5.h */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#ifndef HEADER_MD5_H
+#define HEADER_MD5_H
+
+#include <openssl/e_os2.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef OPENSSL_NO_MD5
+#error MD5 is disabled.
+#endif
+
+/*
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ * ! MD5_LONG has to be at least 32 bits wide. If it's wider, then !
+ * ! MD5_LONG_LOG2 has to be defined along. !
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ */
+
+#if defined(OPENSSL_SYS_WIN16) || defined(__LP32__)
+#define MD5_LONG unsigned long
+#elif defined(OPENSSL_SYS_CRAY) || defined(__ILP64__)
+#define MD5_LONG unsigned long
+#define MD5_LONG_LOG2 3
+/*
+ * _CRAY note. I could declare short, but I have no idea what impact
+ * it has on performance on non-T3E machines. I could declare
+ * int, but at least on C90 sizeof(int) can be chosen at compile time.
+ * So I've chosen long...
+ * <appro@fy.chalmers.se>
+ */
+#else
+#define MD5_LONG unsigned int
+#endif
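
The banner comment above only states the requirement that MD5_LONG be at least 32 bits wide; a consumer of this header can also enforce it at compile time. The one-liner below is my own illustration (not part of md5.h), using the classic negative-array-size trick:

    #include <openssl/md5.h>

    /* Compilation fails here if MD5_LONG is narrower than 4 bytes. */
    typedef char md5_long_is_wide_enough[(sizeof(MD5_LONG) >= 4) ? 1 : -1];
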
+
+#define MD5_CBLOCK 64
+#define MD5_LBLOCK (MD5_CBLOCK/4)
+#define MD5_DIGEST_LENGTH 16
+
+typedef struct MD5state_st
+ {
+ MD5_LONG A,B,C,D;
+ MD5_LONG Nl,Nh;
+ MD5_LONG data[MD5_LBLOCK];
+ unsigned int num;
+ } MD5_CTX;
+
+int MD5_Init(MD5_CTX *c);
+int MD5_Update(MD5_CTX *c, const void *data, size_t len);
+int MD5_Final(unsigned char *md, MD5_CTX *c);
+unsigned char *MD5(const unsigned char *d, size_t n, unsigned char *md);
+void MD5_Transform(MD5_CTX *c, const unsigned char *b);
+#ifdef __cplusplus
+}
+#endif
+
+#endif
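
The declarations above form the usual init/update/final interface (md5.c earlier in this diff is the in-tree command-line example that streams a file through it). The following is a minimal, self-contained usage sketch of my own, hashing an in-memory buffer; the expected digest is the RFC 1321 test vector for "abc":

    #include <stdio.h>
    #include <string.h>
    #include <openssl/md5.h>

    int main(void)
    {
        static const char msg[] = "abc";
        unsigned char md[MD5_DIGEST_LENGTH];
        MD5_CTX c;
        int i;

        MD5_Init(&c);
        MD5_Update(&c, msg, strlen(msg));   /* may be called repeatedly for streaming input */
        MD5_Final(md, &c);

        for (i = 0; i < MD5_DIGEST_LENGTH; i++)
            printf("%02x", md[i]);
        printf("\n");                       /* prints 900150983cd24fb0d6963f7d28e17f72 */
        return 0;
    }
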
diff --git a/crypto/md5/md5_dgst.c b/crypto/md5/md5_dgst.c
new file mode 100644
index 0000000..953f049
--- /dev/null
+++ b/crypto/md5/md5_dgst.c
@@ -0,0 +1,292 @@
+/* crypto/md5/md5_dgst.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include "md5_locl.h"
+#include <openssl/opensslv.h>
+
+const char MD5_version[]="MD5" OPENSSL_VERSION_PTEXT;
+
+/* Implemented from RFC1321 The MD5 Message-Digest Algorithm
+ */
+
+#define INIT_DATA_A (unsigned long)0x67452301L
+#define INIT_DATA_B (unsigned long)0xefcdab89L
+#define INIT_DATA_C (unsigned long)0x98badcfeL
+#define INIT_DATA_D (unsigned long)0x10325476L
+
+int MD5_Init(MD5_CTX *c)
+ {
+ c->A=INIT_DATA_A;
+ c->B=INIT_DATA_B;
+ c->C=INIT_DATA_C;
+ c->D=INIT_DATA_D;
+ c->Nl=0;
+ c->Nh=0;
+ c->num=0;
+ return 1;
+ }
+
+#ifndef md5_block_host_order
+void md5_block_host_order (MD5_CTX *c, const void *data, size_t num)
+ {
+ const MD5_LONG *X=data;
+ register unsigned MD32_REG_T A,B,C,D;
+
+ A=c->A;
+ B=c->B;
+ C=c->C;
+ D=c->D;
+
+ for (;num--;X+=HASH_LBLOCK)
+ {
+ /* Round 0 */
+ R0(A,B,C,D,X[ 0], 7,0xd76aa478L);
+ R0(D,A,B,C,X[ 1],12,0xe8c7b756L);
+ R0(C,D,A,B,X[ 2],17,0x242070dbL);
+ R0(B,C,D,A,X[ 3],22,0xc1bdceeeL);
+ R0(A,B,C,D,X[ 4], 7,0xf57c0fafL);
+ R0(D,A,B,C,X[ 5],12,0x4787c62aL);
+ R0(C,D,A,B,X[ 6],17,0xa8304613L);
+ R0(B,C,D,A,X[ 7],22,0xfd469501L);
+ R0(A,B,C,D,X[ 8], 7,0x698098d8L);
+ R0(D,A,B,C,X[ 9],12,0x8b44f7afL);
+ R0(C,D,A,B,X[10],17,0xffff5bb1L);
+ R0(B,C,D,A,X[11],22,0x895cd7beL);
+ R0(A,B,C,D,X[12], 7,0x6b901122L);
+ R0(D,A,B,C,X[13],12,0xfd987193L);
+ R0(C,D,A,B,X[14],17,0xa679438eL);
+ R0(B,C,D,A,X[15],22,0x49b40821L);
+ /* Round 1 */
+ R1(A,B,C,D,X[ 1], 5,0xf61e2562L);
+ R1(D,A,B,C,X[ 6], 9,0xc040b340L);
+ R1(C,D,A,B,X[11],14,0x265e5a51L);
+ R1(B,C,D,A,X[ 0],20,0xe9b6c7aaL);
+ R1(A,B,C,D,X[ 5], 5,0xd62f105dL);
+ R1(D,A,B,C,X[10], 9,0x02441453L);
+ R1(C,D,A,B,X[15],14,0xd8a1e681L);
+ R1(B,C,D,A,X[ 4],20,0xe7d3fbc8L);
+ R1(A,B,C,D,X[ 9], 5,0x21e1cde6L);
+ R1(D,A,B,C,X[14], 9,0xc33707d6L);
+ R1(C,D,A,B,X[ 3],14,0xf4d50d87L);
+ R1(B,C,D,A,X[ 8],20,0x455a14edL);
+ R1(A,B,C,D,X[13], 5,0xa9e3e905L);
+ R1(D,A,B,C,X[ 2], 9,0xfcefa3f8L);
+ R1(C,D,A,B,X[ 7],14,0x676f02d9L);
+ R1(B,C,D,A,X[12],20,0x8d2a4c8aL);
+ /* Round 2 */
+ R2(A,B,C,D,X[ 5], 4,0xfffa3942L);
+ R2(D,A,B,C,X[ 8],11,0x8771f681L);
+ R2(C,D,A,B,X[11],16,0x6d9d6122L);
+ R2(B,C,D,A,X[14],23,0xfde5380cL);
+ R2(A,B,C,D,X[ 1], 4,0xa4beea44L);
+ R2(D,A,B,C,X[ 4],11,0x4bdecfa9L);
+ R2(C,D,A,B,X[ 7],16,0xf6bb4b60L);
+ R2(B,C,D,A,X[10],23,0xbebfbc70L);
+ R2(A,B,C,D,X[13], 4,0x289b7ec6L);
+ R2(D,A,B,C,X[ 0],11,0xeaa127faL);
+ R2(C,D,A,B,X[ 3],16,0xd4ef3085L);
+ R2(B,C,D,A,X[ 6],23,0x04881d05L);
+ R2(A,B,C,D,X[ 9], 4,0xd9d4d039L);
+ R2(D,A,B,C,X[12],11,0xe6db99e5L);
+ R2(C,D,A,B,X[15],16,0x1fa27cf8L);
+ R2(B,C,D,A,X[ 2],23,0xc4ac5665L);
+ /* Round 3 */
+ R3(A,B,C,D,X[ 0], 6,0xf4292244L);
+ R3(D,A,B,C,X[ 7],10,0x432aff97L);
+ R3(C,D,A,B,X[14],15,0xab9423a7L);
+ R3(B,C,D,A,X[ 5],21,0xfc93a039L);
+ R3(A,B,C,D,X[12], 6,0x655b59c3L);
+ R3(D,A,B,C,X[ 3],10,0x8f0ccc92L);
+ R3(C,D,A,B,X[10],15,0xffeff47dL);
+ R3(B,C,D,A,X[ 1],21,0x85845dd1L);
+ R3(A,B,C,D,X[ 8], 6,0x6fa87e4fL);
+ R3(D,A,B,C,X[15],10,0xfe2ce6e0L);
+ R3(C,D,A,B,X[ 6],15,0xa3014314L);
+ R3(B,C,D,A,X[13],21,0x4e0811a1L);
+ R3(A,B,C,D,X[ 4], 6,0xf7537e82L);
+ R3(D,A,B,C,X[11],10,0xbd3af235L);
+ R3(C,D,A,B,X[ 2],15,0x2ad7d2bbL);
+ R3(B,C,D,A,X[ 9],21,0xeb86d391L);
+
+ A = c->A += A;
+ B = c->B += B;
+ C = c->C += C;
+ D = c->D += D;
+ }
+ }
+#endif
+
+#ifndef md5_block_data_order
+#ifdef X
+#undef X
+#endif
+void md5_block_data_order (MD5_CTX *c, const void *data_, size_t num)
+ {
+ const unsigned char *data=data_;
+ register unsigned MD32_REG_T A,B,C,D,l;
+#ifndef MD32_XARRAY
+ /* See comment in crypto/sha/sha_locl.h for details. */
+ unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
+ XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
+# define X(i) XX##i
+#else
+ MD5_LONG XX[MD5_LBLOCK];
+# define X(i) XX[i]
+#endif
+
+ A=c->A;
+ B=c->B;
+ C=c->C;
+ D=c->D;
+
+ for (;num--;)
+ {
+ HOST_c2l(data,l); X( 0)=l; HOST_c2l(data,l); X( 1)=l;
+ /* Round 0 */
+ R0(A,B,C,D,X( 0), 7,0xd76aa478L); HOST_c2l(data,l); X( 2)=l;
+ R0(D,A,B,C,X( 1),12,0xe8c7b756L); HOST_c2l(data,l); X( 3)=l;
+ R0(C,D,A,B,X( 2),17,0x242070dbL); HOST_c2l(data,l); X( 4)=l;
+ R0(B,C,D,A,X( 3),22,0xc1bdceeeL); HOST_c2l(data,l); X( 5)=l;
+ R0(A,B,C,D,X( 4), 7,0xf57c0fafL); HOST_c2l(data,l); X( 6)=l;
+ R0(D,A,B,C,X( 5),12,0x4787c62aL); HOST_c2l(data,l); X( 7)=l;
+ R0(C,D,A,B,X( 6),17,0xa8304613L); HOST_c2l(data,l); X( 8)=l;
+ R0(B,C,D,A,X( 7),22,0xfd469501L); HOST_c2l(data,l); X( 9)=l;
+ R0(A,B,C,D,X( 8), 7,0x698098d8L); HOST_c2l(data,l); X(10)=l;
+ R0(D,A,B,C,X( 9),12,0x8b44f7afL); HOST_c2l(data,l); X(11)=l;
+ R0(C,D,A,B,X(10),17,0xffff5bb1L); HOST_c2l(data,l); X(12)=l;
+ R0(B,C,D,A,X(11),22,0x895cd7beL); HOST_c2l(data,l); X(13)=l;
+ R0(A,B,C,D,X(12), 7,0x6b901122L); HOST_c2l(data,l); X(14)=l;
+ R0(D,A,B,C,X(13),12,0xfd987193L); HOST_c2l(data,l); X(15)=l;
+ R0(C,D,A,B,X(14),17,0xa679438eL);
+ R0(B,C,D,A,X(15),22,0x49b40821L);
+ /* Round 1 */
+ R1(A,B,C,D,X( 1), 5,0xf61e2562L);
+ R1(D,A,B,C,X( 6), 9,0xc040b340L);
+ R1(C,D,A,B,X(11),14,0x265e5a51L);
+ R1(B,C,D,A,X( 0),20,0xe9b6c7aaL);
+ R1(A,B,C,D,X( 5), 5,0xd62f105dL);
+ R1(D,A,B,C,X(10), 9,0x02441453L);
+ R1(C,D,A,B,X(15),14,0xd8a1e681L);
+ R1(B,C,D,A,X( 4),20,0xe7d3fbc8L);
+ R1(A,B,C,D,X( 9), 5,0x21e1cde6L);
+ R1(D,A,B,C,X(14), 9,0xc33707d6L);
+ R1(C,D,A,B,X( 3),14,0xf4d50d87L);
+ R1(B,C,D,A,X( 8),20,0x455a14edL);
+ R1(A,B,C,D,X(13), 5,0xa9e3e905L);
+ R1(D,A,B,C,X( 2), 9,0xfcefa3f8L);
+ R1(C,D,A,B,X( 7),14,0x676f02d9L);
+ R1(B,C,D,A,X(12),20,0x8d2a4c8aL);
+ /* Round 2 */
+ R2(A,B,C,D,X( 5), 4,0xfffa3942L);
+ R2(D,A,B,C,X( 8),11,0x8771f681L);
+ R2(C,D,A,B,X(11),16,0x6d9d6122L);
+ R2(B,C,D,A,X(14),23,0xfde5380cL);
+ R2(A,B,C,D,X( 1), 4,0xa4beea44L);
+ R2(D,A,B,C,X( 4),11,0x4bdecfa9L);
+ R2(C,D,A,B,X( 7),16,0xf6bb4b60L);
+ R2(B,C,D,A,X(10),23,0xbebfbc70L);
+ R2(A,B,C,D,X(13), 4,0x289b7ec6L);
+ R2(D,A,B,C,X( 0),11,0xeaa127faL);
+ R2(C,D,A,B,X( 3),16,0xd4ef3085L);
+ R2(B,C,D,A,X( 6),23,0x04881d05L);
+ R2(A,B,C,D,X( 9), 4,0xd9d4d039L);
+ R2(D,A,B,C,X(12),11,0xe6db99e5L);
+ R2(C,D,A,B,X(15),16,0x1fa27cf8L);
+ R2(B,C,D,A,X( 2),23,0xc4ac5665L);
+ /* Round 3 */
+ R3(A,B,C,D,X( 0), 6,0xf4292244L);
+ R3(D,A,B,C,X( 7),10,0x432aff97L);
+ R3(C,D,A,B,X(14),15,0xab9423a7L);
+ R3(B,C,D,A,X( 5),21,0xfc93a039L);
+ R3(A,B,C,D,X(12), 6,0x655b59c3L);
+ R3(D,A,B,C,X( 3),10,0x8f0ccc92L);
+ R3(C,D,A,B,X(10),15,0xffeff47dL);
+ R3(B,C,D,A,X( 1),21,0x85845dd1L);
+ R3(A,B,C,D,X( 8), 6,0x6fa87e4fL);
+ R3(D,A,B,C,X(15),10,0xfe2ce6e0L);
+ R3(C,D,A,B,X( 6),15,0xa3014314L);
+ R3(B,C,D,A,X(13),21,0x4e0811a1L);
+ R3(A,B,C,D,X( 4), 6,0xf7537e82L);
+ R3(D,A,B,C,X(11),10,0xbd3af235L);
+ R3(C,D,A,B,X( 2),15,0x2ad7d2bbL);
+ R3(B,C,D,A,X( 9),21,0xeb86d391L);
+
+ A = c->A += A;
+ B = c->B += B;
+ C = c->C += C;
+ D = c->D += D;
+ }
+ }
+#endif
+
+#ifdef undef
+int printit(unsigned long *l)
+ {
+ int i,ii;
+
+ for (i=0; i<2; i++)
+ {
+ for (ii=0; ii<8; ii++)
+ {
+ fprintf(stderr,"%08lx ",l[i*8+ii]);
+ }
+ fprintf(stderr,"\n");
+ }
+ }
+#endif
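md5_block_data_order above assembles each 32-bit message word from four input bytes, least-significant byte first, via HOST_c2l, which comes from md32_common.h and is not part of this diff. As a rough sketch only (host_c2l_sketch and MD5_WORD are local names invented here, not OpenSSL ones), such a load with the post-incrementing pointer the loop relies on could look like this:

    #include <stdio.h>

    typedef unsigned int MD5_WORD;   /* stand-in for MD5_LONG; assumes a 32-bit int */

    /* Assemble one little-endian 32-bit word and advance the byte pointer,
     * roughly what the HOST_c2l macro is expected to do. */
    static MD5_WORD host_c2l_sketch(const unsigned char **p)
    {
        const unsigned char *c = *p;
        MD5_WORD l = (MD5_WORD)c[0]
                   | ((MD5_WORD)c[1] << 8)
                   | ((MD5_WORD)c[2] << 16)
                   | ((MD5_WORD)c[3] << 24);
        *p = c + 4;
        return l;
    }

    int main(void)
    {
        const unsigned char buf[4] = { 0x78, 0x56, 0x34, 0x12 };
        const unsigned char *p = buf;
        printf("%08x\n", host_c2l_sketch(&p));   /* prints 12345678 */
        return 0;
    }

md5_block_host_order skips this reassembly and reads the words directly, which is why the two entry points collapse into one on little-endian x86 (see md5_locl.h below).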
diff --git a/crypto/md5/md5_locl.h b/crypto/md5/md5_locl.h
new file mode 100644
index 0000000..94f395f
--- /dev/null
+++ b/crypto/md5/md5_locl.h
@@ -0,0 +1,176 @@
+/* crypto/md5/md5_locl.h */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscape's SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are adhered to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the routines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publicly available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <openssl/e_os2.h>
+#include <openssl/md5.h>
+
+#ifndef MD5_LONG_LOG2
+#define MD5_LONG_LOG2 2 /* default to 32 bits */
+#endif
+
+#ifdef MD5_ASM
+# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || defined(__x86_64) || defined(__x86_64__)
+# if !defined(B_ENDIAN)
+# define md5_block_host_order md5_block_asm_host_order
+# endif
+# elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
+ void md5_block_asm_data_order_aligned (MD5_CTX *c, const MD5_LONG *p,size_t num);
+# define HASH_BLOCK_DATA_ORDER_ALIGNED md5_block_asm_data_order_aligned
+# endif
+#endif
+
+void md5_block_host_order (MD5_CTX *c, const void *p,size_t num);
+void md5_block_data_order (MD5_CTX *c, const void *p,size_t num);
+
+#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || defined(__x86_64) || defined(__x86_64__)
+# if !defined(B_ENDIAN)
+/*
+ * *_block_host_order is expected to handle aligned data, while
+ * *_block_data_order handles unaligned data.  As the algorithm and
+ * the host (x86) have the same endianness in this case, the two
+ * routines are otherwise indistinguishable.  Normally you would not
+ * want to call the same function for both, because unaligned access
+ * in places where alignment is expected is usually a "Bad Thing".
+ * Indeed, on RISC machines you get punished with a bus error
+ * (SIGBUS) or *severe* performance degradation.  Intel CPUs, by
+ * contrast, load unaligned data without such drastic side effects:
+ * an unaligned load is slower than an aligned one, but no exception
+ * is generated, so the degradation is nowhere near what RISC
+ * suffers.  What we should weigh here is the cost of unaligned
+ * access against the cost of aligning the data.  According to my
+ * measurements, allowing unaligned access results in a ~9%
+ * performance improvement on a Pentium II running at 266MHz.  I
+ * won't be surprised if the difference is higher on faster
+ * systems:-)
+ *
+ * <appro@fy.chalmers.se>
+ */
+# define md5_block_data_order md5_block_host_order
+# endif
+#endif
+
+#define DATA_ORDER_IS_LITTLE_ENDIAN
+
+#define HASH_LONG MD5_LONG
+#define HASH_LONG_LOG2 MD5_LONG_LOG2
+#define HASH_CTX MD5_CTX
+#define HASH_CBLOCK MD5_CBLOCK
+#define HASH_LBLOCK MD5_LBLOCK
+#define HASH_UPDATE MD5_Update
+#define HASH_TRANSFORM MD5_Transform
+#define HASH_FINAL MD5_Final
+#define HASH_MAKE_STRING(c,s) do { \
+ unsigned long ll; \
+ ll=(c)->A; HOST_l2c(ll,(s)); \
+ ll=(c)->B; HOST_l2c(ll,(s)); \
+ ll=(c)->C; HOST_l2c(ll,(s)); \
+ ll=(c)->D; HOST_l2c(ll,(s)); \
+ } while (0)
+#define HASH_BLOCK_HOST_ORDER md5_block_host_order
+#if !defined(L_ENDIAN) || defined(md5_block_data_order)
+#define HASH_BLOCK_DATA_ORDER md5_block_data_order
+/*
+ * Little-endian hosts (Intel and Alpha) do better without this.
+ * It looks like memcpy does a better job than the generic
+ * md5_block_data_order at copying and aligning the input data.
+ * Frankly, I did not expect such a result on Alpha.  On the other
+ * hand, I got this with egcs-1.0.2, and if the program is compiled
+ * with another (better?) compiler it might well turn out the other
+ * way around.
+ *
+ * <appro@fy.chalmers.se>
+ */
+#endif
+
+#include "md32_common.h"
+
+/*
+#define F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
+#define G(x,y,z) (((x) & (z)) | ((y) & (~(z))))
+*/
+
+/* As pointed out by Wei Dai <weidai@eskimo.com>, the above can be
+ * simplified to the code below. Wei attributes these optimizations
+ * to Peter Gutmann's SHS code, and he attributes them to Rich Schroeppel.
+ */
+#define F(b,c,d) ((((c) ^ (d)) & (b)) ^ (d))
+#define G(b,c,d) ((((b) ^ (c)) & (d)) ^ (c))
+#define H(b,c,d) ((b) ^ (c) ^ (d))
+#define I(b,c,d) (((~(d)) | (b)) ^ (c))
+
+#define R0(a,b,c,d,k,s,t) { \
+ a+=((k)+(t)+F((b),(c),(d))); \
+ a=ROTATE(a,s); \
+	a+=b; };
+
+#define R1(a,b,c,d,k,s,t) { \
+ a+=((k)+(t)+G((b),(c),(d))); \
+ a=ROTATE(a,s); \
+ a+=b; };
+
+#define R2(a,b,c,d,k,s,t) { \
+ a+=((k)+(t)+H((b),(c),(d))); \
+ a=ROTATE(a,s); \
+ a+=b; };
+
+#define R3(a,b,c,d,k,s,t) { \
+ a+=((k)+(t)+I((b),(c),(d))); \
+ a=ROTATE(a,s); \
+ a+=b; };
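The simplified F and G above are pure bitwise identities, so their equivalence to the commented-out textbook forms can be checked exhaustively on a single bit. A throwaway verification (F_REF, F_OPT and friends are names local to this sketch, not part of the header):

    #include <assert.h>
    #include <stdio.h>

    /* Textbook MD5 definitions (RFC 1321) */
    #define F_REF(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
    #define G_REF(x,y,z) (((x) & (z)) | ((y) & (~(z))))

    /* Simplified forms from md5_locl.h */
    #define F_OPT(b,c,d) ((((c) ^ (d)) & (b)) ^ (d))
    #define G_OPT(b,c,d) ((((b) ^ (c)) & (d)) ^ (c))

    int main(void)
    {
        unsigned b, c, d;

        /* One bit position is enough; the operators are all bitwise. */
        for (b = 0; b < 2; b++)
            for (c = 0; c < 2; c++)
                for (d = 0; d < 2; d++) {
                    assert((F_REF(b, c, d) & 1) == (F_OPT(b, c, d) & 1));
                    assert((G_REF(b, c, d) & 1) == (G_OPT(b, c, d) & 1));
                }
        printf("F and G simplifications hold\n");
        return 0;
    }

The rewritten forms need one fewer boolean operation per invocation and avoid the NOT, which adds up over the 32 round steps that use F and G.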
diff --git a/crypto/md5/md5_one.c b/crypto/md5/md5_one.c
new file mode 100644
index 0000000..43fee89
--- /dev/null
+++ b/crypto/md5/md5_one.c
@@ -0,0 +1,97 @@
+/* crypto/md5/md5_one.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscape's SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are adhered to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the routines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publicly available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <openssl/md5.h>
+#include <openssl/crypto.h>
+
+#ifdef CHARSET_EBCDIC
+#include <openssl/ebcdic.h>
+#endif
+
+unsigned char *MD5(const unsigned char *d, size_t n, unsigned char *md)
+ {
+ MD5_CTX c;
+ static unsigned char m[MD5_DIGEST_LENGTH];
+
+ if (md == NULL) md=m;
+ if (!MD5_Init(&c))
+ return NULL;
+#ifndef CHARSET_EBCDIC
+ MD5_Update(&c,d,n);
+#else
+ {
+ char temp[1024];
+ unsigned long chunk;
+
+ while (n > 0)
+ {
+ chunk = (n > sizeof(temp)) ? sizeof(temp) : n;
+ ebcdic2ascii(temp, d, chunk);
+ MD5_Update(&c,temp,chunk);
+ n -= chunk;
+ d += chunk;
+ }
+ }
+#endif
+ MD5_Final(md,&c);
+ OPENSSL_cleanse(&c,sizeof(c)); /* security consideration */
+ return(md);
+ }
+
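For reference, a minimal caller of the one-shot MD5() defined above; the expected output for "abc" is the third test vector in md5test.c later in this diff. Passing a caller-owned buffer avoids the static fallback used when md is NULL, which is not reentrant. Link against libcrypto (e.g. -lcrypto).

    #include <stdio.h>
    #include <openssl/md5.h>

    int main(void)
    {
        unsigned char md[MD5_DIGEST_LENGTH];
        int i;

        MD5((const unsigned char *)"abc", 3, md);
        for (i = 0; i < MD5_DIGEST_LENGTH; i++)
            printf("%02x", md[i]);
        printf("\n");               /* expected: 900150983cd24fb0d6963f7d28e17f72 */
        return 0;
    }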
diff --git a/crypto/md5/md5s.cpp b/crypto/md5/md5s.cpp
new file mode 100644
index 0000000..dd343fd
--- /dev/null
+++ b/crypto/md5/md5s.cpp
@@ -0,0 +1,78 @@
+//
+// gettsc.inl
+//
+// gives access to the Pentium's (secret) cycle counter
+//
+// This software was written by Leonard Janke (janke@unixg.ubc.ca)
+// in 1996-7 and is entered, by him, into the public domain.
+
+#if defined(__WATCOMC__)
+void GetTSC(unsigned long&);
+#pragma aux GetTSC = 0x0f 0x31 "mov [edi], eax" parm [edi] modify [edx eax];
+#elif defined(__GNUC__)
+inline
+void GetTSC(unsigned long& tsc)
+{
+  asm volatile(".byte 15, 49\n\t"   /* 0x0F 0x31 == rdtsc */
+               : "=a" (tsc)         /* low 32 bits returned in EAX */
+               :
+               : "edx");            /* rdtsc also writes EDX */
+}
+#elif defined(_MSC_VER)
+inline
+void GetTSC(unsigned long& tsc)
+{
+ unsigned long a;
+ __asm _emit 0fh
+ __asm _emit 31h
+ __asm mov a, eax;
+ tsc=a;
+}
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <openssl/md5.h>
+
+extern "C" {
+void md5_block_x86(MD5_CTX *ctx, unsigned char *buffer,int num);
+}
+
+int main(int argc,char *argv[])
+ {
+ unsigned char buffer[64*256];
+ MD5_CTX ctx;
+ unsigned long s1,s2,e1,e2;
+ unsigned char k[16];
+ unsigned long data[2];
+ unsigned char iv[8];
+ int i,num=0,numm;
+ int j=0;
+
+ if (argc >= 2)
+ num=atoi(argv[1]);
+
+ if (num == 0) num=16;
+ if (num > 250) num=16;
+ numm=num+2;
+ num*=64;
+ numm*=64;
+
+ for (j=0; j<6; j++)
+ {
+ for (i=0; i<10; i++) /**/
+ {
+ md5_block_x86(&ctx,buffer,numm);
+ GetTSC(s1);
+ md5_block_x86(&ctx,buffer,numm);
+ GetTSC(e1);
+ GetTSC(s2);
+ md5_block_x86(&ctx,buffer,num);
+ GetTSC(e2);
+ md5_block_x86(&ctx,buffer,num);
+ }
+ printf("md5 (%d bytes) %d %d (%.2f)\n",num,
+ e1-s1,e2-s2,(double)((e1-s1)-(e2-s2))/2);
+ }
+ }
+
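The ".byte 15, 49" sequence in md5s.cpp is simply the RDTSC opcode (0x0F 0x31) spelled out for assemblers that predate the mnemonic, and GetTSC keeps only the low 32 bits from EAX. Compilers that ship <x86intrin.h> (GCC/Clang) or <intrin.h> (MSVC) expose the same counter through the __rdtsc() intrinsic; a sketch of an equivalent 64-bit reader, assuming an x86/x86-64 target:

    #include <stdio.h>

    #if defined(_MSC_VER)
    # include <intrin.h>
    #else
    # include <x86intrin.h>          /* GCC/Clang on x86/x86-64 */
    #endif

    /* Same idea as GetTSC() above, but returning the full 64-bit counter. */
    static unsigned long long get_tsc(void)
    {
        return __rdtsc();
    }

    int main(void)
    {
        unsigned long long s = get_tsc();
        /* ...code under measurement goes here... */
        unsigned long long e = get_tsc();
        printf("elapsed cycles: %llu\n", e - s);
        return 0;
    }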
diff --git a/crypto/md5/md5test.c b/crypto/md5/md5test.c
new file mode 100644
index 0000000..0628053
--- /dev/null
+++ b/crypto/md5/md5test.c
@@ -0,0 +1,140 @@
+/* crypto/md5/md5test.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscape's SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are adhered to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the routines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publicly available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "../e_os.h"
+
+#ifdef OPENSSL_NO_MD5
+int main(int argc, char *argv[])
+{
+ printf("No MD5 support\n");
+ return(0);
+}
+#else
+#include <openssl/evp.h>
+#include <openssl/md5.h>
+
+static char *test[]={
+ "",
+ "a",
+ "abc",
+ "message digest",
+ "abcdefghijklmnopqrstuvwxyz",
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
+ "12345678901234567890123456789012345678901234567890123456789012345678901234567890",
+ NULL,
+ };
+
+static char *ret[]={
+ "d41d8cd98f00b204e9800998ecf8427e",
+ "0cc175b9c0f1b6a831c399e269772661",
+ "900150983cd24fb0d6963f7d28e17f72",
+ "f96b697d7cb7938d525a2f31aaf161d0",
+ "c3fcd3d76192e4007dfb496cca67e13b",
+ "d174ab98d277d9f5a5611c2c9f419d9f",
+ "57edf4a22be3c955ac49da2e2107b67a",
+ };
+
+static char *pt(unsigned char *md);
+int main(int argc, char *argv[])
+ {
+ int i,err=0;
+ unsigned char **P,**R;
+ char *p;
+ unsigned char md[MD5_DIGEST_LENGTH];
+
+ P=(unsigned char **)test;
+ R=(unsigned char **)ret;
+ i=1;
+ while (*P != NULL)
+ {
+ EVP_Digest(&(P[0][0]),strlen((char *)*P),md,NULL,EVP_md5(), NULL);
+ p=pt(md);
+ if (strcmp(p,(char *)*R) != 0)
+ {
+ printf("error calculating MD5 on '%s'\n",*P);
+ printf("got %s instead of %s\n",p,*R);
+ err++;
+ }
+ else
+ printf("test %d ok\n",i);
+ i++;
+ R++;
+ P++;
+ }
+
+#ifdef OPENSSL_SYS_NETWARE
+ if (err) printf("ERROR: %d\n", err);
+#endif
+ EXIT(err);
+ return(0);
+ }
+
+static char *pt(unsigned char *md)
+ {
+ int i;
+ static char buf[80];
+
+ for (i=0; i<MD5_DIGEST_LENGTH; i++)
+ sprintf(&(buf[i*2]),"%02x",md[i]);
+ return(buf);
+ }
+#endif
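md5test.c runs the RFC 1321 test vectors through the generic EVP interface; the same digests fall out of the incremental MD5_Init/MD5_Update/MD5_Final API implemented in this directory, which is the natural choice when the input arrives in pieces. A minimal sketch, using the fourth vector above ("message digest"):

    #include <stdio.h>
    #include <string.h>
    #include <openssl/md5.h>

    int main(void)
    {
        MD5_CTX c;
        unsigned char md[MD5_DIGEST_LENGTH];
        const char *part1 = "message ";
        const char *part2 = "digest";
        int i;

        if (!MD5_Init(&c))
            return 1;
        MD5_Update(&c, part1, strlen(part1));
        MD5_Update(&c, part2, strlen(part2));
        MD5_Final(md, &c);

        for (i = 0; i < MD5_DIGEST_LENGTH; i++)
            printf("%02x", md[i]);
        printf("\n");               /* expected: f96b697d7cb7938d525a2f31aaf161d0 */
        return 0;
    }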