author     wollman <wollman@FreeBSD.org>  1999-02-26 18:41:47 +0000
committer  wollman <wollman@FreeBSD.org>  1999-02-26 18:41:47 +0000
commit     db4b7a727b6b70ad291fe1dd62d4ce4b5ac85fcb (patch)
tree       1bdd07fdb8f0374d02ad2a36e84752eb4c9babe0 /lib
parent     663aa4f0a8d13099acb684f0e521bc3698ffc0b8 (diff)
Add Eric Young's RIPEMD160 implementation as well, in case SHA-1
should prove weak. Also fix a few problems with the SHA-1 build.
Diffstat (limited to 'lib')
-rw-r--r--  lib/libmd/Makefile         40
-rw-r--r--  lib/libmd/i386/rmd160.S    2016
-rw-r--r--  lib/libmd/ripemd.3         110
-rw-r--r--  lib/libmd/ripemd.h         89
-rw-r--r--  lib/libmd/rmd160c.c        544
-rw-r--r--  lib/libmd/rmd_locl.h       216
-rw-r--r--  lib/libmd/rmdconst.h       399
-rw-r--r--  lib/libmd/rmddriver.c      51
-rw-r--r--  lib/libmd/sha.3            7
-rw-r--r--  lib/libmd/sha.h            9
-rw-r--r--  lib/libmd/sha1c.c          2
11 files changed, 3468 insertions, 15 deletions
diff --git a/lib/libmd/Makefile b/lib/libmd/Makefile
index c648ef5..a0658f5 100644
--- a/lib/libmd/Makefile
+++ b/lib/libmd/Makefile
@@ -1,9 +1,10 @@
-# $Id: Makefile,v 1.28 1998/11/12 04:08:10 jdp Exp $
+# $Id: Makefile,v 1.29 1999/02/26 04:24:56 wollman Exp $
LIB= md
SRCS= md2c.c md4c.c md5c.c md2hl.c md4hl.c md5hl.c \
+ rmd160c.c rmd160hl.c \
sha0c.c sha0hl.c sha1c.c sha1hl.c
-MAN3+= md2.3 md4.3 md5.3 sha.3
+MAN3+= md2.3 md4.3 md5.3 ripemd.3 sha.3
MLINKS+=md2.3 MD2Init.3 md2.3 MD2Update.3 md2.3 MD2Final.3
MLINKS+=md2.3 MD2End.3 md2.3 MD2File.3 md2.3 MD2Data.3
MLINKS+=md4.3 MD4Init.3 md4.3 MD4Update.3 md4.3 MD4Final.3
@@ -11,13 +12,19 @@ MLINKS+=md4.3 MD4End.3 md4.3 MD4File.3 md4.3 MD4Data.3
MLINKS+=md5.3 MD5Init.3 md5.3 MD5Update.3 md5.3 MD5Final.3
MLINKS+=md5.3 MD5End.3 md5.3 MD5File.3 md5.3 MD5Data.3
CLEANFILES+= md[245]hl.c md[245].ref md[245].3 mddriver \
+ rmd160.ref rmd160hl.c rmddriver \
sha0.ref sha0hl.c sha1.ref sha1hl.c shadriver
CFLAGS+= -I${.CURDIR}
+.PATH: ${.CURDIR}/${MACHINE_ARCH}
.if exists(${MACHINE_ARCH}/sha.S)
-SRCS+= ${MACHINE_ARCH}/sha.S
+SRCS+= sha.S
CFLAGS+= -DSHA1_ASM -DELF
.endif
+.if exists(${MACHINE_ARCH}/rmd160.S)
+SRCS+= rmd160.S
+CFLAGS+= -DRMD160_ASM -DELF
+.endif
all: md2.3 md4.3 md5.3
@@ -43,6 +50,12 @@ sha1hl.c: mdXhl.c
sed -e 's/mdX/sha/g' -e 's/MDX/SHA1_/g' -e 's/SHA1__/SHA1_/g' \
${.ALLSRC}) > ${.TARGET}
+rmd160hl.c: mdXhl.c
+ (echo '#define LENGTH 20'; \
+ sed -e 's/mdX/ripemd/g' -e 's/MDX/RIPEMD160_/g' \
+ -e 's/RIPEMD160__/RIPEMD160_/g' \
+ ${.ALLSRC}) > ${.TARGET}
+
md2.3: ${.CURDIR}/mdX.3
sed -e 's/mdX/md2/g' -e 's/MDX/MD2/g' ${.ALLSRC} > ${.TARGET}
cat ${.CURDIR}/md2.copyright >> ${.TARGET}
@@ -111,7 +124,20 @@ sha1.ref:
echo 'SHA-1 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \
'50abf5706a150990a08b2c5ea40fa0e585554732' ) > ${.TARGET}
-test: md2.ref md4.ref md5.ref sha0.ref sha1.ref
+rmd160.ref:
+ (echo 'RIPEMD160 test suite:'; \
+ echo 'RIPEMD160 ("") = 9c1185a5c5e9fc54612808977ee8f548b2258d31'; \
+ echo 'RIPEMD160 ("abc") = 8eb208f7e05d987a9b044a8e98c6b087f15a0bfc'; \
+ echo 'RIPEMD160 ("message digest") =' \
+ '5d0689ef49d2fae572b881b123a85ffa21595f36'; \
+ echo 'RIPEMD160 ("abcdefghijklmnopqrstuvwxyz") =' \
+ 'f71c27109c692c1b56bbdceb5b9d2865b3708dbc'; \
+ echo 'RIPEMD160 ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") =' \
+ 'b0e20b6e3116640286ed3a87a5713079b21f5189'; \
+ echo 'RIPEMD160 ("12345678901234567890123456789012345678901234567890123456789012345678901234567890") =' \
+ '9b752e45573d4b39f4dbd3323cab82bf63326bfb' ) > ${.TARGET}
+
+test: md2.ref md4.ref md5.ref sha0.ref rmd160.ref sha1.ref
@${ECHO} if any of these test fail, the code produces wrong results
@${ECHO} and should NOT be used.
${CC} -static ${CFLAGS} ${LDFLAGS} -DMD=2 -o mddriver ${.CURDIR}/mddriver.c -L. -lmd
@@ -124,6 +150,10 @@ test: md2.ref md4.ref md5.ref sha0.ref sha1.ref
./mddriver | cmp md5.ref -
@${ECHO} MD5 passed test
-rm -f mddriver
+ ${CC} -static ${CFLAGS} ${LDFLAGS} -o rmddriver ${.CURDIR}/rmddriver.c -L. -lmd
+ ./rmddriver | cmp rmd160.ref -
+ @${ECHO} RIPEMD160 passed test
+ -rm -f rmddriver
${CC} -static ${CFLAGS} ${LDFLAGS} -DSHA=0 -o shadriver ${.CURDIR}/shadriver.c -L. -lmd
./shadriver | cmp sha0.ref -
@${ECHO} SHA-0 passed test
@@ -133,7 +163,7 @@ test: md2.ref md4.ref md5.ref sha0.ref sha1.ref
-rm -f shadriver
beforeinstall:
-.for i in md2.h md4.h md5.h sha.h
+.for i in md2.h md4.h md5.h ripemd.h sha.h
${INSTALL} -C -o ${BINOWN} -g ${BINGRP} -m 444 ${.CURDIR}/$i \
${DESTDIR}/usr/include
.endfor
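
The rmd160.ref vectors wired into the new test target can also be checked from C through the one-shot interface this commit installs. A minimal sketch, assuming libmd has been rebuilt with the new sources and linked with -lmd; the expected string is the "abc" entry from rmd160.ref above:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <ripemd.h>

	int
	main(void)
	{
		const char *expected = "8eb208f7e05d987a9b044a8e98c6b087f15a0bfc";
		char *digest;

		/* A null buf asks RIPEMD160_Data() to malloc() the result. */
		digest = RIPEMD160_Data((const unsigned char *)"abc", 3, NULL);
		if (digest == NULL)
			return (1);
		printf("RIPEMD160 (\"abc\") = %s\n", digest);
		if (strcmp(digest, expected) != 0)
			printf("MISMATCH: expected %s\n", expected);
		free(digest);
		return (0);
	}

Compiling with something like ``cc -static test.c -lmd'' (test.c being a stand-in name) mirrors the driver invocations in the test target above.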
diff --git a/lib/libmd/i386/rmd160.S b/lib/libmd/i386/rmd160.S
new file mode 100644
index 0000000..f90f6f2
--- /dev/null
+++ b/lib/libmd/i386/rmd160.S
@@ -0,0 +1,2016 @@
+/* Run the C pre-processor over this file with one of the following defined
+ * ELF - elf object files,
+ * OUT - a.out object files,
+ * BSDI - BSDI style a.out object files
+ * SOL - Solaris style elf
+ */
+
+#define TYPE(a,b) .type a,b
+#define SIZE(a,b) .size a,b
+
+#if defined(OUT) || defined(BSDI)
+#define ripemd160_block_x86 _ripemd160_block_x86
+
+#endif
+
+#ifdef OUT
+#define OK 1
+#define ALIGN 4
+#endif
+
+#ifdef BSDI
+#define OK 1
+#define ALIGN 4
+#undef SIZE
+#undef TYPE
+#define SIZE(a,b)
+#define TYPE(a,b)
+#endif
+
+#if defined(ELF) || defined(SOL)
+#define OK 1
+#define ALIGN 16
+#endif
+
+#ifndef OK
+You need to define one of
+ELF - elf systems - linux-elf, NetBSD and DG-UX
+OUT - a.out systems - linux-a.out and FreeBSD
+SOL - solaris systems, which are elf with strange comment lines
+BSDI - a.out with a very primitive version of as.
+#endif
+
+/* Let the Assembler begin :-) */
+ /* Don't even think of reading this code */
+ /* It was automatically generated by rmd-586.pl */
+ /* Which is a perl program used to generate the x86 assembler for */
+ /* any of elf, a.out, BSDI, Win32, or Solaris */
+ /* eric <eay@cryptsoft.com> */
+
+ .file "rmd-586.s"
+ .version "01.01"
+gcc2_compiled.:
+.text
+ .align ALIGN
+.globl ripemd160_block_x86
+ TYPE(ripemd160_block_x86,@function)
+ripemd160_block_x86:
+ pushl %esi
+ movl 16(%esp), %ecx
+ pushl %edi
+ movl 16(%esp), %esi
+ pushl %ebp
+ addl %esi, %ecx
+ pushl %ebx
+ subl $64, %ecx
+ subl $88, %esp
+ movl %ecx, (%esp)
+ movl 108(%esp), %edi
+.L000start:
+
+ movl (%esi), %eax
+ movl 4(%esi), %ebx
+ movl %eax, 4(%esp)
+ movl %ebx, 8(%esp)
+ movl 8(%esi), %eax
+ movl 12(%esi), %ebx
+ movl %eax, 12(%esp)
+ movl %ebx, 16(%esp)
+ movl 16(%esi), %eax
+ movl 20(%esi), %ebx
+ movl %eax, 20(%esp)
+ movl %ebx, 24(%esp)
+ movl 24(%esi), %eax
+ movl 28(%esi), %ebx
+ movl %eax, 28(%esp)
+ movl %ebx, 32(%esp)
+ movl 32(%esi), %eax
+ movl 36(%esi), %ebx
+ movl %eax, 36(%esp)
+ movl %ebx, 40(%esp)
+ movl 40(%esi), %eax
+ movl 44(%esi), %ebx
+ movl %eax, 44(%esp)
+ movl %ebx, 48(%esp)
+ movl 48(%esi), %eax
+ movl 52(%esi), %ebx
+ movl %eax, 52(%esp)
+ movl %ebx, 56(%esp)
+ movl 56(%esi), %eax
+ movl 60(%esi), %ebx
+ movl %eax, 60(%esp)
+ movl %ebx, 64(%esp)
+ addl $64, %esi
+ movl (%edi), %eax
+ movl %esi, 112(%esp)
+ movl 4(%edi), %ebx
+ movl 8(%edi), %ecx
+ movl 12(%edi), %edx
+ movl 16(%edi), %ebp
+ /* 0 */
+ movl %ecx, %esi
+ xorl %edx, %esi
+ movl 4(%esp), %edi
+ xorl %ebx, %esi
+ addl %edi, %eax
+ roll $10, %ecx
+ addl %esi, %eax
+ movl %ebx, %esi
+ roll $11, %eax
+ addl %ebp, %eax
+ /* 1 */
+ xorl %ecx, %esi
+ movl 8(%esp), %edi
+ xorl %eax, %esi
+ addl %esi, %ebp
+ movl %eax, %esi
+ roll $10, %ebx
+ addl %edi, %ebp
+ xorl %ebx, %esi
+ roll $14, %ebp
+ addl %edx, %ebp
+ /* 2 */
+ movl 12(%esp), %edi
+ xorl %ebp, %esi
+ addl %edi, %edx
+ roll $10, %eax
+ addl %esi, %edx
+ movl %ebp, %esi
+ roll $15, %edx
+ addl %ecx, %edx
+ /* 3 */
+ xorl %eax, %esi
+ movl 16(%esp), %edi
+ xorl %edx, %esi
+ addl %esi, %ecx
+ movl %edx, %esi
+ roll $10, %ebp
+ addl %edi, %ecx
+ xorl %ebp, %esi
+ roll $12, %ecx
+ addl %ebx, %ecx
+ /* 4 */
+ movl 20(%esp), %edi
+ xorl %ecx, %esi
+ addl %edi, %ebx
+ roll $10, %edx
+ addl %esi, %ebx
+ movl %ecx, %esi
+ roll $5, %ebx
+ addl %eax, %ebx
+ /* 5 */
+ xorl %edx, %esi
+ movl 24(%esp), %edi
+ xorl %ebx, %esi
+ addl %esi, %eax
+ movl %ebx, %esi
+ roll $10, %ecx
+ addl %edi, %eax
+ xorl %ecx, %esi
+ roll $8, %eax
+ addl %ebp, %eax
+ /* 6 */
+ movl 28(%esp), %edi
+ xorl %eax, %esi
+ addl %edi, %ebp
+ roll $10, %ebx
+ addl %esi, %ebp
+ movl %eax, %esi
+ roll $7, %ebp
+ addl %edx, %ebp
+ /* 7 */
+ xorl %ebx, %esi
+ movl 32(%esp), %edi
+ xorl %ebp, %esi
+ addl %esi, %edx
+ movl %ebp, %esi
+ roll $10, %eax
+ addl %edi, %edx
+ xorl %eax, %esi
+ roll $9, %edx
+ addl %ecx, %edx
+ /* 8 */
+ movl 36(%esp), %edi
+ xorl %edx, %esi
+ addl %edi, %ecx
+ roll $10, %ebp
+ addl %esi, %ecx
+ movl %edx, %esi
+ roll $11, %ecx
+ addl %ebx, %ecx
+ /* 9 */
+ xorl %ebp, %esi
+ movl 40(%esp), %edi
+ xorl %ecx, %esi
+ addl %esi, %ebx
+ movl %ecx, %esi
+ roll $10, %edx
+ addl %edi, %ebx
+ xorl %edx, %esi
+ roll $13, %ebx
+ addl %eax, %ebx
+ /* 10 */
+ movl 44(%esp), %edi
+ xorl %ebx, %esi
+ addl %edi, %eax
+ roll $10, %ecx
+ addl %esi, %eax
+ movl %ebx, %esi
+ roll $14, %eax
+ addl %ebp, %eax
+ /* 11 */
+ xorl %ecx, %esi
+ movl 48(%esp), %edi
+ xorl %eax, %esi
+ addl %esi, %ebp
+ movl %eax, %esi
+ roll $10, %ebx
+ addl %edi, %ebp
+ xorl %ebx, %esi
+ roll $15, %ebp
+ addl %edx, %ebp
+ /* 12 */
+ movl 52(%esp), %edi
+ xorl %ebp, %esi
+ addl %edi, %edx
+ roll $10, %eax
+ addl %esi, %edx
+ movl %ebp, %esi
+ roll $6, %edx
+ addl %ecx, %edx
+ /* 13 */
+ xorl %eax, %esi
+ movl 56(%esp), %edi
+ xorl %edx, %esi
+ addl %esi, %ecx
+ movl %edx, %esi
+ roll $10, %ebp
+ addl %edi, %ecx
+ xorl %ebp, %esi
+ roll $7, %ecx
+ addl %ebx, %ecx
+ /* 14 */
+ movl 60(%esp), %edi
+ xorl %ecx, %esi
+ addl %edi, %ebx
+ roll $10, %edx
+ addl %esi, %ebx
+ movl %ecx, %esi
+ roll $9, %ebx
+ addl %eax, %ebx
+ /* 15 */
+ xorl %edx, %esi
+ movl 64(%esp), %edi
+ xorl %ebx, %esi
+ addl %esi, %eax
+ movl $-1, %esi
+ roll $10, %ecx
+ addl %edi, %eax
+ movl 32(%esp), %edi
+ roll $8, %eax
+ addl %ebp, %eax
+ /* 16 */
+ addl %edi, %ebp
+ movl %ebx, %edi
+ subl %eax, %esi
+ andl %eax, %edi
+ andl %ecx, %esi
+ orl %esi, %edi
+ movl 20(%esp), %esi
+ roll $10, %ebx
+ leal 1518500249(%ebp,%edi,1),%ebp
+ movl $-1, %edi
+ roll $7, %ebp
+ addl %edx, %ebp
+ /* 17 */
+ addl %esi, %edx
+ movl %eax, %esi
+ subl %ebp, %edi
+ andl %ebp, %esi
+ andl %ebx, %edi
+ orl %edi, %esi
+ movl 56(%esp), %edi
+ roll $10, %eax
+ leal 1518500249(%edx,%esi,1),%edx
+ movl $-1, %esi
+ roll $6, %edx
+ addl %ecx, %edx
+ /* 18 */
+ addl %edi, %ecx
+ movl %ebp, %edi
+ subl %edx, %esi
+ andl %edx, %edi
+ andl %eax, %esi
+ orl %esi, %edi
+ movl 8(%esp), %esi
+ roll $10, %ebp
+ leal 1518500249(%ecx,%edi,1),%ecx
+ movl $-1, %edi
+ roll $8, %ecx
+ addl %ebx, %ecx
+ /* 19 */
+ addl %esi, %ebx
+ movl %edx, %esi
+ subl %ecx, %edi
+ andl %ecx, %esi
+ andl %ebp, %edi
+ orl %edi, %esi
+ movl 44(%esp), %edi
+ roll $10, %edx
+ leal 1518500249(%ebx,%esi,1),%ebx
+ movl $-1, %esi
+ roll $13, %ebx
+ addl %eax, %ebx
+ /* 20 */
+ addl %edi, %eax
+ movl %ecx, %edi
+ subl %ebx, %esi
+ andl %ebx, %edi
+ andl %edx, %esi
+ orl %esi, %edi
+ movl 28(%esp), %esi
+ roll $10, %ecx
+ leal 1518500249(%eax,%edi,1),%eax
+ movl $-1, %edi
+ roll $11, %eax
+ addl %ebp, %eax
+ /* 21 */
+ addl %esi, %ebp
+ movl %ebx, %esi
+ subl %eax, %edi
+ andl %eax, %esi
+ andl %ecx, %edi
+ orl %edi, %esi
+ movl 64(%esp), %edi
+ roll $10, %ebx
+ leal 1518500249(%ebp,%esi,1),%ebp
+ movl $-1, %esi
+ roll $9, %ebp
+ addl %edx, %ebp
+ /* 22 */
+ addl %edi, %edx
+ movl %eax, %edi
+ subl %ebp, %esi
+ andl %ebp, %edi
+ andl %ebx, %esi
+ orl %esi, %edi
+ movl 16(%esp), %esi
+ roll $10, %eax
+ leal 1518500249(%edx,%edi,1),%edx
+ movl $-1, %edi
+ roll $7, %edx
+ addl %ecx, %edx
+ /* 23 */
+ addl %esi, %ecx
+ movl %ebp, %esi
+ subl %edx, %edi
+ andl %edx, %esi
+ andl %eax, %edi
+ orl %edi, %esi
+ movl 52(%esp), %edi
+ roll $10, %ebp
+ leal 1518500249(%ecx,%esi,1),%ecx
+ movl $-1, %esi
+ roll $15, %ecx
+ addl %ebx, %ecx
+ /* 24 */
+ addl %edi, %ebx
+ movl %edx, %edi
+ subl %ecx, %esi
+ andl %ecx, %edi
+ andl %ebp, %esi
+ orl %esi, %edi
+ movl 4(%esp), %esi
+ roll $10, %edx
+ leal 1518500249(%ebx,%edi,1),%ebx
+ movl $-1, %edi
+ roll $7, %ebx
+ addl %eax, %ebx
+ /* 25 */
+ addl %esi, %eax
+ movl %ecx, %esi
+ subl %ebx, %edi
+ andl %ebx, %esi
+ andl %edx, %edi
+ orl %edi, %esi
+ movl 40(%esp), %edi
+ roll $10, %ecx
+ leal 1518500249(%eax,%esi,1),%eax
+ movl $-1, %esi
+ roll $12, %eax
+ addl %ebp, %eax
+ /* 26 */
+ addl %edi, %ebp
+ movl %ebx, %edi
+ subl %eax, %esi
+ andl %eax, %edi
+ andl %ecx, %esi
+ orl %esi, %edi
+ movl 24(%esp), %esi
+ roll $10, %ebx
+ leal 1518500249(%ebp,%edi,1),%ebp
+ movl $-1, %edi
+ roll $15, %ebp
+ addl %edx, %ebp
+ /* 27 */
+ addl %esi, %edx
+ movl %eax, %esi
+ subl %ebp, %edi
+ andl %ebp, %esi
+ andl %ebx, %edi
+ orl %edi, %esi
+ movl 12(%esp), %edi
+ roll $10, %eax
+ leal 1518500249(%edx,%esi,1),%edx
+ movl $-1, %esi
+ roll $9, %edx
+ addl %ecx, %edx
+ /* 28 */
+ addl %edi, %ecx
+ movl %ebp, %edi
+ subl %edx, %esi
+ andl %edx, %edi
+ andl %eax, %esi
+ orl %esi, %edi
+ movl 60(%esp), %esi
+ roll $10, %ebp
+ leal 1518500249(%ecx,%edi,1),%ecx
+ movl $-1, %edi
+ roll $11, %ecx
+ addl %ebx, %ecx
+ /* 29 */
+ addl %esi, %ebx
+ movl %edx, %esi
+ subl %ecx, %edi
+ andl %ecx, %esi
+ andl %ebp, %edi
+ orl %edi, %esi
+ movl 48(%esp), %edi
+ roll $10, %edx
+ leal 1518500249(%ebx,%esi,1),%ebx
+ movl $-1, %esi
+ roll $7, %ebx
+ addl %eax, %ebx
+ /* 30 */
+ addl %edi, %eax
+ movl %ecx, %edi
+ subl %ebx, %esi
+ andl %ebx, %edi
+ andl %edx, %esi
+ orl %esi, %edi
+ movl 36(%esp), %esi
+ roll $10, %ecx
+ leal 1518500249(%eax,%edi,1),%eax
+ movl $-1, %edi
+ roll $13, %eax
+ addl %ebp, %eax
+ /* 31 */
+ addl %esi, %ebp
+ movl %ebx, %esi
+ subl %eax, %edi
+ andl %eax, %esi
+ andl %ecx, %edi
+ orl %edi, %esi
+ movl $-1, %edi
+ roll $10, %ebx
+ leal 1518500249(%ebp,%esi,1),%ebp
+ subl %eax, %edi
+ roll $12, %ebp
+ addl %edx, %ebp
+ /* 32 */
+ movl 16(%esp), %esi
+ orl %ebp, %edi
+ addl %esi, %edx
+ xorl %ebx, %edi
+ movl $-1, %esi
+ roll $10, %eax
+ leal 1859775393(%edx,%edi,1),%edx
+ subl %ebp, %esi
+ roll $11, %edx
+ addl %ecx, %edx
+ /* 33 */
+ movl 44(%esp), %edi
+ orl %edx, %esi
+ addl %edi, %ecx
+ xorl %eax, %esi
+ movl $-1, %edi
+ roll $10, %ebp
+ leal 1859775393(%ecx,%esi,1),%ecx
+ subl %edx, %edi
+ roll $13, %ecx
+ addl %ebx, %ecx
+ /* 34 */
+ movl 60(%esp), %esi
+ orl %ecx, %edi
+ addl %esi, %ebx
+ xorl %ebp, %edi
+ movl $-1, %esi
+ roll $10, %edx
+ leal 1859775393(%ebx,%edi,1),%ebx
+ subl %ecx, %esi
+ roll $6, %ebx
+ addl %eax, %ebx
+ /* 35 */
+ movl 20(%esp), %edi
+ orl %ebx, %esi
+ addl %edi, %eax
+ xorl %edx, %esi
+ movl $-1, %edi
+ roll $10, %ecx
+ leal 1859775393(%eax,%esi,1),%eax
+ subl %ebx, %edi
+ roll $7, %eax
+ addl %ebp, %eax
+ /* 36 */
+ movl 40(%esp), %esi
+ orl %eax, %edi
+ addl %esi, %ebp
+ xorl %ecx, %edi
+ movl $-1, %esi
+ roll $10, %ebx
+ leal 1859775393(%ebp,%edi,1),%ebp
+ subl %eax, %esi
+ roll $14, %ebp
+ addl %edx, %ebp
+ /* 37 */
+ movl 64(%esp), %edi
+ orl %ebp, %esi
+ addl %edi, %edx
+ xorl %ebx, %esi
+ movl $-1, %edi
+ roll $10, %eax
+ leal 1859775393(%edx,%esi,1),%edx
+ subl %ebp, %edi
+ roll $9, %edx
+ addl %ecx, %edx
+ /* 38 */
+ movl 36(%esp), %esi
+ orl %edx, %edi
+ addl %esi, %ecx
+ xorl %eax, %edi
+ movl $-1, %esi
+ roll $10, %ebp
+ leal 1859775393(%ecx,%edi,1),%ecx
+ subl %edx, %esi
+ roll $13, %ecx
+ addl %ebx, %ecx
+ /* 39 */
+ movl 8(%esp), %edi
+ orl %ecx, %esi
+ addl %edi, %ebx
+ xorl %ebp, %esi
+ movl $-1, %edi
+ roll $10, %edx
+ leal 1859775393(%ebx,%esi,1),%ebx
+ subl %ecx, %edi
+ roll $15, %ebx
+ addl %eax, %ebx
+ /* 40 */
+ movl 12(%esp), %esi
+ orl %ebx, %edi
+ addl %esi, %eax
+ xorl %edx, %edi
+ movl $-1, %esi
+ roll $10, %ecx
+ leal 1859775393(%eax,%edi,1),%eax
+ subl %ebx, %esi
+ roll $14, %eax
+ addl %ebp, %eax
+ /* 41 */
+ movl 32(%esp), %edi
+ orl %eax, %esi
+ addl %edi, %ebp
+ xorl %ecx, %esi
+ movl $-1, %edi
+ roll $10, %ebx
+ leal 1859775393(%ebp,%esi,1),%ebp
+ subl %eax, %edi
+ roll $8, %ebp
+ addl %edx, %ebp
+ /* 42 */
+ movl 4(%esp), %esi
+ orl %ebp, %edi
+ addl %esi, %edx
+ xorl %ebx, %edi
+ movl $-1, %esi
+ roll $10, %eax
+ leal 1859775393(%edx,%edi,1),%edx
+ subl %ebp, %esi
+ roll $13, %edx
+ addl %ecx, %edx
+ /* 43 */
+ movl 28(%esp), %edi
+ orl %edx, %esi
+ addl %edi, %ecx
+ xorl %eax, %esi
+ movl $-1, %edi
+ roll $10, %ebp
+ leal 1859775393(%ecx,%esi,1),%ecx
+ subl %edx, %edi
+ roll $6, %ecx
+ addl %ebx, %ecx
+ /* 44 */
+ movl 56(%esp), %esi
+ orl %ecx, %edi
+ addl %esi, %ebx
+ xorl %ebp, %edi
+ movl $-1, %esi
+ roll $10, %edx
+ leal 1859775393(%ebx,%edi,1),%ebx
+ subl %ecx, %esi
+ roll $5, %ebx
+ addl %eax, %ebx
+ /* 45 */
+ movl 48(%esp), %edi
+ orl %ebx, %esi
+ addl %edi, %eax
+ xorl %edx, %esi
+ movl $-1, %edi
+ roll $10, %ecx
+ leal 1859775393(%eax,%esi,1),%eax
+ subl %ebx, %edi
+ roll $12, %eax
+ addl %ebp, %eax
+ /* 46 */
+ movl 24(%esp), %esi
+ orl %eax, %edi
+ addl %esi, %ebp
+ xorl %ecx, %edi
+ movl $-1, %esi
+ roll $10, %ebx
+ leal 1859775393(%ebp,%edi,1),%ebp
+ subl %eax, %esi
+ roll $7, %ebp
+ addl %edx, %ebp
+ /* 47 */
+ movl 52(%esp), %edi
+ orl %ebp, %esi
+ addl %edi, %edx
+ xorl %ebx, %esi
+ movl $-1, %edi
+ roll $10, %eax
+ leal 1859775393(%edx,%esi,1),%edx
+ movl %eax, %esi
+ roll $5, %edx
+ addl %ecx, %edx
+ /* 48 */
+ subl %eax, %edi
+ andl %edx, %esi
+ andl %ebp, %edi
+ orl %esi, %edi
+ movl 8(%esp), %esi
+ roll $10, %ebp
+ leal 2400959708(%ecx,%edi,),%ecx
+ movl $-1, %edi
+ addl %esi, %ecx
+ movl %ebp, %esi
+ roll $11, %ecx
+ addl %ebx, %ecx
+ /* 49 */
+ subl %ebp, %edi
+ andl %ecx, %esi
+ andl %edx, %edi
+ orl %esi, %edi
+ movl 40(%esp), %esi
+ roll $10, %edx
+ leal 2400959708(%ebx,%edi,),%ebx
+ movl $-1, %edi
+ addl %esi, %ebx
+ movl %edx, %esi
+ roll $12, %ebx
+ addl %eax, %ebx
+ /* 50 */
+ subl %edx, %edi
+ andl %ebx, %esi
+ andl %ecx, %edi
+ orl %esi, %edi
+ movl 48(%esp), %esi
+ roll $10, %ecx
+ leal 2400959708(%eax,%edi,),%eax
+ movl $-1, %edi
+ addl %esi, %eax
+ movl %ecx, %esi
+ roll $14, %eax
+ addl %ebp, %eax
+ /* 51 */
+ subl %ecx, %edi
+ andl %eax, %esi
+ andl %ebx, %edi
+ orl %esi, %edi
+ movl 44(%esp), %esi
+ roll $10, %ebx
+ leal 2400959708(%ebp,%edi,),%ebp
+ movl $-1, %edi
+ addl %esi, %ebp
+ movl %ebx, %esi
+ roll $15, %ebp
+ addl %edx, %ebp
+ /* 52 */
+ subl %ebx, %edi
+ andl %ebp, %esi
+ andl %eax, %edi
+ orl %esi, %edi
+ movl 4(%esp), %esi
+ roll $10, %eax
+ leal 2400959708(%edx,%edi,),%edx
+ movl $-1, %edi
+ addl %esi, %edx
+ movl %eax, %esi
+ roll $14, %edx
+ addl %ecx, %edx
+ /* 53 */
+ subl %eax, %edi
+ andl %edx, %esi
+ andl %ebp, %edi
+ orl %esi, %edi
+ movl 36(%esp), %esi
+ roll $10, %ebp
+ leal 2400959708(%ecx,%edi,),%ecx
+ movl $-1, %edi
+ addl %esi, %ecx
+ movl %ebp, %esi
+ roll $15, %ecx
+ addl %ebx, %ecx
+ /* 54 */
+ subl %ebp, %edi
+ andl %ecx, %esi
+ andl %edx, %edi
+ orl %esi, %edi
+ movl 52(%esp), %esi
+ roll $10, %edx
+ leal 2400959708(%ebx,%edi,),%ebx
+ movl $-1, %edi
+ addl %esi, %ebx
+ movl %edx, %esi
+ roll $9, %ebx
+ addl %eax, %ebx
+ /* 55 */
+ subl %edx, %edi
+ andl %ebx, %esi
+ andl %ecx, %edi
+ orl %esi, %edi
+ movl 20(%esp), %esi
+ roll $10, %ecx
+ leal 2400959708(%eax,%edi,),%eax
+ movl $-1, %edi
+ addl %esi, %eax
+ movl %ecx, %esi
+ roll $8, %eax
+ addl %ebp, %eax
+ /* 56 */
+ subl %ecx, %edi
+ andl %eax, %esi
+ andl %ebx, %edi
+ orl %esi, %edi
+ movl 56(%esp), %esi
+ roll $10, %ebx
+ leal 2400959708(%ebp,%edi,),%ebp
+ movl $-1, %edi
+ addl %esi, %ebp
+ movl %ebx, %esi
+ roll $9, %ebp
+ addl %edx, %ebp
+ /* 57 */
+ subl %ebx, %edi
+ andl %ebp, %esi
+ andl %eax, %edi
+ orl %esi, %edi
+ movl 16(%esp), %esi
+ roll $10, %eax
+ leal 2400959708(%edx,%edi,),%edx
+ movl $-1, %edi
+ addl %esi, %edx
+ movl %eax, %esi
+ roll $14, %edx
+ addl %ecx, %edx
+ /* 58 */
+ subl %eax, %edi
+ andl %edx, %esi
+ andl %ebp, %edi
+ orl %esi, %edi
+ movl 32(%esp), %esi
+ roll $10, %ebp
+ leal 2400959708(%ecx,%edi,),%ecx
+ movl $-1, %edi
+ addl %esi, %ecx
+ movl %ebp, %esi
+ roll $5, %ecx
+ addl %ebx, %ecx
+ /* 59 */
+ subl %ebp, %edi
+ andl %ecx, %esi
+ andl %edx, %edi
+ orl %esi, %edi
+ movl 64(%esp), %esi
+ roll $10, %edx
+ leal 2400959708(%ebx,%edi,),%ebx
+ movl $-1, %edi
+ addl %esi, %ebx
+ movl %edx, %esi
+ roll $6, %ebx
+ addl %eax, %ebx
+ /* 60 */
+ subl %edx, %edi
+ andl %ebx, %esi
+ andl %ecx, %edi
+ orl %esi, %edi
+ movl 60(%esp), %esi
+ roll $10, %ecx
+ leal 2400959708(%eax,%edi,),%eax
+ movl $-1, %edi
+ addl %esi, %eax
+ movl %ecx, %esi
+ roll $8, %eax
+ addl %ebp, %eax
+ /* 61 */
+ subl %ecx, %edi
+ andl %eax, %esi
+ andl %ebx, %edi
+ orl %esi, %edi
+ movl 24(%esp), %esi
+ roll $10, %ebx
+ leal 2400959708(%ebp,%edi,),%ebp
+ movl $-1, %edi
+ addl %esi, %ebp
+ movl %ebx, %esi
+ roll $6, %ebp
+ addl %edx, %ebp
+ /* 62 */
+ subl %ebx, %edi
+ andl %ebp, %esi
+ andl %eax, %edi
+ orl %esi, %edi
+ movl 28(%esp), %esi
+ roll $10, %eax
+ leal 2400959708(%edx,%edi,),%edx
+ movl $-1, %edi
+ addl %esi, %edx
+ movl %eax, %esi
+ roll $5, %edx
+ addl %ecx, %edx
+ /* 63 */
+ subl %eax, %edi
+ andl %edx, %esi
+ andl %ebp, %edi
+ orl %esi, %edi
+ movl 12(%esp), %esi
+ roll $10, %ebp
+ leal 2400959708(%ecx,%edi,),%ecx
+ movl $-1, %edi
+ addl %esi, %ecx
+ subl %ebp, %edi
+ roll $12, %ecx
+ addl %ebx, %ecx
+ /* 64 */
+ movl 20(%esp), %esi
+ orl %edx, %edi
+ addl %esi, %ebx
+ xorl %ecx, %edi
+ movl $-1, %esi
+ roll $10, %edx
+ leal 2840853838(%ebx,%edi,1),%ebx
+ subl %edx, %esi
+ roll $9, %ebx
+ addl %eax, %ebx
+ /* 65 */
+ movl 4(%esp), %edi
+ orl %ecx, %esi
+ addl %edi, %eax
+ xorl %ebx, %esi
+ movl $-1, %edi
+ roll $10, %ecx
+ leal 2840853838(%eax,%esi,1),%eax
+ subl %ecx, %edi
+ roll $15, %eax
+ addl %ebp, %eax
+ /* 66 */
+ movl 24(%esp), %esi
+ orl %ebx, %edi
+ addl %esi, %ebp
+ xorl %eax, %edi
+ movl $-1, %esi
+ roll $10, %ebx
+ leal 2840853838(%ebp,%edi,1),%ebp
+ subl %ebx, %esi
+ roll $5, %ebp
+ addl %edx, %ebp
+ /* 67 */
+ movl 40(%esp), %edi
+ orl %eax, %esi
+ addl %edi, %edx
+ xorl %ebp, %esi
+ movl $-1, %edi
+ roll $10, %eax
+ leal 2840853838(%edx,%esi,1),%edx
+ subl %eax, %edi
+ roll $11, %edx
+ addl %ecx, %edx
+ /* 68 */
+ movl 32(%esp), %esi
+ orl %ebp, %edi
+ addl %esi, %ecx
+ xorl %edx, %edi
+ movl $-1, %esi
+ roll $10, %ebp
+ leal 2840853838(%ecx,%edi,1),%ecx
+ subl %ebp, %esi
+ roll $6, %ecx
+ addl %ebx, %ecx
+ /* 69 */
+ movl 52(%esp), %edi
+ orl %edx, %esi
+ addl %edi, %ebx
+ xorl %ecx, %esi
+ movl $-1, %edi
+ roll $10, %edx
+ leal 2840853838(%ebx,%esi,1),%ebx
+ subl %edx, %edi
+ roll $8, %ebx
+ addl %eax, %ebx
+ /* 70 */
+ movl 12(%esp), %esi
+ orl %ecx, %edi
+ addl %esi, %eax
+ xorl %ebx, %edi
+ movl $-1, %esi
+ roll $10, %ecx
+ leal 2840853838(%eax,%edi,1),%eax
+ subl %ecx, %esi
+ roll $13, %eax
+ addl %ebp, %eax
+ /* 71 */
+ movl 44(%esp), %edi
+ orl %ebx, %esi
+ addl %edi, %ebp
+ xorl %eax, %esi
+ movl $-1, %edi
+ roll $10, %ebx
+ leal 2840853838(%ebp,%esi,1),%ebp
+ subl %ebx, %edi
+ roll $12, %ebp
+ addl %edx, %ebp
+ /* 72 */
+ movl 60(%esp), %esi
+ orl %eax, %edi
+ addl %esi, %edx
+ xorl %ebp, %edi
+ movl $-1, %esi
+ roll $10, %eax
+ leal 2840853838(%edx,%edi,1),%edx
+ subl %eax, %esi
+ roll $5, %edx
+ addl %ecx, %edx
+ /* 73 */
+ movl 8(%esp), %edi
+ orl %ebp, %esi
+ addl %edi, %ecx
+ xorl %edx, %esi
+ movl $-1, %edi
+ roll $10, %ebp
+ leal 2840853838(%ecx,%esi,1),%ecx
+ subl %ebp, %edi
+ roll $12, %ecx
+ addl %ebx, %ecx
+ /* 74 */
+ movl 16(%esp), %esi
+ orl %edx, %edi
+ addl %esi, %ebx
+ xorl %ecx, %edi
+ movl $-1, %esi
+ roll $10, %edx
+ leal 2840853838(%ebx,%edi,1),%ebx
+ subl %edx, %esi
+ roll $13, %ebx
+ addl %eax, %ebx
+ /* 75 */
+ movl 36(%esp), %edi
+ orl %ecx, %esi
+ addl %edi, %eax
+ xorl %ebx, %esi
+ movl $-1, %edi
+ roll $10, %ecx
+ leal 2840853838(%eax,%esi,1),%eax
+ subl %ecx, %edi
+ roll $14, %eax
+ addl %ebp, %eax
+ /* 76 */
+ movl 48(%esp), %esi
+ orl %ebx, %edi
+ addl %esi, %ebp
+ xorl %eax, %edi
+ movl $-1, %esi
+ roll $10, %ebx
+ leal 2840853838(%ebp,%edi,1),%ebp
+ subl %ebx, %esi
+ roll $11, %ebp
+ addl %edx, %ebp
+ /* 77 */
+ movl 28(%esp), %edi
+ orl %eax, %esi
+ addl %edi, %edx
+ xorl %ebp, %esi
+ movl $-1, %edi
+ roll $10, %eax
+ leal 2840853838(%edx,%esi,1),%edx
+ subl %eax, %edi
+ roll $8, %edx
+ addl %ecx, %edx
+ /* 78 */
+ movl 64(%esp), %esi
+ orl %ebp, %edi
+ addl %esi, %ecx
+ xorl %edx, %edi
+ movl $-1, %esi
+ roll $10, %ebp
+ leal 2840853838(%ecx,%edi,1),%ecx
+ subl %ebp, %esi
+ roll $5, %ecx
+ addl %ebx, %ecx
+ /* 79 */
+ movl 56(%esp), %edi
+ orl %edx, %esi
+ addl %edi, %ebx
+ xorl %ecx, %esi
+ movl 108(%esp), %edi
+ roll $10, %edx
+ leal 2840853838(%ebx,%esi,1),%ebx
+ movl %eax, 68(%esp)
+ roll $6, %ebx
+ addl %eax, %ebx
+ movl (%edi), %eax
+ movl %ebx, 72(%esp)
+ movl %ecx, 76(%esp)
+ movl 4(%edi), %ebx
+ movl %edx, 80(%esp)
+ movl 8(%edi), %ecx
+ movl %ebp, 84(%esp)
+ movl 12(%edi), %edx
+ movl 16(%edi), %ebp
+ /* 80 */
+ movl $-1, %edi
+ subl %edx, %edi
+ movl 24(%esp), %esi
+ orl %ecx, %edi
+ addl %esi, %eax
+ xorl %ebx, %edi
+ movl $-1, %esi
+ roll $10, %ecx
+ leal 1352829926(%eax,%edi,1),%eax
+ subl %ecx, %esi
+ roll $8, %eax
+ addl %ebp, %eax
+ /* 81 */
+ movl 60(%esp), %edi
+ orl %ebx, %esi
+ addl %edi, %ebp
+ xorl %eax, %esi
+ movl $-1, %edi
+ roll $10, %ebx
+ leal 1352829926(%ebp,%esi,1),%ebp
+ subl %ebx, %edi
+ roll $9, %ebp
+ addl %edx, %ebp
+ /* 82 */
+ movl 32(%esp), %esi
+ orl %eax, %edi
+ addl %esi, %edx
+ xorl %ebp, %edi
+ movl $-1, %esi
+ roll $10, %eax
+ leal 1352829926(%edx,%edi,1),%edx
+ subl %eax, %esi
+ roll $9, %edx
+ addl %ecx, %edx
+ /* 83 */
+ movl 4(%esp), %edi
+ orl %ebp, %esi
+ addl %edi, %ecx
+ xorl %edx, %esi
+ movl $-1, %edi
+ roll $10, %ebp
+ leal 1352829926(%ecx,%esi,1),%ecx
+ subl %ebp, %edi
+ roll $11, %ecx
+ addl %ebx, %ecx
+ /* 84 */
+ movl 40(%esp), %esi
+ orl %edx, %edi
+ addl %esi, %ebx
+ xorl %ecx, %edi
+ movl $-1, %esi
+ roll $10, %edx
+ leal 1352829926(%ebx,%edi,1),%ebx
+ subl %edx, %esi
+ roll $13, %ebx
+ addl %eax, %ebx
+ /* 85 */
+ movl 12(%esp), %edi
+ orl %ecx, %esi
+ addl %edi, %eax
+ xorl %ebx, %esi
+ movl $-1, %edi
+ roll $10, %ecx
+ leal 1352829926(%eax,%esi,1),%eax
+ subl %ecx, %edi
+ roll $15, %eax
+ addl %ebp, %eax
+ /* 86 */
+ movl 48(%esp), %esi
+ orl %ebx, %edi
+ addl %esi, %ebp
+ xorl %eax, %edi
+ movl $-1, %esi
+ roll $10, %ebx
+ leal 1352829926(%ebp,%edi,1),%ebp
+ subl %ebx, %esi
+ roll $15, %ebp
+ addl %edx, %ebp
+ /* 87 */
+ movl 20(%esp), %edi
+ orl %eax, %esi
+ addl %edi, %edx
+ xorl %ebp, %esi
+ movl $-1, %edi
+ roll $10, %eax
+ leal 1352829926(%edx,%esi,1),%edx
+ subl %eax, %edi
+ roll $5, %edx
+ addl %ecx, %edx
+ /* 88 */
+ movl 56(%esp), %esi
+ orl %ebp, %edi
+ addl %esi, %ecx
+ xorl %edx, %edi
+ movl $-1, %esi
+ roll $10, %ebp
+ leal 1352829926(%ecx,%edi,1),%ecx
+ subl %ebp, %esi
+ roll $7, %ecx
+ addl %ebx, %ecx
+ /* 89 */
+ movl 28(%esp), %edi
+ orl %edx, %esi
+ addl %edi, %ebx
+ xorl %ecx, %esi
+ movl $-1, %edi
+ roll $10, %edx
+ leal 1352829926(%ebx,%esi,1),%ebx
+ subl %edx, %edi
+ roll $7, %ebx
+ addl %eax, %ebx
+ /* 90 */
+ movl 64(%esp), %esi
+ orl %ecx, %edi
+ addl %esi, %eax
+ xorl %ebx, %edi
+ movl $-1, %esi
+ roll $10, %ecx
+ leal 1352829926(%eax,%edi,1),%eax
+ subl %ecx, %esi
+ roll $8, %eax
+ addl %ebp, %eax
+ /* 91 */
+ movl 36(%esp), %edi
+ orl %ebx, %esi
+ addl %edi, %ebp
+ xorl %eax, %esi
+ movl $-1, %edi
+ roll $10, %ebx
+ leal 1352829926(%ebp,%esi,1),%ebp
+ subl %ebx, %edi
+ roll $11, %ebp
+ addl %edx, %ebp
+ /* 92 */
+ movl 8(%esp), %esi
+ orl %eax, %edi
+ addl %esi, %edx
+ xorl %ebp, %edi
+ movl $-1, %esi
+ roll $10, %eax
+ leal 1352829926(%edx,%edi,1),%edx
+ subl %eax, %esi
+ roll $14, %edx
+ addl %ecx, %edx
+ /* 93 */
+ movl 44(%esp), %edi
+ orl %ebp, %esi
+ addl %edi, %ecx
+ xorl %edx, %esi
+ movl $-1, %edi
+ roll $10, %ebp
+ leal 1352829926(%ecx,%esi,1),%ecx
+ subl %ebp, %edi
+ roll $14, %ecx
+ addl %ebx, %ecx
+ /* 94 */
+ movl 16(%esp), %esi
+ orl %edx, %edi
+ addl %esi, %ebx
+ xorl %ecx, %edi
+ movl $-1, %esi
+ roll $10, %edx
+ leal 1352829926(%ebx,%edi,1),%ebx
+ subl %edx, %esi
+ roll $12, %ebx
+ addl %eax, %ebx
+ /* 95 */
+ movl 52(%esp), %edi
+ orl %ecx, %esi
+ addl %edi, %eax
+ xorl %ebx, %esi
+ movl $-1, %edi
+ roll $10, %ecx
+ leal 1352829926(%eax,%esi,1),%eax
+ movl %ecx, %esi
+ roll $6, %eax
+ addl %ebp, %eax
+ /* 96 */
+ subl %ecx, %edi
+ andl %eax, %esi
+ andl %ebx, %edi
+ orl %esi, %edi
+ movl 28(%esp), %esi
+ roll $10, %ebx
+ leal 1548603684(%ebp,%edi,),%ebp
+ movl $-1, %edi
+ addl %esi, %ebp
+ movl %ebx, %esi
+ roll $9, %ebp
+ addl %edx, %ebp
+ /* 97 */
+ subl %ebx, %edi
+ andl %ebp, %esi
+ andl %eax, %edi
+ orl %esi, %edi
+ movl 48(%esp), %esi
+ roll $10, %eax
+ leal 1548603684(%edx,%edi,),%edx
+ movl $-1, %edi
+ addl %esi, %edx
+ movl %eax, %esi
+ roll $13, %edx
+ addl %ecx, %edx
+ /* 98 */
+ subl %eax, %edi
+ andl %edx, %esi
+ andl %ebp, %edi
+ orl %esi, %edi
+ movl 16(%esp), %esi
+ roll $10, %ebp
+ leal 1548603684(%ecx,%edi,),%ecx
+ movl $-1, %edi
+ addl %esi, %ecx
+ movl %ebp, %esi
+ roll $15, %ecx
+ addl %ebx, %ecx
+ /* 99 */
+ subl %ebp, %edi
+ andl %ecx, %esi
+ andl %edx, %edi
+ orl %esi, %edi
+ movl 32(%esp), %esi
+ roll $10, %edx
+ leal 1548603684(%ebx,%edi,),%ebx
+ movl $-1, %edi
+ addl %esi, %ebx
+ movl %edx, %esi
+ roll $7, %ebx
+ addl %eax, %ebx
+ /* 100 */
+ subl %edx, %edi
+ andl %ebx, %esi
+ andl %ecx, %edi
+ orl %esi, %edi
+ movl 4(%esp), %esi
+ roll $10, %ecx
+ leal 1548603684(%eax,%edi,),%eax
+ movl $-1, %edi
+ addl %esi, %eax
+ movl %ecx, %esi
+ roll $12, %eax
+ addl %ebp, %eax
+ /* 101 */
+ subl %ecx, %edi
+ andl %eax, %esi
+ andl %ebx, %edi
+ orl %esi, %edi
+ movl 56(%esp), %esi
+ roll $10, %ebx
+ leal 1548603684(%ebp,%edi,),%ebp
+ movl $-1, %edi
+ addl %esi, %ebp
+ movl %ebx, %esi
+ roll $8, %ebp
+ addl %edx, %ebp
+ /* 102 */
+ subl %ebx, %edi
+ andl %ebp, %esi
+ andl %eax, %edi
+ orl %esi, %edi
+ movl 24(%esp), %esi
+ roll $10, %eax
+ leal 1548603684(%edx,%edi,),%edx
+ movl $-1, %edi
+ addl %esi, %edx
+ movl %eax, %esi
+ roll $9, %edx
+ addl %ecx, %edx
+ /* 103 */
+ subl %eax, %edi
+ andl %edx, %esi
+ andl %ebp, %edi
+ orl %esi, %edi
+ movl 44(%esp), %esi
+ roll $10, %ebp
+ leal 1548603684(%ecx,%edi,),%ecx
+ movl $-1, %edi
+ addl %esi, %ecx
+ movl %ebp, %esi
+ roll $11, %ecx
+ addl %ebx, %ecx
+ /* 104 */
+ subl %ebp, %edi
+ andl %ecx, %esi
+ andl %edx, %edi
+ orl %esi, %edi
+ movl 60(%esp), %esi
+ roll $10, %edx
+ leal 1548603684(%ebx,%edi,),%ebx
+ movl $-1, %edi
+ addl %esi, %ebx
+ movl %edx, %esi
+ roll $7, %ebx
+ addl %eax, %ebx
+ /* 105 */
+ subl %edx, %edi
+ andl %ebx, %esi
+ andl %ecx, %edi
+ orl %esi, %edi
+ movl 64(%esp), %esi
+ roll $10, %ecx
+ leal 1548603684(%eax,%edi,),%eax
+ movl $-1, %edi
+ addl %esi, %eax
+ movl %ecx, %esi
+ roll $7, %eax
+ addl %ebp, %eax
+ /* 106 */
+ subl %ecx, %edi
+ andl %eax, %esi
+ andl %ebx, %edi
+ orl %esi, %edi
+ movl 36(%esp), %esi
+ roll $10, %ebx
+ leal 1548603684(%ebp,%edi,),%ebp
+ movl $-1, %edi
+ addl %esi, %ebp
+ movl %ebx, %esi
+ roll $12, %ebp
+ addl %edx, %ebp
+ /* 107 */
+ subl %ebx, %edi
+ andl %ebp, %esi
+ andl %eax, %edi
+ orl %esi, %edi
+ movl 52(%esp), %esi
+ roll $10, %eax
+ leal 1548603684(%edx,%edi,),%edx
+ movl $-1, %edi
+ addl %esi, %edx
+ movl %eax, %esi
+ roll $7, %edx
+ addl %ecx, %edx
+ /* 108 */
+ subl %eax, %edi
+ andl %edx, %esi
+ andl %ebp, %edi
+ orl %esi, %edi
+ movl 20(%esp), %esi
+ roll $10, %ebp
+ leal 1548603684(%ecx,%edi,),%ecx
+ movl $-1, %edi
+ addl %esi, %ecx
+ movl %ebp, %esi
+ roll $6, %ecx
+ addl %ebx, %ecx
+ /* 109 */
+ subl %ebp, %edi
+ andl %ecx, %esi
+ andl %edx, %edi
+ orl %esi, %edi
+ movl 40(%esp), %esi
+ roll $10, %edx
+ leal 1548603684(%ebx,%edi,),%ebx
+ movl $-1, %edi
+ addl %esi, %ebx
+ movl %edx, %esi
+ roll $15, %ebx
+ addl %eax, %ebx
+ /* 110 */
+ subl %edx, %edi
+ andl %ebx, %esi
+ andl %ecx, %edi
+ orl %esi, %edi
+ movl 8(%esp), %esi
+ roll $10, %ecx
+ leal 1548603684(%eax,%edi,),%eax
+ movl $-1, %edi
+ addl %esi, %eax
+ movl %ecx, %esi
+ roll $13, %eax
+ addl %ebp, %eax
+ /* 111 */
+ subl %ecx, %edi
+ andl %eax, %esi
+ andl %ebx, %edi
+ orl %esi, %edi
+ movl 12(%esp), %esi
+ roll $10, %ebx
+ leal 1548603684(%ebp,%edi,),%ebp
+ movl $-1, %edi
+ addl %esi, %ebp
+ subl %eax, %edi
+ roll $11, %ebp
+ addl %edx, %ebp
+ /* 112 */
+ movl 64(%esp), %esi
+ orl %ebp, %edi
+ addl %esi, %edx
+ xorl %ebx, %edi
+ movl $-1, %esi
+ roll $10, %eax
+ leal 1836072691(%edx,%edi,1),%edx
+ subl %ebp, %esi
+ roll $9, %edx
+ addl %ecx, %edx
+ /* 113 */
+ movl 24(%esp), %edi
+ orl %edx, %esi
+ addl %edi, %ecx
+ xorl %eax, %esi
+ movl $-1, %edi
+ roll $10, %ebp
+ leal 1836072691(%ecx,%esi,1),%ecx
+ subl %edx, %edi
+ roll $7, %ecx
+ addl %ebx, %ecx
+ /* 114 */
+ movl 8(%esp), %esi
+ orl %ecx, %edi
+ addl %esi, %ebx
+ xorl %ebp, %edi
+ movl $-1, %esi
+ roll $10, %edx
+ leal 1836072691(%ebx,%edi,1),%ebx
+ subl %ecx, %esi
+ roll $15, %ebx
+ addl %eax, %ebx
+ /* 115 */
+ movl 16(%esp), %edi
+ orl %ebx, %esi
+ addl %edi, %eax
+ xorl %edx, %esi
+ movl $-1, %edi
+ roll $10, %ecx
+ leal 1836072691(%eax,%esi,1),%eax
+ subl %ebx, %edi
+ roll $11, %eax
+ addl %ebp, %eax
+ /* 116 */
+ movl 32(%esp), %esi
+ orl %eax, %edi
+ addl %esi, %ebp
+ xorl %ecx, %edi
+ movl $-1, %esi
+ roll $10, %ebx
+ leal 1836072691(%ebp,%edi,1),%ebp
+ subl %eax, %esi
+ roll $8, %ebp
+ addl %edx, %ebp
+ /* 117 */
+ movl 60(%esp), %edi
+ orl %ebp, %esi
+ addl %edi, %edx
+ xorl %ebx, %esi
+ movl $-1, %edi
+ roll $10, %eax
+ leal 1836072691(%edx,%esi,1),%edx
+ subl %ebp, %edi
+ roll $6, %edx
+ addl %ecx, %edx
+ /* 118 */
+ movl 28(%esp), %esi
+ orl %edx, %edi
+ addl %esi, %ecx
+ xorl %eax, %edi
+ movl $-1, %esi
+ roll $10, %ebp
+ leal 1836072691(%ecx,%edi,1),%ecx
+ subl %edx, %esi
+ roll $6, %ecx
+ addl %ebx, %ecx
+ /* 119 */
+ movl 40(%esp), %edi
+ orl %ecx, %esi
+ addl %edi, %ebx
+ xorl %ebp, %esi
+ movl $-1, %edi
+ roll $10, %edx
+ leal 1836072691(%ebx,%esi,1),%ebx
+ subl %ecx, %edi
+ roll $14, %ebx
+ addl %eax, %ebx
+ /* 120 */
+ movl 48(%esp), %esi
+ orl %ebx, %edi
+ addl %esi, %eax
+ xorl %edx, %edi
+ movl $-1, %esi
+ roll $10, %ecx
+ leal 1836072691(%eax,%edi,1),%eax
+ subl %ebx, %esi
+ roll $12, %eax
+ addl %ebp, %eax
+ /* 121 */
+ movl 36(%esp), %edi
+ orl %eax, %esi
+ addl %edi, %ebp
+ xorl %ecx, %esi
+ movl $-1, %edi
+ roll $10, %ebx
+ leal 1836072691(%ebp,%esi,1),%ebp
+ subl %eax, %edi
+ roll $13, %ebp
+ addl %edx, %ebp
+ /* 122 */
+ movl 52(%esp), %esi
+ orl %ebp, %edi
+ addl %esi, %edx
+ xorl %ebx, %edi
+ movl $-1, %esi
+ roll $10, %eax
+ leal 1836072691(%edx,%edi,1),%edx
+ subl %ebp, %esi
+ roll $5, %edx
+ addl %ecx, %edx
+ /* 123 */
+ movl 12(%esp), %edi
+ orl %edx, %esi
+ addl %edi, %ecx
+ xorl %eax, %esi
+ movl $-1, %edi
+ roll $10, %ebp
+ leal 1836072691(%ecx,%esi,1),%ecx
+ subl %edx, %edi
+ roll $14, %ecx
+ addl %ebx, %ecx
+ /* 124 */
+ movl 44(%esp), %esi
+ orl %ecx, %edi
+ addl %esi, %ebx
+ xorl %ebp, %edi
+ movl $-1, %esi
+ roll $10, %edx
+ leal 1836072691(%ebx,%edi,1),%ebx
+ subl %ecx, %esi
+ roll $13, %ebx
+ addl %eax, %ebx
+ /* 125 */
+ movl 4(%esp), %edi
+ orl %ebx, %esi
+ addl %edi, %eax
+ xorl %edx, %esi
+ movl $-1, %edi
+ roll $10, %ecx
+ leal 1836072691(%eax,%esi,1),%eax
+ subl %ebx, %edi
+ roll $13, %eax
+ addl %ebp, %eax
+ /* 126 */
+ movl 20(%esp), %esi
+ orl %eax, %edi
+ addl %esi, %ebp
+ xorl %ecx, %edi
+ movl $-1, %esi
+ roll $10, %ebx
+ leal 1836072691(%ebp,%edi,1),%ebp
+ subl %eax, %esi
+ roll $7, %ebp
+ addl %edx, %ebp
+ /* 127 */
+ movl 56(%esp), %edi
+ orl %ebp, %esi
+ addl %edi, %edx
+ xorl %ebx, %esi
+ movl 36(%esp), %edi
+ roll $10, %eax
+ leal 1836072691(%edx,%esi,1),%edx
+ movl $-1, %esi
+ roll $5, %edx
+ addl %ecx, %edx
+ /* 128 */
+ addl %edi, %ecx
+ movl %ebp, %edi
+ subl %edx, %esi
+ andl %edx, %edi
+ andl %eax, %esi
+ orl %esi, %edi
+ movl 28(%esp), %esi
+ roll $10, %ebp
+ leal 2053994217(%ecx,%edi,1),%ecx
+ movl $-1, %edi
+ roll $15, %ecx
+ addl %ebx, %ecx
+ /* 129 */
+ addl %esi, %ebx
+ movl %edx, %esi
+ subl %ecx, %edi
+ andl %ecx, %esi
+ andl %ebp, %edi
+ orl %edi, %esi
+ movl 20(%esp), %edi
+ roll $10, %edx
+ leal 2053994217(%ebx,%esi,1),%ebx
+ movl $-1, %esi
+ roll $5, %ebx
+ addl %eax, %ebx
+ /* 130 */
+ addl %edi, %eax
+ movl %ecx, %edi
+ subl %ebx, %esi
+ andl %ebx, %edi
+ andl %edx, %esi
+ orl %esi, %edi
+ movl 8(%esp), %esi
+ roll $10, %ecx
+ leal 2053994217(%eax,%edi,1),%eax
+ movl $-1, %edi
+ roll $8, %eax
+ addl %ebp, %eax
+ /* 131 */
+ addl %esi, %ebp
+ movl %ebx, %esi
+ subl %eax, %edi
+ andl %eax, %esi
+ andl %ecx, %edi
+ orl %edi, %esi
+ movl 16(%esp), %edi
+ roll $10, %ebx
+ leal 2053994217(%ebp,%esi,1),%ebp
+ movl $-1, %esi
+ roll $11, %ebp
+ addl %edx, %ebp
+ /* 132 */
+ addl %edi, %edx
+ movl %eax, %edi
+ subl %ebp, %esi
+ andl %ebp, %edi
+ andl %ebx, %esi
+ orl %esi, %edi
+ movl 48(%esp), %esi
+ roll $10, %eax
+ leal 2053994217(%edx,%edi,1),%edx
+ movl $-1, %edi
+ roll $14, %edx
+ addl %ecx, %edx
+ /* 133 */
+ addl %esi, %ecx
+ movl %ebp, %esi
+ subl %edx, %edi
+ andl %edx, %esi
+ andl %eax, %edi
+ orl %edi, %esi
+ movl 64(%esp), %edi
+ roll $10, %ebp
+ leal 2053994217(%ecx,%esi,1),%ecx
+ movl $-1, %esi
+ roll $14, %ecx
+ addl %ebx, %ecx
+ /* 134 */
+ addl %edi, %ebx
+ movl %edx, %edi
+ subl %ecx, %esi
+ andl %ecx, %edi
+ andl %ebp, %esi
+ orl %esi, %edi
+ movl 4(%esp), %esi
+ roll $10, %edx
+ leal 2053994217(%ebx,%edi,1),%ebx
+ movl $-1, %edi
+ roll $6, %ebx
+ addl %eax, %ebx
+ /* 135 */
+ addl %esi, %eax
+ movl %ecx, %esi
+ subl %ebx, %edi
+ andl %ebx, %esi
+ andl %edx, %edi
+ orl %edi, %esi
+ movl 24(%esp), %edi
+ roll $10, %ecx
+ leal 2053994217(%eax,%esi,1),%eax
+ movl $-1, %esi
+ roll $14, %eax
+ addl %ebp, %eax
+ /* 136 */
+ addl %edi, %ebp
+ movl %ebx, %edi
+ subl %eax, %esi
+ andl %eax, %edi
+ andl %ecx, %esi
+ orl %esi, %edi
+ movl 52(%esp), %esi
+ roll $10, %ebx
+ leal 2053994217(%ebp,%edi,1),%ebp
+ movl $-1, %edi
+ roll $6, %ebp
+ addl %edx, %ebp
+ /* 137 */
+ addl %esi, %edx
+ movl %eax, %esi
+ subl %ebp, %edi
+ andl %ebp, %esi
+ andl %ebx, %edi
+ orl %edi, %esi
+ movl 12(%esp), %edi
+ roll $10, %eax
+ leal 2053994217(%edx,%esi,1),%edx
+ movl $-1, %esi
+ roll $9, %edx
+ addl %ecx, %edx
+ /* 138 */
+ addl %edi, %ecx
+ movl %ebp, %edi
+ subl %edx, %esi
+ andl %edx, %edi
+ andl %eax, %esi
+ orl %esi, %edi
+ movl 56(%esp), %esi
+ roll $10, %ebp
+ leal 2053994217(%ecx,%edi,1),%ecx
+ movl $-1, %edi
+ roll $12, %ecx
+ addl %ebx, %ecx
+ /* 139 */
+ addl %esi, %ebx
+ movl %edx, %esi
+ subl %ecx, %edi
+ andl %ecx, %esi
+ andl %ebp, %edi
+ orl %edi, %esi
+ movl 40(%esp), %edi
+ roll $10, %edx
+ leal 2053994217(%ebx,%esi,1),%ebx
+ movl $-1, %esi
+ roll $9, %ebx
+ addl %eax, %ebx
+ /* 140 */
+ addl %edi, %eax
+ movl %ecx, %edi
+ subl %ebx, %esi
+ andl %ebx, %edi
+ andl %edx, %esi
+ orl %esi, %edi
+ movl 32(%esp), %esi
+ roll $10, %ecx
+ leal 2053994217(%eax,%edi,1),%eax
+ movl $-1, %edi
+ roll $12, %eax
+ addl %ebp, %eax
+ /* 141 */
+ addl %esi, %ebp
+ movl %ebx, %esi
+ subl %eax, %edi
+ andl %eax, %esi
+ andl %ecx, %edi
+ orl %edi, %esi
+ movl 44(%esp), %edi
+ roll $10, %ebx
+ leal 2053994217(%ebp,%esi,1),%ebp
+ movl $-1, %esi
+ roll $5, %ebp
+ addl %edx, %ebp
+ /* 142 */
+ addl %edi, %edx
+ movl %eax, %edi
+ subl %ebp, %esi
+ andl %ebp, %edi
+ andl %ebx, %esi
+ orl %esi, %edi
+ movl 60(%esp), %esi
+ roll $10, %eax
+ leal 2053994217(%edx,%edi,1),%edx
+ movl $-1, %edi
+ roll $15, %edx
+ addl %ecx, %edx
+ /* 143 */
+ addl %esi, %ecx
+ movl %ebp, %esi
+ subl %edx, %edi
+ andl %edx, %esi
+ andl %eax, %edi
+ orl %esi, %edi
+ movl %edx, %esi
+ roll $10, %ebp
+ leal 2053994217(%ecx,%edi,1),%ecx
+ xorl %ebp, %esi
+ roll $8, %ecx
+ addl %ebx, %ecx
+ /* 144 */
+ movl 52(%esp), %edi
+ xorl %ecx, %esi
+ addl %edi, %ebx
+ roll $10, %edx
+ addl %esi, %ebx
+ movl %ecx, %esi
+ roll $8, %ebx
+ addl %eax, %ebx
+ /* 145 */
+ xorl %edx, %esi
+ movl 64(%esp), %edi
+ xorl %ebx, %esi
+ addl %esi, %eax
+ movl %ebx, %esi
+ roll $10, %ecx
+ addl %edi, %eax
+ xorl %ecx, %esi
+ roll $5, %eax
+ addl %ebp, %eax
+ /* 146 */
+ movl 44(%esp), %edi
+ xorl %eax, %esi
+ addl %edi, %ebp
+ roll $10, %ebx
+ addl %esi, %ebp
+ movl %eax, %esi
+ roll $12, %ebp
+ addl %edx, %ebp
+ /* 147 */
+ xorl %ebx, %esi
+ movl 20(%esp), %edi
+ xorl %ebp, %esi
+ addl %esi, %edx
+ movl %ebp, %esi
+ roll $10, %eax
+ addl %edi, %edx
+ xorl %eax, %esi
+ roll $9, %edx
+ addl %ecx, %edx
+ /* 148 */
+ movl 8(%esp), %edi
+ xorl %edx, %esi
+ addl %edi, %ecx
+ roll $10, %ebp
+ addl %esi, %ecx
+ movl %edx, %esi
+ roll $12, %ecx
+ addl %ebx, %ecx
+ /* 149 */
+ xorl %ebp, %esi
+ movl 24(%esp), %edi
+ xorl %ecx, %esi
+ addl %esi, %ebx
+ movl %ecx, %esi
+ roll $10, %edx
+ addl %edi, %ebx
+ xorl %edx, %esi
+ roll $5, %ebx
+ addl %eax, %ebx
+ /* 150 */
+ movl 36(%esp), %edi
+ xorl %ebx, %esi
+ addl %edi, %eax
+ roll $10, %ecx
+ addl %esi, %eax
+ movl %ebx, %esi
+ roll $14, %eax
+ addl %ebp, %eax
+ /* 151 */
+ xorl %ecx, %esi
+ movl 32(%esp), %edi
+ xorl %eax, %esi
+ addl %esi, %ebp
+ movl %eax, %esi
+ roll $10, %ebx
+ addl %edi, %ebp
+ xorl %ebx, %esi
+ roll $6, %ebp
+ addl %edx, %ebp
+ /* 152 */
+ movl 28(%esp), %edi
+ xorl %ebp, %esi
+ addl %edi, %edx
+ roll $10, %eax
+ addl %esi, %edx
+ movl %ebp, %esi
+ roll $8, %edx
+ addl %ecx, %edx
+ /* 153 */
+ xorl %eax, %esi
+ movl 12(%esp), %edi
+ xorl %edx, %esi
+ addl %esi, %ecx
+ movl %edx, %esi
+ roll $10, %ebp
+ addl %edi, %ecx
+ xorl %ebp, %esi
+ roll $13, %ecx
+ addl %ebx, %ecx
+ /* 154 */
+ movl 56(%esp), %edi
+ xorl %ecx, %esi
+ addl %edi, %ebx
+ roll $10, %edx
+ addl %esi, %ebx
+ movl %ecx, %esi
+ roll $6, %ebx
+ addl %eax, %ebx
+ /* 155 */
+ xorl %edx, %esi
+ movl 60(%esp), %edi
+ xorl %ebx, %esi
+ addl %esi, %eax
+ movl %ebx, %esi
+ roll $10, %ecx
+ addl %edi, %eax
+ xorl %ecx, %esi
+ roll $5, %eax
+ addl %ebp, %eax
+ /* 156 */
+ movl 4(%esp), %edi
+ xorl %eax, %esi
+ addl %edi, %ebp
+ roll $10, %ebx
+ addl %esi, %ebp
+ movl %eax, %esi
+ roll $15, %ebp
+ addl %edx, %ebp
+ /* 157 */
+ xorl %ebx, %esi
+ movl 16(%esp), %edi
+ xorl %ebp, %esi
+ addl %esi, %edx
+ movl %ebp, %esi
+ roll $10, %eax
+ addl %edi, %edx
+ xorl %eax, %esi
+ roll $13, %edx
+ addl %ecx, %edx
+ /* 158 */
+ movl 40(%esp), %edi
+ xorl %edx, %esi
+ addl %edi, %ecx
+ roll $10, %ebp
+ addl %esi, %ecx
+ movl %edx, %esi
+ roll $11, %ecx
+ addl %ebx, %ecx
+ /* 159 */
+ xorl %ebp, %esi
+ movl 48(%esp), %edi
+ xorl %ecx, %esi
+ addl %esi, %ebx
+ roll $10, %edx
+ addl %edi, %ebx
+ movl 108(%esp), %edi
+ roll $11, %ebx
+ addl %eax, %ebx
+ movl 4(%edi), %esi
+ addl %esi, %edx
+ movl 76(%esp), %esi
+ addl %esi, %edx
+ movl 8(%edi), %esi
+ addl %esi, %ebp
+ movl 80(%esp), %esi
+ addl %esi, %ebp
+ movl 12(%edi), %esi
+ addl %esi, %eax
+ movl 84(%esp), %esi
+ addl %esi, %eax
+ movl 16(%edi), %esi
+ addl %esi, %ebx
+ movl 68(%esp), %esi
+ addl %esi, %ebx
+ movl (%edi), %esi
+ addl %esi, %ecx
+ movl 72(%esp), %esi
+ addl %esi, %ecx
+ movl %edx, (%edi)
+ movl %ebp, 4(%edi)
+ movl %eax, 8(%edi)
+ movl %ebx, 12(%edi)
+ movl %ecx, 16(%edi)
+ movl (%esp), %edi
+ movl 112(%esp), %esi
+ cmpl %esi, %edi
+ movl 108(%esp), %edi
+ jge .L000start
+ addl $88, %esp
+ popl %ebx
+ popl %ebp
+ popl %edi
+ popl %esi
+ ret
+.ripemd160_block_x86_end:
+ SIZE(ripemd160_block_x86,.ripemd160_block_x86_end-ripemd160_block_x86)
+.ident "desasm.pl"
diff --git a/lib/libmd/ripemd.3 b/lib/libmd/ripemd.3
new file mode 100644
index 0000000..7adc021
--- /dev/null
+++ b/lib/libmd/ripemd.3
@@ -0,0 +1,110 @@
+.\"
+.\" ----------------------------------------------------------------------------
+.\" "THE BEER-WARE LICENSE" (Revision 42):
+.\" <phk@login.dkuug.dk> wrote this file. As long as you retain this notice you
+.\" can do whatever you want with this stuff. If we meet some day, and you think
+.\" this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
+.\" ----------------------------------------------------------------------------
+.\"
+.\" From: Id: mdX.3,v 1.14 1999/02/11 20:31:49 wollman Exp
+.\" $Id: sha.3,v 1.1 1999/02/26 04:24:56 wollman Exp $
+.\"
+.Dd February 26, 1999
+.Dt RIPEMD 3
+.Os FreeBSD 4.0
+.Sh NAME
+.Nm RIPEMD160_Init ,
+.Nm RIPEMD160_Update ,
+.Nm RIPEMD160_Final ,
+.Nm RIPEMD160_End ,
+.Nm RIPEMD160_File ,
+.Nm RIPEMD160_Data
+.Nd calculate the RIPEMD160 message digest
+.Sh SYNOPSIS
+.Fd #include <sys/types.h>
+.Fd #include <ripemd.h>
+.Ft void
+.Fn RIPEMD160_Init "RIPEMD160_CTX *context"
+.Ft void
+.Fn RIPEMD160_Update "RIPEMD160_CTX *context" "const unsigned char *data" "unsigned int len"
+.Ft void
+.Fn RIPEMD160_Final "unsigned char digest[20]" "RIPEMD160_CTX *context"
+.Ft "char *"
+.Fn RIPEMD160_End "RIPEMD160_CTX *context" "char *buf"
+.Ft "char *"
+.Fn RIPEMD160_File "const char *filename" "char *buf"
+.Ft "char *"
+.Fn RIPEMD160_Data "const unsigned char *data" "unsigned int len" "char *buf"
+.Sh DESCRIPTION
+The
+.Li RIPEMD160_
+functions calculate a 160-bit cryptographic checksum (digest)
+for any number of input bytes. A cryptographic checksum is a one-way
+hash function; that is, it is computationally impractical to find
+the input corresponding to a particular output. The net result is
+a ``fingerprint'' of the input data, which doesn't disclose the actual
+input.
+.Pp
+The
+.Fn RIPEMD160_Init ,
+.Fn RIPEMD160_Update ,
+and
+.Fn RIPEMD160_Final
+functions are the core functions. Allocate an RIPEMD160_CTX, initialize it with
+.Fn RIPEMD160_Init ,
+run over the data with
+.Fn RIPEMD160_Update ,
+and finally extract the result using
+.Fn RIPEMD160_Final .
+.Pp
+.Fn RIPEMD160_End
+is a wrapper for
+.Fn RIPEMD160_Final
+which converts the return value to a 41-character
+(including the terminating '\e0')
+.Tn ASCII
+string which represents the 160 bits in hexadecimal.
+.Pp
+.Fn RIPEMD160_File
+calculates the digest of a file, and uses
+.Fn RIPEMD160_End
+to return the result.
+If the file cannot be opened, a null pointer is returned.
+.Fn RIPEMD160_Data
+calculates the digest of a chunk of data in memory, and uses
+.Fn RIPEMD160_End
+to return the result.
+.Pp
+When using
+.Fn RIPEMD160_End ,
+.Fn RIPEMD160_File ,
+or
+.Fn RIPEMD160_Data ,
+the
+.Ar buf
+argument can be a null pointer, in which case the returned string
+is allocated with
+.Xr malloc 3
+and subsequently must be explicitly deallocated using
+.Xr free 3
+after use.
+If the
+.Ar buf
+argument is non-null it must point to at least 41 characters of buffer space.
+.Sh SEE ALSO
+.Xr md2 3 ,
+.Xr md4 3 ,
+.Xr md5 3 ,
+.Xr sha 3
+.Sh AUTHORS
+The core hash routines were implemented by Eric Young based on the
+published
+.Tn RIPEMD160
+specification.
+.Sh HISTORY
+These functions appeared in
+.Fx 4.0 .
+.Sh BUGS
+No method is known to exist for finding two files having the same hash value,
+nor for finding a file with a specific hash value.
+There is, on the other hand, no guarantee that such a method doesn't exist.
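
In practice the Init/Update/Final flow described in the man page above reduces to a few lines. A minimal sketch, assuming <ripemd.h> has been installed by the beforeinstall target; the expected digest is the "message digest" entry from rmd160.ref:

	#include <stdio.h>
	#include <ripemd.h>

	int
	main(void)
	{
		RIPEMD160_CTX ctx;
		char buf[41];	/* 40 hex digits plus the terminating '\0' */

		RIPEMD160_Init(&ctx);
		/* The input may be fed in arbitrary pieces. */
		RIPEMD160_Update(&ctx, (const unsigned char *)"message ", 8);
		RIPEMD160_Update(&ctx, (const unsigned char *)"digest", 6);
		/* RIPEMD160_End() finalizes and formats the 160 bits as hex. */
		printf("%s\n", RIPEMD160_End(&ctx, buf));
		/* expected: 5d0689ef49d2fae572b881b123a85ffa21595f36 */
		return (0);
	}

Since buf is non-null here, it must supply the 41 characters of space the man page calls for; passing a null pointer instead would return a malloc'd string to be freed by the caller.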
diff --git a/lib/libmd/ripemd.h b/lib/libmd/ripemd.h
new file mode 100644
index 0000000..1399d32
--- /dev/null
+++ b/lib/libmd/ripemd.h
@@ -0,0 +1,89 @@
+/* crypto/ripemd/ripemd.h */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#ifndef HEADER_RIPEMD_H
+#define HEADER_RIPEMD_H
+
+#include <sys/cdefs.h>
+#include <sys/types.h> /* XXX switch to machine/ansi.h and __ types */
+
+#define RIPEMD160_CBLOCK 64
+#define RIPEMD160_LBLOCK 16
+#define RIPEMD160_BLOCK 16
+#define RIPEMD160_LAST_BLOCK 56
+#define RIPEMD160_LENGTH_BLOCK 8
+#define RIPEMD160_DIGEST_LENGTH 20
+
+typedef struct RIPEMD160state_st {
+ u_int32_t A,B,C,D,E;
+ u_int32_t Nl,Nh;
+ u_int32_t data[RIPEMD160_LBLOCK];
+ int num;
+} RIPEMD160_CTX;
+
+__BEGIN_DECLS
+void RIPEMD160_Init(RIPEMD160_CTX *c);
+void RIPEMD160_Update(RIPEMD160_CTX *c, const unsigned char *data,
+ size_t len);
+void RIPEMD160_Final(unsigned char *md, RIPEMD160_CTX *c);
+char *RIPEMD160_End(RIPEMD160_CTX *, char *);
+char *RIPEMD160_File(const char *, char *);
+char *RIPEMD160_Data(const unsigned char *, unsigned int, char *);
+__END_DECLS
+
+#endif
diff --git a/lib/libmd/rmd160c.c b/lib/libmd/rmd160c.c
new file mode 100644
index 0000000..5f4d9cc
--- /dev/null
+++ b/lib/libmd/rmd160c.c
@@ -0,0 +1,544 @@
+/* crypto/ripemd/rmd_dgst.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ *
+ * $Id$
+ */
+
+#include <sys/types.h>
+
+#include <stdio.h>
+
+#if 0
+#include <machine/ansi.h> /* we use the __ variants of bit-sized types */
+#endif
+#include <machine/endian.h>
+
+#include "rmd_locl.h"
+
+/*
+ * The assembly-language code is not position-independent, so don't
+ * try to use it in a shared library.
+ */
+#ifdef PIC
+#undef RMD160_ASM
+#endif
+
+char *RMD160_version="RIPEMD160 part of SSLeay 0.9.0b 11-Oct-1998";
+
+#ifdef RMD160_ASM
+void ripemd160_block_x86(RIPEMD160_CTX *c, const u_int32_t *p,int num);
+#define ripemd160_block ripemd160_block_x86
+#else
+void ripemd160_block(RIPEMD160_CTX *c, const u_int32_t *p,int num);
+#endif
+
+void RIPEMD160_Init(c)
+RIPEMD160_CTX *c;
+ {
+ c->A=RIPEMD160_A;
+ c->B=RIPEMD160_B;
+ c->C=RIPEMD160_C;
+ c->D=RIPEMD160_D;
+ c->E=RIPEMD160_E;
+ c->Nl=0;
+ c->Nh=0;
+ c->num=0;
+ }
+
+void RIPEMD160_Update(c, data, len)
+RIPEMD160_CTX *c;
+const unsigned char *data;
+size_t len;
+ {
+ register u_int32_t *p;
+ int sw,sc;
+ u_int32_t l;
+
+ if (len == 0) return;
+
+ l=(c->Nl+(len<<3))&0xffffffffL;
+ if (l < c->Nl) /* overflow */
+ c->Nh++;
+ c->Nh+=(len>>29);
+ c->Nl=l;
+
+ if (c->num != 0)
+ {
+ p=c->data;
+ sw=c->num>>2;
+ sc=c->num&0x03;
+
+ if ((c->num+len) >= RIPEMD160_CBLOCK)
+ {
+ l= p[sw];
+ p_c2l(data,l,sc);
+ p[sw++]=l;
+ for (; sw<RIPEMD160_LBLOCK; sw++)
+ {
+ c2l(data,l);
+ p[sw]=l;
+ }
+ len-=(RIPEMD160_CBLOCK-c->num);
+
+ ripemd160_block(c,p,64);
+ c->num=0;
+ /* drop through and do the rest */
+ }
+ else
+ {
+ int ew,ec;
+
+ c->num+=(int)len;
+ if ((sc+len) < 4) /* ugly, add char's to a word */
+ {
+ l= p[sw];
+ p_c2l_p(data,l,sc,len);
+ p[sw]=l;
+ }
+ else
+ {
+ ew=(c->num>>2);
+ ec=(c->num&0x03);
+ l= p[sw];
+ p_c2l(data,l,sc);
+ p[sw++]=l;
+ for (; sw < ew; sw++)
+ { c2l(data,l); p[sw]=l; }
+ if (ec)
+ {
+ c2l_p(data,l,ec);
+ p[sw]=l;
+ }
+ }
+ return;
+ }
+ }
+ /* we now can process the input data in blocks of RIPEMD160_CBLOCK
+ * chars and save the leftovers to c->data. */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ if ((((unsigned long)data)%sizeof(u_int32_t)) == 0)
+ {
+ sw=(int)len/RIPEMD160_CBLOCK;
+ if (sw > 0)
+ {
+ sw*=RIPEMD160_CBLOCK;
+ ripemd160_block(c,(u_int32_t *)data,sw);
+ data+=sw;
+ len-=sw;
+ }
+ }
+#endif
+ p=c->data;
+ while (len >= RIPEMD160_CBLOCK)
+ {
+#if BYTE_ORDER == LITTLE_ENDIAN || BYTE_ORDER == BIG_ENDIAN
+ if (p != (u_int32_t *)data)
+ memcpy(p,data,RIPEMD160_CBLOCK);
+ data+=RIPEMD160_CBLOCK;
+#if BYTE_ORDER == BIG_ENDIAN
+ for (sw=(RIPEMD160_LBLOCK/4); sw; sw--)
+ {
+ Endian_Reverse32(p[0]);
+ Endian_Reverse32(p[1]);
+ Endian_Reverse32(p[2]);
+ Endian_Reverse32(p[3]);
+ p+=4;
+ }
+#endif
+#else
+ for (sw=(RIPEMD160_LBLOCK/4); sw; sw--)
+ {
+ c2l(data,l); *(p++)=l;
+ c2l(data,l); *(p++)=l;
+ c2l(data,l); *(p++)=l;
+ c2l(data,l); *(p++)=l;
+ }
+#endif
+ p=c->data;
+ ripemd160_block(c,p,64);
+ len-=RIPEMD160_CBLOCK;
+ }
+ sc=(int)len;
+ c->num=sc;
+ if (sc)
+ {
+ sw=sc>>2; /* words to copy */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ p[sw]=0;
+ memcpy(p,data,sc);
+#else
+ sc&=0x03;
+ for ( ; sw; sw--)
+ { c2l(data,l); *(p++)=l; }
+ c2l_p(data,l,sc);
+ *p=l;
+#endif
+ }
+ }
+
+void RIPEMD160_Transform(c,b)
+RIPEMD160_CTX *c;
+unsigned char *b;
+ {
+ u_int32_t p[16];
+#if BYTE_ORDER != LITTLE_ENDIAN
+ u_int32_t *q;
+ int i;
+#endif
+
+#if BYTE_ORDER == BIG_ENDIAN || BYTE_ORDER == LITTLE_ENDIAN
+ memcpy(p,b,64);
+#if BYTE_ORDER == BIG_ENDIAN
+ q=p;
+ for (i=(RIPEMD160_LBLOCK/4); i; i--)
+ {
+ Endian_Reverse32(q[0]);
+ Endian_Reverse32(q[1]);
+ Endian_Reverse32(q[2]);
+ Endian_Reverse32(q[3]);
+ q+=4;
+ }
+#endif
+#else
+ q=p;
+ for (i=(RIPEMD160_LBLOCK/4); i; i--)
+ {
+ u_int32_t l;
+ c2l(b,l); *(q++)=l;
+ c2l(b,l); *(q++)=l;
+ c2l(b,l); *(q++)=l;
+ c2l(b,l); *(q++)=l;
+ }
+#endif
+ ripemd160_block(c,p,64);
+ }
+
+#ifndef RMD160_ASM
+
+void ripemd160_block(ctx, X, num)
+RIPEMD160_CTX *ctx;
+const u_int32_t *X;
+int num;
+ {
+ register u_int32_t A,B,C,D,E;
+ u_int32_t a,b,c,d,e;
+
+ for (;;)
+ {
+ A=ctx->A; B=ctx->B; C=ctx->C; D=ctx->D; E=ctx->E;
+
+ RIP1(A,B,C,D,E,WL00,SL00);
+ RIP1(E,A,B,C,D,WL01,SL01);
+ RIP1(D,E,A,B,C,WL02,SL02);
+ RIP1(C,D,E,A,B,WL03,SL03);
+ RIP1(B,C,D,E,A,WL04,SL04);
+ RIP1(A,B,C,D,E,WL05,SL05);
+ RIP1(E,A,B,C,D,WL06,SL06);
+ RIP1(D,E,A,B,C,WL07,SL07);
+ RIP1(C,D,E,A,B,WL08,SL08);
+ RIP1(B,C,D,E,A,WL09,SL09);
+ RIP1(A,B,C,D,E,WL10,SL10);
+ RIP1(E,A,B,C,D,WL11,SL11);
+ RIP1(D,E,A,B,C,WL12,SL12);
+ RIP1(C,D,E,A,B,WL13,SL13);
+ RIP1(B,C,D,E,A,WL14,SL14);
+ RIP1(A,B,C,D,E,WL15,SL15);
+
+ RIP2(E,A,B,C,D,WL16,SL16,KL1);
+ RIP2(D,E,A,B,C,WL17,SL17,KL1);
+ RIP2(C,D,E,A,B,WL18,SL18,KL1);
+ RIP2(B,C,D,E,A,WL19,SL19,KL1);
+ RIP2(A,B,C,D,E,WL20,SL20,KL1);
+ RIP2(E,A,B,C,D,WL21,SL21,KL1);
+ RIP2(D,E,A,B,C,WL22,SL22,KL1);
+ RIP2(C,D,E,A,B,WL23,SL23,KL1);
+ RIP2(B,C,D,E,A,WL24,SL24,KL1);
+ RIP2(A,B,C,D,E,WL25,SL25,KL1);
+ RIP2(E,A,B,C,D,WL26,SL26,KL1);
+ RIP2(D,E,A,B,C,WL27,SL27,KL1);
+ RIP2(C,D,E,A,B,WL28,SL28,KL1);
+ RIP2(B,C,D,E,A,WL29,SL29,KL1);
+ RIP2(A,B,C,D,E,WL30,SL30,KL1);
+ RIP2(E,A,B,C,D,WL31,SL31,KL1);
+
+ RIP3(D,E,A,B,C,WL32,SL32,KL2);
+ RIP3(C,D,E,A,B,WL33,SL33,KL2);
+ RIP3(B,C,D,E,A,WL34,SL34,KL2);
+ RIP3(A,B,C,D,E,WL35,SL35,KL2);
+ RIP3(E,A,B,C,D,WL36,SL36,KL2);
+ RIP3(D,E,A,B,C,WL37,SL37,KL2);
+ RIP3(C,D,E,A,B,WL38,SL38,KL2);
+ RIP3(B,C,D,E,A,WL39,SL39,KL2);
+ RIP3(A,B,C,D,E,WL40,SL40,KL2);
+ RIP3(E,A,B,C,D,WL41,SL41,KL2);
+ RIP3(D,E,A,B,C,WL42,SL42,KL2);
+ RIP3(C,D,E,A,B,WL43,SL43,KL2);
+ RIP3(B,C,D,E,A,WL44,SL44,KL2);
+ RIP3(A,B,C,D,E,WL45,SL45,KL2);
+ RIP3(E,A,B,C,D,WL46,SL46,KL2);
+ RIP3(D,E,A,B,C,WL47,SL47,KL2);
+
+ RIP4(C,D,E,A,B,WL48,SL48,KL3);
+ RIP4(B,C,D,E,A,WL49,SL49,KL3);
+ RIP4(A,B,C,D,E,WL50,SL50,KL3);
+ RIP4(E,A,B,C,D,WL51,SL51,KL3);
+ RIP4(D,E,A,B,C,WL52,SL52,KL3);
+ RIP4(C,D,E,A,B,WL53,SL53,KL3);
+ RIP4(B,C,D,E,A,WL54,SL54,KL3);
+ RIP4(A,B,C,D,E,WL55,SL55,KL3);
+ RIP4(E,A,B,C,D,WL56,SL56,KL3);
+ RIP4(D,E,A,B,C,WL57,SL57,KL3);
+ RIP4(C,D,E,A,B,WL58,SL58,KL3);
+ RIP4(B,C,D,E,A,WL59,SL59,KL3);
+ RIP4(A,B,C,D,E,WL60,SL60,KL3);
+ RIP4(E,A,B,C,D,WL61,SL61,KL3);
+ RIP4(D,E,A,B,C,WL62,SL62,KL3);
+ RIP4(C,D,E,A,B,WL63,SL63,KL3);
+
+ RIP5(B,C,D,E,A,WL64,SL64,KL4);
+ RIP5(A,B,C,D,E,WL65,SL65,KL4);
+ RIP5(E,A,B,C,D,WL66,SL66,KL4);
+ RIP5(D,E,A,B,C,WL67,SL67,KL4);
+ RIP5(C,D,E,A,B,WL68,SL68,KL4);
+ RIP5(B,C,D,E,A,WL69,SL69,KL4);
+ RIP5(A,B,C,D,E,WL70,SL70,KL4);
+ RIP5(E,A,B,C,D,WL71,SL71,KL4);
+ RIP5(D,E,A,B,C,WL72,SL72,KL4);
+ RIP5(C,D,E,A,B,WL73,SL73,KL4);
+ RIP5(B,C,D,E,A,WL74,SL74,KL4);
+ RIP5(A,B,C,D,E,WL75,SL75,KL4);
+ RIP5(E,A,B,C,D,WL76,SL76,KL4);
+ RIP5(D,E,A,B,C,WL77,SL77,KL4);
+ RIP5(C,D,E,A,B,WL78,SL78,KL4);
+ RIP5(B,C,D,E,A,WL79,SL79,KL4);
+
+ a=A; b=B; c=C; d=D; e=E;
+ /* Do other half */
+ A=ctx->A; B=ctx->B; C=ctx->C; D=ctx->D; E=ctx->E;
+
+ RIP5(A,B,C,D,E,WR00,SR00,KR0);
+ RIP5(E,A,B,C,D,WR01,SR01,KR0);
+ RIP5(D,E,A,B,C,WR02,SR02,KR0);
+ RIP5(C,D,E,A,B,WR03,SR03,KR0);
+ RIP5(B,C,D,E,A,WR04,SR04,KR0);
+ RIP5(A,B,C,D,E,WR05,SR05,KR0);
+ RIP5(E,A,B,C,D,WR06,SR06,KR0);
+ RIP5(D,E,A,B,C,WR07,SR07,KR0);
+ RIP5(C,D,E,A,B,WR08,SR08,KR0);
+ RIP5(B,C,D,E,A,WR09,SR09,KR0);
+ RIP5(A,B,C,D,E,WR10,SR10,KR0);
+ RIP5(E,A,B,C,D,WR11,SR11,KR0);
+ RIP5(D,E,A,B,C,WR12,SR12,KR0);
+ RIP5(C,D,E,A,B,WR13,SR13,KR0);
+ RIP5(B,C,D,E,A,WR14,SR14,KR0);
+ RIP5(A,B,C,D,E,WR15,SR15,KR0);
+
+ RIP4(E,A,B,C,D,WR16,SR16,KR1);
+ RIP4(D,E,A,B,C,WR17,SR17,KR1);
+ RIP4(C,D,E,A,B,WR18,SR18,KR1);
+ RIP4(B,C,D,E,A,WR19,SR19,KR1);
+ RIP4(A,B,C,D,E,WR20,SR20,KR1);
+ RIP4(E,A,B,C,D,WR21,SR21,KR1);
+ RIP4(D,E,A,B,C,WR22,SR22,KR1);
+ RIP4(C,D,E,A,B,WR23,SR23,KR1);
+ RIP4(B,C,D,E,A,WR24,SR24,KR1);
+ RIP4(A,B,C,D,E,WR25,SR25,KR1);
+ RIP4(E,A,B,C,D,WR26,SR26,KR1);
+ RIP4(D,E,A,B,C,WR27,SR27,KR1);
+ RIP4(C,D,E,A,B,WR28,SR28,KR1);
+ RIP4(B,C,D,E,A,WR29,SR29,KR1);
+ RIP4(A,B,C,D,E,WR30,SR30,KR1);
+ RIP4(E,A,B,C,D,WR31,SR31,KR1);
+
+ RIP3(D,E,A,B,C,WR32,SR32,KR2);
+ RIP3(C,D,E,A,B,WR33,SR33,KR2);
+ RIP3(B,C,D,E,A,WR34,SR34,KR2);
+ RIP3(A,B,C,D,E,WR35,SR35,KR2);
+ RIP3(E,A,B,C,D,WR36,SR36,KR2);
+ RIP3(D,E,A,B,C,WR37,SR37,KR2);
+ RIP3(C,D,E,A,B,WR38,SR38,KR2);
+ RIP3(B,C,D,E,A,WR39,SR39,KR2);
+ RIP3(A,B,C,D,E,WR40,SR40,KR2);
+ RIP3(E,A,B,C,D,WR41,SR41,KR2);
+ RIP3(D,E,A,B,C,WR42,SR42,KR2);
+ RIP3(C,D,E,A,B,WR43,SR43,KR2);
+ RIP3(B,C,D,E,A,WR44,SR44,KR2);
+ RIP3(A,B,C,D,E,WR45,SR45,KR2);
+ RIP3(E,A,B,C,D,WR46,SR46,KR2);
+ RIP3(D,E,A,B,C,WR47,SR47,KR2);
+
+ RIP2(C,D,E,A,B,WR48,SR48,KR3);
+ RIP2(B,C,D,E,A,WR49,SR49,KR3);
+ RIP2(A,B,C,D,E,WR50,SR50,KR3);
+ RIP2(E,A,B,C,D,WR51,SR51,KR3);
+ RIP2(D,E,A,B,C,WR52,SR52,KR3);
+ RIP2(C,D,E,A,B,WR53,SR53,KR3);
+ RIP2(B,C,D,E,A,WR54,SR54,KR3);
+ RIP2(A,B,C,D,E,WR55,SR55,KR3);
+ RIP2(E,A,B,C,D,WR56,SR56,KR3);
+ RIP2(D,E,A,B,C,WR57,SR57,KR3);
+ RIP2(C,D,E,A,B,WR58,SR58,KR3);
+ RIP2(B,C,D,E,A,WR59,SR59,KR3);
+ RIP2(A,B,C,D,E,WR60,SR60,KR3);
+ RIP2(E,A,B,C,D,WR61,SR61,KR3);
+ RIP2(D,E,A,B,C,WR62,SR62,KR3);
+ RIP2(C,D,E,A,B,WR63,SR63,KR3);
+
+ RIP1(B,C,D,E,A,WR64,SR64);
+ RIP1(A,B,C,D,E,WR65,SR65);
+ RIP1(E,A,B,C,D,WR66,SR66);
+ RIP1(D,E,A,B,C,WR67,SR67);
+ RIP1(C,D,E,A,B,WR68,SR68);
+ RIP1(B,C,D,E,A,WR69,SR69);
+ RIP1(A,B,C,D,E,WR70,SR70);
+ RIP1(E,A,B,C,D,WR71,SR71);
+ RIP1(D,E,A,B,C,WR72,SR72);
+ RIP1(C,D,E,A,B,WR73,SR73);
+ RIP1(B,C,D,E,A,WR74,SR74);
+ RIP1(A,B,C,D,E,WR75,SR75);
+ RIP1(E,A,B,C,D,WR76,SR76);
+ RIP1(D,E,A,B,C,WR77,SR77);
+ RIP1(C,D,E,A,B,WR78,SR78);
+ RIP1(B,C,D,E,A,WR79,SR79);
+
+ D =ctx->B+c+D;
+ ctx->B=ctx->C+d+E;
+ ctx->C=ctx->D+e+A;
+ ctx->D=ctx->E+a+B;
+ ctx->E=ctx->A+b+C;
+ ctx->A=D;
+
+ X+=16;
+ num-=64;
+ if (num <= 0) break;
+ }
+ }
+#endif
+
+void RIPEMD160_Final(md, c)
+unsigned char *md;
+RIPEMD160_CTX *c;
+ {
+ register int i,j;
+ register u_int32_t l;
+ register u_int32_t *p;
+ static unsigned char end[4]={0x80,0x00,0x00,0x00};
+ unsigned char *cp=end;
+
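+	/*
+	 * Standard MD-style padding: append the 0x80 marker byte, zero
+	 * fill, and place the 64-bit bit count (Nl, Nh) in the last two
+	 * words of the final block.
+	 */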
+	/* c->num should definitely have room for at least one more byte. */
+ p=c->data;
+ j=c->num;
+ i=j>>2;
+
+	/* Purify often complains about the following line as an
+	 * Uninitialized Memory Read.  While this can be true, the
+	 * p_c2l macro below resets l in exactly that case: j&0x03 is
+	 * the number of 'valid' bytes already in p[i], and the UMR can
+	 * occur only when j&0x03 == 0, which is also the only time
+	 * p_c2l does l= *(cp++) instead of l|= *(cp++).
+	 * Many thanks to Alex Tang <altitude@cic.net> for picking up
+	 * this 'potential bug'. */
+#ifdef PURIFY
+ if ((j&0x03) == 0) p[i]=0;
+#endif
+ l=p[i];
+ p_c2l(cp,l,j&0x03);
+ p[i]=l;
+ i++;
+ /* i is the next 'undefined word' */
+ if (c->num >= RIPEMD160_LAST_BLOCK)
+ {
+ for (; i<RIPEMD160_LBLOCK; i++)
+ p[i]=0;
+ ripemd160_block(c,p,64);
+ i=0;
+ }
+ for (; i<(RIPEMD160_LBLOCK-2); i++)
+ p[i]=0;
+ p[RIPEMD160_LBLOCK-2]=c->Nl;
+ p[RIPEMD160_LBLOCK-1]=c->Nh;
+ ripemd160_block(c,p,64);
+ cp=md;
+ l=c->A; l2c(l,cp);
+ l=c->B; l2c(l,cp);
+ l=c->C; l2c(l,cp);
+ l=c->D; l2c(l,cp);
+ l=c->E; l2c(l,cp);
+
+ /* clear stuff, ripemd160_block may be leaving some stuff on the stack
+ * but I'm not worried :-) */
+ c->num=0;
+/* memset((char *)&c,0,sizeof(c));*/
+ }
+
+#ifdef undef
+int printit(l)
+unsigned long *l;
+ {
+ int i,ii;
+
+ for (i=0; i<2; i++)
+ {
+ for (ii=0; ii<8; ii++)
+ {
+ fprintf(stderr,"%08lx ",l[i*8+ii]);
+ }
+ fprintf(stderr,"\n");
+ }
+ }
+#endif
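Taken together, RIPEMD160_Init(), RIPEMD160_Update() and RIPEMD160_Final()
implement the usual streaming digest interface.  A minimal caller sketch
(illustrative only, not part of the committed file; it assumes nothing
beyond the declarations in ripemd.h):

	#include <stdio.h>
	#include "ripemd.h"

	int
	main(void)
	{
		RIPEMD160_CTX ctx;
		unsigned char md[20];	/* RIPEMD160 digest is 20 bytes */
		int i;

		RIPEMD160_Init(&ctx);
		RIPEMD160_Update(&ctx, (const unsigned char *)"abc", 3);
		RIPEMD160_Final(md, &ctx);
		for (i = 0; i < 20; i++)
			printf("%02x", md[i]);
		printf("\n");
		return (0);
	}

The RIPEMD160_Data() convenience routine used by rmddriver.c below
effectively wraps this same sequence for one-shot hashing.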
diff --git a/lib/libmd/rmd_locl.h b/lib/libmd/rmd_locl.h
new file mode 100644
index 0000000..49f054c
--- /dev/null
+++ b/lib/libmd/rmd_locl.h
@@ -0,0 +1,216 @@
+/* crypto/ripemd/rmd_locl.h */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscape's SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are adhered to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the routines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publicly available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include "ripemd.h"
+
+#undef c2nl
+#define c2nl(c,l) (l =(((u_int32_t)(*((c)++)))<<24), \
+ l|=(((u_int32_t)(*((c)++)))<<16), \
+ l|=(((u_int32_t)(*((c)++)))<< 8), \
+ l|=(((u_int32_t)(*((c)++))) ))
+
+#undef p_c2nl
+#define p_c2nl(c,l,n) { \
+ switch (n) { \
+ case 0: l =((u_int32_t)(*((c)++)))<<24; \
+ case 1: l|=((u_int32_t)(*((c)++)))<<16; \
+ case 2: l|=((u_int32_t)(*((c)++)))<< 8; \
+ case 3: l|=((u_int32_t)(*((c)++))); \
+ } \
+ }
+
+#undef c2nl_p
+/* NOTE the pointer is not incremented at the end of this */
+#define c2nl_p(c,l,n) { \
+ l=0; \
+ (c)+=n; \
+ switch (n) { \
+ case 3: l =((u_int32_t)(*(--(c))))<< 8; \
+ case 2: l|=((u_int32_t)(*(--(c))))<<16; \
+ case 1: l|=((u_int32_t)(*(--(c))))<<24; \
+ } \
+ }
+
+#undef p_c2nl_p
+#define p_c2nl_p(c,l,sc,len) { \
+ switch (sc) \
+ { \
+ case 0: l =((u_int32_t)(*((c)++)))<<24; \
+ if (--len == 0) break; \
+ case 1: l|=((u_int32_t)(*((c)++)))<<16; \
+ if (--len == 0) break; \
+ case 2: l|=((u_int32_t)(*((c)++)))<< 8; \
+ } \
+ }
+
+#undef nl2c
+#define nl2c(l,c) (*((c)++)=(unsigned char)(((l)>>24)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>16)&0xff), \
+ *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
+ *((c)++)=(unsigned char)(((l) )&0xff))
+
+#undef c2l
+#define c2l(c,l) (l =(((u_int32_t)(*((c)++))) ), \
+ l|=(((u_int32_t)(*((c)++)))<< 8), \
+ l|=(((u_int32_t)(*((c)++)))<<16), \
+ l|=(((u_int32_t)(*((c)++)))<<24))
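+
+/*
+ * Example: c2l gathers four bytes least-significant first, so the byte
+ * stream 01 02 03 04 loads as the 32-bit word 0x04030201.
+ */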
+
+#undef p_c2l
+#define p_c2l(c,l,n) { \
+ switch (n) { \
+ case 0: l =((u_int32_t)(*((c)++))); \
+ case 1: l|=((u_int32_t)(*((c)++)))<< 8; \
+ case 2: l|=((u_int32_t)(*((c)++)))<<16; \
+ case 3: l|=((u_int32_t)(*((c)++)))<<24; \
+ } \
+ }
+
+#undef c2l_p
+/* NOTE the pointer is not incremented at the end of this */
+#define c2l_p(c,l,n) { \
+ l=0; \
+ (c)+=n; \
+ switch (n) { \
+ case 3: l =((u_int32_t)(*(--(c))))<<16; \
+ case 2: l|=((u_int32_t)(*(--(c))))<< 8; \
+ case 1: l|=((u_int32_t)(*(--(c)))); \
+ } \
+ }
+
+#undef p_c2l_p
+#define p_c2l_p(c,l,sc,len) { \
+ switch (sc) \
+ { \
+ case 0: l =((u_int32_t)(*((c)++))); \
+ if (--len == 0) break; \
+ case 1: l|=((u_int32_t)(*((c)++)))<< 8; \
+ if (--len == 0) break; \
+ case 2: l|=((u_int32_t)(*((c)++)))<<16; \
+ } \
+ }
+
+#undef l2c
+#define l2c(l,c) (*((c)++)=(unsigned char)(((l) )&0xff), \
+ *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>16)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>24)&0xff))
+
+#undef ROTATE
+#if defined(WIN32)
+#define ROTATE(a,n) _lrotl(a,n)
+#else
+#define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
+#endif
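+
+/*
+ * ROTATE is a 32-bit rotate left; e.g. ROTATE(0x80000001, 1) yields
+ * 0x00000003, the bit shifted out on the left re-entering on the right.
+ */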
+
+/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
+#if defined(WIN32)
+/* 5 instructions with rotate instruction, else 9 */
+#define Endian_Reverse32(a) \
+ { \
+ u_int32_t l=(a); \
+ (a)=((ROTATE(l,8)&0x00FF00FF)|(ROTATE(l,24)&0xFF00FF00)); \
+ }
+#else
+/* 6 instructions with rotate instruction, else 8 */
+#define Endian_Reverse32(a) \
+ { \
+ u_int32_t l=(a); \
+ l=(((l&0xFF00FF00)>>8L)|((l&0x00FF00FF)<<8L)); \
+ (a)=ROTATE(l,16L); \
+ }
+#endif
+
+#define F1(x,y,z) ((x)^(y)^(z))
+#define F2(x,y,z) (((x)&(y))|((~x)&z))
+#define F3(x,y,z) (((x)|(~y))^(z))
+#define F4(x,y,z) (((x)&(z))|((y)&(~(z))))
+#define F5(x,y,z) ((x)^((y)|(~(z))))
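+
+/*
+ * F1 is bitwise parity (XOR); F2 selects y or z on the bits of x (the
+ * MD5-style mux); F4 is the same mux keyed on z; F3 and F5 combine an
+ * OR-with-complement and an XOR.  The left line applies F1..F5 in that
+ * order, the right line the reverse (see ripemd160_block in rmd160c.c).
+ */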
+
+#define RIPEMD160_A 0x67452301L
+#define RIPEMD160_B 0xEFCDAB89L
+#define RIPEMD160_C 0x98BADCFEL
+#define RIPEMD160_D 0x10325476L
+#define RIPEMD160_E 0xC3D2E1F0L
+
+#include "rmdconst.h"
+
+#define RIP1(a,b,c,d,e,w,s) { \
+ a+=F1(b,c,d)+X[w]; \
+ a=ROTATE(a,s)+e; \
+ c=ROTATE(c,10); }
+
+#define RIP2(a,b,c,d,e,w,s,K) { \
+ a+=F2(b,c,d)+X[w]+K; \
+ a=ROTATE(a,s)+e; \
+ c=ROTATE(c,10); }
+
+#define RIP3(a,b,c,d,e,w,s,K) { \
+ a+=F3(b,c,d)+X[w]+K; \
+ a=ROTATE(a,s)+e; \
+ c=ROTATE(c,10); }
+
+#define RIP4(a,b,c,d,e,w,s,K) { \
+ a+=F4(b,c,d)+X[w]+K; \
+ a=ROTATE(a,s)+e; \
+ c=ROTATE(c,10); }
+
+#define RIP5(a,b,c,d,e,w,s,K) { \
+ a+=F5(b,c,d)+X[w]+K; \
+ a=ROTATE(a,s)+e; \
+ c=ROTATE(c,10); }
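+
+/*
+ * One round, e.g. RIP1(A,B,C,D,E,WL00,SL00), expands to
+ *	A += F1(B,C,D) + X[0];  A = ROTATE(A,11) + E;  C = ROTATE(C,10);
+ * rotating the argument list at each call site takes the place of
+ * shuffling the five working variables between rounds.
+ */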
+
diff --git a/lib/libmd/rmdconst.h b/lib/libmd/rmdconst.h
new file mode 100644
index 0000000..59c48de
--- /dev/null
+++ b/lib/libmd/rmdconst.h
@@ -0,0 +1,399 @@
+/* crypto/ripemd/rmdconst.h */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscape's SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are adhered to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the routines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publicly available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+#define KL0 0x00000000L
+#define KL1 0x5A827999L
+#define KL2 0x6ED9EBA1L
+#define KL3 0x8F1BBCDCL
+#define KL4 0xA953FD4EL
+
+#define KR0 0x50A28BE6L
+#define KR1 0x5C4DD124L
+#define KR2 0x6D703EF3L
+#define KR3 0x7A6D76E9L
+#define KR4 0x00000000L
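+
+/*
+ * "Nothing up my sleeve" constants: KL1..KL4 are the integer parts of
+ * 2^30 times the square roots of 2, 3, 5 and 7; KR0..KR3 are 2^30
+ * times the cube roots of the same primes.
+ */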
+
+#define WL00 0
+#define SL00 11
+#define WL01 1
+#define SL01 14
+#define WL02 2
+#define SL02 15
+#define WL03 3
+#define SL03 12
+#define WL04 4
+#define SL04 5
+#define WL05 5
+#define SL05 8
+#define WL06 6
+#define SL06 7
+#define WL07 7
+#define SL07 9
+#define WL08 8
+#define SL08 11
+#define WL09 9
+#define SL09 13
+#define WL10 10
+#define SL10 14
+#define WL11 11
+#define SL11 15
+#define WL12 12
+#define SL12 6
+#define WL13 13
+#define SL13 7
+#define WL14 14
+#define SL14 9
+#define WL15 15
+#define SL15 8
+
+#define WL16 7
+#define SL16 7
+#define WL17 4
+#define SL17 6
+#define WL18 13
+#define SL18 8
+#define WL19 1
+#define SL19 13
+#define WL20 10
+#define SL20 11
+#define WL21 6
+#define SL21 9
+#define WL22 15
+#define SL22 7
+#define WL23 3
+#define SL23 15
+#define WL24 12
+#define SL24 7
+#define WL25 0
+#define SL25 12
+#define WL26 9
+#define SL26 15
+#define WL27 5
+#define SL27 9
+#define WL28 2
+#define SL28 11
+#define WL29 14
+#define SL29 7
+#define WL30 11
+#define SL30 13
+#define WL31 8
+#define SL31 12
+
+#define WL32 3
+#define SL32 11
+#define WL33 10
+#define SL33 13
+#define WL34 14
+#define SL34 6
+#define WL35 4
+#define SL35 7
+#define WL36 9
+#define SL36 14
+#define WL37 15
+#define SL37 9
+#define WL38 8
+#define SL38 13
+#define WL39 1
+#define SL39 15
+#define WL40 2
+#define SL40 14
+#define WL41 7
+#define SL41 8
+#define WL42 0
+#define SL42 13
+#define WL43 6
+#define SL43 6
+#define WL44 13
+#define SL44 5
+#define WL45 11
+#define SL45 12
+#define WL46 5
+#define SL46 7
+#define WL47 12
+#define SL47 5
+
+#define WL48 1
+#define SL48 11
+#define WL49 9
+#define SL49 12
+#define WL50 11
+#define SL50 14
+#define WL51 10
+#define SL51 15
+#define WL52 0
+#define SL52 14
+#define WL53 8
+#define SL53 15
+#define WL54 12
+#define SL54 9
+#define WL55 4
+#define SL55 8
+#define WL56 13
+#define SL56 9
+#define WL57 3
+#define SL57 14
+#define WL58 7
+#define SL58 5
+#define WL59 15
+#define SL59 6
+#define WL60 14
+#define SL60 8
+#define WL61 5
+#define SL61 6
+#define WL62 6
+#define SL62 5
+#define WL63 2
+#define SL63 12
+
+#define WL64 4
+#define SL64 9
+#define WL65 0
+#define SL65 15
+#define WL66 5
+#define SL66 5
+#define WL67 9
+#define SL67 11
+#define WL68 7
+#define SL68 6
+#define WL69 12
+#define SL69 8
+#define WL70 2
+#define SL70 13
+#define WL71 10
+#define SL71 12
+#define WL72 14
+#define SL72 5
+#define WL73 1
+#define SL73 12
+#define WL74 3
+#define SL74 13
+#define WL75 8
+#define SL75 14
+#define WL76 11
+#define SL76 11
+#define WL77 6
+#define SL77 8
+#define WL78 15
+#define SL78 5
+#define WL79 13
+#define SL79 6
+
+#define WR00 5
+#define SR00 8
+#define WR01 14
+#define SR01 9
+#define WR02 7
+#define SR02 9
+#define WR03 0
+#define SR03 11
+#define WR04 9
+#define SR04 13
+#define WR05 2
+#define SR05 15
+#define WR06 11
+#define SR06 15
+#define WR07 4
+#define SR07 5
+#define WR08 13
+#define SR08 7
+#define WR09 6
+#define SR09 7
+#define WR10 15
+#define SR10 8
+#define WR11 8
+#define SR11 11
+#define WR12 1
+#define SR12 14
+#define WR13 10
+#define SR13 14
+#define WR14 3
+#define SR14 12
+#define WR15 12
+#define SR15 6
+
+#define WR16 6
+#define SR16 9
+#define WR17 11
+#define SR17 13
+#define WR18 3
+#define SR18 15
+#define WR19 7
+#define SR19 7
+#define WR20 0
+#define SR20 12
+#define WR21 13
+#define SR21 8
+#define WR22 5
+#define SR22 9
+#define WR23 10
+#define SR23 11
+#define WR24 14
+#define SR24 7
+#define WR25 15
+#define SR25 7
+#define WR26 8
+#define SR26 12
+#define WR27 12
+#define SR27 7
+#define WR28 4
+#define SR28 6
+#define WR29 9
+#define SR29 15
+#define WR30 1
+#define SR30 13
+#define WR31 2
+#define SR31 11
+
+#define WR32 15
+#define SR32 9
+#define WR33 5
+#define SR33 7
+#define WR34 1
+#define SR34 15
+#define WR35 3
+#define SR35 11
+#define WR36 7
+#define SR36 8
+#define WR37 14
+#define SR37 6
+#define WR38 6
+#define SR38 6
+#define WR39 9
+#define SR39 14
+#define WR40 11
+#define SR40 12
+#define WR41 8
+#define SR41 13
+#define WR42 12
+#define SR42 5
+#define WR43 2
+#define SR43 14
+#define WR44 10
+#define SR44 13
+#define WR45 0
+#define SR45 13
+#define WR46 4
+#define SR46 7
+#define WR47 13
+#define SR47 5
+
+#define WR48 8
+#define SR48 15
+#define WR49 6
+#define SR49 5
+#define WR50 4
+#define SR50 8
+#define WR51 1
+#define SR51 11
+#define WR52 3
+#define SR52 14
+#define WR53 11
+#define SR53 14
+#define WR54 15
+#define SR54 6
+#define WR55 0
+#define SR55 14
+#define WR56 5
+#define SR56 6
+#define WR57 12
+#define SR57 9
+#define WR58 2
+#define SR58 12
+#define WR59 13
+#define SR59 9
+#define WR60 9
+#define SR60 12
+#define WR61 7
+#define SR61 5
+#define WR62 10
+#define SR62 15
+#define WR63 14
+#define SR63 8
+
+#define WR64 12
+#define SR64 8
+#define WR65 15
+#define SR65 5
+#define WR66 10
+#define SR66 12
+#define WR67 4
+#define SR67 9
+#define WR68 1
+#define SR68 12
+#define WR69 5
+#define SR69 5
+#define WR70 8
+#define SR70 14
+#define WR71 7
+#define SR71 6
+#define WR72 6
+#define SR72 8
+#define WR73 2
+#define SR73 13
+#define WR74 13
+#define SR74 6
+#define WR75 14
+#define SR75 5
+#define WR76 0
+#define SR76 15
+#define WR77 3
+#define SR77 13
+#define WR78 9
+#define SR78 11
+#define WR79 11
+#define SR79 11
+
diff --git a/lib/libmd/rmddriver.c b/lib/libmd/rmddriver.c
new file mode 100644
index 0000000..e2aa0a4
--- /dev/null
+++ b/lib/libmd/rmddriver.c
@@ -0,0 +1,51 @@
+/* RIPEMD160DRIVER.C - test driver for RIPEMD160
+ * $Id: RIPEMD160driver.c,v 1.1 1999/02/26 04:24:56 wollman Exp $
+ */
+
+/* Copyright (C) 1990-2, RSA Data Security, Inc. Created 1990. All
+ rights reserved.
+
+ RSA Data Security, Inc. makes no representations concerning either
+ the merchantability of this software or the suitability of this
+ software for any particular purpose. It is provided "as is"
+ without express or implied warranty of any kind.
+
+ These notices must be retained in any copies of any part of this
+ documentation and/or software.
+ */
+
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <time.h>
+#include <string.h>
+#include "ripemd.h"
+
+/* Digests a string and prints the result.
+ */
+static void RIPEMD160String (string)
+char *string;
+{
+ char buf[2*20+1];
+
+ printf ("RIPEMD160 (\"%s\") = %s\n",
+ string, RIPEMD160_Data(string,strlen(string),buf));
+}
+
+/* Digests a reference suite of strings and prints the results.
+ */
+main()
+{
+ printf ("RIPEMD160 test suite:\n");
+
+ RIPEMD160String ("");
+ RIPEMD160String ("abc");
+ RIPEMD160String ("message digest");
+ RIPEMD160String ("abcdefghijklmnopqrstuvwxyz");
+ RIPEMD160String
+ ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789");
+ RIPEMD160String
+ ("1234567890123456789012345678901234567890\
+1234567890123456789012345678901234567890");
+ return 0;
+}
diff --git a/lib/libmd/sha.3 b/lib/libmd/sha.3
index d9ae849..28a29bd 100644
--- a/lib/libmd/sha.3
+++ b/lib/libmd/sha.3
@@ -7,10 +7,10 @@
.\" ----------------------------------------------------------------------------
.\"
.\" From: Id: mdX.3,v 1.14 1999/02/11 20:31:49 wollman Exp
-.\" $Id$
+.\" $Id: sha.3,v 1.1 1999/02/26 04:24:56 wollman Exp $
.\"
.Dd February 25, 1999
-.Dt SHAX 3
+.Dt SHA 3
.Os FreeBSD 4.0
.Sh NAME
.Nm SHA_Init ,
@@ -123,7 +123,8 @@ argument is non-null it must point to at least 41 characters of buffer space.
.Sh SEE ALSO
.Xr md2 3 ,
.Xr md4 3 ,
-.Xr md5 3
+.Xr md5 3 ,
+.Xr ripemd 3
.Sh AUTHORS
The core hash routines were implemented by Eric Young based on the
published
diff --git a/lib/libmd/sha.h b/lib/libmd/sha.h
index e0f3da2..ed3867d 100644
--- a/lib/libmd/sha.h
+++ b/lib/libmd/sha.h
@@ -54,7 +54,7 @@
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*
- * $Id$
+ * $Id: sha.h,v 1.1 1999/02/26 04:24:56 wollman Exp $
*/
#ifndef _SHA_H_
@@ -80,17 +80,14 @@ typedef struct SHAstate_st {
__BEGIN_DECLS
void SHA_Init(SHA_CTX *c);
-void SHA_Update(SHA_CTX *c, const unsigned char *data, u_int32_t len);
+void SHA_Update(SHA_CTX *c, const unsigned char *data, size_t len);
void SHA_Final(unsigned char *md, SHA_CTX *c);
-void SHA_Transform(SHA_CTX *c, unsigned char *data);
char *SHA_End(SHA_CTX *, char *);
char *SHA_File(const char *, char *);
char *SHA_Data(const unsigned char *, unsigned int, char *);
void SHA1_Init(SHA_CTX *c);
-void SHA1_Update(SHA_CTX *c, const unsigned char *data, unsigned long len);
+void SHA1_Update(SHA_CTX *c, const unsigned char *data, size_t len);
void SHA1_Final(unsigned char *md, SHA_CTX *c);
-unsigned char *SHA1(unsigned char *d, unsigned long n, unsigned char *md);
-void SHA1_Transform(SHA_CTX *c, unsigned char *data);
char *SHA1_End(SHA_CTX *, char *);
char *SHA1_File(const char *, char *);
char *SHA1_Data(const unsigned char *, unsigned int, char *);
diff --git a/lib/libmd/sha1c.c b/lib/libmd/sha1c.c
index 60c0565..660cff9 100644
--- a/lib/libmd/sha1c.c
+++ b/lib/libmd/sha1c.c
@@ -143,7 +143,7 @@ void
SHA1_Update(c, data, len)
SHA_CTX *c;
const unsigned char *data;
- unsigned long len;
+ size_t len;
{
register u_int32_t *p;
int ew,ec,sw,sc;