-rw-r--r--  secure/lib/libcrypto/Makefile        |   60
-rw-r--r--  secure/lib/libcrypto/Makefile.asm    |    5
-rw-r--r--  secure/lib/libcrypto/i386/bf-586.s   |  205
-rw-r--r--  secure/lib/libcrypto/i386/bf-686.s   |  197
-rw-r--r--  secure/lib/libcrypto/i386/bn-586.s   |  186
-rw-r--r--  secure/lib/libcrypto/i386/cast-586.s |  207
-rw-r--r--  secure/lib/libcrypto/i386/co-586.s   |  444
-rw-r--r--  secure/lib/libcrypto/i386/crypt586.s |  492
-rw-r--r--  secure/lib/libcrypto/i386/des-586.s  | 2414
-rw-r--r--  secure/lib/libcrypto/i386/md5-586.s  |  150
-rw-r--r--  secure/lib/libcrypto/i386/rc4-586.s  |   46
-rw-r--r--  secure/lib/libcrypto/i386/rc5-586.s  |   97
-rw-r--r--  secure/lib/libcrypto/i386/rmd-586.s  |  332
-rw-r--r--  secure/lib/libcrypto/i386/sha1-586.s | 1785
14 files changed, 2861 insertions(+), 3759 deletions(-)
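Two changes recur throughout this commit. The Makefile now selects the hand-written i386 assembler sources in place of the portable C implementations when building for i386 (the .if ${MACHINE_ARCH} == "i386" blocks below), and the generated .s files replace their absolute jump tables with position-independent ones built via the call/pop idiom, so the tables no longer require absolute addresses in the text segment. A minimal sketch of that idiom, with illustrative label names rather than the generated .L00x labels:

	call	.Lpic
.Lpic:
	popl	%edx				# %edx = runtime address of .Lpic
	leal	.Ltable-.Lpic(%edx), %ecx	# %ecx = runtime address of .Ltable
	movl	(%ecx,%ebp,4), %ebp		# fetch the offset for case %ebp
	addl	%edx, %ebp			# base + offset = jump target
	jmp	*%ebp
.Ltable:
	.long	0
	.long	.Lcase1-.Lpic			# entries are link-time constant offsets
	.long	.Lcase2-.Lpic

Each table entry is a difference of two local labels, which is a constant the assembler can resolve, so the dispatch works at whatever address the object is loaded.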
diff --git a/secure/lib/libcrypto/Makefile b/secure/lib/libcrypto/Makefile
index 3b50af3..745ecd0 100644
--- a/secure/lib/libcrypto/Makefile
+++ b/secure/lib/libcrypto/Makefile
@@ -42,7 +42,16 @@ SRCS+= a_bitstr.c a_bool.c a_bytes.c a_d2i_fp.c a_digest.c \
 INCS+= asn1.h asn1_mac.h asn1t.h
 
 # bf
-SRCS+= bf_cfb64.c bf_ecb.c bf_enc.c bf_ofb64.c bf_skey.c
+SRCS+= bf_cfb64.c bf_ecb.c bf_ofb64.c bf_skey.c
+.if ${MACHINE_ARCH} == "i386"
+.if ${MACHINE_CPU:Mi686}
+SRCS+= bf-686.s
+.else
+SRCS+= bf-586.s
+.endif
+.else
+SRCS+= bf_enc.c
+.endif
 INCS+= blowfish.h
 
 # bio
@@ -53,10 +62,16 @@ SRCS+= b_dump.c b_print.c b_sock.c bf_buff.c bf_lbuf.c bf_nbio.c \
 INCS+= bio.h
 
 # bn
-SRCS+= bn_add.c bn_asm.c bn_blind.c bn_ctx.c bn_div.c bn_err.c bn_exp.c \
+SRCS+= bn_add.c bn_blind.c bn_ctx.c bn_div.c bn_err.c bn_exp.c \
 	bn_exp2.c bn_gcd.c bn_kron.c bn_lib.c bn_mod.c bn_mont.c \
 	bn_mpi.c bn_mul.c bn_prime.c bn_print.c bn_rand.c bn_recp.c \
 	bn_shift.c bn_sqr.c bn_sqrt.c bn_word.c
+.if ${MACHINE_ARCH} == "i386"
+SRCS+= bn-586.s co-586.s
+.else
+SRCS+= bn_asm.c
+.endif
+
 INCS+= bn.h
 
 # buffer
@@ -64,7 +79,12 @@ SRCS+= buf_err.c buffer.c
 INCS+= buffer.h
 
 # cast
-SRCS+= c_cfb64.c c_ecb.c c_enc.c c_ofb64.c c_skey.c
+SRCS+= c_cfb64.c c_ecb.c c_ofb64.c c_skey.c
+.if ${MACHINE_ARCH} == "i386"
+SRCS+= cast-586.s
+.else
+SRCS+= c_enc.c
+.endif
 INCS+= cast.h
 
 # comp
@@ -77,10 +97,15 @@ INCS+= conf.h conf_api.h
 
 # des
 SRCS+= cbc3_enc.c cbc_cksm.c cbc_enc.c cfb64ede.c cfb64enc.c cfb_enc.c \
-	des_enc.c des_old.c des_old2.c ecb3_enc.c ecb_enc.c ede_cbcm_enc.c \
-	enc_read.c enc_writ.c fcrypt.c fcrypt_b.c ofb64ede.c ofb64enc.c \
+	des_old.c des_old2.c ecb3_enc.c ecb_enc.c ede_cbcm_enc.c \
+	enc_read.c enc_writ.c fcrypt.c ofb64ede.c ofb64enc.c \
 	ofb_enc.c pcbc_enc.c qud_cksm.c rand_key.c read2pwd.c \
 	rpc_enc.c set_key.c str2key.c xcbc_enc.c
+.if ${MACHINE_ARCH} == "i386"
+SRCS+= des-586.s crypt586.s
+.else
+SRCS+= des_enc.c fcrypt_b.c
+.endif
 INCS+= des.h des_old.h
 
 # dh
@@ -126,6 +151,7 @@ SRCS+= bio_b64.c bio_enc.c bio_md.c bio_ok.c c_all.c c_allc.c c_alld.c \
 	m_ripemd.c m_sha.c m_sha1.c names.c openbsd_hw.c p5_crpt.c \
 	p5_crpt2.c p_dec.c p_enc.c p_lib.c p_open.c p_seal.c p_sign.c \
 	p_verify.c
+INCS+= evp.h
 
 # hmac
 SRCS+= hmac.c
@@ -155,6 +181,9 @@ INCS+= md4.h
 
 # md5
 SRCS+= md5_dgst.c md5_one.c
+.if ${MACHINE_ARCH} == "i386"
+SRCS+= md5-586.s
+.endif
 INCS+= md5.h
 
 # mdc2
@@ -194,11 +223,21 @@ SRCS+= rc2_cbc.c rc2_ecb.c rc2_skey.c rc2cfb64.c rc2ofb64.c
 INCS+= rc2.h
 
 # rc4
-SRCS+= rc4_enc.c rc4_skey.c
+SRCS+= rc4_skey.c
+.if ${MACHINE_ARCH} == "i386"
+SRCS+= rc4-586.s
+.else
+SRCS+= rc4_enc.c
+.endif
 INCS+= rc4.h
 
 # rc5
-SRCS+= rc5_ecb.c rc5_enc.c rc5_skey.c rc5cfb64.c rc5ofb64.c
+SRCS+= rc5_ecb.c rc5_skey.c rc5cfb64.c rc5ofb64.c
+.if ${MACHINE_ARCH} == "i386"
+SRCS+= rc5-586.s
+.else
+SRCS+= rc5_enc.c
+.endif
 INCS+= rc5.h
 
 # ripemd
@@ -213,6 +252,9 @@ INCS+= rsa.h
 
 # sha
 SRCS+= sha1_one.c sha1dgst.c sha_dgst.c sha_one.c
+.if ${MACHINE_ARCH} == "i386"
+SRCS+= sha1-586.s
+.endif
 INCS+= sha.h
 
 # stack
@@ -281,6 +323,10 @@ afterinstall:
 
 .include <bsd.lib.mk>
 
+.if ${MACHINE_ARCH} == "i386"
+.PATH: ${.CURDIR}/i386
+.endif
+
 .if defined(MAKE_IDEA) && ${MAKE_IDEA} == YES
 _ideapath= ${LCRYPTO_SRC}/crypto/idea
 .endif
diff --git a/secure/lib/libcrypto/Makefile.asm b/secure/lib/libcrypto/Makefile.asm
index 57d07d4..9199027 100644
--- a/secure/lib/libcrypto/Makefile.asm
+++ b/secure/lib/libcrypto/Makefile.asm
@@ -2,7 +2,7 @@
 # Use this to help generate the asm *.s files after an import. It is not
 # perfect by any means, but does what is needed.
 # Do a 'make -f Makefile.asm all' and it will generate *.s. Move them
-# to the i386 subdir, and correct any exposed paths and $FreeBSD$ tags.
+# to the i386 subdir, and correct any exposed paths and $ FreeBSD $ tags.
 
 .if ${MACHINE_ARCH} == "i386"
 
@@ -51,7 +51,8 @@ CLEANFILES+= ${SRCS:M*.pl:S/.pl$/.cmt/} ${SRCS:M*.pl:S/.pl$/.s/}
 .SUFFIXES: .pl .cmt
 
 .pl.cmt:
-	perl -I${PERLPATH} ${.IMPSRC} elf ${CPUTYPE:Mi386:S/i//} > ${.TARGET}
+	( echo '	# $$'FreeBSD'$$' ;\
+	perl -I${PERLPATH} ${.IMPSRC} elf ${CPUTYPE:Mi386:S/i//} ) > ${.TARGET}
 
 .cmt.s:
 	tr -d "'" < ${.IMPSRC} > ${.TARGET}
diff --git a/secure/lib/libcrypto/i386/bf-586.s b/secure/lib/libcrypto/i386/bf-586.s
index 0965b1c..73e0042 100644
--- a/secure/lib/libcrypto/i386/bf-586.s
+++ b/secure/lib/libcrypto/i386/bf-586.s
@@ -1,9 +1,9 @@
 # $FreeBSD$
-	# Dont even think of reading this code
-	# It was automatically generated by bf-586.pl
-	# Which is a perl program used to generate the x86 assember for
-	# any of elf, a.out, BSDI, Win32, gaswin (for GNU as on Win32) or Solaris
-	# eric <eay@cryptsoft.com>
+
+
+
+
+
 	.file "bf-586.s"
 	.version "01.01"
 
@@ -20,7 +20,7 @@ BF_encrypt:
 	movl 16(%esp), %ebp
 	pushl %esi
 	pushl %edi
-	# Load the 2 words
+
 	movl (%ebx), %edi
 	movl 4(%ebx), %esi
 	xorl %eax, %eax
@@ -28,7 +28,7 @@ BF_encrypt:
 	xorl %ecx, %ecx
 	xorl %ebx, %edi
 
-	# Round 0
+
 	movl 4(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -48,7 +48,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 1
+
 	movl 8(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -68,7 +68,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 2
+
 	movl 12(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -88,7 +88,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 3
+
 	movl 16(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -108,7 +108,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 4
+
 	movl 20(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -128,7 +128,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 5
+
 	movl 24(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -148,7 +148,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 6
+
 	movl 28(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -168,7 +168,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 7
+
 	movl 32(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -188,7 +188,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 8
+
 	movl 36(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -208,7 +208,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 9
+
 	movl 40(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -228,7 +228,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 10
+
 	movl 44(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -248,7 +248,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 11
+
 	movl 48(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -268,7 +268,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 12
+
 	movl 52(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -288,7 +288,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 13
+
 	movl 56(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -308,7 +308,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 14
+
 	movl 60(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -328,7 +328,7 @@ BF_encrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 15
+
 	movl 64(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -345,7 +345,7 @@ BF_encrypt:
 	xorl %eax, %ebx
 	movl 3144(%ebp,%edx,4),%edx
 	addl %edx, %ebx
-	# Load parameter 0 (16) enc=1
+
 	movl 20(%esp), %eax
 	xorl %ebx, %edi
 	movl 68(%ebp), %edx
@@ -357,8 +357,8 @@ BF_encrypt:
 	popl %ebx
 	popl %ebp
 	ret
-.BF_encrypt_end:
-	.size BF_encrypt,.BF_encrypt_end-BF_encrypt
+.L_BF_encrypt_end:
+	.size BF_encrypt,.L_BF_encrypt_end-BF_encrypt
 .ident "BF_encrypt"
 .text
 	.align 16
@@ -372,7 +372,7 @@ BF_decrypt:
 	movl 16(%esp), %ebp
 	pushl %esi
 	pushl %edi
-	# Load the 2 words
+
 	movl (%ebx), %edi
 	movl 4(%ebx), %esi
 	xorl %eax, %eax
@@ -380,7 +380,7 @@ BF_decrypt:
 	xorl %ecx, %ecx
 	xorl %ebx, %edi
 
-	# Round 16
+
 	movl 64(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -400,7 +400,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 15
+
 	movl 60(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -420,7 +420,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 14
+
 	movl 56(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -440,7 +440,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 13
+
 	movl 52(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -460,7 +460,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 12
+
 	movl 48(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -480,7 +480,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 11
+
 	movl 44(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -500,7 +500,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 10
+
 	movl 40(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -520,7 +520,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 9
+
 	movl 36(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -540,7 +540,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 8
+
 	movl 32(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -560,7 +560,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 7
+
 	movl 28(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -580,7 +580,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 6
+
 	movl 24(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -600,7 +600,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 5
+
 	movl 20(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -620,7 +620,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 4
+
 	movl 16(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -640,7 +640,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 3
+
 	movl 12(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -660,7 +660,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %edi
 
-	# Round 2
+
 	movl 8(%ebp), %edx
 	movl %edi, %ebx
 	xorl %edx, %esi
@@ -680,7 +680,7 @@ BF_decrypt:
 	xorl %eax, %eax
 	xorl %ebx, %esi
 
-	# Round 1
+
 	movl 4(%ebp), %edx
 	movl %esi, %ebx
 	xorl %edx, %edi
@@ -697,7 +697,7 @@ BF_decrypt:
 	xorl %eax, %ebx
 	movl 3144(%ebp,%edx,4),%edx
 	addl %edx, %ebx
-	# Load parameter 0 (1) enc=0
+
 	movl 20(%esp), %eax
 	xorl %ebx, %edi
 	movl (%ebp), %edx
@@ -709,8 +709,8 @@ BF_decrypt:
 	popl %ebx
 	popl %ebp
 	ret
-.BF_decrypt_end:
-	.size BF_decrypt,.BF_decrypt_end-BF_decrypt
+.L_BF_decrypt_end:
+	.size BF_decrypt,.L_BF_decrypt_end-BF_decrypt
 .ident "BF_decrypt"
 .text
 	.align 16
@@ -723,7 +723,7 @@ BF_cbc_encrypt:
 	pushl %esi
 	pushl %edi
 	movl 28(%esp), %ebp
-	# getting iv ptr from parameter 4
+
 	movl 36(%esp), %ebx
 	movl (%ebx), %esi
 	movl 4(%ebx), %edi
@@ -734,9 +734,9 @@ BF_cbc_encrypt:
 	movl %esp, %ebx
 	movl 36(%esp), %esi
 	movl 40(%esp), %edi
-	# getting encrypt flag from parameter 5
+
 	movl 56(%esp), %ecx
-	# get and push parameter 3
+
 	movl 48(%esp), %eax
 	pushl %eax
 	pushl %ebx
@@ -752,18 +752,18 @@ BF_cbc_encrypt:
 	xorl %ecx, %eax
 	xorl %edx, %ebx
 .byte 15
-.byte 200 # bswapl %eax
+.byte 200
 .byte 15
-.byte 203 # bswapl %ebx
+.byte 203
 	movl %eax, 8(%esp)
 	movl %ebx, 12(%esp)
 	call BF_encrypt
 	movl 8(%esp), %eax
 	movl 12(%esp), %ebx
 .byte 15
-.byte 200 # bswapl %eax
+.byte 200
 .byte 15
-.byte 203 # bswapl %ebx
+.byte 203
 	movl %eax, (%edi)
 	movl %ebx, 4(%edi)
 	addl $8, %esi
@@ -774,43 +774,48 @@ BF_cbc_encrypt:
 	movl 52(%esp), %ebp
 	andl $7, %ebp
 	jz .L003finish
+	call .L004PIC_point
+.L004PIC_point:
+	popl %edx
+	leal .L005cbc_enc_jmp_table-.L004PIC_point(%edx),%ecx
+	movl (%ecx,%ebp,4), %ebp
+	addl %edx, %ebp
 	xorl %ecx, %ecx
 	xorl %edx, %edx
-	movl .L004cbc_enc_jmp_table(,%ebp,4),%ebp
 	jmp *%ebp
-.L005ej7:
+.L006ej7:
 	movb 6(%esi), %dh
 	sall $8, %edx
-.L006ej6:
+.L007ej6:
 	movb 5(%esi), %dh
-.L007ej5:
+.L008ej5:
 	movb 4(%esi), %dl
-.L008ej4:
+.L009ej4:
 	movl (%esi), %ecx
-	jmp .L009ejend
-.L010ej3:
+	jmp .L010ejend
+.L011ej3:
 	movb 2(%esi), %ch
 	sall $8, %ecx
-.L011ej2:
+.L012ej2:
 	movb 1(%esi), %ch
-.L012ej1:
+.L013ej1:
 	movb (%esi), %cl
-.L009ejend:
+.L010ejend:
 	xorl %ecx, %eax
 	xorl %edx, %ebx
 .byte 15
-.byte 200 # bswapl %eax
+.byte 200
 .byte 15
-.byte 203 # bswapl %ebx
+.byte 203
 	movl %eax, 8(%esp)
 	movl %ebx, 12(%esp)
 	call BF_encrypt
 	movl 8(%esp), %eax
 	movl 12(%esp), %ebx
 .byte 15
-.byte 200 # bswapl %eax
+.byte 200
 .byte 15
-.byte 203 # bswapl %ebx
+.byte 203
 	movl %eax, (%edi)
 	movl %ebx, 4(%edi)
 	jmp .L003finish
@@ -819,23 +824,23 @@ BF_cbc_encrypt:
 	andl $4294967288, %ebp
 	movl 16(%esp), %eax
 	movl 20(%esp), %ebx
-	jz .L013decrypt_finish
-.L014decrypt_loop:
+	jz .L014decrypt_finish
+.L015decrypt_loop:
 	movl (%esi), %eax
 	movl 4(%esi), %ebx
 .byte 15
-.byte 200 # bswapl %eax
+.byte 200
 .byte 15
-.byte 203 # bswapl %ebx
+.byte 203
 	movl %eax, 8(%esp)
 	movl %ebx, 12(%esp)
 	call BF_decrypt
 	movl 8(%esp), %eax
 	movl 12(%esp), %ebx
 .byte 15
-.byte 200 # bswapl %eax
+.byte 200
 .byte 15
-.byte 203 # bswapl %ebx
+.byte 203
 	movl 16(%esp), %ecx
 	movl 20(%esp), %edx
 	xorl %eax, %ecx
@@ -849,52 +854,52 @@ BF_cbc_encrypt:
 	addl $8, %esi
 	addl $8, %edi
 	subl $8, %ebp
-	jnz .L014decrypt_loop
-.L013decrypt_finish:
+	jnz .L015decrypt_loop
+.L014decrypt_finish:
 	movl 52(%esp), %ebp
 	andl $7, %ebp
 	jz .L003finish
 	movl (%esi), %eax
 	movl 4(%esi), %ebx
 .byte 15
-.byte 200 # bswapl %eax
+.byte 200
 .byte 15
-.byte 203 # bswapl %ebx
+.byte 203
 	movl %eax, 8(%esp)
 	movl %ebx, 12(%esp)
 	call BF_decrypt
 	movl 8(%esp), %eax
 	movl 12(%esp), %ebx
 .byte 15
-.byte 200 # bswapl %eax
+.byte 200
 .byte 15
-.byte 203 # bswapl %ebx
+.byte 203
 	movl 16(%esp), %ecx
 	movl 20(%esp), %edx
 	xorl %eax, %ecx
 	xorl %ebx, %edx
 	movl (%esi), %eax
 	movl 4(%esi), %ebx
-.L015dj7:
+.L016dj7:
 	rorl $16, %edx
 	movb %dl, 6(%edi)
 	shrl $16, %edx
-.L016dj6:
+.L017dj6:
 	movb %dh, 5(%edi)
-.L017dj5:
+.L018dj5:
 	movb %dl, 4(%edi)
-.L018dj4:
+.L019dj4:
 	movl %ecx, (%edi)
-	jmp .L019djend
-.L020dj3:
+	jmp .L020djend
+.L021dj3:
 	rorl $16, %ecx
 	movb %cl, 2(%edi)
 	sall $16, %ecx
-.L021dj2:
+.L022dj2:
 	movb %ch, 1(%esi)
-.L022dj1:
+.L023dj1:
 	movb %cl, (%esi)
-.L019djend:
+.L020djend:
 	jmp .L003finish
 	.align 16
.L003finish:
@@ -908,25 +913,15 @@ BF_cbc_encrypt:
 	popl %ebp
 	ret
 	.align 16
-.L004cbc_enc_jmp_table:
-	.long 0
-	.long .L012ej1
-	.long .L011ej2
-	.long .L010ej3
-	.long .L008ej4
-	.long .L007ej5
-	.long .L006ej6
-	.long .L005ej7
-.align 16
-.L023cbc_dec_jmp_table:
+.L005cbc_enc_jmp_table:
 	.long 0
-	.long .L022dj1
-	.long .L021dj2
-	.long .L020dj3
-	.long .L018dj4
-	.long .L017dj5
-	.long .L016dj6
-	.long .L015dj7
+	.long .L013ej1-.L004PIC_point
+	.long .L012ej2-.L004PIC_point
+	.long .L011ej3-.L004PIC_point
+	.long .L009ej4-.L004PIC_point
+	.long .L008ej5-.L004PIC_point
+	.long .L007ej6-.L004PIC_point
+	.long .L006ej7-.L004PIC_point
 .L_BF_cbc_encrypt_end:
 	.size BF_cbc_encrypt,.L_BF_cbc_encrypt_end-BF_cbc_encrypt
 .ident "desasm.pl"
diff --git a/secure/lib/libcrypto/i386/bf-686.s
b/secure/lib/libcrypto/i386/bf-686.s index bb3b9c7..d3b4cb8 100644 --- a/secure/lib/libcrypto/i386/bf-686.s +++ b/secure/lib/libcrypto/i386/bf-686.s @@ -1,9 +1,9 @@ # $FreeBSD$ - # Dont even think of reading this code - # It was automatically generated by bf-686.pl - # Which is a perl program used to generate the x86 assember for - # any of elf, a.out, BSDI, Win32, gaswin (for GNU as on Win32) or Solaris - # eric <eay@cryptsoft.com> + + + + + .file "bf-686.s" .version "01.01" @@ -19,18 +19,18 @@ BF_encrypt: pushl %edi - # Load the 2 words + movl 20(%esp), %eax movl (%eax), %ecx movl 4(%eax), %edx - # P pointer, s and enc flag + movl 24(%esp), %edi xorl %eax, %eax xorl %ebx, %ebx xorl (%edi), %ecx - # Round 0 + rorl $16, %ecx movl 4(%edi), %esi movb %ch, %al @@ -49,7 +49,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %edx - # Round 1 + rorl $16, %edx movl 8(%edi), %esi movb %dh, %al @@ -68,7 +68,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 2 + rorl $16, %ecx movl 12(%edi), %esi movb %ch, %al @@ -87,7 +87,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %edx - # Round 3 + rorl $16, %edx movl 16(%edi), %esi movb %dh, %al @@ -106,7 +106,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 4 + rorl $16, %ecx movl 20(%edi), %esi movb %ch, %al @@ -125,7 +125,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %edx - # Round 5 + rorl $16, %edx movl 24(%edi), %esi movb %dh, %al @@ -144,7 +144,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 6 + rorl $16, %ecx movl 28(%edi), %esi movb %ch, %al @@ -163,7 +163,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %edx - # Round 7 + rorl $16, %edx movl 32(%edi), %esi movb %dh, %al @@ -182,7 +182,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 8 + rorl $16, %ecx movl 36(%edi), %esi movb %ch, %al @@ -201,7 +201,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %edx - # Round 9 + rorl $16, %edx movl 40(%edi), %esi movb %dh, %al @@ -220,7 +220,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 10 + rorl $16, %ecx movl 44(%edi), %esi movb %ch, %al @@ -239,7 +239,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %edx - # Round 11 + rorl $16, %edx movl 48(%edi), %esi movb %dh, %al @@ -258,7 +258,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 12 + rorl $16, %ecx movl 52(%edi), %esi movb %ch, %al @@ -277,7 +277,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %edx - # Round 13 + rorl $16, %edx movl 56(%edi), %esi movb %dh, %al @@ -296,7 +296,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 14 + rorl $16, %ecx movl 60(%edi), %esi movb %ch, %al @@ -315,7 +315,7 @@ BF_encrypt: xorl %eax, %eax xorl %esi, %edx - # Round 15 + rorl $16, %edx movl 64(%edi), %esi movb %dh, %al @@ -356,18 +356,18 @@ BF_decrypt: pushl %edi - # Load the 2 words + movl 20(%esp), %eax movl (%eax), %ecx movl 4(%eax), %edx - # P pointer, s and enc flag + movl 24(%esp), %edi xorl %eax, %eax xorl %ebx, %ebx xorl 68(%edi), %ecx - # Round 16 + rorl $16, %ecx movl 64(%edi), %esi movb %ch, %al @@ -386,7 +386,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %edx - # Round 15 + rorl $16, %edx movl 60(%edi), %esi movb %dh, %al @@ -405,7 +405,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 14 + rorl $16, %ecx movl 56(%edi), %esi movb %ch, %al @@ -424,7 +424,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %edx - # Round 13 + rorl $16, %edx movl 52(%edi), %esi movb %dh, %al @@ -443,7 +443,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 12 + rorl $16, %ecx movl 48(%edi), %esi movb %ch, %al @@ -462,7 +462,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %edx - # Round 11 
+ rorl $16, %edx movl 44(%edi), %esi movb %dh, %al @@ -481,7 +481,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 10 + rorl $16, %ecx movl 40(%edi), %esi movb %ch, %al @@ -500,7 +500,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %edx - # Round 9 + rorl $16, %edx movl 36(%edi), %esi movb %dh, %al @@ -519,7 +519,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 8 + rorl $16, %ecx movl 32(%edi), %esi movb %ch, %al @@ -538,7 +538,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %edx - # Round 7 + rorl $16, %edx movl 28(%edi), %esi movb %dh, %al @@ -557,7 +557,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 6 + rorl $16, %ecx movl 24(%edi), %esi movb %ch, %al @@ -576,7 +576,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %edx - # Round 5 + rorl $16, %edx movl 20(%edi), %esi movb %dh, %al @@ -595,7 +595,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 4 + rorl $16, %ecx movl 16(%edi), %esi movb %ch, %al @@ -614,7 +614,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %edx - # Round 3 + rorl $16, %edx movl 12(%edi), %esi movb %dh, %al @@ -633,7 +633,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %ecx - # Round 2 + rorl $16, %ecx movl 8(%edi), %esi movb %ch, %al @@ -652,7 +652,7 @@ BF_decrypt: xorl %eax, %eax xorl %esi, %edx - # Round 1 + rorl $16, %edx movl 4(%edi), %esi movb %dh, %al @@ -693,7 +693,7 @@ BF_cbc_encrypt: pushl %esi pushl %edi movl 28(%esp), %ebp - # getting iv ptr from parameter 4 + movl 36(%esp), %ebx movl (%ebx), %esi movl 4(%ebx), %edi @@ -704,9 +704,9 @@ BF_cbc_encrypt: movl %esp, %ebx movl 36(%esp), %esi movl 40(%esp), %edi - # getting encrypt flag from parameter 5 + movl 56(%esp), %ecx - # get and push parameter 3 + movl 48(%esp), %eax pushl %eax pushl %ebx @@ -722,18 +722,18 @@ BF_cbc_encrypt: xorl %ecx, %eax xorl %edx, %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl %eax, 8(%esp) movl %ebx, 12(%esp) call BF_encrypt movl 8(%esp), %eax movl 12(%esp), %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl %eax, (%edi) movl %ebx, 4(%edi) addl $8, %esi @@ -744,43 +744,48 @@ BF_cbc_encrypt: movl 52(%esp), %ebp andl $7, %ebp jz .L003finish + call .L004PIC_point +.L004PIC_point: + popl %edx + leal .L005cbc_enc_jmp_table-.L004PIC_point(%edx),%ecx + movl (%ecx,%ebp,4), %ebp + addl %edx, %ebp xorl %ecx, %ecx xorl %edx, %edx - movl .L004cbc_enc_jmp_table(,%ebp,4),%ebp jmp *%ebp -.L005ej7: +.L006ej7: movb 6(%esi), %dh sall $8, %edx -.L006ej6: +.L007ej6: movb 5(%esi), %dh -.L007ej5: +.L008ej5: movb 4(%esi), %dl -.L008ej4: +.L009ej4: movl (%esi), %ecx - jmp .L009ejend -.L010ej3: + jmp .L010ejend +.L011ej3: movb 2(%esi), %ch sall $8, %ecx -.L011ej2: +.L012ej2: movb 1(%esi), %ch -.L012ej1: +.L013ej1: movb (%esi), %cl -.L009ejend: +.L010ejend: xorl %ecx, %eax xorl %edx, %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl %eax, 8(%esp) movl %ebx, 12(%esp) call BF_encrypt movl 8(%esp), %eax movl 12(%esp), %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl %eax, (%edi) movl %ebx, 4(%edi) jmp .L003finish @@ -789,23 +794,23 @@ BF_cbc_encrypt: andl $4294967288, %ebp movl 16(%esp), %eax movl 20(%esp), %ebx - jz .L013decrypt_finish -.L014decrypt_loop: + jz .L014decrypt_finish +.L015decrypt_loop: movl (%esi), %eax movl 4(%esi), %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl %eax, 8(%esp) movl %ebx, 12(%esp) call BF_decrypt movl 8(%esp), 
%eax movl 12(%esp), %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl 16(%esp), %ecx movl 20(%esp), %edx xorl %eax, %ecx @@ -819,52 +824,52 @@ BF_cbc_encrypt: addl $8, %esi addl $8, %edi subl $8, %ebp - jnz .L014decrypt_loop -.L013decrypt_finish: + jnz .L015decrypt_loop +.L014decrypt_finish: movl 52(%esp), %ebp andl $7, %ebp jz .L003finish movl (%esi), %eax movl 4(%esi), %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl %eax, 8(%esp) movl %ebx, 12(%esp) call BF_decrypt movl 8(%esp), %eax movl 12(%esp), %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl 16(%esp), %ecx movl 20(%esp), %edx xorl %eax, %ecx xorl %ebx, %edx movl (%esi), %eax movl 4(%esi), %ebx -.L015dj7: +.L016dj7: rorl $16, %edx movb %dl, 6(%edi) shrl $16, %edx -.L016dj6: +.L017dj6: movb %dh, 5(%edi) -.L017dj5: +.L018dj5: movb %dl, 4(%edi) -.L018dj4: +.L019dj4: movl %ecx, (%edi) - jmp .L019djend -.L020dj3: + jmp .L020djend +.L021dj3: rorl $16, %ecx movb %cl, 2(%edi) sall $16, %ecx -.L021dj2: +.L022dj2: movb %ch, 1(%esi) -.L022dj1: +.L023dj1: movb %cl, (%esi) -.L019djend: +.L020djend: jmp .L003finish .align 16 .L003finish: @@ -878,25 +883,15 @@ BF_cbc_encrypt: popl %ebp ret .align 16 -.L004cbc_enc_jmp_table: - .long 0 - .long .L012ej1 - .long .L011ej2 - .long .L010ej3 - .long .L008ej4 - .long .L007ej5 - .long .L006ej6 - .long .L005ej7 -.align 16 -.L023cbc_dec_jmp_table: +.L005cbc_enc_jmp_table: .long 0 - .long .L022dj1 - .long .L021dj2 - .long .L020dj3 - .long .L018dj4 - .long .L017dj5 - .long .L016dj6 - .long .L015dj7 + .long .L013ej1-.L004PIC_point + .long .L012ej2-.L004PIC_point + .long .L011ej3-.L004PIC_point + .long .L009ej4-.L004PIC_point + .long .L008ej5-.L004PIC_point + .long .L007ej6-.L004PIC_point + .long .L006ej7-.L004PIC_point .L_BF_cbc_encrypt_end: .size BF_cbc_encrypt,.L_BF_cbc_encrypt_end-BF_cbc_encrypt .ident "desasm.pl" diff --git a/secure/lib/libcrypto/i386/bn-586.s b/secure/lib/libcrypto/i386/bn-586.s index 3ea4a8a..88c73fa 100644 --- a/secure/lib/libcrypto/i386/bn-586.s +++ b/secure/lib/libcrypto/i386/bn-586.s @@ -1,11 +1,11 @@ # $FreeBSD$ - # Dont even think of reading this code - # It was automatically generated by bn-586.pl - # Which is a perl program used to generate the x86 assember for - # any of elf, a.out, BSDI, Win32, gaswin (for GNU as on Win32) or Solaris - # eric <eay@cryptsoft.com> - .file "bn-586.s" + + + + + + .file "/usr/src/secure/lib/libcrypto/../../../crypto/openssl/crypto/bn/asm/bn-586.s" .version "01.01" gcc2_compiled.: .text @@ -29,7 +29,7 @@ bn_mul_add_words: jz .L000maw_finish .L001maw_loop: movl %ecx, (%esp) - # Round 0 + movl (%ebx), %eax mull %ebp addl %esi, %eax @@ -39,7 +39,7 @@ bn_mul_add_words: adcl $0, %edx movl %eax, (%edi) movl %edx, %esi - # Round 4 + movl 4(%ebx), %eax mull %ebp addl %esi, %eax @@ -49,7 +49,7 @@ bn_mul_add_words: adcl $0, %edx movl %eax, 4(%edi) movl %edx, %esi - # Round 8 + movl 8(%ebx), %eax mull %ebp addl %esi, %eax @@ -59,7 +59,7 @@ bn_mul_add_words: adcl $0, %edx movl %eax, 8(%edi) movl %edx, %esi - # Round 12 + movl 12(%ebx), %eax mull %ebp addl %esi, %eax @@ -69,7 +69,7 @@ bn_mul_add_words: adcl $0, %edx movl %eax, 12(%edi) movl %edx, %esi - # Round 16 + movl 16(%ebx), %eax mull %ebp addl %esi, %eax @@ -79,7 +79,7 @@ bn_mul_add_words: adcl $0, %edx movl %eax, 16(%edi) movl %edx, %esi - # Round 20 + movl 20(%ebx), %eax mull %ebp addl %esi, %eax @@ -89,7 +89,7 @@ bn_mul_add_words: adcl $0, %edx movl %eax, 
20(%edi) movl %edx, %esi - # Round 24 + movl 24(%ebx), %eax mull %ebp addl %esi, %eax @@ -99,7 +99,7 @@ bn_mul_add_words: adcl $0, %edx movl %eax, 24(%edi) movl %edx, %esi - # Round 28 + movl 28(%ebx), %eax mull %ebp addl %esi, %eax @@ -122,7 +122,7 @@ bn_mul_add_words: jmp .L003maw_end .align 16 .L002maw_finish2: - # Tail Round 0 + movl (%ebx), %eax mull %ebp addl %esi, %eax @@ -134,7 +134,7 @@ bn_mul_add_words: movl %eax, (%edi) movl %edx, %esi jz .L003maw_end - # Tail Round 1 + movl 4(%ebx), %eax mull %ebp addl %esi, %eax @@ -146,7 +146,7 @@ bn_mul_add_words: movl %eax, 4(%edi) movl %edx, %esi jz .L003maw_end - # Tail Round 2 + movl 8(%ebx), %eax mull %ebp addl %esi, %eax @@ -158,7 +158,7 @@ bn_mul_add_words: movl %eax, 8(%edi) movl %edx, %esi jz .L003maw_end - # Tail Round 3 + movl 12(%ebx), %eax mull %ebp addl %esi, %eax @@ -170,7 +170,7 @@ bn_mul_add_words: movl %eax, 12(%edi) movl %edx, %esi jz .L003maw_end - # Tail Round 4 + movl 16(%ebx), %eax mull %ebp addl %esi, %eax @@ -182,7 +182,7 @@ bn_mul_add_words: movl %eax, 16(%edi) movl %edx, %esi jz .L003maw_end - # Tail Round 5 + movl 20(%ebx), %eax mull %ebp addl %esi, %eax @@ -194,7 +194,7 @@ bn_mul_add_words: movl %eax, 20(%edi) movl %edx, %esi jz .L003maw_end - # Tail Round 6 + movl 24(%ebx), %eax mull %ebp addl %esi, %eax @@ -212,8 +212,8 @@ bn_mul_add_words: popl %ebx popl %ebp ret -.bn_mul_add_words_end: - .size bn_mul_add_words,.bn_mul_add_words_end-bn_mul_add_words +.L_bn_mul_add_words_end: + .size bn_mul_add_words,.L_bn_mul_add_words_end-bn_mul_add_words .ident "bn_mul_add_words" .text .align 16 @@ -234,56 +234,56 @@ bn_mul_words: andl $4294967288, %ebp jz .L004mw_finish .L005mw_loop: - # Round 0 + movl (%ebx), %eax mull %ecx addl %esi, %eax adcl $0, %edx movl %eax, (%edi) movl %edx, %esi - # Round 4 + movl 4(%ebx), %eax mull %ecx addl %esi, %eax adcl $0, %edx movl %eax, 4(%edi) movl %edx, %esi - # Round 8 + movl 8(%ebx), %eax mull %ecx addl %esi, %eax adcl $0, %edx movl %eax, 8(%edi) movl %edx, %esi - # Round 12 + movl 12(%ebx), %eax mull %ecx addl %esi, %eax adcl $0, %edx movl %eax, 12(%edi) movl %edx, %esi - # Round 16 + movl 16(%ebx), %eax mull %ecx addl %esi, %eax adcl $0, %edx movl %eax, 16(%edi) movl %edx, %esi - # Round 20 + movl 20(%ebx), %eax mull %ecx addl %esi, %eax adcl $0, %edx movl %eax, 20(%edi) movl %edx, %esi - # Round 24 + movl 24(%ebx), %eax mull %ecx addl %esi, %eax adcl $0, %edx movl %eax, 24(%edi) movl %edx, %esi - # Round 28 + movl 28(%ebx), %eax mull %ecx addl %esi, %eax @@ -303,7 +303,7 @@ bn_mul_words: jmp .L007mw_end .align 16 .L006mw_finish2: - # Tail Round 0 + movl (%ebx), %eax mull %ecx addl %esi, %eax @@ -312,7 +312,7 @@ bn_mul_words: movl %edx, %esi decl %ebp jz .L007mw_end - # Tail Round 1 + movl 4(%ebx), %eax mull %ecx addl %esi, %eax @@ -321,7 +321,7 @@ bn_mul_words: movl %edx, %esi decl %ebp jz .L007mw_end - # Tail Round 2 + movl 8(%ebx), %eax mull %ecx addl %esi, %eax @@ -330,7 +330,7 @@ bn_mul_words: movl %edx, %esi decl %ebp jz .L007mw_end - # Tail Round 3 + movl 12(%ebx), %eax mull %ecx addl %esi, %eax @@ -339,7 +339,7 @@ bn_mul_words: movl %edx, %esi decl %ebp jz .L007mw_end - # Tail Round 4 + movl 16(%ebx), %eax mull %ecx addl %esi, %eax @@ -348,7 +348,7 @@ bn_mul_words: movl %edx, %esi decl %ebp jz .L007mw_end - # Tail Round 5 + movl 20(%ebx), %eax mull %ecx addl %esi, %eax @@ -357,7 +357,7 @@ bn_mul_words: movl %edx, %esi decl %ebp jz .L007mw_end - # Tail Round 6 + movl 24(%ebx), %eax mull %ecx addl %esi, %eax @@ -371,8 +371,8 @@ bn_mul_words: popl %ebx popl %ebp ret 
-.bn_mul_words_end: - .size bn_mul_words,.bn_mul_words_end-bn_mul_words +.L_bn_mul_words_end: + .size bn_mul_words,.L_bn_mul_words_end-bn_mul_words .ident "bn_mul_words" .text .align 16 @@ -391,42 +391,42 @@ bn_sqr_words: andl $4294967288, %ebx jz .L008sw_finish .L009sw_loop: - # Round 0 + movl (%edi), %eax mull %eax movl %eax, (%esi) movl %edx, 4(%esi) - # Round 4 + movl 4(%edi), %eax mull %eax movl %eax, 8(%esi) movl %edx, 12(%esi) - # Round 8 + movl 8(%edi), %eax mull %eax movl %eax, 16(%esi) movl %edx, 20(%esi) - # Round 12 + movl 12(%edi), %eax mull %eax movl %eax, 24(%esi) movl %edx, 28(%esi) - # Round 16 + movl 16(%edi), %eax mull %eax movl %eax, 32(%esi) movl %edx, 36(%esi) - # Round 20 + movl 20(%edi), %eax mull %eax movl %eax, 40(%esi) movl %edx, 44(%esi) - # Round 24 + movl 24(%edi), %eax mull %eax movl %eax, 48(%esi) movl %edx, 52(%esi) - # Round 28 + movl 28(%edi), %eax mull %eax movl %eax, 56(%esi) @@ -440,49 +440,49 @@ bn_sqr_words: movl 28(%esp), %ebx andl $7, %ebx jz .L010sw_end - # Tail Round 0 + movl (%edi), %eax mull %eax movl %eax, (%esi) decl %ebx movl %edx, 4(%esi) jz .L010sw_end - # Tail Round 1 + movl 4(%edi), %eax mull %eax movl %eax, 8(%esi) decl %ebx movl %edx, 12(%esi) jz .L010sw_end - # Tail Round 2 + movl 8(%edi), %eax mull %eax movl %eax, 16(%esi) decl %ebx movl %edx, 20(%esi) jz .L010sw_end - # Tail Round 3 + movl 12(%edi), %eax mull %eax movl %eax, 24(%esi) decl %ebx movl %edx, 28(%esi) jz .L010sw_end - # Tail Round 4 + movl 16(%edi), %eax mull %eax movl %eax, 32(%esi) decl %ebx movl %edx, 36(%esi) jz .L010sw_end - # Tail Round 5 + movl 20(%edi), %eax mull %eax movl %eax, 40(%esi) decl %ebx movl %edx, 44(%esi) jz .L010sw_end - # Tail Round 6 + movl 24(%edi), %eax mull %eax movl %eax, 48(%esi) @@ -493,8 +493,8 @@ bn_sqr_words: popl %ebx popl %ebp ret -.bn_sqr_words_end: - .size bn_sqr_words,.bn_sqr_words_end-bn_sqr_words +.L_bn_sqr_words_end: + .size bn_sqr_words,.L_bn_sqr_words_end-bn_sqr_words .ident "bn_sqr_words" .text .align 16 @@ -515,8 +515,8 @@ bn_div_words: popl %ebx popl %ebp ret -.bn_div_words_end: - .size bn_div_words,.bn_div_words_end-bn_div_words +.L_bn_div_words_end: + .size bn_div_words,.L_bn_div_words_end-bn_div_words .ident "bn_div_words" .text .align 16 @@ -537,7 +537,7 @@ bn_add_words: andl $4294967288, %ebp jz .L011aw_finish .L012aw_loop: - # Round 0 + movl (%esi), %ecx movl (%edi), %edx addl %eax, %ecx @@ -546,7 +546,7 @@ bn_add_words: addl %edx, %ecx adcl $0, %eax movl %ecx, (%ebx) - # Round 1 + movl 4(%esi), %ecx movl 4(%edi), %edx addl %eax, %ecx @@ -555,7 +555,7 @@ bn_add_words: addl %edx, %ecx adcl $0, %eax movl %ecx, 4(%ebx) - # Round 2 + movl 8(%esi), %ecx movl 8(%edi), %edx addl %eax, %ecx @@ -564,7 +564,7 @@ bn_add_words: addl %edx, %ecx adcl $0, %eax movl %ecx, 8(%ebx) - # Round 3 + movl 12(%esi), %ecx movl 12(%edi), %edx addl %eax, %ecx @@ -573,7 +573,7 @@ bn_add_words: addl %edx, %ecx adcl $0, %eax movl %ecx, 12(%ebx) - # Round 4 + movl 16(%esi), %ecx movl 16(%edi), %edx addl %eax, %ecx @@ -582,7 +582,7 @@ bn_add_words: addl %edx, %ecx adcl $0, %eax movl %ecx, 16(%ebx) - # Round 5 + movl 20(%esi), %ecx movl 20(%edi), %edx addl %eax, %ecx @@ -591,7 +591,7 @@ bn_add_words: addl %edx, %ecx adcl $0, %eax movl %ecx, 20(%ebx) - # Round 6 + movl 24(%esi), %ecx movl 24(%edi), %edx addl %eax, %ecx @@ -600,7 +600,7 @@ bn_add_words: addl %edx, %ecx adcl $0, %eax movl %ecx, 24(%ebx) - # Round 7 + movl 28(%esi), %ecx movl 28(%edi), %edx addl %eax, %ecx @@ -619,7 +619,7 @@ bn_add_words: movl 32(%esp), %ebp andl $7, %ebp jz .L013aw_end - 
# Tail Round 0 + movl (%esi), %ecx movl (%edi), %edx addl %eax, %ecx @@ -630,7 +630,7 @@ bn_add_words: decl %ebp movl %ecx, (%ebx) jz .L013aw_end - # Tail Round 1 + movl 4(%esi), %ecx movl 4(%edi), %edx addl %eax, %ecx @@ -641,7 +641,7 @@ bn_add_words: decl %ebp movl %ecx, 4(%ebx) jz .L013aw_end - # Tail Round 2 + movl 8(%esi), %ecx movl 8(%edi), %edx addl %eax, %ecx @@ -652,7 +652,7 @@ bn_add_words: decl %ebp movl %ecx, 8(%ebx) jz .L013aw_end - # Tail Round 3 + movl 12(%esi), %ecx movl 12(%edi), %edx addl %eax, %ecx @@ -663,7 +663,7 @@ bn_add_words: decl %ebp movl %ecx, 12(%ebx) jz .L013aw_end - # Tail Round 4 + movl 16(%esi), %ecx movl 16(%edi), %edx addl %eax, %ecx @@ -674,7 +674,7 @@ bn_add_words: decl %ebp movl %ecx, 16(%ebx) jz .L013aw_end - # Tail Round 5 + movl 20(%esi), %ecx movl 20(%edi), %edx addl %eax, %ecx @@ -685,7 +685,7 @@ bn_add_words: decl %ebp movl %ecx, 20(%ebx) jz .L013aw_end - # Tail Round 6 + movl 24(%esi), %ecx movl 24(%edi), %edx addl %eax, %ecx @@ -700,8 +700,8 @@ bn_add_words: popl %ebx popl %ebp ret -.bn_add_words_end: - .size bn_add_words,.bn_add_words_end-bn_add_words +.L_bn_add_words_end: + .size bn_add_words,.L_bn_add_words_end-bn_add_words .ident "bn_add_words" .text .align 16 @@ -722,7 +722,7 @@ bn_sub_words: andl $4294967288, %ebp jz .L014aw_finish .L015aw_loop: - # Round 0 + movl (%esi), %ecx movl (%edi), %edx subl %eax, %ecx @@ -731,7 +731,7 @@ bn_sub_words: subl %edx, %ecx adcl $0, %eax movl %ecx, (%ebx) - # Round 1 + movl 4(%esi), %ecx movl 4(%edi), %edx subl %eax, %ecx @@ -740,7 +740,7 @@ bn_sub_words: subl %edx, %ecx adcl $0, %eax movl %ecx, 4(%ebx) - # Round 2 + movl 8(%esi), %ecx movl 8(%edi), %edx subl %eax, %ecx @@ -749,7 +749,7 @@ bn_sub_words: subl %edx, %ecx adcl $0, %eax movl %ecx, 8(%ebx) - # Round 3 + movl 12(%esi), %ecx movl 12(%edi), %edx subl %eax, %ecx @@ -758,7 +758,7 @@ bn_sub_words: subl %edx, %ecx adcl $0, %eax movl %ecx, 12(%ebx) - # Round 4 + movl 16(%esi), %ecx movl 16(%edi), %edx subl %eax, %ecx @@ -767,7 +767,7 @@ bn_sub_words: subl %edx, %ecx adcl $0, %eax movl %ecx, 16(%ebx) - # Round 5 + movl 20(%esi), %ecx movl 20(%edi), %edx subl %eax, %ecx @@ -776,7 +776,7 @@ bn_sub_words: subl %edx, %ecx adcl $0, %eax movl %ecx, 20(%ebx) - # Round 6 + movl 24(%esi), %ecx movl 24(%edi), %edx subl %eax, %ecx @@ -785,7 +785,7 @@ bn_sub_words: subl %edx, %ecx adcl $0, %eax movl %ecx, 24(%ebx) - # Round 7 + movl 28(%esi), %ecx movl 28(%edi), %edx subl %eax, %ecx @@ -804,7 +804,7 @@ bn_sub_words: movl 32(%esp), %ebp andl $7, %ebp jz .L016aw_end - # Tail Round 0 + movl (%esi), %ecx movl (%edi), %edx subl %eax, %ecx @@ -815,7 +815,7 @@ bn_sub_words: decl %ebp movl %ecx, (%ebx) jz .L016aw_end - # Tail Round 1 + movl 4(%esi), %ecx movl 4(%edi), %edx subl %eax, %ecx @@ -826,7 +826,7 @@ bn_sub_words: decl %ebp movl %ecx, 4(%ebx) jz .L016aw_end - # Tail Round 2 + movl 8(%esi), %ecx movl 8(%edi), %edx subl %eax, %ecx @@ -837,7 +837,7 @@ bn_sub_words: decl %ebp movl %ecx, 8(%ebx) jz .L016aw_end - # Tail Round 3 + movl 12(%esi), %ecx movl 12(%edi), %edx subl %eax, %ecx @@ -848,7 +848,7 @@ bn_sub_words: decl %ebp movl %ecx, 12(%ebx) jz .L016aw_end - # Tail Round 4 + movl 16(%esi), %ecx movl 16(%edi), %edx subl %eax, %ecx @@ -859,7 +859,7 @@ bn_sub_words: decl %ebp movl %ecx, 16(%ebx) jz .L016aw_end - # Tail Round 5 + movl 20(%esi), %ecx movl 20(%edi), %edx subl %eax, %ecx @@ -870,7 +870,7 @@ bn_sub_words: decl %ebp movl %ecx, 20(%ebx) jz .L016aw_end - # Tail Round 6 + movl 24(%esi), %ecx movl 24(%edi), %edx subl %eax, %ecx @@ -885,6 +885,6 @@ 
bn_sub_words: popl %ebx popl %ebp ret -.bn_sub_words_end: - .size bn_sub_words,.bn_sub_words_end-bn_sub_words +.L_bn_sub_words_end: + .size bn_sub_words,.L_bn_sub_words_end-bn_sub_words .ident "bn_sub_words" diff --git a/secure/lib/libcrypto/i386/cast-586.s b/secure/lib/libcrypto/i386/cast-586.s index b6da53b..bb31376 100644 --- a/secure/lib/libcrypto/i386/cast-586.s +++ b/secure/lib/libcrypto/i386/cast-586.s @@ -1,9 +1,9 @@ # $FreeBSD$ - # Dont even think of reading this code - # It was automatically generated by cast-586.pl - # Which is a perl program used to generate the x86 assember for - # any of elf, a.out, BSDI, Win32, gaswin (for GNU as on Win32) or Solaris - # eric <eay@cryptsoft.com> + + + + + .file "cast-586.s" .version "01.01" @@ -20,14 +20,14 @@ CAST_encrypt: movl 16(%esp), %ebp pushl %esi pushl %edi - # Load the 2 words + movl (%ebx), %edi movl 4(%ebx), %esi - # Get short key flag + movl 128(%ebp), %eax pushl %eax xorl %eax, %eax - # round 0 + movl (%ebp), %edx movl 4(%ebp), %ecx addl %esi, %edx @@ -48,7 +48,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx addl %ebx, %ecx xorl %ecx, %edi - # round 1 + movl 8(%ebp), %edx movl 12(%ebp), %ecx xorl %edi, %edx @@ -69,7 +69,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx xorl %ebx, %ecx xorl %ecx, %esi - # round 2 + movl 16(%ebp), %edx movl 20(%ebp), %ecx subl %esi, %edx @@ -90,7 +90,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx subl %ebx, %ecx xorl %ecx, %edi - # round 3 + movl 24(%ebp), %edx movl 28(%ebp), %ecx addl %edi, %edx @@ -111,7 +111,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx addl %ebx, %ecx xorl %ecx, %esi - # round 4 + movl 32(%ebp), %edx movl 36(%ebp), %ecx xorl %esi, %edx @@ -132,7 +132,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx xorl %ebx, %ecx xorl %ecx, %edi - # round 5 + movl 40(%ebp), %edx movl 44(%ebp), %ecx subl %edi, %edx @@ -153,7 +153,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx subl %ebx, %ecx xorl %ecx, %esi - # round 6 + movl 48(%ebp), %edx movl 52(%ebp), %ecx addl %esi, %edx @@ -174,7 +174,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx addl %ebx, %ecx xorl %ecx, %edi - # round 7 + movl 56(%ebp), %edx movl 60(%ebp), %ecx xorl %edi, %edx @@ -195,7 +195,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx xorl %ebx, %ecx xorl %ecx, %esi - # round 8 + movl 64(%ebp), %edx movl 68(%ebp), %ecx subl %esi, %edx @@ -216,7 +216,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx subl %ebx, %ecx xorl %ecx, %edi - # round 9 + movl 72(%ebp), %edx movl 76(%ebp), %ecx addl %edi, %edx @@ -237,7 +237,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx addl %ebx, %ecx xorl %ecx, %esi - # round 10 + movl 80(%ebp), %edx movl 84(%ebp), %ecx xorl %esi, %edx @@ -258,7 +258,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx xorl %ebx, %ecx xorl %ecx, %edi - # round 11 + movl 88(%ebp), %edx movl 92(%ebp), %ecx subl %edi, %edx @@ -279,11 +279,11 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx subl %ebx, %ecx xorl %ecx, %esi - # test short key flag + popl %edx orl %edx, %edx jnz .L000cast_enc_done - # round 12 + movl 96(%ebp), %edx movl 100(%ebp), %ecx addl %esi, %edx @@ -304,7 +304,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx addl %ebx, %ecx xorl %ecx, %edi - # round 13 + movl 104(%ebp), %edx movl 108(%ebp), %ecx xorl %edi, %edx @@ -325,7 +325,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx xorl %ebx, %ecx xorl %ecx, %esi - # round 14 + movl 112(%ebp), %edx movl 116(%ebp), %ecx subl %esi, %edx @@ -346,7 +346,7 @@ CAST_encrypt: movl CAST_S_table3(,%edx,4),%ebx subl %ebx, %ecx 
xorl %ecx, %edi - # round 15 + movl 120(%ebp), %edx movl 124(%ebp), %ecx addl %edi, %edx @@ -377,8 +377,8 @@ CAST_encrypt: popl %ebx popl %ebp ret -.CAST_encrypt_end: - .size CAST_encrypt,.CAST_encrypt_end-CAST_encrypt +.L_CAST_encrypt_end: + .size CAST_encrypt,.L_CAST_encrypt_end-CAST_encrypt .ident "CAST_encrypt" .text .align 16 @@ -392,15 +392,15 @@ CAST_decrypt: movl 16(%esp), %ebp pushl %esi pushl %edi - # Load the 2 words + movl (%ebx), %edi movl 4(%ebx), %esi - # Get short key flag + movl 128(%ebp), %eax orl %eax, %eax jnz .L001cast_dec_skip xorl %eax, %eax - # round 15 + movl 120(%ebp), %edx movl 124(%ebp), %ecx addl %esi, %edx @@ -421,7 +421,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx addl %ebx, %ecx xorl %ecx, %edi - # round 14 + movl 112(%ebp), %edx movl 116(%ebp), %ecx subl %edi, %edx @@ -442,7 +442,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx subl %ebx, %ecx xorl %ecx, %esi - # round 13 + movl 104(%ebp), %edx movl 108(%ebp), %ecx xorl %esi, %edx @@ -463,7 +463,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx xorl %ebx, %ecx xorl %ecx, %edi - # round 12 + movl 96(%ebp), %edx movl 100(%ebp), %ecx addl %edi, %edx @@ -485,7 +485,7 @@ CAST_decrypt: addl %ebx, %ecx xorl %ecx, %esi .L001cast_dec_skip: - # round 11 + movl 88(%ebp), %edx movl 92(%ebp), %ecx subl %esi, %edx @@ -506,7 +506,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx subl %ebx, %ecx xorl %ecx, %edi - # round 10 + movl 80(%ebp), %edx movl 84(%ebp), %ecx xorl %edi, %edx @@ -527,7 +527,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx xorl %ebx, %ecx xorl %ecx, %esi - # round 9 + movl 72(%ebp), %edx movl 76(%ebp), %ecx addl %esi, %edx @@ -548,7 +548,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx addl %ebx, %ecx xorl %ecx, %edi - # round 8 + movl 64(%ebp), %edx movl 68(%ebp), %ecx subl %edi, %edx @@ -569,7 +569,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx subl %ebx, %ecx xorl %ecx, %esi - # round 7 + movl 56(%ebp), %edx movl 60(%ebp), %ecx xorl %esi, %edx @@ -590,7 +590,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx xorl %ebx, %ecx xorl %ecx, %edi - # round 6 + movl 48(%ebp), %edx movl 52(%ebp), %ecx addl %edi, %edx @@ -611,7 +611,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx addl %ebx, %ecx xorl %ecx, %esi - # round 5 + movl 40(%ebp), %edx movl 44(%ebp), %ecx subl %esi, %edx @@ -632,7 +632,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx subl %ebx, %ecx xorl %ecx, %edi - # round 4 + movl 32(%ebp), %edx movl 36(%ebp), %ecx xorl %edi, %edx @@ -653,7 +653,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx xorl %ebx, %ecx xorl %ecx, %esi - # round 3 + movl 24(%ebp), %edx movl 28(%ebp), %ecx addl %esi, %edx @@ -674,7 +674,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx addl %ebx, %ecx xorl %ecx, %edi - # round 2 + movl 16(%ebp), %edx movl 20(%ebp), %ecx subl %edi, %edx @@ -695,7 +695,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx subl %ebx, %ecx xorl %ecx, %esi - # round 1 + movl 8(%ebp), %edx movl 12(%ebp), %ecx xorl %esi, %edx @@ -716,7 +716,7 @@ CAST_decrypt: movl CAST_S_table3(,%edx,4),%ebx xorl %ebx, %ecx xorl %ecx, %edi - # round 0 + movl (%ebp), %edx movl 4(%ebp), %ecx addl %edi, %edx @@ -746,8 +746,8 @@ CAST_decrypt: popl %ebx popl %ebp ret -.CAST_decrypt_end: - .size CAST_decrypt,.CAST_decrypt_end-CAST_decrypt +.L_CAST_decrypt_end: + .size CAST_decrypt,.L_CAST_decrypt_end-CAST_decrypt .ident "CAST_decrypt" .text .align 16 @@ -760,7 +760,7 @@ CAST_cbc_encrypt: pushl %esi pushl %edi movl 28(%esp), %ebp - # getting iv ptr from parameter 4 + movl 36(%esp), %ebx 
movl (%ebx), %esi movl 4(%ebx), %edi @@ -771,9 +771,9 @@ CAST_cbc_encrypt: movl %esp, %ebx movl 36(%esp), %esi movl 40(%esp), %edi - # getting encrypt flag from parameter 5 + movl 56(%esp), %ecx - # get and push parameter 3 + movl 48(%esp), %eax pushl %eax pushl %ebx @@ -789,18 +789,18 @@ CAST_cbc_encrypt: xorl %ecx, %eax xorl %edx, %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl %eax, 8(%esp) movl %ebx, 12(%esp) call CAST_encrypt movl 8(%esp), %eax movl 12(%esp), %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl %eax, (%edi) movl %ebx, 4(%edi) addl $8, %esi @@ -811,45 +811,50 @@ CAST_cbc_encrypt: movl 52(%esp), %ebp andl $7, %ebp jz .L005finish + call .L006PIC_point +.L006PIC_point: + popl %edx + leal .L007cbc_enc_jmp_table-.L006PIC_point(%edx),%ecx + movl (%ecx,%ebp,4), %ebp + addl %edx, %ebp xorl %ecx, %ecx xorl %edx, %edx - movl .L006cbc_enc_jmp_table(,%ebp,4),%ebp jmp *%ebp -.L007ej7: +.L008ej7: xorl %edx, %edx movb 6(%esi), %dh sall $8, %edx -.L008ej6: +.L009ej6: movb 5(%esi), %dh -.L009ej5: +.L010ej5: movb 4(%esi), %dl -.L010ej4: +.L011ej4: movl (%esi), %ecx - jmp .L011ejend -.L012ej3: + jmp .L012ejend +.L013ej3: movb 2(%esi), %ch xorl %ecx, %ecx sall $8, %ecx -.L013ej2: +.L014ej2: movb 1(%esi), %ch -.L014ej1: +.L015ej1: movb (%esi), %cl -.L011ejend: +.L012ejend: xorl %ecx, %eax xorl %edx, %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl %eax, 8(%esp) movl %ebx, 12(%esp) call CAST_encrypt movl 8(%esp), %eax movl 12(%esp), %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl %eax, (%edi) movl %ebx, 4(%edi) jmp .L005finish @@ -858,23 +863,23 @@ CAST_cbc_encrypt: andl $4294967288, %ebp movl 16(%esp), %eax movl 20(%esp), %ebx - jz .L015decrypt_finish -.L016decrypt_loop: + jz .L016decrypt_finish +.L017decrypt_loop: movl (%esi), %eax movl 4(%esi), %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl %eax, 8(%esp) movl %ebx, 12(%esp) call CAST_decrypt movl 8(%esp), %eax movl 12(%esp), %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl 16(%esp), %ecx movl 20(%esp), %edx xorl %eax, %ecx @@ -888,52 +893,52 @@ CAST_cbc_encrypt: addl $8, %esi addl $8, %edi subl $8, %ebp - jnz .L016decrypt_loop -.L015decrypt_finish: + jnz .L017decrypt_loop +.L016decrypt_finish: movl 52(%esp), %ebp andl $7, %ebp jz .L005finish movl (%esi), %eax movl 4(%esi), %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl %eax, 8(%esp) movl %ebx, 12(%esp) call CAST_decrypt movl 8(%esp), %eax movl 12(%esp), %ebx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 203 # bswapl %ebx +.byte 203 movl 16(%esp), %ecx movl 20(%esp), %edx xorl %eax, %ecx xorl %ebx, %edx movl (%esi), %eax movl 4(%esi), %ebx -.L017dj7: +.L018dj7: rorl $16, %edx movb %dl, 6(%edi) shrl $16, %edx -.L018dj6: +.L019dj6: movb %dh, 5(%edi) -.L019dj5: +.L020dj5: movb %dl, 4(%edi) -.L020dj4: +.L021dj4: movl %ecx, (%edi) - jmp .L021djend -.L022dj3: + jmp .L022djend +.L023dj3: rorl $16, %ecx movb %cl, 2(%edi) sall $16, %ecx -.L023dj2: +.L024dj2: movb %ch, 1(%esi) -.L024dj1: +.L025dj1: movb %cl, (%esi) -.L021djend: +.L022djend: jmp .L005finish .align 16 .L005finish: @@ -947,25 +952,15 @@ CAST_cbc_encrypt: popl %ebp ret .align 16 -.L006cbc_enc_jmp_table: - .long 0 - .long .L014ej1 - .long .L013ej2 - .long .L012ej3 - 
.long .L010ej4 - .long .L009ej5 - .long .L008ej6 - .long .L007ej7 -.align 16 -.L025cbc_dec_jmp_table: +.L007cbc_enc_jmp_table: .long 0 - .long .L024dj1 - .long .L023dj2 - .long .L022dj3 - .long .L020dj4 - .long .L019dj5 - .long .L018dj6 - .long .L017dj7 + .long .L015ej1-.L006PIC_point + .long .L014ej2-.L006PIC_point + .long .L013ej3-.L006PIC_point + .long .L011ej4-.L006PIC_point + .long .L010ej5-.L006PIC_point + .long .L009ej6-.L006PIC_point + .long .L008ej7-.L006PIC_point .L_CAST_cbc_encrypt_end: .size CAST_cbc_encrypt,.L_CAST_cbc_encrypt_end-CAST_cbc_encrypt .ident "desasm.pl" diff --git a/secure/lib/libcrypto/i386/co-586.s b/secure/lib/libcrypto/i386/co-586.s index 084f6fe..4e554b0 100644 --- a/secure/lib/libcrypto/i386/co-586.s +++ b/secure/lib/libcrypto/i386/co-586.s @@ -1,11 +1,11 @@ # $FreeBSD$ - # Dont even think of reading this code - # It was automatically generated by co-586.pl - # Which is a perl program used to generate the x86 assember for - # any of elf, a.out, BSDI, Win32, gaswin (for GNU as on Win32) or Solaris - # eric <eay@cryptsoft.com> - .file "co-586.s" + + + + + + .file "/usr/src/secure/lib/libcrypto/../../../crypto/openssl/crypto/bn/asm/co-586.s" .version "01.01" gcc2_compiled.: .text @@ -23,9 +23,9 @@ bn_mul_comba8: movl (%esi), %eax xorl %ecx, %ecx movl (%edi), %edx - # ################## Calculate word 0 + xorl %ebp, %ebp - # mul a[0]*b[0] + mull %edx addl %eax, %ebx movl 20(%esp), %eax @@ -34,17 +34,17 @@ bn_mul_comba8: adcl $0, %ebp movl %ebx, (%eax) movl 4(%esi), %eax - # saved r[0] - # ################## Calculate word 1 + + xorl %ebx, %ebx - # mul a[1]*b[0] + mull %edx addl %eax, %ecx movl (%esi), %eax adcl %edx, %ebp movl 4(%edi), %edx adcl $0, %ebx - # mul a[0]*b[1] + mull %edx addl %eax, %ecx movl 20(%esp), %eax @@ -53,24 +53,24 @@ bn_mul_comba8: adcl $0, %ebx movl %ecx, 4(%eax) movl 8(%esi), %eax - # saved r[1] - # ################## Calculate word 2 + + xorl %ecx, %ecx - # mul a[2]*b[0] + mull %edx addl %eax, %ebp movl 4(%esi), %eax adcl %edx, %ebx movl 4(%edi), %edx adcl $0, %ecx - # mul a[1]*b[1] + mull %edx addl %eax, %ebp movl (%esi), %eax adcl %edx, %ebx movl 8(%edi), %edx adcl $0, %ecx - # mul a[0]*b[2] + mull %edx addl %eax, %ebp movl 20(%esp), %eax @@ -79,31 +79,31 @@ bn_mul_comba8: adcl $0, %ecx movl %ebp, 8(%eax) movl 12(%esi), %eax - # saved r[2] - # ################## Calculate word 3 + + xorl %ebp, %ebp - # mul a[3]*b[0] + mull %edx addl %eax, %ebx movl 8(%esi), %eax adcl %edx, %ecx movl 4(%edi), %edx adcl $0, %ebp - # mul a[2]*b[1] + mull %edx addl %eax, %ebx movl 4(%esi), %eax adcl %edx, %ecx movl 8(%edi), %edx adcl $0, %ebp - # mul a[1]*b[2] + mull %edx addl %eax, %ebx movl (%esi), %eax adcl %edx, %ecx movl 12(%edi), %edx adcl $0, %ebp - # mul a[0]*b[3] + mull %edx addl %eax, %ebx movl 20(%esp), %eax @@ -112,38 +112,38 @@ bn_mul_comba8: adcl $0, %ebp movl %ebx, 12(%eax) movl 16(%esi), %eax - # saved r[3] - # ################## Calculate word 4 + + xorl %ebx, %ebx - # mul a[4]*b[0] + mull %edx addl %eax, %ecx movl 12(%esi), %eax adcl %edx, %ebp movl 4(%edi), %edx adcl $0, %ebx - # mul a[3]*b[1] + mull %edx addl %eax, %ecx movl 8(%esi), %eax adcl %edx, %ebp movl 8(%edi), %edx adcl $0, %ebx - # mul a[2]*b[2] + mull %edx addl %eax, %ecx movl 4(%esi), %eax adcl %edx, %ebp movl 12(%edi), %edx adcl $0, %ebx - # mul a[1]*b[3] + mull %edx addl %eax, %ecx movl (%esi), %eax adcl %edx, %ebp movl 16(%edi), %edx adcl $0, %ebx - # mul a[0]*b[4] + mull %edx addl %eax, %ecx movl 20(%esp), %eax @@ -152,45 +152,45 @@ bn_mul_comba8: adcl $0, %ebx movl %ecx, 
16(%eax) movl 20(%esi), %eax - # saved r[4] - # ################## Calculate word 5 + + xorl %ecx, %ecx - # mul a[5]*b[0] + mull %edx addl %eax, %ebp movl 16(%esi), %eax adcl %edx, %ebx movl 4(%edi), %edx adcl $0, %ecx - # mul a[4]*b[1] + mull %edx addl %eax, %ebp movl 12(%esi), %eax adcl %edx, %ebx movl 8(%edi), %edx adcl $0, %ecx - # mul a[3]*b[2] + mull %edx addl %eax, %ebp movl 8(%esi), %eax adcl %edx, %ebx movl 12(%edi), %edx adcl $0, %ecx - # mul a[2]*b[3] + mull %edx addl %eax, %ebp movl 4(%esi), %eax adcl %edx, %ebx movl 16(%edi), %edx adcl $0, %ecx - # mul a[1]*b[4] + mull %edx addl %eax, %ebp movl (%esi), %eax adcl %edx, %ebx movl 20(%edi), %edx adcl $0, %ecx - # mul a[0]*b[5] + mull %edx addl %eax, %ebp movl 20(%esp), %eax @@ -199,52 +199,52 @@ bn_mul_comba8: adcl $0, %ecx movl %ebp, 20(%eax) movl 24(%esi), %eax - # saved r[5] - # ################## Calculate word 6 + + xorl %ebp, %ebp - # mul a[6]*b[0] + mull %edx addl %eax, %ebx movl 20(%esi), %eax adcl %edx, %ecx movl 4(%edi), %edx adcl $0, %ebp - # mul a[5]*b[1] + mull %edx addl %eax, %ebx movl 16(%esi), %eax adcl %edx, %ecx movl 8(%edi), %edx adcl $0, %ebp - # mul a[4]*b[2] + mull %edx addl %eax, %ebx movl 12(%esi), %eax adcl %edx, %ecx movl 12(%edi), %edx adcl $0, %ebp - # mul a[3]*b[3] + mull %edx addl %eax, %ebx movl 8(%esi), %eax adcl %edx, %ecx movl 16(%edi), %edx adcl $0, %ebp - # mul a[2]*b[4] + mull %edx addl %eax, %ebx movl 4(%esi), %eax adcl %edx, %ecx movl 20(%edi), %edx adcl $0, %ebp - # mul a[1]*b[5] + mull %edx addl %eax, %ebx movl (%esi), %eax adcl %edx, %ecx movl 24(%edi), %edx adcl $0, %ebp - # mul a[0]*b[6] + mull %edx addl %eax, %ebx movl 20(%esp), %eax @@ -253,59 +253,59 @@ bn_mul_comba8: adcl $0, %ebp movl %ebx, 24(%eax) movl 28(%esi), %eax - # saved r[6] - # ################## Calculate word 7 + + xorl %ebx, %ebx - # mul a[7]*b[0] + mull %edx addl %eax, %ecx movl 24(%esi), %eax adcl %edx, %ebp movl 4(%edi), %edx adcl $0, %ebx - # mul a[6]*b[1] + mull %edx addl %eax, %ecx movl 20(%esi), %eax adcl %edx, %ebp movl 8(%edi), %edx adcl $0, %ebx - # mul a[5]*b[2] + mull %edx addl %eax, %ecx movl 16(%esi), %eax adcl %edx, %ebp movl 12(%edi), %edx adcl $0, %ebx - # mul a[4]*b[3] + mull %edx addl %eax, %ecx movl 12(%esi), %eax adcl %edx, %ebp movl 16(%edi), %edx adcl $0, %ebx - # mul a[3]*b[4] + mull %edx addl %eax, %ecx movl 8(%esi), %eax adcl %edx, %ebp movl 20(%edi), %edx adcl $0, %ebx - # mul a[2]*b[5] + mull %edx addl %eax, %ecx movl 4(%esi), %eax adcl %edx, %ebp movl 24(%edi), %edx adcl $0, %ebx - # mul a[1]*b[6] + mull %edx addl %eax, %ecx movl (%esi), %eax adcl %edx, %ebp movl 28(%edi), %edx adcl $0, %ebx - # mul a[0]*b[7] + mull %edx addl %eax, %ecx movl 20(%esp), %eax @@ -314,52 +314,52 @@ bn_mul_comba8: adcl $0, %ebx movl %ecx, 28(%eax) movl 28(%esi), %eax - # saved r[7] - # ################## Calculate word 8 + + xorl %ecx, %ecx - # mul a[7]*b[1] + mull %edx addl %eax, %ebp movl 24(%esi), %eax adcl %edx, %ebx movl 8(%edi), %edx adcl $0, %ecx - # mul a[6]*b[2] + mull %edx addl %eax, %ebp movl 20(%esi), %eax adcl %edx, %ebx movl 12(%edi), %edx adcl $0, %ecx - # mul a[5]*b[3] + mull %edx addl %eax, %ebp movl 16(%esi), %eax adcl %edx, %ebx movl 16(%edi), %edx adcl $0, %ecx - # mul a[4]*b[4] + mull %edx addl %eax, %ebp movl 12(%esi), %eax adcl %edx, %ebx movl 20(%edi), %edx adcl $0, %ecx - # mul a[3]*b[5] + mull %edx addl %eax, %ebp movl 8(%esi), %eax adcl %edx, %ebx movl 24(%edi), %edx adcl $0, %ecx - # mul a[2]*b[6] + mull %edx addl %eax, %ebp movl 4(%esi), %eax adcl %edx, %ebx movl 28(%edi), %edx adcl 
$0, %ecx - # mul a[1]*b[7] + mull %edx addl %eax, %ebp movl 20(%esp), %eax @@ -368,45 +368,45 @@ bn_mul_comba8: adcl $0, %ecx movl %ebp, 32(%eax) movl 28(%esi), %eax - # saved r[8] - # ################## Calculate word 9 + + xorl %ebp, %ebp - # mul a[7]*b[2] + mull %edx addl %eax, %ebx movl 24(%esi), %eax adcl %edx, %ecx movl 12(%edi), %edx adcl $0, %ebp - # mul a[6]*b[3] + mull %edx addl %eax, %ebx movl 20(%esi), %eax adcl %edx, %ecx movl 16(%edi), %edx adcl $0, %ebp - # mul a[5]*b[4] + mull %edx addl %eax, %ebx movl 16(%esi), %eax adcl %edx, %ecx movl 20(%edi), %edx adcl $0, %ebp - # mul a[4]*b[5] + mull %edx addl %eax, %ebx movl 12(%esi), %eax adcl %edx, %ecx movl 24(%edi), %edx adcl $0, %ebp - # mul a[3]*b[6] + mull %edx addl %eax, %ebx movl 8(%esi), %eax adcl %edx, %ecx movl 28(%edi), %edx adcl $0, %ebp - # mul a[2]*b[7] + mull %edx addl %eax, %ebx movl 20(%esp), %eax @@ -415,38 +415,38 @@ bn_mul_comba8: adcl $0, %ebp movl %ebx, 36(%eax) movl 28(%esi), %eax - # saved r[9] - # ################## Calculate word 10 + + xorl %ebx, %ebx - # mul a[7]*b[3] + mull %edx addl %eax, %ecx movl 24(%esi), %eax adcl %edx, %ebp movl 16(%edi), %edx adcl $0, %ebx - # mul a[6]*b[4] + mull %edx addl %eax, %ecx movl 20(%esi), %eax adcl %edx, %ebp movl 20(%edi), %edx adcl $0, %ebx - # mul a[5]*b[5] + mull %edx addl %eax, %ecx movl 16(%esi), %eax adcl %edx, %ebp movl 24(%edi), %edx adcl $0, %ebx - # mul a[4]*b[6] + mull %edx addl %eax, %ecx movl 12(%esi), %eax adcl %edx, %ebp movl 28(%edi), %edx adcl $0, %ebx - # mul a[3]*b[7] + mull %edx addl %eax, %ecx movl 20(%esp), %eax @@ -455,31 +455,31 @@ bn_mul_comba8: adcl $0, %ebx movl %ecx, 40(%eax) movl 28(%esi), %eax - # saved r[10] - # ################## Calculate word 11 + + xorl %ecx, %ecx - # mul a[7]*b[4] + mull %edx addl %eax, %ebp movl 24(%esi), %eax adcl %edx, %ebx movl 20(%edi), %edx adcl $0, %ecx - # mul a[6]*b[5] + mull %edx addl %eax, %ebp movl 20(%esi), %eax adcl %edx, %ebx movl 24(%edi), %edx adcl $0, %ecx - # mul a[5]*b[6] + mull %edx addl %eax, %ebp movl 16(%esi), %eax adcl %edx, %ebx movl 28(%edi), %edx adcl $0, %ecx - # mul a[4]*b[7] + mull %edx addl %eax, %ebp movl 20(%esp), %eax @@ -488,24 +488,24 @@ bn_mul_comba8: adcl $0, %ecx movl %ebp, 44(%eax) movl 28(%esi), %eax - # saved r[11] - # ################## Calculate word 12 + + xorl %ebp, %ebp - # mul a[7]*b[5] + mull %edx addl %eax, %ebx movl 24(%esi), %eax adcl %edx, %ecx movl 24(%edi), %edx adcl $0, %ebp - # mul a[6]*b[6] + mull %edx addl %eax, %ebx movl 20(%esi), %eax adcl %edx, %ecx movl 28(%edi), %edx adcl $0, %ebp - # mul a[5]*b[7] + mull %edx addl %eax, %ebx movl 20(%esp), %eax @@ -514,17 +514,17 @@ bn_mul_comba8: adcl $0, %ebp movl %ebx, 48(%eax) movl 28(%esi), %eax - # saved r[12] - # ################## Calculate word 13 + + xorl %ebx, %ebx - # mul a[7]*b[6] + mull %edx addl %eax, %ecx movl 24(%esi), %eax adcl %edx, %ebp movl 28(%edi), %edx adcl $0, %ebx - # mul a[6]*b[7] + mull %edx addl %eax, %ecx movl 20(%esp), %eax @@ -533,18 +533,18 @@ bn_mul_comba8: adcl $0, %ebx movl %ecx, 52(%eax) movl 28(%esi), %eax - # saved r[13] - # ################## Calculate word 14 + + xorl %ecx, %ecx - # mul a[7]*b[7] + mull %edx addl %eax, %ebp movl 20(%esp), %eax adcl %edx, %ebx adcl $0, %ecx movl %ebp, 56(%eax) - # saved r[14] - # save r[15] + + movl %ebx, 60(%eax) popl %ebx popl %ebp @@ -569,9 +569,9 @@ bn_mul_comba4: movl (%esi), %eax xorl %ecx, %ecx movl (%edi), %edx - # ################## Calculate word 0 + xorl %ebp, %ebp - # mul a[0]*b[0] + mull %edx addl %eax, %ebx movl 20(%esp), %eax @@ 
-580,17 +580,17 @@ bn_mul_comba4: adcl $0, %ebp movl %ebx, (%eax) movl 4(%esi), %eax - # saved r[0] - # ################## Calculate word 1 + + xorl %ebx, %ebx - # mul a[1]*b[0] + mull %edx addl %eax, %ecx movl (%esi), %eax adcl %edx, %ebp movl 4(%edi), %edx adcl $0, %ebx - # mul a[0]*b[1] + mull %edx addl %eax, %ecx movl 20(%esp), %eax @@ -599,24 +599,24 @@ bn_mul_comba4: adcl $0, %ebx movl %ecx, 4(%eax) movl 8(%esi), %eax - # saved r[1] - # ################## Calculate word 2 + + xorl %ecx, %ecx - # mul a[2]*b[0] + mull %edx addl %eax, %ebp movl 4(%esi), %eax adcl %edx, %ebx movl 4(%edi), %edx adcl $0, %ecx - # mul a[1]*b[1] + mull %edx addl %eax, %ebp movl (%esi), %eax adcl %edx, %ebx movl 8(%edi), %edx adcl $0, %ecx - # mul a[0]*b[2] + mull %edx addl %eax, %ebp movl 20(%esp), %eax @@ -625,31 +625,31 @@ bn_mul_comba4: adcl $0, %ecx movl %ebp, 8(%eax) movl 12(%esi), %eax - # saved r[2] - # ################## Calculate word 3 + + xorl %ebp, %ebp - # mul a[3]*b[0] + mull %edx addl %eax, %ebx movl 8(%esi), %eax adcl %edx, %ecx movl 4(%edi), %edx adcl $0, %ebp - # mul a[2]*b[1] + mull %edx addl %eax, %ebx movl 4(%esi), %eax adcl %edx, %ecx movl 8(%edi), %edx adcl $0, %ebp - # mul a[1]*b[2] + mull %edx addl %eax, %ebx movl (%esi), %eax adcl %edx, %ecx movl 12(%edi), %edx adcl $0, %ebp - # mul a[0]*b[3] + mull %edx addl %eax, %ebx movl 20(%esp), %eax @@ -658,24 +658,24 @@ bn_mul_comba4: adcl $0, %ebp movl %ebx, 12(%eax) movl 12(%esi), %eax - # saved r[3] - # ################## Calculate word 4 + + xorl %ebx, %ebx - # mul a[3]*b[1] + mull %edx addl %eax, %ecx movl 8(%esi), %eax adcl %edx, %ebp movl 8(%edi), %edx adcl $0, %ebx - # mul a[2]*b[2] + mull %edx addl %eax, %ecx movl 4(%esi), %eax adcl %edx, %ebp movl 12(%edi), %edx adcl $0, %ebx - # mul a[1]*b[3] + mull %edx addl %eax, %ecx movl 20(%esp), %eax @@ -684,17 +684,17 @@ bn_mul_comba4: adcl $0, %ebx movl %ecx, 16(%eax) movl 12(%esi), %eax - # saved r[4] - # ################## Calculate word 5 + + xorl %ecx, %ecx - # mul a[3]*b[2] + mull %edx addl %eax, %ebp movl 8(%esi), %eax adcl %edx, %ebx movl 12(%edi), %edx adcl $0, %ecx - # mul a[2]*b[3] + mull %edx addl %eax, %ebp movl 20(%esp), %eax @@ -703,18 +703,18 @@ bn_mul_comba4: adcl $0, %ecx movl %ebp, 20(%eax) movl 12(%esi), %eax - # saved r[5] - # ################## Calculate word 6 + + xorl %ebp, %ebp - # mul a[3]*b[3] + mull %edx addl %eax, %ebx movl 20(%esp), %eax adcl %edx, %ecx adcl $0, %ebp movl %ebx, 24(%eax) - # saved r[6] - # save r[7] + + movl %ecx, 28(%eax) popl %ebx popl %ebp @@ -738,9 +738,9 @@ bn_sqr_comba8: xorl %ebx, %ebx xorl %ecx, %ecx movl (%esi), %eax - # ############### Calculate word 0 + xorl %ebp, %ebp - # sqr a[0]*a[0] + mull %eax addl %eax, %ebx adcl %edx, %ecx @@ -748,10 +748,10 @@ bn_sqr_comba8: adcl $0, %ebp movl %ebx, (%edi) movl 4(%esi), %eax - # saved r[0] - # ############### Calculate word 1 + + xorl %ebx, %ebx - # sqr a[1]*a[0] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -762,10 +762,10 @@ bn_sqr_comba8: adcl $0, %ebx movl %ecx, 4(%edi) movl (%esi), %edx - # saved r[1] - # ############### Calculate word 2 + + xorl %ecx, %ecx - # sqr a[2]*a[0] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -774,7 +774,7 @@ bn_sqr_comba8: adcl %edx, %ebx movl 4(%esi), %eax adcl $0, %ecx - # sqr a[1]*a[1] + mull %eax addl %eax, %ebp adcl %edx, %ebx @@ -782,10 +782,10 @@ bn_sqr_comba8: adcl $0, %ecx movl %ebp, 8(%edi) movl 12(%esi), %eax - # saved r[2] - # ############### Calculate word 3 + + xorl %ebp, %ebp - # sqr a[3]*a[0] + mull %edx addl %eax, %eax adcl %edx, %edx @@ 
-795,7 +795,7 @@ bn_sqr_comba8: movl 8(%esi), %eax adcl $0, %ebp movl 4(%esi), %edx - # sqr a[2]*a[1] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -806,10 +806,10 @@ bn_sqr_comba8: adcl $0, %ebp movl %ebx, 12(%edi) movl (%esi), %edx - # saved r[3] - # ############### Calculate word 4 + + xorl %ebx, %ebx - # sqr a[4]*a[0] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -819,7 +819,7 @@ bn_sqr_comba8: movl 12(%esi), %eax adcl $0, %ebx movl 4(%esi), %edx - # sqr a[3]*a[1] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -828,7 +828,7 @@ bn_sqr_comba8: adcl %edx, %ebp movl 8(%esi), %eax adcl $0, %ebx - # sqr a[2]*a[2] + mull %eax addl %eax, %ecx adcl %edx, %ebp @@ -836,10 +836,10 @@ bn_sqr_comba8: adcl $0, %ebx movl %ecx, 16(%edi) movl 20(%esi), %eax - # saved r[4] - # ############### Calculate word 5 + + xorl %ecx, %ecx - # sqr a[5]*a[0] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -849,7 +849,7 @@ bn_sqr_comba8: movl 16(%esi), %eax adcl $0, %ecx movl 4(%esi), %edx - # sqr a[4]*a[1] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -859,7 +859,7 @@ bn_sqr_comba8: movl 12(%esi), %eax adcl $0, %ecx movl 8(%esi), %edx - # sqr a[3]*a[2] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -870,10 +870,10 @@ bn_sqr_comba8: adcl $0, %ecx movl %ebp, 20(%edi) movl (%esi), %edx - # saved r[5] - # ############### Calculate word 6 + + xorl %ebp, %ebp - # sqr a[6]*a[0] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -883,7 +883,7 @@ bn_sqr_comba8: movl 20(%esi), %eax adcl $0, %ebp movl 4(%esi), %edx - # sqr a[5]*a[1] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -893,7 +893,7 @@ bn_sqr_comba8: movl 16(%esi), %eax adcl $0, %ebp movl 8(%esi), %edx - # sqr a[4]*a[2] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -902,7 +902,7 @@ bn_sqr_comba8: adcl %edx, %ecx movl 12(%esi), %eax adcl $0, %ebp - # sqr a[3]*a[3] + mull %eax addl %eax, %ebx adcl %edx, %ecx @@ -910,10 +910,10 @@ bn_sqr_comba8: adcl $0, %ebp movl %ebx, 24(%edi) movl 28(%esi), %eax - # saved r[6] - # ############### Calculate word 7 + + xorl %ebx, %ebx - # sqr a[7]*a[0] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -923,7 +923,7 @@ bn_sqr_comba8: movl 24(%esi), %eax adcl $0, %ebx movl 4(%esi), %edx - # sqr a[6]*a[1] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -933,7 +933,7 @@ bn_sqr_comba8: movl 20(%esi), %eax adcl $0, %ebx movl 8(%esi), %edx - # sqr a[5]*a[2] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -943,7 +943,7 @@ bn_sqr_comba8: movl 16(%esi), %eax adcl $0, %ebx movl 12(%esi), %edx - # sqr a[4]*a[3] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -954,10 +954,10 @@ bn_sqr_comba8: adcl $0, %ebx movl %ecx, 28(%edi) movl 4(%esi), %edx - # saved r[7] - # ############### Calculate word 8 + + xorl %ecx, %ecx - # sqr a[7]*a[1] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -967,7 +967,7 @@ bn_sqr_comba8: movl 24(%esi), %eax adcl $0, %ecx movl 8(%esi), %edx - # sqr a[6]*a[2] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -977,7 +977,7 @@ bn_sqr_comba8: movl 20(%esi), %eax adcl $0, %ecx movl 12(%esi), %edx - # sqr a[5]*a[3] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -986,7 +986,7 @@ bn_sqr_comba8: adcl %edx, %ebx movl 16(%esi), %eax adcl $0, %ecx - # sqr a[4]*a[4] + mull %eax addl %eax, %ebp adcl %edx, %ebx @@ -994,10 +994,10 @@ bn_sqr_comba8: adcl $0, %ecx movl %ebp, 32(%edi) movl 28(%esi), %eax - # saved r[8] - # ############### Calculate word 9 + + xorl %ebp, %ebp - # sqr a[7]*a[2] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1007,7 +1007,7 @@ bn_sqr_comba8: movl 24(%esi), %eax adcl $0, %ebp movl 12(%esi), %edx - # sqr a[6]*a[3] + mull %edx 
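	# In the comba squaring rounds here, each distinct cross product
	# a[i]*a[j] with i != j occurs twice in the final square, so the
	# 64-bit product left in %edx:%eax by mull is doubled in place
	#	addl %eax, %eax
	#	adcl %edx, %edx
	# before being folded into the three-register column accumulator;
	# the diagonal terms a[k]*a[k] (the mull %eax cases) are added once.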
addl %eax, %eax adcl %edx, %edx @@ -1017,7 +1017,7 @@ bn_sqr_comba8: movl 20(%esi), %eax adcl $0, %ebp movl 16(%esi), %edx - # sqr a[5]*a[4] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1028,10 +1028,10 @@ bn_sqr_comba8: adcl $0, %ebp movl %ebx, 36(%edi) movl 12(%esi), %edx - # saved r[9] - # ############### Calculate word 10 + + xorl %ebx, %ebx - # sqr a[7]*a[3] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1041,7 +1041,7 @@ bn_sqr_comba8: movl 24(%esi), %eax adcl $0, %ebx movl 16(%esi), %edx - # sqr a[6]*a[4] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1050,7 +1050,7 @@ bn_sqr_comba8: adcl %edx, %ebp movl 20(%esi), %eax adcl $0, %ebx - # sqr a[5]*a[5] + mull %eax addl %eax, %ecx adcl %edx, %ebp @@ -1058,10 +1058,10 @@ bn_sqr_comba8: adcl $0, %ebx movl %ecx, 40(%edi) movl 28(%esi), %eax - # saved r[10] - # ############### Calculate word 11 + + xorl %ecx, %ecx - # sqr a[7]*a[4] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1071,7 +1071,7 @@ bn_sqr_comba8: movl 24(%esi), %eax adcl $0, %ecx movl 20(%esi), %edx - # sqr a[6]*a[5] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1082,10 +1082,10 @@ bn_sqr_comba8: adcl $0, %ecx movl %ebp, 44(%edi) movl 20(%esi), %edx - # saved r[11] - # ############### Calculate word 12 + + xorl %ebp, %ebp - # sqr a[7]*a[5] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1094,7 +1094,7 @@ bn_sqr_comba8: adcl %edx, %ecx movl 24(%esi), %eax adcl $0, %ebp - # sqr a[6]*a[6] + mull %eax addl %eax, %ebx adcl %edx, %ecx @@ -1102,10 +1102,10 @@ bn_sqr_comba8: adcl $0, %ebp movl %ebx, 48(%edi) movl 28(%esi), %eax - # saved r[12] - # ############### Calculate word 13 + + xorl %ebx, %ebx - # sqr a[7]*a[6] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1115,16 +1115,16 @@ bn_sqr_comba8: movl 28(%esi), %eax adcl $0, %ebx movl %ecx, 52(%edi) - # saved r[13] - # ############### Calculate word 14 + + xorl %ecx, %ecx - # sqr a[7]*a[7] + mull %eax addl %eax, %ebp adcl %edx, %ebx adcl $0, %ecx movl %ebp, 56(%edi) - # saved r[14] + movl %ebx, 60(%edi) popl %ebx popl %ebp @@ -1148,9 +1148,9 @@ bn_sqr_comba4: xorl %ebx, %ebx xorl %ecx, %ecx movl (%esi), %eax - # ############### Calculate word 0 + xorl %ebp, %ebp - # sqr a[0]*a[0] + mull %eax addl %eax, %ebx adcl %edx, %ecx @@ -1158,10 +1158,10 @@ bn_sqr_comba4: adcl $0, %ebp movl %ebx, (%edi) movl 4(%esi), %eax - # saved r[0] - # ############### Calculate word 1 + + xorl %ebx, %ebx - # sqr a[1]*a[0] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1172,10 +1172,10 @@ bn_sqr_comba4: adcl $0, %ebx movl %ecx, 4(%edi) movl (%esi), %edx - # saved r[1] - # ############### Calculate word 2 + + xorl %ecx, %ecx - # sqr a[2]*a[0] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1184,7 +1184,7 @@ bn_sqr_comba4: adcl %edx, %ebx movl 4(%esi), %eax adcl $0, %ecx - # sqr a[1]*a[1] + mull %eax addl %eax, %ebp adcl %edx, %ebx @@ -1192,10 +1192,10 @@ bn_sqr_comba4: adcl $0, %ecx movl %ebp, 8(%edi) movl 12(%esi), %eax - # saved r[2] - # ############### Calculate word 3 + + xorl %ebp, %ebp - # sqr a[3]*a[0] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1205,7 +1205,7 @@ bn_sqr_comba4: movl 8(%esi), %eax adcl $0, %ebp movl 4(%esi), %edx - # sqr a[2]*a[1] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1216,10 +1216,10 @@ bn_sqr_comba4: adcl $0, %ebp movl %ebx, 12(%edi) movl 4(%esi), %edx - # saved r[3] - # ############### Calculate word 4 + + xorl %ebx, %ebx - # sqr a[3]*a[1] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1228,7 +1228,7 @@ bn_sqr_comba4: adcl %edx, %ebp movl 8(%esi), %eax adcl $0, %ebx - # sqr a[2]*a[2] + mull %eax addl %eax, 
%ecx adcl %edx, %ebp @@ -1236,10 +1236,10 @@ bn_sqr_comba4: adcl $0, %ebx movl %ecx, 16(%edi) movl 12(%esi), %eax - # saved r[4] - # ############### Calculate word 5 + + xorl %ecx, %ecx - # sqr a[3]*a[2] + mull %edx addl %eax, %eax adcl %edx, %edx @@ -1249,16 +1249,16 @@ bn_sqr_comba4: movl 12(%esi), %eax adcl $0, %ecx movl %ebp, 20(%edi) - # saved r[5] - # ############### Calculate word 6 + + xorl %ebp, %ebp - # sqr a[3]*a[3] + mull %eax addl %eax, %ebx adcl %edx, %ecx adcl $0, %ebp movl %ebx, 24(%edi) - # saved r[6] + movl %ecx, 28(%edi) popl %ebx popl %ebp diff --git a/secure/lib/libcrypto/i386/crypt586.s b/secure/lib/libcrypto/i386/crypt586.s index e80834e..dc594e4 100644 --- a/secure/lib/libcrypto/i386/crypt586.s +++ b/secure/lib/libcrypto/i386/crypt586.s @@ -1,9 +1,9 @@ # $FreeBSD$ - # Dont even think of reading this code - # It was automatically generated by crypt586.pl - # Which is a perl program used to generate the x86 assember for - # any of elf, a.out, BSDI, Win32, gaswin (for GNU as on Win32) or Solaris - # eric <eay@cryptsoft.com> + + + + + .file "crypt586.s" .version "01.01" @@ -19,18 +19,20 @@ fcrypt_body: pushl %edi - # Load the 2 words + xorl %edi, %edi xorl %esi, %esi - movl 24(%esp), %ebp + leal DES_SPtrans, %edx + pushl %edx + movl 28(%esp), %ebp pushl $25 .L000start: - # Round 0 - movl 32(%esp), %eax + + movl 36(%esp), %eax movl %esi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %esi, %edx andl %edx, %eax andl %ecx, %edx @@ -53,37 +55,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %edi + movl 32(%esp), %ebp + - # Round 1 - movl 32(%esp), %eax + movl 36(%esp), %eax movl %edi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %edi, %edx andl %edx, %eax andl %ecx, %edx @@ -106,37 +105,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %esi + movl 32(%esp), %ebp + - # Round 2 - movl 32(%esp), %eax + movl 36(%esp), %eax movl %esi, 
%edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %esi, %edx andl %edx, %eax andl %ecx, %edx @@ -159,37 +155,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %edi + movl 32(%esp), %ebp - # Round 3 - movl 32(%esp), %eax + + movl 36(%esp), %eax movl %edi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %edi, %edx andl %edx, %eax andl %ecx, %edx @@ -212,37 +205,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %esi + movl 32(%esp), %ebp - # Round 4 - movl 32(%esp), %eax + + movl 36(%esp), %eax movl %esi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %esi, %edx andl %edx, %eax andl %ecx, %edx @@ -265,37 +255,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %edi + movl 32(%esp), %ebp + - # Round 5 - movl 32(%esp), %eax + movl 36(%esp), %eax movl %edi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %edi, %edx andl %edx, %eax andl %ecx, %edx @@ -318,37 +305,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, 
%esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %esi + movl 32(%esp), %ebp + - # Round 6 - movl 32(%esp), %eax + movl 36(%esp), %eax movl %esi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %esi, %edx andl %edx, %eax andl %ecx, %edx @@ -371,37 +355,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %edi + movl 32(%esp), %ebp + - # Round 7 - movl 32(%esp), %eax + movl 36(%esp), %eax movl %edi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %edi, %edx andl %edx, %eax andl %ecx, %edx @@ -424,37 +405,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %esi + movl 32(%esp), %ebp - # Round 8 - movl 32(%esp), %eax + + movl 36(%esp), %eax movl %esi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %esi, %edx andl %edx, %eax andl %ecx, %edx @@ -477,37 +455,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 
28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %edi + movl 32(%esp), %ebp - # Round 9 - movl 32(%esp), %eax + + movl 36(%esp), %eax movl %edi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %edi, %edx andl %edx, %eax andl %ecx, %edx @@ -530,37 +505,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %esi + movl 32(%esp), %ebp + - # Round 10 - movl 32(%esp), %eax + movl 36(%esp), %eax movl %esi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %esi, %edx andl %edx, %eax andl %ecx, %edx @@ -583,37 +555,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %edi + movl 32(%esp), %ebp + - # Round 11 - movl 32(%esp), %eax + movl 36(%esp), %eax movl %edi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %edi, %edx andl %edx, %eax andl %ecx, %edx @@ -636,37 +605,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %esi - movl 
0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %esi + movl 32(%esp), %ebp + - # Round 12 - movl 32(%esp), %eax + movl 36(%esp), %eax movl %esi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %esi, %edx andl %edx, %eax andl %ecx, %edx @@ -689,37 +655,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %edi + movl 32(%esp), %ebp - # Round 13 - movl 32(%esp), %eax + + movl 36(%esp), %eax movl %edi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %edi, %edx andl %edx, %eax andl %ecx, %edx @@ -742,37 +705,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %esi + movl 32(%esp), %ebp - # Round 14 - movl 32(%esp), %eax + + movl 36(%esp), %eax movl %esi, %edx shrl $16, %edx - movl 36(%esp), %ecx + movl 40(%esp), %ecx xorl %esi, %edx andl %edx, %eax andl %ecx, %edx @@ -795,37 +755,34 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %edi + movl 32(%esp), %ebp + - # Round 15 - movl 32(%esp), %eax + movl 36(%esp), %eax movl %edi, %edx shrl $16, %edx - movl 36(%esp), 
%ecx + movl 40(%esp), %ecx xorl %edi, %edx andl %edx, %eax andl %ecx, %edx @@ -848,31 +805,28 @@ fcrypt_body: movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + movl 4(%esp), %ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 28(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx + movl 0x600(%ebp,%ebx),%ebx xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx + movl 0x700(%ebp,%ecx),%ebx xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx + movl 0x400(%ebp,%eax),%ebx xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx + movl 0x500(%ebp,%edx),%ebx xorl %ebx, %esi + movl 32(%esp), %ebp movl (%esp), %ebx movl %edi, %eax decl %ebx @@ -881,10 +835,10 @@ fcrypt_body: movl %ebx, (%esp) jnz .L000start - # FP - movl 24(%esp), %edx + + movl 28(%esp), %edx .byte 209 -.byte 207 # rorl $1 %edi +.byte 207 movl %esi, %eax xorl %edi, %esi andl $0xaaaaaaaa, %esi @@ -922,12 +876,12 @@ fcrypt_body: rorl $4, %eax movl %eax, (%edx) movl %edi, 4(%edx) - popl %ecx + addl $8, %esp popl %edi popl %esi popl %ebx popl %ebp ret -.fcrypt_body_end: - .size fcrypt_body,.fcrypt_body_end-fcrypt_body +.L_fcrypt_body_end: + .size fcrypt_body,.L_fcrypt_body_end-fcrypt_body .ident "fcrypt_body" diff --git a/secure/lib/libcrypto/i386/des-586.s b/secure/lib/libcrypto/i386/des-586.s index 55ddd71..b761290 100644 --- a/secure/lib/libcrypto/i386/des-586.s +++ b/secure/lib/libcrypto/i386/des-586.s @@ -1,22 +1,22 @@ # $FreeBSD$ - # Dont even think of reading this code - # It was automatically generated by des-586.pl - # Which is a perl program used to generate the x86 assember for - # any of elf, a.out, BSDI, Win32, gaswin (for GNU as on Win32) or Solaris - # eric <eay@cryptsoft.com> + + + + + .file "des-586.s" .version "01.01" gcc2_compiled.: .text .align 16 -.globl des_encrypt1 - .type des_encrypt1,@function -des_encrypt1: +.globl DES_encrypt1 + .type DES_encrypt1,@function +DES_encrypt1: pushl %esi pushl %edi - # Load the 2 words + movl 12(%esp), %esi xorl %ecx, %ecx pushl %ebx @@ -25,7 +25,7 @@ des_encrypt1: movl 28(%esp), %ebx movl 4(%esi), %edi - # IP + roll $4, %eax movl %eax, %esi xorl %edi, %eax @@ -62,1202 +62,979 @@ des_encrypt1: xorl %eax, %edi .byte 209 -.byte 199 # roll $1 %edi - movl 24(%esp), %ebp +.byte 199 + leal DES_SPtrans, %ebp + movl 24(%esp), %ecx cmpl $0, %ebx je .L000start_decrypt - # Round 0 - movl (%ebp), %eax + + movl (%ecx), %eax xorl %ebx, %ebx - movl 4(%ebp), %edx + movl 4(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - 
xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 1 - movl 8(%ebp), %eax + + movl 8(%ecx), %eax xorl %ebx, %ebx - movl 12(%ebp), %edx + movl 12(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi - # Round 2 - movl 16(%ebp), %eax + + movl 16(%ecx), %eax xorl %ebx, %ebx - movl 20(%ebp), %edx + movl 20(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 3 - movl 24(%ebp), %eax + movl 24(%ecx), %eax xorl %ebx, %ebx - movl 28(%ebp), %edx + movl 28(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 4 - movl 32(%ebp), %eax + movl 32(%ecx), %eax xorl %ebx, %ebx - movl 36(%ebp), %edx + movl 36(%ecx), %edx xorl %esi, 
%eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 5 - movl 40(%ebp), %eax + + movl 40(%ecx), %eax xorl %ebx, %ebx - movl 44(%ebp), %edx + movl 44(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 6 - movl 48(%ebp), %eax + movl 48(%ecx), %eax xorl %ebx, %ebx - movl 52(%ebp), %edx + movl 52(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 7 - movl 56(%ebp), %eax + movl 56(%ecx), %eax xorl %ebx, %ebx - movl 60(%ebp), %edx + movl 60(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 
0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi - # Round 8 - movl 64(%ebp), %eax + + movl 64(%ecx), %eax xorl %ebx, %ebx - movl 68(%ebp), %edx + movl 68(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 9 - movl 72(%ebp), %eax + + movl 72(%ecx), %eax xorl %ebx, %ebx - movl 76(%ebp), %edx + movl 76(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 10 - movl 80(%ebp), %eax + movl 80(%ecx), %eax xorl %ebx, %ebx - movl 84(%ebp), %edx + movl 84(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 
0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 11 - movl 88(%ebp), %eax + movl 88(%ecx), %eax xorl %ebx, %ebx - movl 92(%ebp), %edx + movl 92(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi - # Round 12 - movl 96(%ebp), %eax + + movl 96(%ecx), %eax xorl %ebx, %ebx - movl 100(%ebp), %edx + movl 100(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 13 - movl 104(%ebp), %eax + movl 104(%ecx), %eax xorl %ebx, %ebx - movl 108(%ebp), %edx + movl 108(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 14 - movl 112(%ebp), 
%eax + movl 112(%ecx), %eax xorl %ebx, %ebx - movl 116(%ebp), %edx + movl 116(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 15 - movl 120(%ebp), %eax + + movl 120(%ecx), %eax xorl %ebx, %ebx - movl 124(%ebp), %edx + movl 124(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi jmp .L001end .L000start_decrypt: - # Round 15 - movl 120(%ebp), %eax + + movl 120(%ecx), %eax xorl %ebx, %ebx - movl 124(%ebp), %edx + movl 124(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 14 - movl 112(%ebp), %eax + movl 112(%ecx), %eax xorl %ebx, %ebx - movl 116(%ebp), %edx + movl 116(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb 
%dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 13 - movl 104(%ebp), %eax + movl 104(%ecx), %eax xorl %ebx, %ebx - movl 108(%ebp), %edx + movl 108(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 12 - movl 96(%ebp), %eax + + movl 96(%ecx), %eax xorl %ebx, %ebx - movl 100(%ebp), %edx + movl 100(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 11 - movl 88(%ebp), %eax + movl 88(%ecx), %eax xorl %ebx, %ebx - movl 92(%ebp), %edx + movl 92(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp 
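	# The rewritten rounds keep the DES_SPtrans base resident in %ebp
	# for the whole function (loaded once with leal DES_SPtrans, %ebp)
	# and fuse each S-box load and xor into a single instruction, e.g.
	#	xorl 0x300(%ebp,%ecx),%edi
	# so no absolute des_SPtrans relocation is emitted per lookup and
	# only the round-key pointer, now held in %ecx, is reloaded from
	# 24(%esp) each round.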
+ xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 10 - movl 80(%ebp), %eax + movl 80(%ecx), %eax xorl %ebx, %ebx - movl 84(%ebp), %edx + movl 84(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi - # Round 9 - movl 72(%ebp), %eax + + movl 72(%ecx), %eax xorl %ebx, %ebx - movl 76(%ebp), %edx + movl 76(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 8 - movl 64(%ebp), %eax + + movl 64(%ecx), %eax xorl %ebx, %ebx - movl 68(%ebp), %edx + movl 68(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 
0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 7 - movl 56(%ebp), %eax + movl 56(%ecx), %eax xorl %ebx, %ebx - movl 60(%ebp), %edx + movl 60(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 6 - movl 48(%ebp), %eax + movl 48(%ecx), %eax xorl %ebx, %ebx - movl 52(%ebp), %edx + movl 52(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi - # Round 5 - movl 40(%ebp), %eax + + movl 40(%ecx), %eax xorl %ebx, %ebx - movl 44(%ebp), %edx + movl 44(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 4 - movl 32(%ebp), %eax + movl 32(%ecx), %eax xorl %ebx, %ebx - movl 36(%ebp), %edx + movl 36(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb 
%ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 3 - movl 24(%ebp), %eax + movl 24(%ecx), %eax xorl %ebx, %ebx - movl 28(%ebp), %edx + movl 28(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 2 - movl 16(%ebp), %eax + + movl 16(%ecx), %eax xorl %ebx, %ebx - movl 20(%ebp), %edx + movl 20(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi - # Round 1 - movl 8(%ebp), %eax + + movl 8(%ecx), %eax xorl %ebx, %ebx - movl 12(%ebp), %edx + movl 12(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - 
movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 0 - movl (%ebp), %eax + movl (%ecx), %eax xorl %ebx, %ebx - movl 4(%ebp), %edx + movl 4(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi .L001end: - # FP + movl 20(%esp), %edx .byte 209 -.byte 206 # rorl $1 %esi +.byte 206 movl %edi, %eax xorl %esi, %edi andl $0xaaaaaaaa, %edi @@ -1300,18 +1077,18 @@ des_encrypt1: popl %edi popl %esi ret -.L_des_encrypt1_end: - .size des_encrypt1,.L_des_encrypt1_end-des_encrypt1 +.L_DES_encrypt1_end: + .size DES_encrypt1,.L_DES_encrypt1_end-DES_encrypt1 .ident "desasm.pl" .text .align 16 -.globl des_encrypt2 - .type des_encrypt2,@function -des_encrypt2: +.globl DES_encrypt2 + .type DES_encrypt2,@function +DES_encrypt2: pushl %esi pushl %edi - # Load the 2 words + movl 12(%esp), %eax xorl %ecx, %ecx pushl %ebx @@ -1321,1198 +1098,975 @@ des_encrypt2: roll $3, %esi movl 4(%eax), %edi roll $3, %edi - movl 24(%esp), %ebp + leal DES_SPtrans, %ebp + movl 24(%esp), %ecx cmpl $0, %ebx je .L002start_decrypt - # Round 0 - movl (%ebp), %eax + + movl (%ecx), %eax xorl %ebx, %ebx - movl 4(%ebp), %edx + movl 4(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 1 - movl 8(%ebp), %eax + movl 8(%ecx), %eax xorl %ebx, %ebx - movl 12(%ebp), %edx + movl 
12(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 2 - movl 16(%ebp), %eax + movl 16(%ecx), %eax xorl %ebx, %ebx - movl 20(%ebp), %edx + movl 20(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 3 - movl 24(%ebp), %eax + + movl 24(%ecx), %eax xorl %ebx, %ebx - movl 28(%ebp), %edx + movl 28(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi - # Round 4 - movl 32(%ebp), %eax + + movl 32(%ecx), %eax xorl %ebx, %ebx - movl 36(%ebp), %edx + movl 36(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl 
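A note on the pattern repeated through all of these des-586.s round hunks: the old code addressed the S-box tables through the absolute symbol des_SPtrans and burned %ebp as a scratch register on every lookup, while the rewrite loads the table base into %ebp once (the leal DES_SPtrans, %ebp in the function prologue) and folds each load-then-xor pair into a single base-indexed xorl. A minimal before/after sketch, using the same registers as the surrounding hunks:

	# before: absolute-symbol addressing, two instructions per lookup,
	# and %ebp is clobbered every time
	movl	des_SPtrans(%ebx), %ebp
	xorl	%ebp, %edi

	# after: %ebp holds the table base for the whole function,
	# so one base-indexed xorl does both the fetch and the mix
	xorl	(%ebp,%ebx), %edi

Besides saving an instruction per lookup, this cuts the symbol relocations from eight per round to one per function.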
shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 5 - movl 40(%ebp), %eax + movl 40(%ecx), %eax xorl %ebx, %ebx - movl 44(%ebp), %edx + movl 44(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 6 - movl 48(%ebp), %eax + movl 48(%ecx), %eax xorl %ebx, %ebx - movl 52(%ebp), %edx + movl 52(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 7 - movl 56(%ebp), %eax + + movl 56(%ecx), %eax xorl %ebx, %ebx - movl 60(%ebp), %edx + movl 60(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - 
movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 8 - movl 64(%ebp), %eax + movl 64(%ecx), %eax xorl %ebx, %ebx - movl 68(%ebp), %edx + movl 68(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 9 - movl 72(%ebp), %eax + movl 72(%ecx), %eax xorl %ebx, %ebx - movl 76(%ebp), %edx + movl 76(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi - # Round 10 - movl 80(%ebp), %eax + + movl 80(%ecx), %eax xorl %ebx, %ebx - movl 84(%ebp), %edx + movl 84(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 11 - movl 88(%ebp), %eax + 
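Because %ebp is now pinned to the table, the round-key pointer that used to live in %ebp moves to %ecx, and %ecx already doubles as the byte-index scratch register (the movb %ah, %cl moves). That is why every rewritten round clears %ecx after fetching its two subkey words and reloads the pointer from the stack before the next round. The round skeleton, with the eight S-box lookups elided (a sketch, not the verbatim code):

	movl	56(%ecx), %eax	# subkey words; %ecx = key-schedule pointer here
	movl	60(%ecx), %edx
	xorl	%ecx, %ecx	# free %ecx up as a zero-extended byte index
	...			# eight xorl off(%ebp,%reg) S-box lookups
	movl	24(%esp), %ecx	# key pointer restored for the next round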
+ movl 88(%ecx), %eax xorl %ebx, %ebx - movl 92(%ebp), %edx + movl 92(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 12 - movl 96(%ebp), %eax + movl 96(%ecx), %eax xorl %ebx, %ebx - movl 100(%ebp), %edx + movl 100(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 13 - movl 104(%ebp), %eax + movl 104(%ecx), %eax xorl %ebx, %ebx - movl 108(%ebp), %edx + movl 108(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi - # Round 14 - movl 112(%ebp), %eax + + movl 112(%ecx), %eax xorl %ebx, %ebx - movl 116(%ebp), %edx + movl 116(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 
0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 15 - movl 120(%ebp), %eax + movl 120(%ecx), %eax xorl %ebx, %ebx - movl 124(%ebp), %edx + movl 124(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi jmp .L003end .L002start_decrypt: - # Round 15 - movl 120(%ebp), %eax + + movl 120(%ecx), %eax xorl %ebx, %ebx - movl 124(%ebp), %edx + movl 124(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 14 - movl 112(%ebp), %eax + + movl 112(%ecx), %eax xorl %ebx, %ebx - movl 116(%ebp), %edx + movl 116(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), 
%ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi - # Round 13 - movl 104(%ebp), %eax + + movl 104(%ecx), %eax xorl %ebx, %ebx - movl 108(%ebp), %edx + movl 108(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 12 - movl 96(%ebp), %eax + movl 96(%ecx), %eax xorl %ebx, %ebx - movl 100(%ebp), %edx + movl 100(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 11 - movl 88(%ebp), %eax + movl 88(%ecx), %eax xorl %ebx, %ebx - movl 92(%ebp), %edx + movl 92(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi 
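The same relocation concern drives the jump-table rewrite in the DES_ncbc_encrypt and DES_ede3_cbc_encrypt hunks further down: the old code indexed a table of absolute label addresses (movl .L008cbc_enc_jmp_table(,%ebp,4),%ebp), while the new code stores each entry as an offset from a .L008PIC_point anchor and recovers the anchor's runtime address with the call/pop idiom. Sketched on a hypothetical table (the jmp_table name is illustrative, not from the diff):

	call	1f		# pushes the return address, i.e. the address of 1:
1:	popl	%edx		# ... which popl turns into a PIC base in %edx
	leal	jmp_table-1b(%edx), %ecx	# runtime address of the table
	movl	(%ecx,%ebp,4), %ebp	# fetch the anchor-relative entry
	addl	%edx, %ebp	# rebase it to an absolute jump target
	jmp	*%ebp

This is the shape of the new .L008PIC_point/.L009cbc_enc_jmp_table code below, and it is what lets the object link into a shared libcrypto without text relocations for these tables.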
+ xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 10 - movl 80(%ebp), %eax + + movl 80(%ecx), %eax xorl %ebx, %ebx - movl 84(%ebp), %edx + movl 84(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 9 - movl 72(%ebp), %eax + movl 72(%ecx), %eax xorl %ebx, %ebx - movl 76(%ebp), %edx + movl 76(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 8 - movl 64(%ebp), %eax + movl 64(%ecx), %eax xorl %ebx, %ebx - movl 68(%ebp), %edx + movl 68(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi - # Round 7 - movl 56(%ebp), %eax + + movl 56(%ecx), %eax xorl %ebx, %ebx - movl 60(%ebp), %edx + movl 60(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, 
%bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 6 - movl 48(%ebp), %eax + + movl 48(%ecx), %eax xorl %ebx, %ebx - movl 52(%ebp), %edx + movl 52(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 5 - movl 40(%ebp), %eax + movl 40(%ecx), %eax xorl %ebx, %ebx - movl 44(%ebp), %edx + movl 44(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 4 - movl 32(%ebp), %eax + movl 32(%ecx), %eax xorl %ebx, %ebx - movl 36(%ebp), %edx + movl 36(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl 
$16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi - # Round 3 - movl 24(%ebp), %eax + + movl 24(%ecx), %eax xorl %ebx, %ebx - movl 28(%ebp), %edx + movl 28(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi + - # Round 2 - movl 16(%ebp), %eax + movl 16(%ecx), %eax xorl %ebx, %ebx - movl 20(%ebp), %edx + movl 20(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi + - # Round 1 - movl 8(%ebp), %eax + movl 8(%ecx), %eax xorl %ebx, %ebx - movl 12(%ebp), %edx + movl 12(%ecx), %edx xorl %esi, %eax + xorl %ecx, %ecx xorl %esi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%edi movb %dl, %bl - xorl %ebp, %edi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi + xorl 0x200(%ebp,%ecx),%edi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %edi + xorl 0x100(%ebp,%ebx),%edi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %edi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%edi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %edi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %edi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %edi - movl 
0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %edi + xorl 0x600(%ebp,%ebx),%edi + xorl 0x700(%ebp,%ecx),%edi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%edi + xorl 0x500(%ebp,%edx),%edi - # Round 0 - movl (%ebp), %eax + + movl (%ecx), %eax xorl %ebx, %ebx - movl 4(%ebp), %edx + movl 4(%ecx), %edx xorl %edi, %eax + xorl %ecx, %ecx xorl %edi, %edx andl $0xfcfcfcfc, %eax andl $0xcfcfcfcf, %edx movb %al, %bl movb %ah, %cl rorl $4, %edx - movl des_SPtrans(%ebx),%ebp + xorl (%ebp,%ebx),%esi movb %dl, %bl - xorl %ebp, %esi - movl 0x200+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi + xorl 0x200(%ebp,%ecx),%esi movb %dh, %cl shrl $16, %eax - movl 0x100+des_SPtrans(%ebx),%ebp - xorl %ebp, %esi + xorl 0x100(%ebp,%ebx),%esi movb %ah, %bl shrl $16, %edx - movl 0x300+des_SPtrans(%ecx),%ebp - xorl %ebp, %esi - movl 24(%esp), %ebp + xorl 0x300(%ebp,%ecx),%esi movb %dh, %cl andl $0xff, %eax andl $0xff, %edx - movl 0x600+des_SPtrans(%ebx),%ebx - xorl %ebx, %esi - movl 0x700+des_SPtrans(%ecx),%ebx - xorl %ebx, %esi - movl 0x400+des_SPtrans(%eax),%ebx - xorl %ebx, %esi - movl 0x500+des_SPtrans(%edx),%ebx - xorl %ebx, %esi + xorl 0x600(%ebp,%ebx),%esi + xorl 0x700(%ebp,%ecx),%esi + movl 24(%esp), %ecx + xorl 0x400(%ebp,%eax),%esi + xorl 0x500(%ebp,%edx),%esi .L003end: - # Fixup + rorl $3, %edi movl 20(%esp), %eax rorl $3, %esi @@ -2523,26 +2077,26 @@ des_encrypt2: popl %edi popl %esi ret -.L_des_encrypt2_end: - .size des_encrypt2,.L_des_encrypt2_end-des_encrypt2 +.L_DES_encrypt2_end: + .size DES_encrypt2,.L_DES_encrypt2_end-DES_encrypt2 .ident "desasm.pl" .text .align 16 -.globl des_encrypt3 - .type des_encrypt3,@function -des_encrypt3: +.globl DES_encrypt3 + .type DES_encrypt3,@function +DES_encrypt3: pushl %ebx movl 8(%esp), %ebx pushl %ebp pushl %esi pushl %edi - # Load the data words + movl (%ebx), %edi movl 4(%ebx), %esi subl $12, %esp - # IP + roll $4, %edi movl %edi, %edx xorl %esi, %edi @@ -2588,20 +2142,20 @@ des_encrypt3: movl $1, 8(%esp) movl %eax, 4(%esp) movl %ebx, (%esp) - call des_encrypt2 + call DES_encrypt2 movl $0, 8(%esp) movl %edi, 4(%esp) movl %ebx, (%esp) - call des_encrypt2 + call DES_encrypt2 movl $1, 8(%esp) movl %esi, 4(%esp) movl %ebx, (%esp) - call des_encrypt2 + call DES_encrypt2 addl $12, %esp movl (%ebx), %edi movl 4(%ebx), %esi - # FP + roll $2, %esi roll $3, %edi movl %edi, %eax @@ -2646,26 +2200,26 @@ des_encrypt3: popl %ebp popl %ebx ret -.L_des_encrypt3_end: - .size des_encrypt3,.L_des_encrypt3_end-des_encrypt3 +.L_DES_encrypt3_end: + .size DES_encrypt3,.L_DES_encrypt3_end-DES_encrypt3 .ident "desasm.pl" .text .align 16 -.globl des_decrypt3 - .type des_decrypt3,@function -des_decrypt3: +.globl DES_decrypt3 + .type DES_decrypt3,@function +DES_decrypt3: pushl %ebx movl 8(%esp), %ebx pushl %ebp pushl %esi pushl %edi - # Load the data words + movl (%ebx), %edi movl 4(%ebx), %esi subl $12, %esp - # IP + roll $4, %edi movl %edi, %edx xorl %esi, %edi @@ -2711,20 +2265,20 @@ des_decrypt3: movl $0, 8(%esp) movl %eax, 4(%esp) movl %ebx, (%esp) - call des_encrypt2 + call DES_encrypt2 movl $1, 8(%esp) movl %edi, 4(%esp) movl %ebx, (%esp) - call des_encrypt2 + call DES_encrypt2 movl $0, 8(%esp) movl %esi, 4(%esp) movl %ebx, (%esp) - call des_encrypt2 + call DES_encrypt2 addl $12, %esp movl (%ebx), %edi movl 4(%ebx), %esi - # FP + roll $2, %esi roll $3, %edi movl %edi, %eax @@ -2769,21 +2323,21 @@ des_decrypt3: popl %ebp popl %ebx ret -.L_des_decrypt3_end: - .size des_decrypt3,.L_des_decrypt3_end-des_decrypt3 +.L_DES_decrypt3_end: + .size DES_decrypt3,.L_DES_decrypt3_end-DES_decrypt3 .ident 
"desasm.pl" .text .align 16 -.globl des_ncbc_encrypt - .type des_ncbc_encrypt,@function -des_ncbc_encrypt: +.globl DES_ncbc_encrypt + .type DES_ncbc_encrypt,@function +DES_ncbc_encrypt: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 28(%esp), %ebp - # getting iv ptr from parameter 4 + movl 36(%esp), %ebx movl (%ebx), %esi movl 4(%ebx), %edi @@ -2794,11 +2348,11 @@ des_ncbc_encrypt: movl %esp, %ebx movl 36(%esp), %esi movl 40(%esp), %edi - # getting encrypt flag from parameter 5 + movl 56(%esp), %ecx - # get and push parameter 5 + pushl %ecx - # get and push parameter 3 + movl 52(%esp), %eax pushl %eax pushl %ebx @@ -2815,7 +2369,7 @@ des_ncbc_encrypt: xorl %edx, %ebx movl %eax, 12(%esp) movl %ebx, 16(%esp) - call des_encrypt1 + call DES_encrypt1 movl 12(%esp), %eax movl 16(%esp), %ebx movl %eax, (%edi) @@ -2828,33 +2382,38 @@ des_ncbc_encrypt: movl 56(%esp), %ebp andl $7, %ebp jz .L007finish + call .L008PIC_point +.L008PIC_point: + popl %edx + leal .L009cbc_enc_jmp_table-.L008PIC_point(%edx),%ecx + movl (%ecx,%ebp,4), %ebp + addl %edx, %ebp xorl %ecx, %ecx xorl %edx, %edx - movl .L008cbc_enc_jmp_table(,%ebp,4),%ebp jmp *%ebp -.L009ej7: +.L010ej7: movb 6(%esi), %dh sall $8, %edx -.L010ej6: +.L011ej6: movb 5(%esi), %dh -.L011ej5: +.L012ej5: movb 4(%esi), %dl -.L012ej4: +.L013ej4: movl (%esi), %ecx - jmp .L013ejend -.L014ej3: + jmp .L014ejend +.L015ej3: movb 2(%esi), %ch sall $8, %ecx -.L015ej2: +.L016ej2: movb 1(%esi), %ch -.L016ej1: +.L017ej1: movb (%esi), %cl -.L013ejend: +.L014ejend: xorl %ecx, %eax xorl %edx, %ebx movl %eax, 12(%esp) movl %ebx, 16(%esp) - call des_encrypt1 + call DES_encrypt1 movl 12(%esp), %eax movl 16(%esp), %ebx movl %eax, (%edi) @@ -2865,13 +2424,13 @@ des_ncbc_encrypt: andl $4294967288, %ebp movl 20(%esp), %eax movl 24(%esp), %ebx - jz .L017decrypt_finish -.L018decrypt_loop: + jz .L018decrypt_finish +.L019decrypt_loop: movl (%esi), %eax movl 4(%esi), %ebx movl %eax, 12(%esp) movl %ebx, 16(%esp) - call des_encrypt1 + call DES_encrypt1 movl 12(%esp), %eax movl 16(%esp), %ebx movl 20(%esp), %ecx @@ -2887,8 +2446,8 @@ des_ncbc_encrypt: addl $8, %esi addl $8, %edi subl $8, %ebp - jnz .L018decrypt_loop -.L017decrypt_finish: + jnz .L019decrypt_loop +.L018decrypt_finish: movl 56(%esp), %ebp andl $7, %ebp jz .L007finish @@ -2896,7 +2455,7 @@ des_ncbc_encrypt: movl 4(%esi), %ebx movl %eax, 12(%esp) movl %ebx, 16(%esp) - call des_encrypt1 + call DES_encrypt1 movl 12(%esp), %eax movl 16(%esp), %ebx movl 20(%esp), %ecx @@ -2905,26 +2464,26 @@ des_ncbc_encrypt: xorl %ebx, %edx movl (%esi), %eax movl 4(%esi), %ebx -.L019dj7: +.L020dj7: rorl $16, %edx movb %dl, 6(%edi) shrl $16, %edx -.L020dj6: +.L021dj6: movb %dh, 5(%edi) -.L021dj5: +.L022dj5: movb %dl, 4(%edi) -.L022dj4: +.L023dj4: movl %ecx, (%edi) - jmp .L023djend -.L024dj3: + jmp .L024djend +.L025dj3: rorl $16, %ecx movb %cl, 2(%edi) sall $16, %ecx -.L025dj2: +.L026dj2: movb %ch, 1(%esi) -.L026dj1: +.L027dj1: movb %cl, (%esi) -.L023djend: +.L024djend: jmp .L007finish .align 16 .L007finish: @@ -2938,40 +2497,30 @@ des_ncbc_encrypt: popl %ebp ret .align 16 -.L008cbc_enc_jmp_table: +.L009cbc_enc_jmp_table: .long 0 - .long .L016ej1 - .long .L015ej2 - .long .L014ej3 - .long .L012ej4 - .long .L011ej5 - .long .L010ej6 - .long .L009ej7 -.align 16 -.L027cbc_dec_jmp_table: - .long 0 - .long .L026dj1 - .long .L025dj2 - .long .L024dj3 - .long .L022dj4 - .long .L021dj5 - .long .L020dj6 - .long .L019dj7 -.L_des_ncbc_encrypt_end: - .size des_ncbc_encrypt,.L_des_ncbc_encrypt_end-des_ncbc_encrypt + .long .L017ej1-.L008PIC_point + .long 
.L016ej2-.L008PIC_point + .long .L015ej3-.L008PIC_point + .long .L013ej4-.L008PIC_point + .long .L012ej5-.L008PIC_point + .long .L011ej6-.L008PIC_point + .long .L010ej7-.L008PIC_point +.L_DES_ncbc_encrypt_end: + .size DES_ncbc_encrypt,.L_DES_ncbc_encrypt_end-DES_ncbc_encrypt .ident "desasm.pl" .text .align 16 -.globl des_ede3_cbc_encrypt - .type des_ede3_cbc_encrypt,@function -des_ede3_cbc_encrypt: +.globl DES_ede3_cbc_encrypt + .type DES_ede3_cbc_encrypt,@function +DES_ede3_cbc_encrypt: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 28(%esp), %ebp - # getting iv ptr from parameter 6 + movl 44(%esp), %ebx movl (%ebx), %esi movl 4(%ebx), %edi @@ -2982,15 +2531,15 @@ des_ede3_cbc_encrypt: movl %esp, %ebx movl 36(%esp), %esi movl 40(%esp), %edi - # getting encrypt flag from parameter 7 + movl 64(%esp), %ecx - # get and push parameter 5 + movl 56(%esp), %eax pushl %eax - # get and push parameter 4 + movl 56(%esp), %eax pushl %eax - # get and push parameter 3 + movl 56(%esp), %eax pushl %eax pushl %ebx @@ -3007,7 +2556,7 @@ des_ede3_cbc_encrypt: xorl %edx, %ebx movl %eax, 16(%esp) movl %ebx, 20(%esp) - call des_encrypt3 + call DES_encrypt3 movl 16(%esp), %eax movl 20(%esp), %ebx movl %eax, (%edi) @@ -3020,33 +2569,38 @@ des_ede3_cbc_encrypt: movl 60(%esp), %ebp andl $7, %ebp jz .L031finish + call .L032PIC_point +.L032PIC_point: + popl %edx + leal .L033cbc_enc_jmp_table-.L032PIC_point(%edx),%ecx + movl (%ecx,%ebp,4), %ebp + addl %edx, %ebp xorl %ecx, %ecx xorl %edx, %edx - movl .L032cbc_enc_jmp_table(,%ebp,4),%ebp jmp *%ebp -.L033ej7: +.L034ej7: movb 6(%esi), %dh sall $8, %edx -.L034ej6: +.L035ej6: movb 5(%esi), %dh -.L035ej5: +.L036ej5: movb 4(%esi), %dl -.L036ej4: +.L037ej4: movl (%esi), %ecx - jmp .L037ejend -.L038ej3: + jmp .L038ejend +.L039ej3: movb 2(%esi), %ch sall $8, %ecx -.L039ej2: +.L040ej2: movb 1(%esi), %ch -.L040ej1: +.L041ej1: movb (%esi), %cl -.L037ejend: +.L038ejend: xorl %ecx, %eax xorl %edx, %ebx movl %eax, 16(%esp) movl %ebx, 20(%esp) - call des_encrypt3 + call DES_encrypt3 movl 16(%esp), %eax movl 20(%esp), %ebx movl %eax, (%edi) @@ -3057,13 +2611,13 @@ des_ede3_cbc_encrypt: andl $4294967288, %ebp movl 24(%esp), %eax movl 28(%esp), %ebx - jz .L041decrypt_finish -.L042decrypt_loop: + jz .L042decrypt_finish +.L043decrypt_loop: movl (%esi), %eax movl 4(%esi), %ebx movl %eax, 16(%esp) movl %ebx, 20(%esp) - call des_decrypt3 + call DES_decrypt3 movl 16(%esp), %eax movl 20(%esp), %ebx movl 24(%esp), %ecx @@ -3079,8 +2633,8 @@ des_ede3_cbc_encrypt: addl $8, %esi addl $8, %edi subl $8, %ebp - jnz .L042decrypt_loop -.L041decrypt_finish: + jnz .L043decrypt_loop +.L042decrypt_finish: movl 60(%esp), %ebp andl $7, %ebp jz .L031finish @@ -3088,7 +2642,7 @@ des_ede3_cbc_encrypt: movl 4(%esi), %ebx movl %eax, 16(%esp) movl %ebx, 20(%esp) - call des_decrypt3 + call DES_decrypt3 movl 16(%esp), %eax movl 20(%esp), %ebx movl 24(%esp), %ecx @@ -3097,26 +2651,26 @@ des_ede3_cbc_encrypt: xorl %ebx, %edx movl (%esi), %eax movl 4(%esi), %ebx -.L043dj7: +.L044dj7: rorl $16, %edx movb %dl, 6(%edi) shrl $16, %edx -.L044dj6: +.L045dj6: movb %dh, 5(%edi) -.L045dj5: +.L046dj5: movb %dl, 4(%edi) -.L046dj4: +.L047dj4: movl %ecx, (%edi) - jmp .L047djend -.L048dj3: + jmp .L048djend +.L049dj3: rorl $16, %ecx movb %cl, 2(%edi) sall $16, %ecx -.L049dj2: +.L050dj2: movb %ch, 1(%esi) -.L050dj1: +.L051dj1: movb %cl, (%esi) -.L047djend: +.L048djend: jmp .L031finish .align 16 .L031finish: @@ -3130,25 +2684,15 @@ des_ede3_cbc_encrypt: popl %ebp ret .align 16 -.L032cbc_enc_jmp_table: - .long 0 - .long .L040ej1 - 
.long .L039ej2 - .long .L038ej3 - .long .L036ej4 - .long .L035ej5 - .long .L034ej6 - .long .L033ej7 -.align 16 -.L051cbc_dec_jmp_table: +.L033cbc_enc_jmp_table: .long 0 - .long .L050dj1 - .long .L049dj2 - .long .L048dj3 - .long .L046dj4 - .long .L045dj5 - .long .L044dj6 - .long .L043dj7 -.L_des_ede3_cbc_encrypt_end: - .size des_ede3_cbc_encrypt,.L_des_ede3_cbc_encrypt_end-des_ede3_cbc_encrypt + .long .L041ej1-.L032PIC_point + .long .L040ej2-.L032PIC_point + .long .L039ej3-.L032PIC_point + .long .L037ej4-.L032PIC_point + .long .L036ej5-.L032PIC_point + .long .L035ej6-.L032PIC_point + .long .L034ej7-.L032PIC_point +.L_DES_ede3_cbc_encrypt_end: + .size DES_ede3_cbc_encrypt,.L_DES_ede3_cbc_encrypt_end-DES_ede3_cbc_encrypt .ident "desasm.pl" diff --git a/secure/lib/libcrypto/i386/md5-586.s b/secure/lib/libcrypto/i386/md5-586.s index 5816cc5..b97c357 100644 --- a/secure/lib/libcrypto/i386/md5-586.s +++ b/secure/lib/libcrypto/i386/md5-586.s @@ -1,11 +1,11 @@ # $FreeBSD$ - # Dont even think of reading this code - # It was automatically generated by md5-586.pl - # Which is a perl program used to generate the x86 assember for - # any of elf, a.out, BSDI, Win32, gaswin (for GNU as on Win32) or Solaris - # eric <eay@cryptsoft.com> - .file "md5-586.s" + + + + + + .file "/usr/src/secure/lib/libcrypto/../../../crypto/openssl/crypto/md5/asm/md5-586.s" .version "01.01" gcc2_compiled.: .text @@ -30,10 +30,10 @@ md5_block_asm_host_order: movl 12(%edi), %edx .L000start: - # R0 section + movl %ecx, %edi movl (%esi), %ebp - # R0 0 + xorl %edx, %edi andl %ebx, %edi leal 3614090360(%eax,%ebp,1),%eax @@ -43,7 +43,7 @@ md5_block_asm_host_order: roll $7, %eax movl 4(%esi), %ebp addl %ebx, %eax - # R0 1 + xorl %ecx, %edi andl %eax, %edi leal 3905402710(%edx,%ebp,1),%edx @@ -53,7 +53,7 @@ md5_block_asm_host_order: roll $12, %edx movl 8(%esi), %ebp addl %eax, %edx - # R0 2 + xorl %ebx, %edi andl %edx, %edi leal 606105819(%ecx,%ebp,1),%ecx @@ -63,7 +63,7 @@ md5_block_asm_host_order: roll $17, %ecx movl 12(%esi), %ebp addl %edx, %ecx - # R0 3 + xorl %eax, %edi andl %ecx, %edi leal 3250441966(%ebx,%ebp,1),%ebx @@ -73,7 +73,7 @@ md5_block_asm_host_order: roll $22, %ebx movl 16(%esi), %ebp addl %ecx, %ebx - # R0 4 + xorl %edx, %edi andl %ebx, %edi leal 4118548399(%eax,%ebp,1),%eax @@ -83,7 +83,7 @@ md5_block_asm_host_order: roll $7, %eax movl 20(%esi), %ebp addl %ebx, %eax - # R0 5 + xorl %ecx, %edi andl %eax, %edi leal 1200080426(%edx,%ebp,1),%edx @@ -93,7 +93,7 @@ md5_block_asm_host_order: roll $12, %edx movl 24(%esi), %ebp addl %eax, %edx - # R0 6 + xorl %ebx, %edi andl %edx, %edi leal 2821735955(%ecx,%ebp,1),%ecx @@ -103,7 +103,7 @@ md5_block_asm_host_order: roll $17, %ecx movl 28(%esi), %ebp addl %edx, %ecx - # R0 7 + xorl %eax, %edi andl %ecx, %edi leal 4249261313(%ebx,%ebp,1),%ebx @@ -113,7 +113,7 @@ md5_block_asm_host_order: roll $22, %ebx movl 32(%esi), %ebp addl %ecx, %ebx - # R0 8 + xorl %edx, %edi andl %ebx, %edi leal 1770035416(%eax,%ebp,1),%eax @@ -123,7 +123,7 @@ md5_block_asm_host_order: roll $7, %eax movl 36(%esi), %ebp addl %ebx, %eax - # R0 9 + xorl %ecx, %edi andl %eax, %edi leal 2336552879(%edx,%ebp,1),%edx @@ -133,7 +133,7 @@ md5_block_asm_host_order: roll $12, %edx movl 40(%esi), %ebp addl %eax, %edx - # R0 10 + xorl %ebx, %edi andl %edx, %edi leal 4294925233(%ecx,%ebp,1),%ecx @@ -143,7 +143,7 @@ md5_block_asm_host_order: roll $17, %ecx movl 44(%esi), %ebp addl %edx, %ecx - # R0 11 + xorl %eax, %edi andl %ecx, %edi leal 2304563134(%ebx,%ebp,1),%ebx @@ -153,7 +153,7 @@ md5_block_asm_host_order: roll 
$22, %ebx movl 48(%esi), %ebp addl %ecx, %ebx - # R0 12 + xorl %edx, %edi andl %ebx, %edi leal 1804603682(%eax,%ebp,1),%eax @@ -163,7 +163,7 @@ md5_block_asm_host_order: roll $7, %eax movl 52(%esi), %ebp addl %ebx, %eax - # R0 13 + xorl %ecx, %edi andl %eax, %edi leal 4254626195(%edx,%ebp,1),%edx @@ -173,7 +173,7 @@ md5_block_asm_host_order: roll $12, %edx movl 56(%esi), %ebp addl %eax, %edx - # R0 14 + xorl %ebx, %edi andl %edx, %edi leal 2792965006(%ecx,%ebp,1),%ecx @@ -183,7 +183,7 @@ md5_block_asm_host_order: roll $17, %ecx movl 60(%esi), %ebp addl %edx, %ecx - # R0 15 + xorl %eax, %edi andl %ecx, %edi leal 1236535329(%ebx,%ebp,1),%ebx @@ -194,8 +194,8 @@ md5_block_asm_host_order: movl 4(%esi), %ebp addl %ecx, %ebx - # R1 section - # R1 16 + + leal 4129170786(%eax,%ebp,1),%eax xorl %ebx, %edi andl %edx, %edi @@ -205,7 +205,7 @@ md5_block_asm_host_order: movl %ebx, %edi roll $5, %eax addl %ebx, %eax - # R1 17 + leal 3225465664(%edx,%ebp,1),%edx xorl %eax, %edi andl %ecx, %edi @@ -215,7 +215,7 @@ md5_block_asm_host_order: movl %eax, %edi roll $9, %edx addl %eax, %edx - # R1 18 + leal 643717713(%ecx,%ebp,1),%ecx xorl %edx, %edi andl %ebx, %edi @@ -225,7 +225,7 @@ md5_block_asm_host_order: movl %edx, %edi roll $14, %ecx addl %edx, %ecx - # R1 19 + leal 3921069994(%ebx,%ebp,1),%ebx xorl %ecx, %edi andl %eax, %edi @@ -235,7 +235,7 @@ md5_block_asm_host_order: movl %ecx, %edi roll $20, %ebx addl %ecx, %ebx - # R1 20 + leal 3593408605(%eax,%ebp,1),%eax xorl %ebx, %edi andl %edx, %edi @@ -245,7 +245,7 @@ md5_block_asm_host_order: movl %ebx, %edi roll $5, %eax addl %ebx, %eax - # R1 21 + leal 38016083(%edx,%ebp,1),%edx xorl %eax, %edi andl %ecx, %edi @@ -255,7 +255,7 @@ md5_block_asm_host_order: movl %eax, %edi roll $9, %edx addl %eax, %edx - # R1 22 + leal 3634488961(%ecx,%ebp,1),%ecx xorl %edx, %edi andl %ebx, %edi @@ -265,7 +265,7 @@ md5_block_asm_host_order: movl %edx, %edi roll $14, %ecx addl %edx, %ecx - # R1 23 + leal 3889429448(%ebx,%ebp,1),%ebx xorl %ecx, %edi andl %eax, %edi @@ -275,7 +275,7 @@ md5_block_asm_host_order: movl %ecx, %edi roll $20, %ebx addl %ecx, %ebx - # R1 24 + leal 568446438(%eax,%ebp,1),%eax xorl %ebx, %edi andl %edx, %edi @@ -285,7 +285,7 @@ md5_block_asm_host_order: movl %ebx, %edi roll $5, %eax addl %ebx, %eax - # R1 25 + leal 3275163606(%edx,%ebp,1),%edx xorl %eax, %edi andl %ecx, %edi @@ -295,7 +295,7 @@ md5_block_asm_host_order: movl %eax, %edi roll $9, %edx addl %eax, %edx - # R1 26 + leal 4107603335(%ecx,%ebp,1),%ecx xorl %edx, %edi andl %ebx, %edi @@ -305,7 +305,7 @@ md5_block_asm_host_order: movl %edx, %edi roll $14, %ecx addl %edx, %ecx - # R1 27 + leal 1163531501(%ebx,%ebp,1),%ebx xorl %ecx, %edi andl %eax, %edi @@ -315,7 +315,7 @@ md5_block_asm_host_order: movl %ecx, %edi roll $20, %ebx addl %ecx, %ebx - # R1 28 + leal 2850285829(%eax,%ebp,1),%eax xorl %ebx, %edi andl %edx, %edi @@ -325,7 +325,7 @@ md5_block_asm_host_order: movl %ebx, %edi roll $5, %eax addl %ebx, %eax - # R1 29 + leal 4243563512(%edx,%ebp,1),%edx xorl %eax, %edi andl %ecx, %edi @@ -335,7 +335,7 @@ md5_block_asm_host_order: movl %eax, %edi roll $9, %edx addl %eax, %edx - # R1 30 + leal 1735328473(%ecx,%ebp,1),%ecx xorl %edx, %edi andl %ebx, %edi @@ -345,7 +345,7 @@ md5_block_asm_host_order: movl %edx, %edi roll $14, %ecx addl %edx, %ecx - # R1 31 + leal 2368359562(%ebx,%ebp,1),%ebx xorl %ecx, %edi andl %eax, %edi @@ -356,8 +356,8 @@ md5_block_asm_host_order: roll $20, %ebx addl %ecx, %ebx - # R2 section - # R2 32 + + xorl %edx, %edi xorl %ebx, %edi leal 4294588738(%eax,%ebp,1),%eax @@ 
-365,7 +365,7 @@ md5_block_asm_host_order: roll $4, %eax movl 32(%esi), %ebp movl %ebx, %edi - # R2 33 + leal 2272392833(%edx,%ebp,1),%edx addl %ebx, %eax xorl %ecx, %edi @@ -375,7 +375,7 @@ md5_block_asm_host_order: movl %eax, %edi roll $11, %edx addl %eax, %edx - # R2 34 + xorl %ebx, %edi xorl %edx, %edi leal 1839030562(%ecx,%ebp,1),%ecx @@ -383,7 +383,7 @@ md5_block_asm_host_order: roll $16, %ecx movl 56(%esi), %ebp movl %edx, %edi - # R2 35 + leal 4259657740(%ebx,%ebp,1),%ebx addl %edx, %ecx xorl %eax, %edi @@ -393,7 +393,7 @@ md5_block_asm_host_order: movl %ecx, %edi roll $23, %ebx addl %ecx, %ebx - # R2 36 + xorl %edx, %edi xorl %ebx, %edi leal 2763975236(%eax,%ebp,1),%eax @@ -401,7 +401,7 @@ md5_block_asm_host_order: roll $4, %eax movl 16(%esi), %ebp movl %ebx, %edi - # R2 37 + leal 1272893353(%edx,%ebp,1),%edx addl %ebx, %eax xorl %ecx, %edi @@ -411,7 +411,7 @@ md5_block_asm_host_order: movl %eax, %edi roll $11, %edx addl %eax, %edx - # R2 38 + xorl %ebx, %edi xorl %edx, %edi leal 4139469664(%ecx,%ebp,1),%ecx @@ -419,7 +419,7 @@ md5_block_asm_host_order: roll $16, %ecx movl 40(%esi), %ebp movl %edx, %edi - # R2 39 + leal 3200236656(%ebx,%ebp,1),%ebx addl %edx, %ecx xorl %eax, %edi @@ -429,7 +429,7 @@ md5_block_asm_host_order: movl %ecx, %edi roll $23, %ebx addl %ecx, %ebx - # R2 40 + xorl %edx, %edi xorl %ebx, %edi leal 681279174(%eax,%ebp,1),%eax @@ -437,7 +437,7 @@ md5_block_asm_host_order: roll $4, %eax movl (%esi), %ebp movl %ebx, %edi - # R2 41 + leal 3936430074(%edx,%ebp,1),%edx addl %ebx, %eax xorl %ecx, %edi @@ -447,7 +447,7 @@ md5_block_asm_host_order: movl %eax, %edi roll $11, %edx addl %eax, %edx - # R2 42 + xorl %ebx, %edi xorl %edx, %edi leal 3572445317(%ecx,%ebp,1),%ecx @@ -455,7 +455,7 @@ md5_block_asm_host_order: roll $16, %ecx movl 24(%esi), %ebp movl %edx, %edi - # R2 43 + leal 76029189(%ebx,%ebp,1),%ebx addl %edx, %ecx xorl %eax, %edi @@ -465,7 +465,7 @@ md5_block_asm_host_order: movl %ecx, %edi roll $23, %ebx addl %ecx, %ebx - # R2 44 + xorl %edx, %edi xorl %ebx, %edi leal 3654602809(%eax,%ebp,1),%eax @@ -473,7 +473,7 @@ md5_block_asm_host_order: roll $4, %eax movl 48(%esi), %ebp movl %ebx, %edi - # R2 45 + leal 3873151461(%edx,%ebp,1),%edx addl %ebx, %eax xorl %ecx, %edi @@ -483,7 +483,7 @@ md5_block_asm_host_order: movl %eax, %edi roll $11, %edx addl %eax, %edx - # R2 46 + xorl %ebx, %edi xorl %edx, %edi leal 530742520(%ecx,%ebp,1),%ecx @@ -491,7 +491,7 @@ md5_block_asm_host_order: roll $16, %ecx movl 8(%esi), %ebp movl %edx, %edi - # R2 47 + leal 3299628645(%ebx,%ebp,1),%ebx addl %edx, %ecx xorl %eax, %edi @@ -502,8 +502,8 @@ md5_block_asm_host_order: roll $23, %ebx addl %ecx, %ebx - # R3 section - # R3 48 + + xorl %edx, %edi orl %ebx, %edi leal 4096336452(%eax,%ebp,1),%eax @@ -514,7 +514,7 @@ md5_block_asm_host_order: roll $6, %eax xorl %ecx, %edi addl %ebx, %eax - # R3 49 + orl %eax, %edi leal 1126891415(%edx,%ebp,1),%edx xorl %ebx, %edi @@ -524,7 +524,7 @@ md5_block_asm_host_order: roll $10, %edx xorl %ebx, %edi addl %eax, %edx - # R3 50 + orl %edx, %edi leal 2878612391(%ecx,%ebp,1),%ecx xorl %eax, %edi @@ -534,7 +534,7 @@ md5_block_asm_host_order: roll $15, %ecx xorl %eax, %edi addl %edx, %ecx - # R3 51 + orl %ecx, %edi leal 4237533241(%ebx,%ebp,1),%ebx xorl %edx, %edi @@ -544,7 +544,7 @@ md5_block_asm_host_order: roll $21, %ebx xorl %edx, %edi addl %ecx, %ebx - # R3 52 + orl %ebx, %edi leal 1700485571(%eax,%ebp,1),%eax xorl %ecx, %edi @@ -554,7 +554,7 @@ md5_block_asm_host_order: roll $6, %eax xorl %ecx, %edi addl %ebx, %eax - # R3 53 + orl %eax, %edi 
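For readers decoding the md5-586.s hunks: the stripped R0-R3 comments marked the four standard MD5 rounds, and the large leal immediates are the usual sine-derived additive constants. In the standard notation (RFC 1321):

	F(x,y,z) = (x & y) | (~x & z)		round R0
	G(x,y,z) = (x & z) | (y & ~z)		round R1
	H(x,y,z) = x ^ y ^ z			round R2
	I(x,y,z) = y ^ (x | ~z)			round R3
	T[i] = floor(2^32 * |sin(i)|),  i = 1..64

For example, the first R0 constant above, 3614090360, is T[1] = 0xd76aa478, and the R0 steps rotate by the familiar 7, 12, 17, 22 schedule visible in the roll instructions.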
leal 2399980690(%edx,%ebp,1),%edx xorl %ebx, %edi @@ -564,7 +564,7 @@ md5_block_asm_host_order: roll $10, %edx xorl %ebx, %edi addl %eax, %edx - # R3 54 + orl %edx, %edi leal 4293915773(%ecx,%ebp,1),%ecx xorl %eax, %edi @@ -574,7 +574,7 @@ md5_block_asm_host_order: roll $15, %ecx xorl %eax, %edi addl %edx, %ecx - # R3 55 + orl %ecx, %edi leal 2240044497(%ebx,%ebp,1),%ebx xorl %edx, %edi @@ -584,7 +584,7 @@ md5_block_asm_host_order: roll $21, %ebx xorl %edx, %edi addl %ecx, %ebx - # R3 56 + orl %ebx, %edi leal 1873313359(%eax,%ebp,1),%eax xorl %ecx, %edi @@ -594,7 +594,7 @@ md5_block_asm_host_order: roll $6, %eax xorl %ecx, %edi addl %ebx, %eax - # R3 57 + orl %eax, %edi leal 4264355552(%edx,%ebp,1),%edx xorl %ebx, %edi @@ -604,7 +604,7 @@ md5_block_asm_host_order: roll $10, %edx xorl %ebx, %edi addl %eax, %edx - # R3 58 + orl %edx, %edi leal 2734768916(%ecx,%ebp,1),%ecx xorl %eax, %edi @@ -614,7 +614,7 @@ md5_block_asm_host_order: roll $15, %ecx xorl %eax, %edi addl %edx, %ecx - # R3 59 + orl %ecx, %edi leal 1309151649(%ebx,%ebp,1),%ebx xorl %edx, %edi @@ -624,7 +624,7 @@ md5_block_asm_host_order: roll $21, %ebx xorl %edx, %edi addl %ecx, %ebx - # R3 60 + orl %ebx, %edi leal 4149444226(%eax,%ebp,1),%eax xorl %ecx, %edi @@ -634,7 +634,7 @@ md5_block_asm_host_order: roll $6, %eax xorl %ecx, %edi addl %ebx, %eax - # R3 61 + orl %eax, %edi leal 3174756917(%edx,%ebp,1),%edx xorl %ebx, %edi @@ -644,7 +644,7 @@ md5_block_asm_host_order: roll $10, %edx xorl %ebx, %edi addl %eax, %edx - # R3 62 + orl %edx, %edi leal 718787259(%ecx,%ebp,1),%ecx xorl %eax, %edi @@ -654,7 +654,7 @@ md5_block_asm_host_order: roll $15, %ecx xorl %eax, %edi addl %edx, %ecx - # R3 63 + orl %ecx, %edi leal 3951481745(%ebx,%ebp,1),%ebx xorl %edx, %edi @@ -677,7 +677,7 @@ md5_block_asm_host_order: movl %ecx, 8(%ebp) movl %edx, 12(%ebp) cmpl %esi, %edi - jge .L000start + jae .L000start popl %eax popl %ebx popl %ebp diff --git a/secure/lib/libcrypto/i386/rc4-586.s b/secure/lib/libcrypto/i386/rc4-586.s index 996718c..ad27498 100644 --- a/secure/lib/libcrypto/i386/rc4-586.s +++ b/secure/lib/libcrypto/i386/rc4-586.s @@ -1,9 +1,9 @@ # $FreeBSD$ - # Dont even think of reading this code - # It was automatically generated by rc4-586.pl - # Which is a perl program used to generate the x86 assember for - # any of elf, a.out, BSDI, Win32, gaswin (for GNU as on Win32) or Solaris - # eric <eay@cryptsoft.com> + + + + + .file "rc4-586.s" .version "01.01" @@ -35,7 +35,7 @@ RC4: jl .L000end .L001start: addl $8, %esi - # Round 0 + addl %eax, %edx andl $255, %edx incl %ecx @@ -49,7 +49,7 @@ RC4: movl (%ebp,%ebx,4), %ebx movl (%ebp,%ecx,4), %eax movb %bl, (%esp) - # Round 1 + addl %eax, %edx andl $255, %edx incl %ecx @@ -63,7 +63,7 @@ RC4: movl (%ebp,%ebx,4), %ebx movl (%ebp,%ecx,4), %eax movb %bl, 1(%esp) - # Round 2 + addl %eax, %edx andl $255, %edx incl %ecx @@ -77,7 +77,7 @@ RC4: movl (%ebp,%ebx,4), %ebx movl (%ebp,%ecx,4), %eax movb %bl, 2(%esp) - # Round 3 + addl %eax, %edx andl $255, %edx incl %ecx @@ -91,7 +91,7 @@ RC4: movl (%ebp,%ebx,4), %ebx movl (%ebp,%ecx,4), %eax movb %bl, 3(%esp) - # Round 4 + addl %eax, %edx andl $255, %edx incl %ecx @@ -105,7 +105,7 @@ RC4: movl (%ebp,%ebx,4), %ebx movl (%ebp,%ecx,4), %eax movb %bl, 4(%esp) - # Round 5 + addl %eax, %edx andl $255, %edx incl %ecx @@ -119,7 +119,7 @@ RC4: movl (%ebp,%ebx,4), %ebx movl (%ebp,%ecx,4), %eax movb %bl, 5(%esp) - # Round 6 + addl %eax, %edx andl $255, %edx incl %ecx @@ -133,7 +133,7 @@ RC4: movl (%ebp,%ebx,4), %ebx movl (%ebp,%ecx,4), %eax movb %bl, 6(%esp) - # Round 7 
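
Annotation: the jge -> jae change at the bottom of md5_block_asm_host_order is the one functional fix in this file. The compare is between the advancing block pointer and the end-of-data pointer, i.e. between addresses, so the loop-bound test must be unsigned: with the signed jge, a buffer mapped above the 2 GB line compares as negative and the loop terminates incorrectly. A minimal C sketch of the same loop shape, with hypothetical names:

    /* Sketch (hypothetical names): pointer loop bounds must compare
     * unsigned, which is what jae gives and jge does not. */
    #include <stddef.h>
    #include <stdint.h>

    void md5_blocks(const unsigned char *data, size_t nblocks)
    {
        const unsigned char *cur = data;
        const unsigned char *end = data + nblocks * 64;  /* 64-byte MD5 blocks */

        while ((uintptr_t)cur < (uintptr_t)end) {        /* unsigned, cf. jae */
            /* ... transform one block into the state ... */
            cur += 64;
        }
    }
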
+ addl %eax, %edx andl $255, %edx incl %ecx @@ -147,7 +147,7 @@ RC4: movl (%ebp,%ebx,4), %ebx addl $8, %edi movb %bl, 7(%esp) - # apply the cipher text + movl (%esp), %eax movl -8(%esi), %ebx xorl %ebx, %eax @@ -161,7 +161,7 @@ RC4: cmpl %ebx, %esi jle .L001start .L000end: - # Round 0 + addl $8, %ebx incl %esi cmpl %esi, %ebx @@ -182,7 +182,7 @@ RC4: movb -1(%esi), %bh xorb %bh, %bl movb %bl, (%edi) - # Round 1 + movl 8(%esp), %ebx cmpl %esi, %ebx jle .L002finished @@ -202,7 +202,7 @@ RC4: movb -1(%esi), %bh xorb %bh, %bl movb %bl, 1(%edi) - # Round 2 + movl 8(%esp), %ebx cmpl %esi, %ebx jle .L002finished @@ -222,7 +222,7 @@ RC4: movb -1(%esi), %bh xorb %bh, %bl movb %bl, 2(%edi) - # Round 3 + movl 8(%esp), %ebx cmpl %esi, %ebx jle .L002finished @@ -242,7 +242,7 @@ RC4: movb -1(%esi), %bh xorb %bh, %bl movb %bl, 3(%edi) - # Round 4 + movl 8(%esp), %ebx cmpl %esi, %ebx jle .L002finished @@ -262,7 +262,7 @@ RC4: movb -1(%esi), %bh xorb %bh, %bl movb %bl, 4(%edi) - # Round 5 + movl 8(%esp), %ebx cmpl %esi, %ebx jle .L002finished @@ -282,7 +282,7 @@ RC4: movb -1(%esi), %bh xorb %bh, %bl movb %bl, 5(%edi) - # Round 6 + movl 8(%esp), %ebx cmpl %esi, %ebx jle .L002finished @@ -311,6 +311,6 @@ RC4: popl %ebx popl %ebp ret -.RC4_end: - .size RC4,.RC4_end-RC4 +.L_RC4_end: + .size RC4,.L_RC4_end-RC4 .ident "RC4" diff --git a/secure/lib/libcrypto/i386/rc5-586.s b/secure/lib/libcrypto/i386/rc5-586.s index 1a4c9d3..a33eff9 100644 --- a/secure/lib/libcrypto/i386/rc5-586.s +++ b/secure/lib/libcrypto/i386/rc5-586.s @@ -1,9 +1,9 @@ # $FreeBSD$ - # Dont even think of reading this code - # It was automatically generated by rc5-586.pl - # Which is a perl program used to generate the x86 assember for - # any of elf, a.out, BSDI, Win32, gaswin (for GNU as on Win32) or Solaris - # eric <eay@cryptsoft.com> + + + + + .file "rc5-586.s" .version "01.01" @@ -19,7 +19,7 @@ RC5_32_encrypt: pushl %edi movl 16(%esp), %edx movl 20(%esp), %ebp - # Load the 2 words + movl (%edx), %edi movl 4(%edx), %esi pushl %ebx @@ -212,7 +212,7 @@ RC5_32_decrypt: pushl %edi movl 16(%esp), %edx movl 20(%esp), %ebp - # Load the 2 words + movl (%edx), %edi movl 4(%edx), %esi pushl %ebx @@ -407,7 +407,7 @@ RC5_32_cbc_encrypt: pushl %esi pushl %edi movl 28(%esp), %ebp - # getting iv ptr from parameter 4 + movl 36(%esp), %ebx movl (%ebx), %esi movl 4(%ebx), %edi @@ -418,9 +418,9 @@ RC5_32_cbc_encrypt: movl %esp, %ebx movl 36(%esp), %esi movl 40(%esp), %edi - # getting encrypt flag from parameter 5 + movl 56(%esp), %ecx - # get and push parameter 3 + movl 48(%esp), %eax pushl %eax pushl %ebx @@ -450,28 +450,33 @@ RC5_32_cbc_encrypt: movl 52(%esp), %ebp andl $7, %ebp jz .L007finish + call .L008PIC_point +.L008PIC_point: + popl %edx + leal .L009cbc_enc_jmp_table-.L008PIC_point(%edx),%ecx + movl (%ecx,%ebp,4), %ebp + addl %edx, %ebp xorl %ecx, %ecx xorl %edx, %edx - movl .L008cbc_enc_jmp_table(,%ebp,4),%ebp jmp *%ebp -.L009ej7: +.L010ej7: movb 6(%esi), %dh sall $8, %edx -.L010ej6: +.L011ej6: movb 5(%esi), %dh -.L011ej5: +.L012ej5: movb 4(%esi), %dl -.L012ej4: +.L013ej4: movl (%esi), %ecx - jmp .L013ejend -.L014ej3: + jmp .L014ejend +.L015ej3: movb 2(%esi), %ch sall $8, %ecx -.L015ej2: +.L016ej2: movb 1(%esi), %ch -.L016ej1: +.L017ej1: movb (%esi), %cl -.L013ejend: +.L014ejend: xorl %ecx, %eax xorl %edx, %ebx movl %eax, 8(%esp) @@ -487,8 +492,8 @@ RC5_32_cbc_encrypt: andl $4294967288, %ebp movl 16(%esp), %eax movl 20(%esp), %ebx - jz .L017decrypt_finish -.L018decrypt_loop: + jz .L018decrypt_finish +.L019decrypt_loop: movl (%esi), %eax movl 4(%esi), 
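
Annotation: two things happen in rc4-586.s besides comment stripping. First, the end-of-function marker is renamed from .RC4_end to .L_RC4_end: under ELF, GNU as treats identifiers starting with .L as assembler-local, so the marker still feeds the .size RC4,.L_RC4_end-RC4 computation but no longer appears as a spurious symbol in the object file. Second, each deleted "Round N" comment headed one unrolled step of the RC4 pseudo-random generation algorithm; the asm keeps the S-box as 256 32-bit words. A hedged C sketch of one such step (illustrative names):

    /* One RC4 PRGA step, as each unrolled "Round N" block computes it. */
    #include <stdint.h>

    static uint8_t rc4_step(uint32_t S[256], uint8_t *x, uint8_t *y)
    {
        uint32_t tx, ty;

        *x = (uint8_t)(*x + 1);                 /* incl %ecx; andl $255 */
        tx = S[*x];
        *y = (uint8_t)(*y + tx);                /* addl %eax, %edx; andl $255 */
        ty = S[*y];
        S[*x] = ty;                             /* swap S[x] <-> S[y] */
        S[*y] = tx;
        return (uint8_t)S[(uint8_t)(tx + ty)];  /* keystream byte */
    }
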
%ebx movl %eax, 8(%esp) @@ -509,8 +514,8 @@ RC5_32_cbc_encrypt: addl $8, %esi addl $8, %edi subl $8, %ebp - jnz .L018decrypt_loop -.L017decrypt_finish: + jnz .L019decrypt_loop +.L018decrypt_finish: movl 52(%esp), %ebp andl $7, %ebp jz .L007finish @@ -527,26 +532,26 @@ RC5_32_cbc_encrypt: xorl %ebx, %edx movl (%esi), %eax movl 4(%esi), %ebx -.L019dj7: +.L020dj7: rorl $16, %edx movb %dl, 6(%edi) shrl $16, %edx -.L020dj6: +.L021dj6: movb %dh, 5(%edi) -.L021dj5: +.L022dj5: movb %dl, 4(%edi) -.L022dj4: +.L023dj4: movl %ecx, (%edi) - jmp .L023djend -.L024dj3: + jmp .L024djend +.L025dj3: rorl $16, %ecx movb %cl, 2(%edi) sall $16, %ecx -.L025dj2: +.L026dj2: movb %ch, 1(%esi) -.L026dj1: +.L027dj1: movb %cl, (%esi) -.L023djend: +.L024djend: jmp .L007finish .align 16 .L007finish: @@ -560,25 +565,15 @@ RC5_32_cbc_encrypt: popl %ebp ret .align 16 -.L008cbc_enc_jmp_table: - .long 0 - .long .L016ej1 - .long .L015ej2 - .long .L014ej3 - .long .L012ej4 - .long .L011ej5 - .long .L010ej6 - .long .L009ej7 -.align 16 -.L027cbc_dec_jmp_table: +.L009cbc_enc_jmp_table: .long 0 - .long .L026dj1 - .long .L025dj2 - .long .L024dj3 - .long .L022dj4 - .long .L021dj5 - .long .L020dj6 - .long .L019dj7 + .long .L017ej1-.L008PIC_point + .long .L016ej2-.L008PIC_point + .long .L015ej3-.L008PIC_point + .long .L013ej4-.L008PIC_point + .long .L012ej5-.L008PIC_point + .long .L011ej6-.L008PIC_point + .long .L010ej7-.L008PIC_point .L_RC5_32_cbc_encrypt_end: .size RC5_32_cbc_encrypt,.L_RC5_32_cbc_encrypt_end-RC5_32_cbc_encrypt .ident "desasm.pl" diff --git a/secure/lib/libcrypto/i386/rmd-586.s b/secure/lib/libcrypto/i386/rmd-586.s index 96f4928..fb541db 100644 --- a/secure/lib/libcrypto/i386/rmd-586.s +++ b/secure/lib/libcrypto/i386/rmd-586.s @@ -1,11 +1,11 @@ # $FreeBSD$ - # Dont even think of reading this code - # It was automatically generated by rmd-586.pl - # Which is a perl program used to generate the x86 assember for - # any of elf, a.out, BSDI, Win32, gaswin (for GNU as on Win32) or Solaris - # eric <eay@cryptsoft.com> - .file "rmd-586.s" + + + + + + .file "/usr/src/secure/lib/libcrypto/../../../crypto/openssl/crypto/ripemd/asm/rmd-586.s" .version "01.01" gcc2_compiled.: .text @@ -60,7 +60,7 @@ ripemd160_block_asm_host_order: movl %edi, %eax movl 12(%edx), %ebx movl 16(%edx), %ebp - # 0 + xorl %ebx, %eax movl (%esp), %edx xorl %esi, %eax @@ -70,7 +70,7 @@ ripemd160_block_asm_host_order: movl %esi, %eax roll $11, %ecx addl %ebp, %ecx - # 1 + xorl %edi, %eax movl 4(%esp), %edx xorl %ecx, %eax @@ -81,7 +81,7 @@ ripemd160_block_asm_host_order: xorl %esi, %eax roll $14, %ebp addl %ebx, %ebp - # 2 + movl 8(%esp), %edx xorl %ebp, %eax addl %edx, %ebx @@ -90,7 +90,7 @@ ripemd160_block_asm_host_order: movl %ebp, %eax roll $15, %ebx addl %edi, %ebx - # 3 + xorl %ecx, %eax movl 12(%esp), %edx xorl %ebx, %eax @@ -101,7 +101,7 @@ ripemd160_block_asm_host_order: xorl %ebp, %eax roll $12, %edi addl %esi, %edi - # 4 + movl 16(%esp), %edx xorl %edi, %eax addl %edx, %esi @@ -110,7 +110,7 @@ ripemd160_block_asm_host_order: movl %edi, %eax roll $5, %esi addl %ecx, %esi - # 5 + xorl %ebx, %eax movl 20(%esp), %edx xorl %esi, %eax @@ -121,7 +121,7 @@ ripemd160_block_asm_host_order: xorl %edi, %eax roll $8, %ecx addl %ebp, %ecx - # 6 + movl 24(%esp), %edx xorl %ecx, %eax addl %edx, %ebp @@ -130,7 +130,7 @@ ripemd160_block_asm_host_order: movl %ecx, %eax roll $7, %ebp addl %ebx, %ebp - # 7 + xorl %esi, %eax movl 28(%esp), %edx xorl %ebp, %eax @@ -141,7 +141,7 @@ ripemd160_block_asm_host_order: xorl %ecx, %eax roll $9, %ebx addl %edi, %ebx - # 8 + 
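
Annotation: the substantive change in rc5-586.s is the jump table that dispatches on the 1-7 trailing bytes of a CBC call. The old code loaded absolute addresses from .L008cbc_enc_jmp_table (".long .L016ej1" etc.), which requires load-time relocations against the text segment; the new code fetches its own runtime address with the classic call/pop idiom (call .L008PIC_point; popl %edx), stores table entries as differences (".long .L017ej1-.L008PIC_point"), and adds the two before the indirect jump, making the dispatch position-independent. The never-referenced decrypt table .L027cbc_dec_jmp_table is dropped outright. Worth noting but unchanged by this commit: the renumbered .L026dj2/.L027dj1 cases still store through %esi where the neighbouring cases use %edi, a quirk inherited from the generator. GCC documents the same offset-table pattern for computed gotos; a hedged GNU C sketch (hypothetical labels, uses the &&label extension):

    /* Offset-based (PIC-friendly) jump table: store label offsets from
     * an anchor, add the anchor's runtime address, jump. */
    #include <stdio.h>

    int main(void)
    {
        static const int table[] = {       /* cf. .long .L017ej1-.L008PIC_point */
            &&ej1 - &&ej1,
            &&ej2 - &&ej1,
            &&ej3 - &&ej1,
        };
        unsigned tail = 2;                 /* leftover byte count, 1..3 here */

        goto *(&&ej1 + table[tail - 1]);   /* cf. addl %edx, %ebp; jmp *%ebp */
    ej1: puts("handle 1 trailing byte");  return 0;
    ej2: puts("handle 2 trailing bytes"); return 0;
    ej3: puts("handle 3 trailing bytes"); return 0;
    }
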
movl 32(%esp), %edx xorl %ebx, %eax addl %edx, %edi @@ -150,7 +150,7 @@ ripemd160_block_asm_host_order: movl %ebx, %eax roll $11, %edi addl %esi, %edi - # 9 + xorl %ebp, %eax movl 36(%esp), %edx xorl %edi, %eax @@ -161,7 +161,7 @@ ripemd160_block_asm_host_order: xorl %ebx, %eax roll $13, %esi addl %ecx, %esi - # 10 + movl 40(%esp), %edx xorl %esi, %eax addl %edx, %ecx @@ -170,7 +170,7 @@ ripemd160_block_asm_host_order: movl %esi, %eax roll $14, %ecx addl %ebp, %ecx - # 11 + xorl %edi, %eax movl 44(%esp), %edx xorl %ecx, %eax @@ -181,7 +181,7 @@ ripemd160_block_asm_host_order: xorl %esi, %eax roll $15, %ebp addl %ebx, %ebp - # 12 + movl 48(%esp), %edx xorl %ebp, %eax addl %edx, %ebx @@ -190,7 +190,7 @@ ripemd160_block_asm_host_order: movl %ebp, %eax roll $6, %ebx addl %edi, %ebx - # 13 + xorl %ecx, %eax movl 52(%esp), %edx xorl %ebx, %eax @@ -201,7 +201,7 @@ ripemd160_block_asm_host_order: xorl %ebp, %eax roll $7, %edi addl %esi, %edi - # 14 + movl 56(%esp), %edx xorl %edi, %eax addl %edx, %esi @@ -210,7 +210,7 @@ ripemd160_block_asm_host_order: movl %edi, %eax roll $9, %esi addl %ecx, %esi - # 15 + xorl %ebx, %eax movl 60(%esp), %edx xorl %esi, %eax @@ -221,7 +221,7 @@ ripemd160_block_asm_host_order: movl 28(%esp), %edx roll $8, %ecx addl %ebp, %ecx - # 16 + addl %edx, %ebp movl %esi, %edx subl %ecx, %eax @@ -234,7 +234,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $7, %ebp addl %ebx, %ebp - # 17 + addl %eax, %ebx movl %ecx, %eax subl %ebp, %edx @@ -247,7 +247,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $6, %ebx addl %edi, %ebx - # 18 + addl %edx, %edi movl %ebp, %edx subl %ebx, %eax @@ -260,7 +260,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $8, %edi addl %esi, %edi - # 19 + addl %eax, %esi movl %ebx, %eax subl %edi, %edx @@ -273,7 +273,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $13, %esi addl %ecx, %esi - # 20 + addl %edx, %ecx movl %edi, %edx subl %esi, %eax @@ -286,7 +286,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $11, %ecx addl %ebp, %ecx - # 21 + addl %eax, %ebp movl %esi, %eax subl %ecx, %edx @@ -299,7 +299,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $9, %ebp addl %ebx, %ebp - # 22 + addl %edx, %ebx movl %ecx, %edx subl %ebp, %eax @@ -312,7 +312,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $7, %ebx addl %edi, %ebx - # 23 + addl %eax, %edi movl %ebp, %eax subl %ebx, %edx @@ -325,7 +325,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $15, %edi addl %esi, %edi - # 24 + addl %edx, %esi movl %ebx, %edx subl %edi, %eax @@ -338,7 +338,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $7, %esi addl %ecx, %esi - # 25 + addl %eax, %ecx movl %edi, %eax subl %esi, %edx @@ -351,7 +351,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $12, %ecx addl %ebp, %ecx - # 26 + addl %edx, %ebp movl %esi, %edx subl %ecx, %eax @@ -364,7 +364,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $15, %ebp addl %ebx, %ebp - # 27 + addl %eax, %ebx movl %ecx, %eax subl %ebp, %edx @@ -377,7 +377,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $9, %ebx addl %edi, %ebx - # 28 + addl %edx, %edi movl %ebp, %edx subl %ebx, %eax @@ -390,7 +390,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $11, %edi addl %esi, %edi - # 29 + addl %eax, %esi movl %ebx, %eax subl %edi, %edx @@ -403,7 +403,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $7, %esi addl %ecx, %esi - # 30 + addl %edx, %ecx movl %edi, %edx subl %esi, %eax @@ -416,7 +416,7 @@ ripemd160_block_asm_host_order: movl $-1, 
%edx roll $13, %ecx addl %ebp, %ecx - # 31 + addl %eax, %ebp movl %esi, %eax subl %ecx, %edx @@ -429,7 +429,7 @@ ripemd160_block_asm_host_order: subl %ecx, %edx roll $12, %ebp addl %ebx, %ebp - # 32 + movl 12(%esp), %eax orl %ebp, %edx addl %eax, %ebx @@ -440,7 +440,7 @@ ripemd160_block_asm_host_order: subl %ebp, %eax roll $11, %ebx addl %edi, %ebx - # 33 + movl 40(%esp), %edx orl %ebx, %eax addl %edx, %edi @@ -451,7 +451,7 @@ ripemd160_block_asm_host_order: subl %ebx, %edx roll $13, %edi addl %esi, %edi - # 34 + movl 56(%esp), %eax orl %edi, %edx addl %eax, %esi @@ -462,7 +462,7 @@ ripemd160_block_asm_host_order: subl %edi, %eax roll $6, %esi addl %ecx, %esi - # 35 + movl 16(%esp), %edx orl %esi, %eax addl %edx, %ecx @@ -473,7 +473,7 @@ ripemd160_block_asm_host_order: subl %esi, %edx roll $7, %ecx addl %ebp, %ecx - # 36 + movl 36(%esp), %eax orl %ecx, %edx addl %eax, %ebp @@ -484,7 +484,7 @@ ripemd160_block_asm_host_order: subl %ecx, %eax roll $14, %ebp addl %ebx, %ebp - # 37 + movl 60(%esp), %edx orl %ebp, %eax addl %edx, %ebx @@ -495,7 +495,7 @@ ripemd160_block_asm_host_order: subl %ebp, %edx roll $9, %ebx addl %edi, %ebx - # 38 + movl 32(%esp), %eax orl %ebx, %edx addl %eax, %edi @@ -506,7 +506,7 @@ ripemd160_block_asm_host_order: subl %ebx, %eax roll $13, %edi addl %esi, %edi - # 39 + movl 4(%esp), %edx orl %edi, %eax addl %edx, %esi @@ -517,7 +517,7 @@ ripemd160_block_asm_host_order: subl %edi, %edx roll $15, %esi addl %ecx, %esi - # 40 + movl 8(%esp), %eax orl %esi, %edx addl %eax, %ecx @@ -528,7 +528,7 @@ ripemd160_block_asm_host_order: subl %esi, %eax roll $14, %ecx addl %ebp, %ecx - # 41 + movl 28(%esp), %edx orl %ecx, %eax addl %edx, %ebp @@ -539,7 +539,7 @@ ripemd160_block_asm_host_order: subl %ecx, %edx roll $8, %ebp addl %ebx, %ebp - # 42 + movl (%esp), %eax orl %ebp, %edx addl %eax, %ebx @@ -550,7 +550,7 @@ ripemd160_block_asm_host_order: subl %ebp, %eax roll $13, %ebx addl %edi, %ebx - # 43 + movl 24(%esp), %edx orl %ebx, %eax addl %edx, %edi @@ -561,7 +561,7 @@ ripemd160_block_asm_host_order: subl %ebx, %edx roll $6, %edi addl %esi, %edi - # 44 + movl 52(%esp), %eax orl %edi, %edx addl %eax, %esi @@ -572,7 +572,7 @@ ripemd160_block_asm_host_order: subl %edi, %eax roll $5, %esi addl %ecx, %esi - # 45 + movl 44(%esp), %edx orl %esi, %eax addl %edx, %ecx @@ -583,7 +583,7 @@ ripemd160_block_asm_host_order: subl %esi, %edx roll $12, %ecx addl %ebp, %ecx - # 46 + movl 20(%esp), %eax orl %ecx, %edx addl %eax, %ebp @@ -594,7 +594,7 @@ ripemd160_block_asm_host_order: subl %ecx, %eax roll $7, %ebp addl %ebx, %ebp - # 47 + movl 48(%esp), %edx orl %ebp, %eax addl %edx, %ebx @@ -605,7 +605,7 @@ ripemd160_block_asm_host_order: movl %ecx, %eax roll $5, %ebx addl %edi, %ebx - # 48 + subl %ecx, %edx andl %ebx, %eax andl %ebp, %edx @@ -618,7 +618,7 @@ ripemd160_block_asm_host_order: movl %ebp, %eax roll $11, %edi addl %esi, %edi - # 49 + subl %ebp, %edx andl %edi, %eax andl %ebx, %edx @@ -631,7 +631,7 @@ ripemd160_block_asm_host_order: movl %ebx, %eax roll $12, %esi addl %ecx, %esi - # 50 + subl %ebx, %edx andl %esi, %eax andl %edi, %edx @@ -644,7 +644,7 @@ ripemd160_block_asm_host_order: movl %edi, %eax roll $14, %ecx addl %ebp, %ecx - # 51 + subl %edi, %edx andl %ecx, %eax andl %esi, %edx @@ -657,7 +657,7 @@ ripemd160_block_asm_host_order: movl %esi, %eax roll $15, %ebp addl %ebx, %ebp - # 52 + subl %esi, %edx andl %ebp, %eax andl %ecx, %edx @@ -670,7 +670,7 @@ ripemd160_block_asm_host_order: movl %ecx, %eax roll $14, %ebx addl %edi, %ebx - # 53 + subl %ecx, %edx andl %ebx, %eax andl 
%ebp, %edx @@ -683,7 +683,7 @@ ripemd160_block_asm_host_order: movl %ebp, %eax roll $15, %edi addl %esi, %edi - # 54 + subl %ebp, %edx andl %edi, %eax andl %ebx, %edx @@ -696,7 +696,7 @@ ripemd160_block_asm_host_order: movl %ebx, %eax roll $9, %esi addl %ecx, %esi - # 55 + subl %ebx, %edx andl %esi, %eax andl %edi, %edx @@ -709,7 +709,7 @@ ripemd160_block_asm_host_order: movl %edi, %eax roll $8, %ecx addl %ebp, %ecx - # 56 + subl %edi, %edx andl %ecx, %eax andl %esi, %edx @@ -722,7 +722,7 @@ ripemd160_block_asm_host_order: movl %esi, %eax roll $9, %ebp addl %ebx, %ebp - # 57 + subl %esi, %edx andl %ebp, %eax andl %ecx, %edx @@ -735,7 +735,7 @@ ripemd160_block_asm_host_order: movl %ecx, %eax roll $14, %ebx addl %edi, %ebx - # 58 + subl %ecx, %edx andl %ebx, %eax andl %ebp, %edx @@ -748,7 +748,7 @@ ripemd160_block_asm_host_order: movl %ebp, %eax roll $5, %edi addl %esi, %edi - # 59 + subl %ebp, %edx andl %edi, %eax andl %ebx, %edx @@ -761,7 +761,7 @@ ripemd160_block_asm_host_order: movl %ebx, %eax roll $6, %esi addl %ecx, %esi - # 60 + subl %ebx, %edx andl %esi, %eax andl %edi, %edx @@ -774,7 +774,7 @@ ripemd160_block_asm_host_order: movl %edi, %eax roll $8, %ecx addl %ebp, %ecx - # 61 + subl %edi, %edx andl %ecx, %eax andl %esi, %edx @@ -787,7 +787,7 @@ ripemd160_block_asm_host_order: movl %esi, %eax roll $6, %ebp addl %ebx, %ebp - # 62 + subl %esi, %edx andl %ebp, %eax andl %ecx, %edx @@ -800,7 +800,7 @@ ripemd160_block_asm_host_order: movl %ecx, %eax roll $5, %ebx addl %edi, %ebx - # 63 + subl %ecx, %edx andl %ebx, %eax andl %ebp, %edx @@ -813,7 +813,7 @@ ripemd160_block_asm_host_order: subl %ebp, %edx roll $12, %edi addl %esi, %edi - # 64 + movl 16(%esp), %eax orl %ebx, %edx addl %eax, %esi @@ -824,7 +824,7 @@ ripemd160_block_asm_host_order: subl %ebx, %eax roll $9, %esi addl %ecx, %esi - # 65 + movl (%esp), %edx orl %edi, %eax addl %edx, %ecx @@ -835,7 +835,7 @@ ripemd160_block_asm_host_order: subl %edi, %edx roll $15, %ecx addl %ebp, %ecx - # 66 + movl 20(%esp), %eax orl %esi, %edx addl %eax, %ebp @@ -846,7 +846,7 @@ ripemd160_block_asm_host_order: subl %esi, %eax roll $5, %ebp addl %ebx, %ebp - # 67 + movl 36(%esp), %edx orl %ecx, %eax addl %edx, %ebx @@ -857,7 +857,7 @@ ripemd160_block_asm_host_order: subl %ecx, %edx roll $11, %ebx addl %edi, %ebx - # 68 + movl 28(%esp), %eax orl %ebp, %edx addl %eax, %edi @@ -868,7 +868,7 @@ ripemd160_block_asm_host_order: subl %ebp, %eax roll $6, %edi addl %esi, %edi - # 69 + movl 48(%esp), %edx orl %ebx, %eax addl %edx, %esi @@ -879,7 +879,7 @@ ripemd160_block_asm_host_order: subl %ebx, %edx roll $8, %esi addl %ecx, %esi - # 70 + movl 8(%esp), %eax orl %edi, %edx addl %eax, %ecx @@ -890,7 +890,7 @@ ripemd160_block_asm_host_order: subl %edi, %eax roll $13, %ecx addl %ebp, %ecx - # 71 + movl 40(%esp), %edx orl %esi, %eax addl %edx, %ebp @@ -901,7 +901,7 @@ ripemd160_block_asm_host_order: subl %esi, %edx roll $12, %ebp addl %ebx, %ebp - # 72 + movl 56(%esp), %eax orl %ecx, %edx addl %eax, %ebx @@ -912,7 +912,7 @@ ripemd160_block_asm_host_order: subl %ecx, %eax roll $5, %ebx addl %edi, %ebx - # 73 + movl 4(%esp), %edx orl %ebp, %eax addl %edx, %edi @@ -923,7 +923,7 @@ ripemd160_block_asm_host_order: subl %ebp, %edx roll $12, %edi addl %esi, %edi - # 74 + movl 12(%esp), %eax orl %ebx, %edx addl %eax, %esi @@ -934,7 +934,7 @@ ripemd160_block_asm_host_order: subl %ebx, %eax roll $13, %esi addl %ecx, %esi - # 75 + movl 32(%esp), %edx orl %edi, %eax addl %edx, %ecx @@ -945,7 +945,7 @@ ripemd160_block_asm_host_order: subl %edi, %edx roll $14, %ecx addl 
%ebp, %ecx - # 76 + movl 44(%esp), %eax orl %esi, %edx addl %eax, %ebp @@ -956,7 +956,7 @@ ripemd160_block_asm_host_order: subl %esi, %eax roll $11, %ebp addl %ebx, %ebp - # 77 + movl 24(%esp), %edx orl %ecx, %eax addl %edx, %ebx @@ -967,7 +967,7 @@ ripemd160_block_asm_host_order: subl %ecx, %edx roll $8, %ebx addl %edi, %ebx - # 78 + movl 60(%esp), %eax orl %ebp, %edx addl %eax, %edi @@ -978,7 +978,7 @@ ripemd160_block_asm_host_order: subl %ebp, %eax roll $5, %edi addl %esi, %edi - # 79 + movl 52(%esp), %edx orl %ebx, %eax addl %edx, %esi @@ -998,7 +998,7 @@ ripemd160_block_asm_host_order: movl %ebp, 80(%esp) movl 12(%edx), %ebx movl 16(%edx), %ebp - # 80 + movl $-1, %edx subl %ebx, %edx movl 20(%esp), %eax @@ -1011,7 +1011,7 @@ ripemd160_block_asm_host_order: subl %edi, %eax roll $8, %ecx addl %ebp, %ecx - # 81 + movl 56(%esp), %edx orl %esi, %eax addl %edx, %ebp @@ -1022,7 +1022,7 @@ ripemd160_block_asm_host_order: subl %esi, %edx roll $9, %ebp addl %ebx, %ebp - # 82 + movl 28(%esp), %eax orl %ecx, %edx addl %eax, %ebx @@ -1033,7 +1033,7 @@ ripemd160_block_asm_host_order: subl %ecx, %eax roll $9, %ebx addl %edi, %ebx - # 83 + movl (%esp), %edx orl %ebp, %eax addl %edx, %edi @@ -1044,7 +1044,7 @@ ripemd160_block_asm_host_order: subl %ebp, %edx roll $11, %edi addl %esi, %edi - # 84 + movl 36(%esp), %eax orl %ebx, %edx addl %eax, %esi @@ -1055,7 +1055,7 @@ ripemd160_block_asm_host_order: subl %ebx, %eax roll $13, %esi addl %ecx, %esi - # 85 + movl 8(%esp), %edx orl %edi, %eax addl %edx, %ecx @@ -1066,7 +1066,7 @@ ripemd160_block_asm_host_order: subl %edi, %edx roll $15, %ecx addl %ebp, %ecx - # 86 + movl 44(%esp), %eax orl %esi, %edx addl %eax, %ebp @@ -1077,7 +1077,7 @@ ripemd160_block_asm_host_order: subl %esi, %eax roll $15, %ebp addl %ebx, %ebp - # 87 + movl 16(%esp), %edx orl %ecx, %eax addl %edx, %ebx @@ -1088,7 +1088,7 @@ ripemd160_block_asm_host_order: subl %ecx, %edx roll $5, %ebx addl %edi, %ebx - # 88 + movl 52(%esp), %eax orl %ebp, %edx addl %eax, %edi @@ -1099,7 +1099,7 @@ ripemd160_block_asm_host_order: subl %ebp, %eax roll $7, %edi addl %esi, %edi - # 89 + movl 24(%esp), %edx orl %ebx, %eax addl %edx, %esi @@ -1110,7 +1110,7 @@ ripemd160_block_asm_host_order: subl %ebx, %edx roll $7, %esi addl %ecx, %esi - # 90 + movl 60(%esp), %eax orl %edi, %edx addl %eax, %ecx @@ -1121,7 +1121,7 @@ ripemd160_block_asm_host_order: subl %edi, %eax roll $8, %ecx addl %ebp, %ecx - # 91 + movl 32(%esp), %edx orl %esi, %eax addl %edx, %ebp @@ -1132,7 +1132,7 @@ ripemd160_block_asm_host_order: subl %esi, %edx roll $11, %ebp addl %ebx, %ebp - # 92 + movl 4(%esp), %eax orl %ecx, %edx addl %eax, %ebx @@ -1143,7 +1143,7 @@ ripemd160_block_asm_host_order: subl %ecx, %eax roll $14, %ebx addl %edi, %ebx - # 93 + movl 40(%esp), %edx orl %ebp, %eax addl %edx, %edi @@ -1154,7 +1154,7 @@ ripemd160_block_asm_host_order: subl %ebp, %edx roll $14, %edi addl %esi, %edi - # 94 + movl 12(%esp), %eax orl %ebx, %edx addl %eax, %esi @@ -1165,7 +1165,7 @@ ripemd160_block_asm_host_order: subl %ebx, %eax roll $12, %esi addl %ecx, %esi - # 95 + movl 48(%esp), %edx orl %edi, %eax addl %edx, %ecx @@ -1176,7 +1176,7 @@ ripemd160_block_asm_host_order: movl %edi, %eax roll $6, %ecx addl %ebp, %ecx - # 96 + subl %edi, %edx andl %ecx, %eax andl %esi, %edx @@ -1189,7 +1189,7 @@ ripemd160_block_asm_host_order: movl %esi, %eax roll $9, %ebp addl %ebx, %ebp - # 97 + subl %esi, %edx andl %ebp, %eax andl %ecx, %edx @@ -1202,7 +1202,7 @@ ripemd160_block_asm_host_order: movl %ecx, %eax roll $13, %ebx addl %edi, %ebx - # 98 + subl 
%ecx, %edx andl %ebx, %eax andl %ebp, %edx @@ -1215,7 +1215,7 @@ ripemd160_block_asm_host_order: movl %ebp, %eax roll $15, %edi addl %esi, %edi - # 99 + subl %ebp, %edx andl %edi, %eax andl %ebx, %edx @@ -1228,7 +1228,7 @@ ripemd160_block_asm_host_order: movl %ebx, %eax roll $7, %esi addl %ecx, %esi - # 100 + subl %ebx, %edx andl %esi, %eax andl %edi, %edx @@ -1241,7 +1241,7 @@ ripemd160_block_asm_host_order: movl %edi, %eax roll $12, %ecx addl %ebp, %ecx - # 101 + subl %edi, %edx andl %ecx, %eax andl %esi, %edx @@ -1254,7 +1254,7 @@ ripemd160_block_asm_host_order: movl %esi, %eax roll $8, %ebp addl %ebx, %ebp - # 102 + subl %esi, %edx andl %ebp, %eax andl %ecx, %edx @@ -1267,7 +1267,7 @@ ripemd160_block_asm_host_order: movl %ecx, %eax roll $9, %ebx addl %edi, %ebx - # 103 + subl %ecx, %edx andl %ebx, %eax andl %ebp, %edx @@ -1280,7 +1280,7 @@ ripemd160_block_asm_host_order: movl %ebp, %eax roll $11, %edi addl %esi, %edi - # 104 + subl %ebp, %edx andl %edi, %eax andl %ebx, %edx @@ -1293,7 +1293,7 @@ ripemd160_block_asm_host_order: movl %ebx, %eax roll $7, %esi addl %ecx, %esi - # 105 + subl %ebx, %edx andl %esi, %eax andl %edi, %edx @@ -1306,7 +1306,7 @@ ripemd160_block_asm_host_order: movl %edi, %eax roll $7, %ecx addl %ebp, %ecx - # 106 + subl %edi, %edx andl %ecx, %eax andl %esi, %edx @@ -1319,7 +1319,7 @@ ripemd160_block_asm_host_order: movl %esi, %eax roll $12, %ebp addl %ebx, %ebp - # 107 + subl %esi, %edx andl %ebp, %eax andl %ecx, %edx @@ -1332,7 +1332,7 @@ ripemd160_block_asm_host_order: movl %ecx, %eax roll $7, %ebx addl %edi, %ebx - # 108 + subl %ecx, %edx andl %ebx, %eax andl %ebp, %edx @@ -1345,7 +1345,7 @@ ripemd160_block_asm_host_order: movl %ebp, %eax roll $6, %edi addl %esi, %edi - # 109 + subl %ebp, %edx andl %edi, %eax andl %ebx, %edx @@ -1358,7 +1358,7 @@ ripemd160_block_asm_host_order: movl %ebx, %eax roll $15, %esi addl %ecx, %esi - # 110 + subl %ebx, %edx andl %esi, %eax andl %edi, %edx @@ -1371,7 +1371,7 @@ ripemd160_block_asm_host_order: movl %edi, %eax roll $13, %ecx addl %ebp, %ecx - # 111 + subl %edi, %edx andl %ecx, %eax andl %esi, %edx @@ -1384,7 +1384,7 @@ ripemd160_block_asm_host_order: subl %ecx, %edx roll $11, %ebp addl %ebx, %ebp - # 112 + movl 60(%esp), %eax orl %ebp, %edx addl %eax, %ebx @@ -1395,7 +1395,7 @@ ripemd160_block_asm_host_order: subl %ebp, %eax roll $9, %ebx addl %edi, %ebx - # 113 + movl 20(%esp), %edx orl %ebx, %eax addl %edx, %edi @@ -1406,7 +1406,7 @@ ripemd160_block_asm_host_order: subl %ebx, %edx roll $7, %edi addl %esi, %edi - # 114 + movl 4(%esp), %eax orl %edi, %edx addl %eax, %esi @@ -1417,7 +1417,7 @@ ripemd160_block_asm_host_order: subl %edi, %eax roll $15, %esi addl %ecx, %esi - # 115 + movl 12(%esp), %edx orl %esi, %eax addl %edx, %ecx @@ -1428,7 +1428,7 @@ ripemd160_block_asm_host_order: subl %esi, %edx roll $11, %ecx addl %ebp, %ecx - # 116 + movl 28(%esp), %eax orl %ecx, %edx addl %eax, %ebp @@ -1439,7 +1439,7 @@ ripemd160_block_asm_host_order: subl %ecx, %eax roll $8, %ebp addl %ebx, %ebp - # 117 + movl 56(%esp), %edx orl %ebp, %eax addl %edx, %ebx @@ -1450,7 +1450,7 @@ ripemd160_block_asm_host_order: subl %ebp, %edx roll $6, %ebx addl %edi, %ebx - # 118 + movl 24(%esp), %eax orl %ebx, %edx addl %eax, %edi @@ -1461,7 +1461,7 @@ ripemd160_block_asm_host_order: subl %ebx, %eax roll $6, %edi addl %esi, %edi - # 119 + movl 36(%esp), %edx orl %edi, %eax addl %edx, %esi @@ -1472,7 +1472,7 @@ ripemd160_block_asm_host_order: subl %edi, %edx roll $14, %esi addl %ecx, %esi - # 120 + movl 44(%esp), %eax orl %esi, %edx addl %eax, %ecx 
@@ -1483,7 +1483,7 @@ ripemd160_block_asm_host_order: subl %esi, %eax roll $12, %ecx addl %ebp, %ecx - # 121 + movl 32(%esp), %edx orl %ecx, %eax addl %edx, %ebp @@ -1494,7 +1494,7 @@ ripemd160_block_asm_host_order: subl %ecx, %edx roll $13, %ebp addl %ebx, %ebp - # 122 + movl 48(%esp), %eax orl %ebp, %edx addl %eax, %ebx @@ -1505,7 +1505,7 @@ ripemd160_block_asm_host_order: subl %ebp, %eax roll $5, %ebx addl %edi, %ebx - # 123 + movl 8(%esp), %edx orl %ebx, %eax addl %edx, %edi @@ -1516,7 +1516,7 @@ ripemd160_block_asm_host_order: subl %ebx, %edx roll $14, %edi addl %esi, %edi - # 124 + movl 40(%esp), %eax orl %edi, %edx addl %eax, %esi @@ -1527,7 +1527,7 @@ ripemd160_block_asm_host_order: subl %edi, %eax roll $13, %esi addl %ecx, %esi - # 125 + movl (%esp), %edx orl %esi, %eax addl %edx, %ecx @@ -1538,7 +1538,7 @@ ripemd160_block_asm_host_order: subl %esi, %edx roll $13, %ecx addl %ebp, %ecx - # 126 + movl 16(%esp), %eax orl %ecx, %edx addl %eax, %ebp @@ -1549,7 +1549,7 @@ ripemd160_block_asm_host_order: subl %ecx, %eax roll $7, %ebp addl %ebx, %ebp - # 127 + movl 52(%esp), %edx orl %ebp, %eax addl %edx, %ebx @@ -1560,7 +1560,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $5, %ebx addl %edi, %ebx - # 128 + addl %edx, %edi movl %ebp, %edx subl %ebx, %eax @@ -1573,7 +1573,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $15, %edi addl %esi, %edi - # 129 + addl %eax, %esi movl %ebx, %eax subl %edi, %edx @@ -1586,7 +1586,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $5, %esi addl %ecx, %esi - # 130 + addl %edx, %ecx movl %edi, %edx subl %esi, %eax @@ -1599,7 +1599,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $8, %ecx addl %ebp, %ecx - # 131 + addl %eax, %ebp movl %esi, %eax subl %ecx, %edx @@ -1612,7 +1612,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $11, %ebp addl %ebx, %ebp - # 132 + addl %edx, %ebx movl %ecx, %edx subl %ebp, %eax @@ -1625,7 +1625,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $14, %ebx addl %edi, %ebx - # 133 + addl %eax, %edi movl %ebp, %eax subl %ebx, %edx @@ -1638,7 +1638,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $14, %edi addl %esi, %edi - # 134 + addl %edx, %esi movl %ebx, %edx subl %edi, %eax @@ -1651,7 +1651,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $6, %esi addl %ecx, %esi - # 135 + addl %eax, %ecx movl %edi, %eax subl %esi, %edx @@ -1664,7 +1664,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $14, %ecx addl %ebp, %ecx - # 136 + addl %edx, %ebp movl %esi, %edx subl %ecx, %eax @@ -1677,7 +1677,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $6, %ebp addl %ebx, %ebp - # 137 + addl %eax, %ebx movl %ecx, %eax subl %ebp, %edx @@ -1690,7 +1690,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $9, %ebx addl %edi, %ebx - # 138 + addl %edx, %edi movl %ebp, %edx subl %ebx, %eax @@ -1703,7 +1703,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $12, %edi addl %esi, %edi - # 139 + addl %eax, %esi movl %ebx, %eax subl %edi, %edx @@ -1716,7 +1716,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $9, %esi addl %ecx, %esi - # 140 + addl %edx, %ecx movl %edi, %edx subl %esi, %eax @@ -1729,7 +1729,7 @@ ripemd160_block_asm_host_order: movl $-1, %edx roll $12, %ecx addl %ebp, %ecx - # 141 + addl %eax, %ebp movl %esi, %eax subl %ecx, %edx @@ -1742,7 +1742,7 @@ ripemd160_block_asm_host_order: movl $-1, %eax roll $5, %ebp addl %ebx, %ebp - # 142 + addl %edx, %ebx movl %ecx, %edx subl %ebp, %eax @@ -1755,7 +1755,7 @@ ripemd160_block_asm_host_order: movl $-1, 
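
Annotation: as in the other hash files, the deletions in rmd-586.s are the per-step comments ("# 0" through "# 159", eighty steps for each of RIPEMD-160's two parallel lines); the instructions are untouched. The numbered steps cycle through five boolean functions, and where a function needs a NOT the code uses "movl $-1, %reg" followed by subl, since -1 - x equals ~x in two's complement. A hedged C sketch of the five functions (illustrative names, per the RIPEMD-160 spec):

    /* RIPEMD-160's five round functions, whose inlined forms appear
     * in the numbered steps above. */
    #include <stdint.h>

    static uint32_t f1(uint32_t x, uint32_t y, uint32_t z) { return x ^ y ^ z;          }
    static uint32_t f2(uint32_t x, uint32_t y, uint32_t z) { return (x & y) | (~x & z); }
    static uint32_t f3(uint32_t x, uint32_t y, uint32_t z) { return (x | ~y) ^ z;       }
    static uint32_t f4(uint32_t x, uint32_t y, uint32_t z) { return (x & z) | (y & ~z); }
    static uint32_t f5(uint32_t x, uint32_t y, uint32_t z) { return x ^ (y | ~z);       }
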
%edx roll $15, %ebx addl %edi, %ebx - # 143 + addl %eax, %edi movl %ebp, %eax subl %ebx, %edx @@ -1768,7 +1768,7 @@ ripemd160_block_asm_host_order: xorl %ebp, %eax roll $8, %edi addl %esi, %edi - # 144 + movl 48(%esp), %edx xorl %edi, %eax addl %edx, %esi @@ -1777,7 +1777,7 @@ ripemd160_block_asm_host_order: movl %edi, %eax roll $8, %esi addl %ecx, %esi - # 145 + xorl %ebx, %eax movl 60(%esp), %edx xorl %esi, %eax @@ -1788,7 +1788,7 @@ ripemd160_block_asm_host_order: xorl %edi, %eax roll $5, %ecx addl %ebp, %ecx - # 146 + movl 40(%esp), %edx xorl %ecx, %eax addl %edx, %ebp @@ -1797,7 +1797,7 @@ ripemd160_block_asm_host_order: movl %ecx, %eax roll $12, %ebp addl %ebx, %ebp - # 147 + xorl %esi, %eax movl 16(%esp), %edx xorl %ebp, %eax @@ -1808,7 +1808,7 @@ ripemd160_block_asm_host_order: xorl %ecx, %eax roll $9, %ebx addl %edi, %ebx - # 148 + movl 4(%esp), %edx xorl %ebx, %eax addl %edx, %edi @@ -1817,7 +1817,7 @@ ripemd160_block_asm_host_order: movl %ebx, %eax roll $12, %edi addl %esi, %edi - # 149 + xorl %ebp, %eax movl 20(%esp), %edx xorl %edi, %eax @@ -1828,7 +1828,7 @@ ripemd160_block_asm_host_order: xorl %ebx, %eax roll $5, %esi addl %ecx, %esi - # 150 + movl 32(%esp), %edx xorl %esi, %eax addl %edx, %ecx @@ -1837,7 +1837,7 @@ ripemd160_block_asm_host_order: movl %esi, %eax roll $14, %ecx addl %ebp, %ecx - # 151 + xorl %edi, %eax movl 28(%esp), %edx xorl %ecx, %eax @@ -1848,7 +1848,7 @@ ripemd160_block_asm_host_order: xorl %esi, %eax roll $6, %ebp addl %ebx, %ebp - # 152 + movl 24(%esp), %edx xorl %ebp, %eax addl %edx, %ebx @@ -1857,7 +1857,7 @@ ripemd160_block_asm_host_order: movl %ebp, %eax roll $8, %ebx addl %edi, %ebx - # 153 + xorl %ecx, %eax movl 8(%esp), %edx xorl %ebx, %eax @@ -1868,7 +1868,7 @@ ripemd160_block_asm_host_order: xorl %ebp, %eax roll $13, %edi addl %esi, %edi - # 154 + movl 52(%esp), %edx xorl %edi, %eax addl %edx, %esi @@ -1877,7 +1877,7 @@ ripemd160_block_asm_host_order: movl %edi, %eax roll $6, %esi addl %ecx, %esi - # 155 + xorl %ebx, %eax movl 56(%esp), %edx xorl %esi, %eax @@ -1888,7 +1888,7 @@ ripemd160_block_asm_host_order: xorl %edi, %eax roll $5, %ecx addl %ebp, %ecx - # 156 + movl (%esp), %edx xorl %ecx, %eax addl %edx, %ebp @@ -1897,7 +1897,7 @@ ripemd160_block_asm_host_order: movl %ecx, %eax roll $15, %ebp addl %ebx, %ebp - # 157 + xorl %esi, %eax movl 12(%esp), %edx xorl %ebp, %eax @@ -1908,7 +1908,7 @@ ripemd160_block_asm_host_order: xorl %ecx, %eax roll $13, %ebx addl %edi, %ebx - # 158 + movl 36(%esp), %edx xorl %ebx, %eax addl %edx, %edi @@ -1917,7 +1917,7 @@ ripemd160_block_asm_host_order: movl %ebx, %eax roll $11, %edi addl %esi, %edi - # 159 + xorl %ebp, %eax movl 44(%esp), %edx xorl %edi, %eax diff --git a/secure/lib/libcrypto/i386/sha1-586.s b/secure/lib/libcrypto/i386/sha1-586.s index b13c665..4a35f9d 100644 --- a/secure/lib/libcrypto/i386/sha1-586.s +++ b/secure/lib/libcrypto/i386/sha1-586.s @@ -1,9 +1,9 @@ # $FreeBSD$ - # Dont even think of reading this code - # It was automatically generated by sha1-586.pl - # Which is a perl program used to generate the x86 assember for - # any of elf, a.out, BSDI, Win32, gaswin (for GNU as on Win32) or Solaris - # eric <eay@cryptsoft.com> + + + + + .file "sha1-586.s" .version "01.01" @@ -27,1851 +27,1428 @@ sha1_block_asm_data_order: movl 16(%ebp), %edi movl 8(%ebp), %ebx movl %ecx, 68(%esp) - # First we need to setup the X array + .L000start: - # First, load the words onto the stack in network byte order + movl (%esi), %eax movl 4(%esi), %ecx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 
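
Annotation: the ".byte 15" / ".byte 200" (and ".byte 201") pairs kept throughout this part of sha1-586.s are the raw opcode bytes 0x0F 0xC8 / 0x0F 0xC9 for "bswapl %eax" / "bswapl %ecx", emitted as bytes so assemblers predating the bswap mnemonic still work; the diff only strips the trailing comments that spelled this out. The bswaps convert each big-endian message word to host order before it is stored into the X array on the stack. A portable C sketch of what the opcode does (illustrative name):

    /* What the 0x0F 0xC8 opcode bytes (bswapl %eax) compute. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t bswap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00u)
             | ((v << 8) & 0x00ff0000u) | (v << 24);
    }

    int main(void)
    {
        printf("%08x\n", bswap32(0x01020304u));  /* prints 04030201 */
        return 0;
    }
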
-.byte 201 # bswapl %ecx +.byte 201 movl %eax, (%esp) movl %ecx, 4(%esp) movl 8(%esi), %eax movl 12(%esi), %ecx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 201 # bswapl %ecx +.byte 201 movl %eax, 8(%esp) movl %ecx, 12(%esp) movl 16(%esi), %eax movl 20(%esi), %ecx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 201 # bswapl %ecx +.byte 201 movl %eax, 16(%esp) movl %ecx, 20(%esp) movl 24(%esi), %eax movl 28(%esi), %ecx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 201 # bswapl %ecx +.byte 201 movl %eax, 24(%esp) movl %ecx, 28(%esp) movl 32(%esi), %eax movl 36(%esi), %ecx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 201 # bswapl %ecx +.byte 201 movl %eax, 32(%esp) movl %ecx, 36(%esp) movl 40(%esi), %eax movl 44(%esi), %ecx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 201 # bswapl %ecx +.byte 201 movl %eax, 40(%esp) movl %ecx, 44(%esp) movl 48(%esi), %eax movl 52(%esi), %ecx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 201 # bswapl %ecx +.byte 201 movl %eax, 48(%esp) movl %ecx, 52(%esp) movl 56(%esi), %eax movl 60(%esi), %ecx .byte 15 -.byte 200 # bswapl %eax +.byte 200 .byte 15 -.byte 201 # bswapl %ecx +.byte 201 movl %eax, 56(%esp) movl %ecx, 60(%esp) - # We now have the X array on the stack - # starting at sp-4 + + movl %esi, 132(%esp) .L001shortcut: - # Start processing + movl (%ebp), %eax movl 4(%ebp), %ecx - # 00_15 0 - movl %ebx, %esi + movl %eax, %ebp - xorl %edx, %esi + movl %ebx, %esi roll $5, %ebp + xorl %edx, %esi andl %ecx, %esi + rorl $2, %ecx addl %edi, %ebp -.byte 209 -.byte 201 # rorl $1 %ecx movl (%esp), %edi -.byte 209 -.byte 201 # rorl $1 %ecx xorl %edx, %esi leal 1518500249(%ebp,%edi,1),%ebp - movl %ecx, %edi addl %ebp, %esi - xorl %ebx, %edi + movl %esi, %ebp - andl %eax, %edi + movl %ecx, %edi roll $5, %ebp + xorl %ebx, %edi + andl %eax, %edi + rorl $2, %eax addl %edx, %ebp movl 4(%esp), %edx -.byte 209 -.byte 200 # rorl $1 %eax xorl %ebx, %edi -.byte 209 -.byte 200 # rorl $1 %eax leal 1518500249(%ebp,%edx,1),%ebp addl %ebp, %edi - # 00_15 2 - movl %eax, %edx + movl %edi, %ebp - xorl %ecx, %edx + movl %eax, %edx roll $5, %ebp + xorl %ecx, %edx andl %esi, %edx + rorl $2, %esi addl %ebx, %ebp -.byte 209 -.byte 206 # rorl $1 %esi movl 8(%esp), %ebx -.byte 209 -.byte 206 # rorl $1 %esi xorl %ecx, %edx leal 1518500249(%ebp,%ebx,1),%ebp - movl %esi, %ebx addl %ebp, %edx - xorl %eax, %ebx + movl %edx, %ebp - andl %edi, %ebx + movl %esi, %ebx roll $5, %ebp + xorl %eax, %ebx + andl %edi, %ebx + rorl $2, %edi addl %ecx, %ebp movl 12(%esp), %ecx -.byte 209 -.byte 207 # rorl $1 %edi xorl %eax, %ebx -.byte 209 -.byte 207 # rorl $1 %edi leal 1518500249(%ebp,%ecx,1),%ebp addl %ebp, %ebx - # 00_15 4 - movl %edi, %ecx + movl %ebx, %ebp - xorl %esi, %ecx + movl %edi, %ecx roll $5, %ebp + xorl %esi, %ecx andl %edx, %ecx + rorl $2, %edx addl %eax, %ebp -.byte 209 -.byte 202 # rorl $1 %edx movl 16(%esp), %eax -.byte 209 -.byte 202 # rorl $1 %edx xorl %esi, %ecx leal 1518500249(%ebp,%eax,1),%ebp - movl %edx, %eax addl %ebp, %ecx - xorl %edi, %eax + movl %ecx, %ebp - andl %ebx, %eax + movl %edx, %eax roll $5, %ebp + xorl %edi, %eax + andl %ebx, %eax + rorl $2, %ebx addl %esi, %ebp movl 20(%esp), %esi -.byte 209 -.byte 203 # rorl $1 %ebx xorl %edi, %eax -.byte 209 -.byte 203 # rorl $1 %ebx leal 1518500249(%ebp,%esi,1),%ebp addl %ebp, %eax - # 00_15 6 - movl %ebx, %esi + movl %eax, %ebp - xorl %edx, %esi + movl %ebx, %esi roll $5, %ebp + xorl %edx, %esi andl %ecx, %esi + rorl $2, %ecx addl %edi, %ebp 
-.byte 209 -.byte 201 # rorl $1 %ecx movl 24(%esp), %edi -.byte 209 -.byte 201 # rorl $1 %ecx xorl %edx, %esi leal 1518500249(%ebp,%edi,1),%ebp - movl %ecx, %edi addl %ebp, %esi - xorl %ebx, %edi + movl %esi, %ebp - andl %eax, %edi + movl %ecx, %edi roll $5, %ebp + xorl %ebx, %edi + andl %eax, %edi + rorl $2, %eax addl %edx, %ebp movl 28(%esp), %edx -.byte 209 -.byte 200 # rorl $1 %eax xorl %ebx, %edi -.byte 209 -.byte 200 # rorl $1 %eax leal 1518500249(%ebp,%edx,1),%ebp addl %ebp, %edi - # 00_15 8 - movl %eax, %edx + movl %edi, %ebp - xorl %ecx, %edx + movl %eax, %edx roll $5, %ebp + xorl %ecx, %edx andl %esi, %edx + rorl $2, %esi addl %ebx, %ebp -.byte 209 -.byte 206 # rorl $1 %esi movl 32(%esp), %ebx -.byte 209 -.byte 206 # rorl $1 %esi xorl %ecx, %edx leal 1518500249(%ebp,%ebx,1),%ebp - movl %esi, %ebx addl %ebp, %edx - xorl %eax, %ebx + movl %edx, %ebp - andl %edi, %ebx + movl %esi, %ebx roll $5, %ebp + xorl %eax, %ebx + andl %edi, %ebx + rorl $2, %edi addl %ecx, %ebp movl 36(%esp), %ecx -.byte 209 -.byte 207 # rorl $1 %edi xorl %eax, %ebx -.byte 209 -.byte 207 # rorl $1 %edi leal 1518500249(%ebp,%ecx,1),%ebp addl %ebp, %ebx - # 00_15 10 - movl %edi, %ecx + movl %ebx, %ebp - xorl %esi, %ecx + movl %edi, %ecx roll $5, %ebp + xorl %esi, %ecx andl %edx, %ecx + rorl $2, %edx addl %eax, %ebp -.byte 209 -.byte 202 # rorl $1 %edx movl 40(%esp), %eax -.byte 209 -.byte 202 # rorl $1 %edx xorl %esi, %ecx leal 1518500249(%ebp,%eax,1),%ebp - movl %edx, %eax addl %ebp, %ecx - xorl %edi, %eax + movl %ecx, %ebp - andl %ebx, %eax + movl %edx, %eax roll $5, %ebp + xorl %edi, %eax + andl %ebx, %eax + rorl $2, %ebx addl %esi, %ebp movl 44(%esp), %esi -.byte 209 -.byte 203 # rorl $1 %ebx xorl %edi, %eax -.byte 209 -.byte 203 # rorl $1 %ebx leal 1518500249(%ebp,%esi,1),%ebp addl %ebp, %eax - # 00_15 12 - movl %ebx, %esi + movl %eax, %ebp - xorl %edx, %esi + movl %ebx, %esi roll $5, %ebp + xorl %edx, %esi andl %ecx, %esi + rorl $2, %ecx addl %edi, %ebp -.byte 209 -.byte 201 # rorl $1 %ecx movl 48(%esp), %edi -.byte 209 -.byte 201 # rorl $1 %ecx xorl %edx, %esi leal 1518500249(%ebp,%edi,1),%ebp - movl %ecx, %edi addl %ebp, %esi - xorl %ebx, %edi + movl %esi, %ebp - andl %eax, %edi + movl %ecx, %edi roll $5, %ebp + xorl %ebx, %edi + andl %eax, %edi + rorl $2, %eax addl %edx, %ebp movl 52(%esp), %edx -.byte 209 -.byte 200 # rorl $1 %eax xorl %ebx, %edi -.byte 209 -.byte 200 # rorl $1 %eax leal 1518500249(%ebp,%edx,1),%ebp addl %ebp, %edi - # 00_15 14 - movl %eax, %edx + movl %edi, %ebp - xorl %ecx, %edx + movl %eax, %edx roll $5, %ebp + xorl %ecx, %edx andl %esi, %edx + rorl $2, %esi addl %ebx, %ebp -.byte 209 -.byte 206 # rorl $1 %esi movl 56(%esp), %ebx -.byte 209 -.byte 206 # rorl $1 %esi xorl %ecx, %edx leal 1518500249(%ebp,%ebx,1),%ebp - movl %esi, %ebx addl %ebp, %edx - xorl %eax, %ebx + movl %edx, %ebp - andl %edi, %ebx + movl %esi, %ebx roll $5, %ebp + xorl %eax, %ebx + andl %edi, %ebx + rorl $2, %edi addl %ecx, %ebp movl 60(%esp), %ecx -.byte 209 -.byte 207 # rorl $1 %edi xorl %eax, %ebx -.byte 209 -.byte 207 # rorl $1 %edi leal 1518500249(%ebp,%ecx,1),%ebp addl %ebp, %ebx - # 16_19 16 - nop - movl (%esp), %ebp + movl 8(%esp), %ecx - xorl %ebp, %ecx - movl 32(%esp), %ebp - xorl %ebp, %ecx - movl 52(%esp), %ebp - xorl %ebp, %ecx movl %edi, %ebp -.byte 209 -.byte 193 # roll $1 %ecx + xorl (%esp), %ecx xorl %esi, %ebp - movl %ecx, (%esp) + xorl 32(%esp), %ecx andl %edx, %ebp - leal 1518500249(%ecx,%eax,1),%ecx + xorl 52(%esp), %ecx + rorl $2, %edx xorl %esi, %ebp +.byte 209 +.byte 193 + movl %ecx, 
(%esp) + leal 1518500249(%ecx,%eax,1),%ecx movl %ebx, %eax addl %ebp, %ecx roll $5, %eax -.byte 209 -.byte 202 # rorl $1 %edx addl %eax, %ecx - movl 4(%esp), %eax - movl 12(%esp), %ebp - xorl %ebp, %eax - movl 36(%esp), %ebp - xorl %ebp, %eax - movl 56(%esp), %ebp -.byte 209 -.byte 202 # rorl $1 %edx - xorl %ebp, %eax -.byte 209 -.byte 192 # roll $1 %eax + + movl 12(%esp), %eax movl %edx, %ebp + xorl 4(%esp), %eax xorl %edi, %ebp - movl %eax, 4(%esp) + xorl 36(%esp), %eax andl %ebx, %ebp - leal 1518500249(%eax,%esi,1),%eax + xorl 56(%esp), %eax + rorl $2, %ebx xorl %edi, %ebp +.byte 209 +.byte 192 + movl %eax, 4(%esp) + leal 1518500249(%eax,%esi,1),%eax movl %ecx, %esi + addl %ebp, %eax roll $5, %esi -.byte 209 -.byte 203 # rorl $1 %ebx addl %esi, %eax -.byte 209 -.byte 203 # rorl $1 %ebx - addl %ebp, %eax - # 16_19 18 - movl 8(%esp), %ebp + movl 16(%esp), %esi - xorl %ebp, %esi - movl 40(%esp), %ebp - xorl %ebp, %esi - movl 60(%esp), %ebp - xorl %ebp, %esi movl %ebx, %ebp -.byte 209 -.byte 198 # roll $1 %esi + xorl 8(%esp), %esi xorl %edx, %ebp - movl %esi, 8(%esp) + xorl 40(%esp), %esi andl %ecx, %ebp - leal 1518500249(%esi,%edi,1),%esi + xorl 60(%esp), %esi + rorl $2, %ecx xorl %edx, %ebp +.byte 209 +.byte 198 + movl %esi, 8(%esp) + leal 1518500249(%esi,%edi,1),%esi movl %eax, %edi addl %ebp, %esi roll $5, %edi -.byte 209 -.byte 201 # rorl $1 %ecx addl %edi, %esi - movl 12(%esp), %edi - movl 20(%esp), %ebp - xorl %ebp, %edi - movl 44(%esp), %ebp - xorl %ebp, %edi - movl (%esp), %ebp -.byte 209 -.byte 201 # rorl $1 %ecx - xorl %ebp, %edi -.byte 209 -.byte 199 # roll $1 %edi + + movl 20(%esp), %edi movl %ecx, %ebp + xorl 12(%esp), %edi xorl %ebx, %ebp - movl %edi, 12(%esp) + xorl 44(%esp), %edi andl %eax, %ebp - leal 1518500249(%edi,%edx,1),%edi + xorl (%esp), %edi + rorl $2, %eax xorl %ebx, %ebp +.byte 209 +.byte 199 + movl %edi, 12(%esp) + leal 1518500249(%edi,%edx,1),%edi movl %esi, %edx + addl %ebp, %edi roll $5, %edx -.byte 209 -.byte 200 # rorl $1 %eax addl %edx, %edi -.byte 209 -.byte 200 # rorl $1 %eax - addl %ebp, %edi - # 20_39 20 + movl 16(%esp), %edx - movl 24(%esp), %ebp - xorl %ebp, %edx - movl 48(%esp), %ebp - xorl %ebp, %edx - movl 4(%esp), %ebp - xorl %ebp, %edx movl %esi, %ebp -.byte 209 -.byte 194 # roll $1 %edx + xorl 24(%esp), %edx + rorl $2, %esi + xorl 48(%esp), %edx xorl %eax, %ebp - movl %edx, 16(%esp) + xorl 4(%esp), %edx xorl %ecx, %ebp +.byte 209 +.byte 194 + movl %edx, 16(%esp) leal 1859775393(%edx,%ebx,1),%edx movl %edi, %ebx roll $5, %ebx -.byte 209 -.byte 206 # rorl $1 %esi - addl %ebp, %ebx -.byte 209 -.byte 206 # rorl $1 %esi + addl %ebp, %edx addl %ebx, %edx - # 20_39 21 + movl 20(%esp), %ebx - movl 28(%esp), %ebp - xorl %ebp, %ebx - movl 52(%esp), %ebp - xorl %ebp, %ebx - movl 8(%esp), %ebp - xorl %ebp, %ebx movl %edi, %ebp -.byte 209 -.byte 195 # roll $1 %ebx + xorl 28(%esp), %ebx + rorl $2, %edi + xorl 52(%esp), %ebx xorl %esi, %ebp - movl %ebx, 20(%esp) + xorl 8(%esp), %ebx xorl %eax, %ebp +.byte 209 +.byte 195 + movl %ebx, 20(%esp) leal 1859775393(%ebx,%ecx,1),%ebx movl %edx, %ecx roll $5, %ecx -.byte 209 -.byte 207 # rorl $1 %edi - addl %ebp, %ecx -.byte 209 -.byte 207 # rorl $1 %edi + addl %ebp, %ebx addl %ecx, %ebx - # 20_39 22 + movl 24(%esp), %ecx - movl 32(%esp), %ebp - xorl %ebp, %ecx - movl 56(%esp), %ebp - xorl %ebp, %ecx - movl 12(%esp), %ebp - xorl %ebp, %ecx movl %edx, %ebp -.byte 209 -.byte 193 # roll $1 %ecx + xorl 32(%esp), %ecx + rorl $2, %edx + xorl 56(%esp), %ecx xorl %edi, %ebp - movl %ecx, 24(%esp) + xorl 12(%esp), %ecx xorl 
%esi, %ebp +.byte 209 +.byte 193 + movl %ecx, 24(%esp) leal 1859775393(%ecx,%eax,1),%ecx movl %ebx, %eax roll $5, %eax -.byte 209 -.byte 202 # rorl $1 %edx - addl %ebp, %eax -.byte 209 -.byte 202 # rorl $1 %edx + addl %ebp, %ecx addl %eax, %ecx - # 20_39 23 + movl 28(%esp), %eax - movl 36(%esp), %ebp - xorl %ebp, %eax - movl 60(%esp), %ebp - xorl %ebp, %eax - movl 16(%esp), %ebp - xorl %ebp, %eax movl %ebx, %ebp -.byte 209 -.byte 192 # roll $1 %eax + xorl 36(%esp), %eax + rorl $2, %ebx + xorl 60(%esp), %eax xorl %edx, %ebp - movl %eax, 28(%esp) + xorl 16(%esp), %eax xorl %edi, %ebp +.byte 209 +.byte 192 + movl %eax, 28(%esp) leal 1859775393(%eax,%esi,1),%eax movl %ecx, %esi roll $5, %esi -.byte 209 -.byte 203 # rorl $1 %ebx - addl %ebp, %esi -.byte 209 -.byte 203 # rorl $1 %ebx + addl %ebp, %eax addl %esi, %eax - # 20_39 24 + movl 32(%esp), %esi - movl 40(%esp), %ebp - xorl %ebp, %esi - movl (%esp), %ebp - xorl %ebp, %esi - movl 20(%esp), %ebp - xorl %ebp, %esi movl %ecx, %ebp -.byte 209 -.byte 198 # roll $1 %esi + xorl 40(%esp), %esi + rorl $2, %ecx + xorl (%esp), %esi xorl %ebx, %ebp - movl %esi, 32(%esp) + xorl 20(%esp), %esi xorl %edx, %ebp +.byte 209 +.byte 198 + movl %esi, 32(%esp) leal 1859775393(%esi,%edi,1),%esi movl %eax, %edi roll $5, %edi -.byte 209 -.byte 201 # rorl $1 %ecx - addl %ebp, %edi -.byte 209 -.byte 201 # rorl $1 %ecx + addl %ebp, %esi addl %edi, %esi - # 20_39 25 + movl 36(%esp), %edi - movl 44(%esp), %ebp - xorl %ebp, %edi - movl 4(%esp), %ebp - xorl %ebp, %edi - movl 24(%esp), %ebp - xorl %ebp, %edi movl %eax, %ebp -.byte 209 -.byte 199 # roll $1 %edi + xorl 44(%esp), %edi + rorl $2, %eax + xorl 4(%esp), %edi xorl %ecx, %ebp - movl %edi, 36(%esp) + xorl 24(%esp), %edi xorl %ebx, %ebp +.byte 209 +.byte 199 + movl %edi, 36(%esp) leal 1859775393(%edi,%edx,1),%edi movl %esi, %edx roll $5, %edx -.byte 209 -.byte 200 # rorl $1 %eax - addl %ebp, %edx -.byte 209 -.byte 200 # rorl $1 %eax + addl %ebp, %edi addl %edx, %edi - # 20_39 26 + movl 40(%esp), %edx - movl 48(%esp), %ebp - xorl %ebp, %edx - movl 8(%esp), %ebp - xorl %ebp, %edx - movl 28(%esp), %ebp - xorl %ebp, %edx movl %esi, %ebp -.byte 209 -.byte 194 # roll $1 %edx + xorl 48(%esp), %edx + rorl $2, %esi + xorl 8(%esp), %edx xorl %eax, %ebp - movl %edx, 40(%esp) + xorl 28(%esp), %edx xorl %ecx, %ebp +.byte 209 +.byte 194 + movl %edx, 40(%esp) leal 1859775393(%edx,%ebx,1),%edx movl %edi, %ebx roll $5, %ebx -.byte 209 -.byte 206 # rorl $1 %esi - addl %ebp, %ebx -.byte 209 -.byte 206 # rorl $1 %esi + addl %ebp, %edx addl %ebx, %edx - # 20_39 27 + movl 44(%esp), %ebx - movl 52(%esp), %ebp - xorl %ebp, %ebx - movl 12(%esp), %ebp - xorl %ebp, %ebx - movl 32(%esp), %ebp - xorl %ebp, %ebx movl %edi, %ebp -.byte 209 -.byte 195 # roll $1 %ebx + xorl 52(%esp), %ebx + rorl $2, %edi + xorl 12(%esp), %ebx xorl %esi, %ebp - movl %ebx, 44(%esp) + xorl 32(%esp), %ebx xorl %eax, %ebp +.byte 209 +.byte 195 + movl %ebx, 44(%esp) leal 1859775393(%ebx,%ecx,1),%ebx movl %edx, %ecx roll $5, %ecx -.byte 209 -.byte 207 # rorl $1 %edi - addl %ebp, %ecx -.byte 209 -.byte 207 # rorl $1 %edi + addl %ebp, %ebx addl %ecx, %ebx - # 20_39 28 + movl 48(%esp), %ecx - movl 56(%esp), %ebp - xorl %ebp, %ecx - movl 16(%esp), %ebp - xorl %ebp, %ecx - movl 36(%esp), %ebp - xorl %ebp, %ecx movl %edx, %ebp -.byte 209 -.byte 193 # roll $1 %ecx + xorl 56(%esp), %ecx + rorl $2, %edx + xorl 16(%esp), %ecx xorl %edi, %ebp - movl %ecx, 48(%esp) + xorl 36(%esp), %ecx xorl %esi, %ebp +.byte 209 +.byte 193 + movl %ecx, 48(%esp) leal 1859775393(%ecx,%eax,1),%ecx movl 
%ebx, %eax roll $5, %eax -.byte 209 -.byte 202 # rorl $1 %edx - addl %ebp, %eax -.byte 209 -.byte 202 # rorl $1 %edx + addl %ebp, %ecx addl %eax, %ecx - # 20_39 29 + movl 52(%esp), %eax - movl 60(%esp), %ebp - xorl %ebp, %eax - movl 20(%esp), %ebp - xorl %ebp, %eax - movl 40(%esp), %ebp - xorl %ebp, %eax movl %ebx, %ebp -.byte 209 -.byte 192 # roll $1 %eax + xorl 60(%esp), %eax + rorl $2, %ebx + xorl 20(%esp), %eax xorl %edx, %ebp - movl %eax, 52(%esp) + xorl 40(%esp), %eax xorl %edi, %ebp +.byte 209 +.byte 192 + movl %eax, 52(%esp) leal 1859775393(%eax,%esi,1),%eax movl %ecx, %esi roll $5, %esi -.byte 209 -.byte 203 # rorl $1 %ebx - addl %ebp, %esi -.byte 209 -.byte 203 # rorl $1 %ebx + addl %ebp, %eax addl %esi, %eax - # 20_39 30 + movl 56(%esp), %esi - movl (%esp), %ebp - xorl %ebp, %esi - movl 24(%esp), %ebp - xorl %ebp, %esi - movl 44(%esp), %ebp - xorl %ebp, %esi movl %ecx, %ebp -.byte 209 -.byte 198 # roll $1 %esi + xorl (%esp), %esi + rorl $2, %ecx + xorl 24(%esp), %esi xorl %ebx, %ebp - movl %esi, 56(%esp) + xorl 44(%esp), %esi xorl %edx, %ebp +.byte 209 +.byte 198 + movl %esi, 56(%esp) leal 1859775393(%esi,%edi,1),%esi movl %eax, %edi roll $5, %edi -.byte 209 -.byte 201 # rorl $1 %ecx - addl %ebp, %edi -.byte 209 -.byte 201 # rorl $1 %ecx + addl %ebp, %esi addl %edi, %esi - # 20_39 31 + movl 60(%esp), %edi - movl 4(%esp), %ebp - xorl %ebp, %edi - movl 28(%esp), %ebp - xorl %ebp, %edi - movl 48(%esp), %ebp - xorl %ebp, %edi movl %eax, %ebp -.byte 209 -.byte 199 # roll $1 %edi + xorl 4(%esp), %edi + rorl $2, %eax + xorl 28(%esp), %edi xorl %ecx, %ebp - movl %edi, 60(%esp) + xorl 48(%esp), %edi xorl %ebx, %ebp +.byte 209 +.byte 199 + movl %edi, 60(%esp) leal 1859775393(%edi,%edx,1),%edi movl %esi, %edx roll $5, %edx -.byte 209 -.byte 200 # rorl $1 %eax - addl %ebp, %edx -.byte 209 -.byte 200 # rorl $1 %eax + addl %ebp, %edi addl %edx, %edi - # 20_39 32 + movl (%esp), %edx - movl 8(%esp), %ebp - xorl %ebp, %edx - movl 32(%esp), %ebp - xorl %ebp, %edx - movl 52(%esp), %ebp - xorl %ebp, %edx movl %esi, %ebp -.byte 209 -.byte 194 # roll $1 %edx + xorl 8(%esp), %edx + rorl $2, %esi + xorl 32(%esp), %edx xorl %eax, %ebp - movl %edx, (%esp) + xorl 52(%esp), %edx xorl %ecx, %ebp +.byte 209 +.byte 194 + movl %edx, (%esp) leal 1859775393(%edx,%ebx,1),%edx movl %edi, %ebx roll $5, %ebx -.byte 209 -.byte 206 # rorl $1 %esi - addl %ebp, %ebx -.byte 209 -.byte 206 # rorl $1 %esi + addl %ebp, %edx addl %ebx, %edx - # 20_39 33 + movl 4(%esp), %ebx - movl 12(%esp), %ebp - xorl %ebp, %ebx - movl 36(%esp), %ebp - xorl %ebp, %ebx - movl 56(%esp), %ebp - xorl %ebp, %ebx movl %edi, %ebp -.byte 209 -.byte 195 # roll $1 %ebx + xorl 12(%esp), %ebx + rorl $2, %edi + xorl 36(%esp), %ebx xorl %esi, %ebp - movl %ebx, 4(%esp) + xorl 56(%esp), %ebx xorl %eax, %ebp +.byte 209 +.byte 195 + movl %ebx, 4(%esp) leal 1859775393(%ebx,%ecx,1),%ebx movl %edx, %ecx roll $5, %ecx -.byte 209 -.byte 207 # rorl $1 %edi - addl %ebp, %ecx -.byte 209 -.byte 207 # rorl $1 %edi + addl %ebp, %ebx addl %ecx, %ebx - # 20_39 34 + movl 8(%esp), %ecx - movl 16(%esp), %ebp - xorl %ebp, %ecx - movl 40(%esp), %ebp - xorl %ebp, %ecx - movl 60(%esp), %ebp - xorl %ebp, %ecx movl %edx, %ebp -.byte 209 -.byte 193 # roll $1 %ecx + xorl 16(%esp), %ecx + rorl $2, %edx + xorl 40(%esp), %ecx xorl %edi, %ebp - movl %ecx, 8(%esp) + xorl 60(%esp), %ecx xorl %esi, %ebp +.byte 209 +.byte 193 + movl %ecx, 8(%esp) leal 1859775393(%ecx,%eax,1),%ecx movl %ebx, %eax roll $5, %eax -.byte 209 -.byte 202 # rorl $1 %edx - addl %ebp, %eax -.byte 209 -.byte 202 # 
rorl $1 %edx
+ addl %ebp, %ecx
 addl %eax, %ecx
- # 20_39 35
+ movl 12(%esp), %eax
- movl 20(%esp), %ebp
- xorl %ebp, %eax
- movl 44(%esp), %ebp
- xorl %ebp, %eax
- movl (%esp), %ebp
- xorl %ebp, %eax
 movl %ebx, %ebp
-.byte 209
-.byte 192 # roll $1 %eax
+ xorl 20(%esp), %eax
+ rorl $2, %ebx
+ xorl 44(%esp), %eax
 xorl %edx, %ebp
- movl %eax, 12(%esp)
+ xorl (%esp), %eax
 xorl %edi, %ebp
+.byte 209
+.byte 192
+ movl %eax, 12(%esp)
 leal 1859775393(%eax,%esi,1),%eax
 movl %ecx, %esi
 roll $5, %esi
-.byte 209
-.byte 203 # rorl $1 %ebx
- addl %ebp, %esi
-.byte 209
-.byte 203 # rorl $1 %ebx
+ addl %ebp, %eax
 addl %esi, %eax
- # 20_39 36
+ movl 16(%esp), %esi
- movl 24(%esp), %ebp
- xorl %ebp, %esi
- movl 48(%esp), %ebp
- xorl %ebp, %esi
- movl 4(%esp), %ebp
- xorl %ebp, %esi
 movl %ecx, %ebp
-.byte 209
-.byte 198 # roll $1 %esi
+ xorl 24(%esp), %esi
+ rorl $2, %ecx
+ xorl 48(%esp), %esi
 xorl %ebx, %ebp
- movl %esi, 16(%esp)
+ xorl 4(%esp), %esi
 xorl %edx, %ebp
+.byte 209
+.byte 198
+ movl %esi, 16(%esp)
 leal 1859775393(%esi,%edi,1),%esi
 movl %eax, %edi
 roll $5, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
- addl %ebp, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
+ addl %ebp, %esi
 addl %edi, %esi
- # 20_39 37
+ movl 20(%esp), %edi
- movl 28(%esp), %ebp
- xorl %ebp, %edi
- movl 52(%esp), %ebp
- xorl %ebp, %edi
- movl 8(%esp), %ebp
- xorl %ebp, %edi
 movl %eax, %ebp
-.byte 209
-.byte 199 # roll $1 %edi
+ xorl 28(%esp), %edi
+ rorl $2, %eax
+ xorl 52(%esp), %edi
 xorl %ecx, %ebp
- movl %edi, 20(%esp)
+ xorl 8(%esp), %edi
 xorl %ebx, %ebp
+.byte 209
+.byte 199
+ movl %edi, 20(%esp)
 leal 1859775393(%edi,%edx,1),%edi
 movl %esi, %edx
 roll $5, %edx
-.byte 209
-.byte 200 # rorl $1 %eax
- addl %ebp, %edx
-.byte 209
-.byte 200 # rorl $1 %eax
+ addl %ebp, %edi
 addl %edx, %edi
- # 20_39 38
+ movl 24(%esp), %edx
- movl 32(%esp), %ebp
- xorl %ebp, %edx
- movl 56(%esp), %ebp
- xorl %ebp, %edx
- movl 12(%esp), %ebp
- xorl %ebp, %edx
 movl %esi, %ebp
-.byte 209
-.byte 194 # roll $1 %edx
+ xorl 32(%esp), %edx
+ rorl $2, %esi
+ xorl 56(%esp), %edx
 xorl %eax, %ebp
- movl %edx, 24(%esp)
+ xorl 12(%esp), %edx
 xorl %ecx, %ebp
+.byte 209
+.byte 194
+ movl %edx, 24(%esp)
 leal 1859775393(%edx,%ebx,1),%edx
 movl %edi, %ebx
 roll $5, %ebx
-.byte 209
-.byte 206 # rorl $1 %esi
- addl %ebp, %ebx
-.byte 209
-.byte 206 # rorl $1 %esi
+ addl %ebp, %edx
 addl %ebx, %edx
- # 20_39 39
+ movl 28(%esp), %ebx
- movl 36(%esp), %ebp
- xorl %ebp, %ebx
- movl 60(%esp), %ebp
- xorl %ebp, %ebx
- movl 16(%esp), %ebp
- xorl %ebp, %ebx
 movl %edi, %ebp
-.byte 209
-.byte 195 # roll $1 %ebx
+ xorl 36(%esp), %ebx
+ rorl $2, %edi
+ xorl 60(%esp), %ebx
 xorl %esi, %ebp
- movl %ebx, 28(%esp)
+ xorl 16(%esp), %ebx
 xorl %eax, %ebp
+.byte 209
+.byte 195
+ movl %ebx, 28(%esp)
 leal 1859775393(%ebx,%ecx,1),%ebx
 movl %edx, %ecx
 roll $5, %ecx
-.byte 209
-.byte 207 # rorl $1 %edi
- addl %ebp, %ecx
-.byte 209
-.byte 207 # rorl $1 %edi
+ addl %ebp, %ebx
 addl %ecx, %ebx
- # 40_59 40
+ movl 32(%esp), %ecx
- movl 40(%esp), %ebp
- xorl %ebp, %ecx
- movl (%esp), %ebp
- xorl %ebp, %ecx
- movl 20(%esp), %ebp
- xorl %ebp, %ecx
 movl %edx, %ebp
-.byte 209
-.byte 193 # roll $1 %ecx
+ xorl 40(%esp), %ecx
 orl %edi, %ebp
- movl %ecx, 32(%esp)
+ xorl (%esp), %ecx
 andl %esi, %ebp
+ xorl 20(%esp), %ecx
+.byte 209
+.byte 193
+ movl %ecx, 32(%esp)
 leal 2400959708(%ecx,%eax,1),%ecx
 movl %edx, %eax
-.byte 209
-.byte 202 # rorl $1 %edx
+ rorl $2, %edx
 andl %edi, %eax
 orl %eax, %ebp
 movl %ebx, %eax
 roll $5, %eax
 addl %eax, %ebp
- movl 36(%esp), %eax
 addl %ebp, %ecx
- movl 44(%esp), %ebp
- xorl %ebp, %eax
- movl 4(%esp), %ebp
- xorl %ebp, %eax
- movl 24(%esp), %ebp
-.byte 209
-.byte 202 # rorl $1 %edx
- xorl %ebp, %eax
-.byte 209
-.byte 192 # roll $1 %eax
+
+ movl 36(%esp), %eax
 movl %ebx, %ebp
- movl %eax, 36(%esp)
+ xorl 44(%esp), %eax
 orl %edx, %ebp
+ xorl 4(%esp), %eax
+ andl %edi, %ebp
+ xorl 24(%esp), %eax
+.byte 209
+.byte 192
+ movl %eax, 36(%esp)
 leal 2400959708(%eax,%esi,1),%eax
 movl %ebx, %esi
- andl %edi, %ebp
+ rorl $2, %ebx
 andl %edx, %esi
 orl %esi, %ebp
 movl %ecx, %esi
 roll $5, %esi
-.byte 209
-.byte 203 # rorl $1 %ebx
 addl %esi, %ebp
-.byte 209
-.byte 203 # rorl $1 %ebx
 addl %ebp, %eax
- # 40_59 41
- # 40_59 42
+ movl 40(%esp), %esi
- movl 48(%esp), %ebp
- xorl %ebp, %esi
- movl 8(%esp), %ebp
- xorl %ebp, %esi
- movl 28(%esp), %ebp
- xorl %ebp, %esi
 movl %ecx, %ebp
-.byte 209
-.byte 198 # roll $1 %esi
+ xorl 48(%esp), %esi
 orl %ebx, %ebp
- movl %esi, 40(%esp)
+ xorl 8(%esp), %esi
 andl %edx, %ebp
+ xorl 28(%esp), %esi
+.byte 209
+.byte 198
+ movl %esi, 40(%esp)
 leal 2400959708(%esi,%edi,1),%esi
 movl %ecx, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
+ rorl $2, %ecx
 andl %ebx, %edi
 orl %edi, %ebp
 movl %eax, %edi
 roll $5, %edi
 addl %edi, %ebp
- movl 44(%esp), %edi
 addl %ebp, %esi
- movl 52(%esp), %ebp
- xorl %ebp, %edi
- movl 12(%esp), %ebp
- xorl %ebp, %edi
- movl 32(%esp), %ebp
-.byte 209
-.byte 201 # rorl $1 %ecx
- xorl %ebp, %edi
-.byte 209
-.byte 199 # roll $1 %edi
+
+ movl 44(%esp), %edi
 movl %eax, %ebp
- movl %edi, 44(%esp)
+ xorl 52(%esp), %edi
 orl %ecx, %ebp
+ xorl 12(%esp), %edi
+ andl %ebx, %ebp
+ xorl 32(%esp), %edi
+.byte 209
+.byte 199
+ movl %edi, 44(%esp)
 leal 2400959708(%edi,%edx,1),%edi
 movl %eax, %edx
- andl %ebx, %ebp
+ rorl $2, %eax
 andl %ecx, %edx
 orl %edx, %ebp
 movl %esi, %edx
 roll $5, %edx
-.byte 209
-.byte 200 # rorl $1 %eax
 addl %edx, %ebp
-.byte 209
-.byte 200 # rorl $1 %eax
 addl %ebp, %edi
- # 40_59 43
- # 40_59 44
+ movl 48(%esp), %edx
- movl 56(%esp), %ebp
- xorl %ebp, %edx
- movl 16(%esp), %ebp
- xorl %ebp, %edx
- movl 36(%esp), %ebp
- xorl %ebp, %edx
 movl %esi, %ebp
-.byte 209
-.byte 194 # roll $1 %edx
+ xorl 56(%esp), %edx
 orl %eax, %ebp
- movl %edx, 48(%esp)
+ xorl 16(%esp), %edx
 andl %ecx, %ebp
+ xorl 36(%esp), %edx
+.byte 209
+.byte 194
+ movl %edx, 48(%esp)
 leal 2400959708(%edx,%ebx,1),%edx
 movl %esi, %ebx
-.byte 209
-.byte 206 # rorl $1 %esi
+ rorl $2, %esi
 andl %eax, %ebx
 orl %ebx, %ebp
 movl %edi, %ebx
 roll $5, %ebx
 addl %ebx, %ebp
- movl 52(%esp), %ebx
 addl %ebp, %edx
- movl 60(%esp), %ebp
- xorl %ebp, %ebx
- movl 20(%esp), %ebp
- xorl %ebp, %ebx
- movl 40(%esp), %ebp
-.byte 209
-.byte 206 # rorl $1 %esi
- xorl %ebp, %ebx
-.byte 209
-.byte 195 # roll $1 %ebx
+
+ movl 52(%esp), %ebx
 movl %edi, %ebp
- movl %ebx, 52(%esp)
+ xorl 60(%esp), %ebx
 orl %esi, %ebp
+ xorl 20(%esp), %ebx
+ andl %eax, %ebp
+ xorl 40(%esp), %ebx
+.byte 209
+.byte 195
+ movl %ebx, 52(%esp)
 leal 2400959708(%ebx,%ecx,1),%ebx
 movl %edi, %ecx
- andl %eax, %ebp
+ rorl $2, %edi
 andl %esi, %ecx
 orl %ecx, %ebp
 movl %edx, %ecx
 roll $5, %ecx
-.byte 209
-.byte 207 # rorl $1 %edi
 addl %ecx, %ebp
-.byte 209
-.byte 207 # rorl $1 %edi
 addl %ebp, %ebx
- # 40_59 45
- # 40_59 46
+ movl 56(%esp), %ecx
- movl (%esp), %ebp
- xorl %ebp, %ecx
- movl 24(%esp), %ebp
- xorl %ebp, %ecx
- movl 44(%esp), %ebp
- xorl %ebp, %ecx
 movl %edx, %ebp
-.byte 209
-.byte 193 # roll $1 %ecx
+ xorl (%esp), %ecx
 orl %edi, %ebp
- movl %ecx, 56(%esp)
+ xorl 24(%esp), %ecx
 andl %esi, %ebp
+ xorl 44(%esp), %ecx
+.byte 209
+.byte 193
+ movl %ecx, 56(%esp)
 leal 2400959708(%ecx,%eax,1),%ecx
 movl %edx, %eax
-.byte 209
-.byte 202 # rorl $1 %edx
+ rorl $2, %edx
 andl %edi, %eax
 orl %eax, %ebp
 movl %ebx, %eax
 roll $5, %eax
 addl %eax, %ebp
- movl 60(%esp), %eax
 addl %ebp, %ecx
- movl 4(%esp), %ebp
- xorl %ebp, %eax
- movl 28(%esp), %ebp
- xorl %ebp, %eax
- movl 48(%esp), %ebp
-.byte 209
-.byte 202 # rorl $1 %edx
- xorl %ebp, %eax
-.byte 209
-.byte 192 # roll $1 %eax
+
+ movl 60(%esp), %eax
 movl %ebx, %ebp
- movl %eax, 60(%esp)
+ xorl 4(%esp), %eax
 orl %edx, %ebp
+ xorl 28(%esp), %eax
+ andl %edi, %ebp
+ xorl 48(%esp), %eax
+.byte 209
+.byte 192
+ movl %eax, 60(%esp)
 leal 2400959708(%eax,%esi,1),%eax
 movl %ebx, %esi
- andl %edi, %ebp
+ rorl $2, %ebx
 andl %edx, %esi
 orl %esi, %ebp
 movl %ecx, %esi
 roll $5, %esi
-.byte 209
-.byte 203 # rorl $1 %ebx
 addl %esi, %ebp
-.byte 209
-.byte 203 # rorl $1 %ebx
 addl %ebp, %eax
- # 40_59 47
- # 40_59 48
+ movl (%esp), %esi
- movl 8(%esp), %ebp
- xorl %ebp, %esi
- movl 32(%esp), %ebp
- xorl %ebp, %esi
- movl 52(%esp), %ebp
- xorl %ebp, %esi
 movl %ecx, %ebp
-.byte 209
-.byte 198 # roll $1 %esi
+ xorl 8(%esp), %esi
 orl %ebx, %ebp
- movl %esi, (%esp)
+ xorl 32(%esp), %esi
 andl %edx, %ebp
+ xorl 52(%esp), %esi
+.byte 209
+.byte 198
+ movl %esi, (%esp)
 leal 2400959708(%esi,%edi,1),%esi
 movl %ecx, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
+ rorl $2, %ecx
 andl %ebx, %edi
 orl %edi, %ebp
 movl %eax, %edi
 roll $5, %edi
 addl %edi, %ebp
- movl 4(%esp), %edi
 addl %ebp, %esi
- movl 12(%esp), %ebp
- xorl %ebp, %edi
- movl 36(%esp), %ebp
- xorl %ebp, %edi
- movl 56(%esp), %ebp
-.byte 209
-.byte 201 # rorl $1 %ecx
- xorl %ebp, %edi
-.byte 209
-.byte 199 # roll $1 %edi
+
+ movl 4(%esp), %edi
 movl %eax, %ebp
- movl %edi, 4(%esp)
+ xorl 12(%esp), %edi
 orl %ecx, %ebp
+ xorl 36(%esp), %edi
+ andl %ebx, %ebp
+ xorl 56(%esp), %edi
+.byte 209
+.byte 199
+ movl %edi, 4(%esp)
 leal 2400959708(%edi,%edx,1),%edi
 movl %eax, %edx
- andl %ebx, %ebp
+ rorl $2, %eax
 andl %ecx, %edx
 orl %edx, %ebp
 movl %esi, %edx
 roll $5, %edx
-.byte 209
-.byte 200 # rorl $1 %eax
 addl %edx, %ebp
-.byte 209
-.byte 200 # rorl $1 %eax
 addl %ebp, %edi
- # 40_59 49
- # 40_59 50
+ movl 8(%esp), %edx
- movl 16(%esp), %ebp
- xorl %ebp, %edx
- movl 40(%esp), %ebp
- xorl %ebp, %edx
- movl 60(%esp), %ebp
- xorl %ebp, %edx
 movl %esi, %ebp
-.byte 209
-.byte 194 # roll $1 %edx
+ xorl 16(%esp), %edx
 orl %eax, %ebp
- movl %edx, 8(%esp)
+ xorl 40(%esp), %edx
 andl %ecx, %ebp
+ xorl 60(%esp), %edx
+.byte 209
+.byte 194
+ movl %edx, 8(%esp)
 leal 2400959708(%edx,%ebx,1),%edx
 movl %esi, %ebx
-.byte 209
-.byte 206 # rorl $1 %esi
+ rorl $2, %esi
 andl %eax, %ebx
 orl %ebx, %ebp
 movl %edi, %ebx
 roll $5, %ebx
 addl %ebx, %ebp
- movl 12(%esp), %ebx
 addl %ebp, %edx
- movl 20(%esp), %ebp
- xorl %ebp, %ebx
- movl 44(%esp), %ebp
- xorl %ebp, %ebx
- movl (%esp), %ebp
-.byte 209
-.byte 206 # rorl $1 %esi
- xorl %ebp, %ebx
-.byte 209
-.byte 195 # roll $1 %ebx
+
+ movl 12(%esp), %ebx
 movl %edi, %ebp
- movl %ebx, 12(%esp)
+ xorl 20(%esp), %ebx
 orl %esi, %ebp
+ xorl 44(%esp), %ebx
+ andl %eax, %ebp
+ xorl (%esp), %ebx
+.byte 209
+.byte 195
+ movl %ebx, 12(%esp)
 leal 2400959708(%ebx,%ecx,1),%ebx
 movl %edi, %ecx
- andl %eax, %ebp
+ rorl $2, %edi
 andl %esi, %ecx
 orl %ecx, %ebp
 movl %edx, %ecx
 roll $5, %ecx
-.byte 209
-.byte 207 # rorl $1 %edi
 addl %ecx, %ebp
-.byte 209
-.byte 207 # rorl $1 %edi
 addl %ebp, %ebx
- # 40_59 51
- # 40_59 52
+ movl 16(%esp), %ecx
- movl 24(%esp), %ebp
- xorl %ebp, %ecx
- movl 48(%esp), %ebp
- xorl %ebp, %ecx
- movl 4(%esp), %ebp
- xorl %ebp, %ecx
 movl %edx, %ebp
-.byte 209
-.byte 193 # roll $1 %ecx
+ xorl 24(%esp), %ecx
 orl %edi, %ebp
- movl %ecx, 16(%esp)
+ xorl 48(%esp), %ecx
 andl %esi, %ebp
+ xorl 4(%esp), %ecx
+.byte 209
+.byte 193
+ movl %ecx, 16(%esp)
 leal 2400959708(%ecx,%eax,1),%ecx
 movl %edx, %eax
-.byte 209
-.byte 202 # rorl $1 %edx
+ rorl $2, %edx
 andl %edi, %eax
 orl %eax, %ebp
 movl %ebx, %eax
 roll $5, %eax
 addl %eax, %ebp
- movl 20(%esp), %eax
 addl %ebp, %ecx
- movl 28(%esp), %ebp
- xorl %ebp, %eax
- movl 52(%esp), %ebp
- xorl %ebp, %eax
- movl 8(%esp), %ebp
-.byte 209
-.byte 202 # rorl $1 %edx
- xorl %ebp, %eax
-.byte 209
-.byte 192 # roll $1 %eax
+
+ movl 20(%esp), %eax
 movl %ebx, %ebp
- movl %eax, 20(%esp)
+ xorl 28(%esp), %eax
 orl %edx, %ebp
+ xorl 52(%esp), %eax
+ andl %edi, %ebp
+ xorl 8(%esp), %eax
+.byte 209
+.byte 192
+ movl %eax, 20(%esp)
 leal 2400959708(%eax,%esi,1),%eax
 movl %ebx, %esi
- andl %edi, %ebp
+ rorl $2, %ebx
 andl %edx, %esi
 orl %esi, %ebp
 movl %ecx, %esi
 roll $5, %esi
-.byte 209
-.byte 203 # rorl $1 %ebx
 addl %esi, %ebp
-.byte 209
-.byte 203 # rorl $1 %ebx
 addl %ebp, %eax
- # 40_59 53
- # 40_59 54
+ movl 24(%esp), %esi
- movl 32(%esp), %ebp
- xorl %ebp, %esi
- movl 56(%esp), %ebp
- xorl %ebp, %esi
- movl 12(%esp), %ebp
- xorl %ebp, %esi
 movl %ecx, %ebp
-.byte 209
-.byte 198 # roll $1 %esi
+ xorl 32(%esp), %esi
 orl %ebx, %ebp
- movl %esi, 24(%esp)
+ xorl 56(%esp), %esi
 andl %edx, %ebp
+ xorl 12(%esp), %esi
+.byte 209
+.byte 198
+ movl %esi, 24(%esp)
 leal 2400959708(%esi,%edi,1),%esi
 movl %ecx, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
+ rorl $2, %ecx
 andl %ebx, %edi
 orl %edi, %ebp
 movl %eax, %edi
 roll $5, %edi
 addl %edi, %ebp
- movl 28(%esp), %edi
 addl %ebp, %esi
- movl 36(%esp), %ebp
- xorl %ebp, %edi
- movl 60(%esp), %ebp
- xorl %ebp, %edi
- movl 16(%esp), %ebp
-.byte 209
-.byte 201 # rorl $1 %ecx
- xorl %ebp, %edi
-.byte 209
-.byte 199 # roll $1 %edi
+
+ movl 28(%esp), %edi
 movl %eax, %ebp
- movl %edi, 28(%esp)
+ xorl 36(%esp), %edi
 orl %ecx, %ebp
+ xorl 60(%esp), %edi
+ andl %ebx, %ebp
+ xorl 16(%esp), %edi
+.byte 209
+.byte 199
+ movl %edi, 28(%esp)
 leal 2400959708(%edi,%edx,1),%edi
 movl %eax, %edx
- andl %ebx, %ebp
+ rorl $2, %eax
 andl %ecx, %edx
 orl %edx, %ebp
 movl %esi, %edx
 roll $5, %edx
-.byte 209
-.byte 200 # rorl $1 %eax
 addl %edx, %ebp
-.byte 209
-.byte 200 # rorl $1 %eax
 addl %ebp, %edi
- # 40_59 55
- # 40_59 56
+ movl 32(%esp), %edx
- movl 40(%esp), %ebp
- xorl %ebp, %edx
- movl (%esp), %ebp
- xorl %ebp, %edx
- movl 20(%esp), %ebp
- xorl %ebp, %edx
 movl %esi, %ebp
-.byte 209
-.byte 194 # roll $1 %edx
+ xorl 40(%esp), %edx
 orl %eax, %ebp
- movl %edx, 32(%esp)
+ xorl (%esp), %edx
 andl %ecx, %ebp
+ xorl 20(%esp), %edx
+.byte 209
+.byte 194
+ movl %edx, 32(%esp)
 leal 2400959708(%edx,%ebx,1),%edx
 movl %esi, %ebx
-.byte 209
-.byte 206 # rorl $1 %esi
+ rorl $2, %esi
 andl %eax, %ebx
 orl %ebx, %ebp
 movl %edi, %ebx
 roll $5, %ebx
 addl %ebx, %ebp
- movl 36(%esp), %ebx
 addl %ebp, %edx
- movl 44(%esp), %ebp
- xorl %ebp, %ebx
- movl 4(%esp), %ebp
- xorl %ebp, %ebx
- movl 24(%esp), %ebp
-.byte 209
-.byte 206 # rorl $1 %esi
- xorl %ebp, %ebx
-.byte 209
-.byte 195 # roll $1 %ebx
+
+ movl 36(%esp), %ebx
 movl %edi, %ebp
- movl %ebx, 36(%esp)
+ xorl 44(%esp), %ebx
 orl %esi, %ebp
+ xorl 4(%esp), %ebx
+ andl %eax, %ebp
+ xorl 24(%esp), %ebx
+.byte 209
+.byte 195
+ movl %ebx, 36(%esp)
 leal 2400959708(%ebx,%ecx,1),%ebx
 movl %edi, %ecx
- andl %eax, %ebp
+ rorl $2, %edi
 andl %esi, %ecx
 orl %ecx, %ebp
 movl %edx, %ecx
 roll $5, %ecx
-.byte 209
-.byte 207 # rorl $1 %edi
 addl %ecx, %ebp
-.byte 209
-.byte 207 # rorl $1 %edi
 addl %ebp, %ebx
- # 40_59 57
- # 40_59 58
+ movl 40(%esp), %ecx
- movl 48(%esp), %ebp
- xorl %ebp, %ecx
- movl 8(%esp), %ebp
- xorl %ebp, %ecx
- movl 28(%esp), %ebp
- xorl %ebp, %ecx
 movl %edx, %ebp
-.byte 209
-.byte 193 # roll $1 %ecx
+ xorl 48(%esp), %ecx
 orl %edi, %ebp
- movl %ecx, 40(%esp)
+ xorl 8(%esp), %ecx
 andl %esi, %ebp
+ xorl 28(%esp), %ecx
+.byte 209
+.byte 193
+ movl %ecx, 40(%esp)
 leal 2400959708(%ecx,%eax,1),%ecx
 movl %edx, %eax
-.byte 209
-.byte 202 # rorl $1 %edx
+ rorl $2, %edx
 andl %edi, %eax
 orl %eax, %ebp
 movl %ebx, %eax
 roll $5, %eax
 addl %eax, %ebp
- movl 44(%esp), %eax
 addl %ebp, %ecx
- movl 52(%esp), %ebp
- xorl %ebp, %eax
- movl 12(%esp), %ebp
- xorl %ebp, %eax
- movl 32(%esp), %ebp
-.byte 209
-.byte 202 # rorl $1 %edx
- xorl %ebp, %eax
-.byte 209
-.byte 192 # roll $1 %eax
+
+ movl 44(%esp), %eax
 movl %ebx, %ebp
- movl %eax, 44(%esp)
+ xorl 52(%esp), %eax
 orl %edx, %ebp
+ xorl 12(%esp), %eax
+ andl %edi, %ebp
+ xorl 32(%esp), %eax
+.byte 209
+.byte 192
+ movl %eax, 44(%esp)
 leal 2400959708(%eax,%esi,1),%eax
 movl %ebx, %esi
- andl %edi, %ebp
+ rorl $2, %ebx
 andl %edx, %esi
 orl %esi, %ebp
 movl %ecx, %esi
 roll $5, %esi
-.byte 209
-.byte 203 # rorl $1 %ebx
 addl %esi, %ebp
-.byte 209
-.byte 203 # rorl $1 %ebx
 addl %ebp, %eax
- # 40_59 59
- # 20_39 60
+ movl 48(%esp), %esi
- movl 56(%esp), %ebp
- xorl %ebp, %esi
- movl 16(%esp), %ebp
- xorl %ebp, %esi
- movl 36(%esp), %ebp
- xorl %ebp, %esi
 movl %ecx, %ebp
-.byte 209
-.byte 198 # roll $1 %esi
+ xorl 56(%esp), %esi
+ rorl $2, %ecx
+ xorl 16(%esp), %esi
 xorl %ebx, %ebp
- movl %esi, 48(%esp)
+ xorl 36(%esp), %esi
 xorl %edx, %ebp
+.byte 209
+.byte 198
+ movl %esi, 48(%esp)
 leal 3395469782(%esi,%edi,1),%esi
 movl %eax, %edi
 roll $5, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
- addl %ebp, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
+ addl %ebp, %esi
 addl %edi, %esi
- # 20_39 61
+ movl 52(%esp), %edi
- movl 60(%esp), %ebp
- xorl %ebp, %edi
- movl 20(%esp), %ebp
- xorl %ebp, %edi
- movl 40(%esp), %ebp
- xorl %ebp, %edi
 movl %eax, %ebp
-.byte 209
-.byte 199 # roll $1 %edi
+ xorl 60(%esp), %edi
+ rorl $2, %eax
+ xorl 20(%esp), %edi
 xorl %ecx, %ebp
- movl %edi, 52(%esp)
+ xorl 40(%esp), %edi
 xorl %ebx, %ebp
+.byte 209
+.byte 199
+ movl %edi, 52(%esp)
 leal 3395469782(%edi,%edx,1),%edi
 movl %esi, %edx
 roll $5, %edx
-.byte 209
-.byte 200 # rorl $1 %eax
- addl %ebp, %edx
-.byte 209
-.byte 200 # rorl $1 %eax
+ addl %ebp, %edi
 addl %edx, %edi
- # 20_39 62
+ movl 56(%esp), %edx
- movl (%esp), %ebp
- xorl %ebp, %edx
- movl 24(%esp), %ebp
- xorl %ebp, %edx
- movl 44(%esp), %ebp
- xorl %ebp, %edx
 movl %esi, %ebp
-.byte 209
-.byte 194 # roll $1 %edx
+ xorl (%esp), %edx
+ rorl $2, %esi
+ xorl 24(%esp), %edx
 xorl %eax, %ebp
- movl %edx, 56(%esp)
+ xorl 44(%esp), %edx
 xorl %ecx, %ebp
+.byte 209
+.byte 194
+ movl %edx, 56(%esp)
 leal 3395469782(%edx,%ebx,1),%edx
 movl %edi, %ebx
 roll $5, %ebx
-.byte 209
-.byte 206 # rorl $1 %esi
- addl %ebp, %ebx
-.byte 209
-.byte 206 # rorl $1 %esi
+ addl %ebp, %edx
 addl %ebx, %edx
- # 20_39 63
+ movl 60(%esp), %ebx
- movl 4(%esp), %ebp
- xorl %ebp, %ebx
- movl 28(%esp), %ebp
- xorl %ebp, %ebx
- movl 48(%esp), %ebp
- xorl %ebp, %ebx
 movl %edi, %ebp
-.byte 209
-.byte 195 # roll $1 %ebx
+ xorl 4(%esp), %ebx
+ rorl $2, %edi
+ xorl 28(%esp), %ebx
 xorl %esi, %ebp
- movl %ebx, 60(%esp)
+ xorl 48(%esp), %ebx
 xorl %eax, %ebp
+.byte 209
+.byte 195
+ movl %ebx, 60(%esp)
 leal 3395469782(%ebx,%ecx,1),%ebx
 movl %edx, %ecx
 roll $5, %ecx
-.byte 209
-.byte 207 # rorl $1 %edi
- addl %ebp, %ecx
-.byte 209
-.byte 207 # rorl $1 %edi
+ addl %ebp, %ebx
 addl %ecx, %ebx
- # 20_39 64
+ movl (%esp), %ecx
- movl 8(%esp), %ebp
- xorl %ebp, %ecx
- movl 32(%esp), %ebp
- xorl %ebp, %ecx
- movl 52(%esp), %ebp
- xorl %ebp, %ecx
 movl %edx, %ebp
-.byte 209
-.byte 193 # roll $1 %ecx
+ xorl 8(%esp), %ecx
+ rorl $2, %edx
+ xorl 32(%esp), %ecx
 xorl %edi, %ebp
- movl %ecx, (%esp)
+ xorl 52(%esp), %ecx
 xorl %esi, %ebp
+.byte 209
+.byte 193
+ movl %ecx, (%esp)
 leal 3395469782(%ecx,%eax,1),%ecx
 movl %ebx, %eax
 roll $5, %eax
-.byte 209
-.byte 202 # rorl $1 %edx
- addl %ebp, %eax
-.byte 209
-.byte 202 # rorl $1 %edx
+ addl %ebp, %ecx
 addl %eax, %ecx
- # 20_39 65
+ movl 4(%esp), %eax
- movl 12(%esp), %ebp
- xorl %ebp, %eax
- movl 36(%esp), %ebp
- xorl %ebp, %eax
- movl 56(%esp), %ebp
- xorl %ebp, %eax
 movl %ebx, %ebp
-.byte 209
-.byte 192 # roll $1 %eax
+ xorl 12(%esp), %eax
+ rorl $2, %ebx
+ xorl 36(%esp), %eax
 xorl %edx, %ebp
- movl %eax, 4(%esp)
+ xorl 56(%esp), %eax
 xorl %edi, %ebp
+.byte 209
+.byte 192
+ movl %eax, 4(%esp)
 leal 3395469782(%eax,%esi,1),%eax
 movl %ecx, %esi
 roll $5, %esi
-.byte 209
-.byte 203 # rorl $1 %ebx
- addl %ebp, %esi
-.byte 209
-.byte 203 # rorl $1 %ebx
+ addl %ebp, %eax
 addl %esi, %eax
- # 20_39 66
+ movl 8(%esp), %esi
- movl 16(%esp), %ebp
- xorl %ebp, %esi
- movl 40(%esp), %ebp
- xorl %ebp, %esi
- movl 60(%esp), %ebp
- xorl %ebp, %esi
 movl %ecx, %ebp
-.byte 209
-.byte 198 # roll $1 %esi
+ xorl 16(%esp), %esi
+ rorl $2, %ecx
+ xorl 40(%esp), %esi
 xorl %ebx, %ebp
- movl %esi, 8(%esp)
+ xorl 60(%esp), %esi
 xorl %edx, %ebp
+.byte 209
+.byte 198
+ movl %esi, 8(%esp)
 leal 3395469782(%esi,%edi,1),%esi
 movl %eax, %edi
 roll $5, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
- addl %ebp, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
+ addl %ebp, %esi
 addl %edi, %esi
- # 20_39 67
+ movl 12(%esp), %edi
- movl 20(%esp), %ebp
- xorl %ebp, %edi
- movl 44(%esp), %ebp
- xorl %ebp, %edi
- movl (%esp), %ebp
- xorl %ebp, %edi
 movl %eax, %ebp
-.byte 209
-.byte 199 # roll $1 %edi
+ xorl 20(%esp), %edi
+ rorl $2, %eax
+ xorl 44(%esp), %edi
 xorl %ecx, %ebp
- movl %edi, 12(%esp)
+ xorl (%esp), %edi
 xorl %ebx, %ebp
+.byte 209
+.byte 199
+ movl %edi, 12(%esp)
 leal 3395469782(%edi,%edx,1),%edi
 movl %esi, %edx
 roll $5, %edx
-.byte 209
-.byte 200 # rorl $1 %eax
- addl %ebp, %edx
-.byte 209
-.byte 200 # rorl $1 %eax
+ addl %ebp, %edi
 addl %edx, %edi
- # 20_39 68
+ movl 16(%esp), %edx
- movl 24(%esp), %ebp
- xorl %ebp, %edx
- movl 48(%esp), %ebp
- xorl %ebp, %edx
- movl 4(%esp), %ebp
- xorl %ebp, %edx
 movl %esi, %ebp
-.byte 209
-.byte 194 # roll $1 %edx
+ xorl 24(%esp), %edx
+ rorl $2, %esi
+ xorl 48(%esp), %edx
 xorl %eax, %ebp
- movl %edx, 16(%esp)
+ xorl 4(%esp), %edx
 xorl %ecx, %ebp
+.byte 209
+.byte 194
+ movl %edx, 16(%esp)
 leal 3395469782(%edx,%ebx,1),%edx
 movl %edi, %ebx
 roll $5, %ebx
-.byte 209
-.byte 206 # rorl $1 %esi
- addl %ebp, %ebx
-.byte 209
-.byte 206 # rorl $1 %esi
+ addl %ebp, %edx
 addl %ebx, %edx
- # 20_39 69
+ movl 20(%esp), %ebx
- movl 28(%esp), %ebp
- xorl %ebp, %ebx
- movl 52(%esp), %ebp
- xorl %ebp, %ebx
- movl 8(%esp), %ebp
- xorl %ebp, %ebx
 movl %edi, %ebp
-.byte 209
-.byte 195 # roll $1 %ebx
+ xorl 28(%esp), %ebx
+ rorl $2, %edi
+ xorl 52(%esp), %ebx
 xorl %esi, %ebp
- movl %ebx, 20(%esp)
+ xorl 8(%esp), %ebx
 xorl %eax, %ebp
+.byte 209
+.byte 195
+ movl %ebx, 20(%esp)
 leal 3395469782(%ebx,%ecx,1),%ebx
 movl %edx, %ecx
 roll $5, %ecx
-.byte 209
-.byte 207 # rorl $1 %edi
- addl %ebp, %ecx
-.byte 209
-.byte 207 # rorl $1 %edi
+ addl %ebp, %ebx
 addl %ecx, %ebx
- # 20_39 70
+ movl 24(%esp), %ecx
- movl 32(%esp), %ebp
- xorl %ebp, %ecx
- movl 56(%esp), %ebp
- xorl %ebp, %ecx
- movl 12(%esp), %ebp
- xorl %ebp, %ecx
 movl %edx, %ebp
-.byte 209
-.byte 193 # roll $1 %ecx
+ xorl 32(%esp), %ecx
+ rorl $2, %edx
+ xorl 56(%esp), %ecx
 xorl %edi, %ebp
- movl %ecx, 24(%esp)
+ xorl 12(%esp), %ecx
 xorl %esi, %ebp
+.byte 209
+.byte 193
+ movl %ecx, 24(%esp)
 leal 3395469782(%ecx,%eax,1),%ecx
 movl %ebx, %eax
 roll $5, %eax
-.byte 209
-.byte 202 # rorl $1 %edx
- addl %ebp, %eax
-.byte 209
-.byte 202 # rorl $1 %edx
+ addl %ebp, %ecx
 addl %eax, %ecx
- # 20_39 71
+ movl 28(%esp), %eax
- movl 36(%esp), %ebp
- xorl %ebp, %eax
- movl 60(%esp), %ebp
- xorl %ebp, %eax
- movl 16(%esp), %ebp
- xorl %ebp, %eax
 movl %ebx, %ebp
-.byte 209
-.byte 192 # roll $1 %eax
+ xorl 36(%esp), %eax
+ rorl $2, %ebx
+ xorl 60(%esp), %eax
 xorl %edx, %ebp
- movl %eax, 28(%esp)
+ xorl 16(%esp), %eax
 xorl %edi, %ebp
+.byte 209
+.byte 192
+ movl %eax, 28(%esp)
 leal 3395469782(%eax,%esi,1),%eax
 movl %ecx, %esi
 roll $5, %esi
-.byte 209
-.byte 203 # rorl $1 %ebx
- addl %ebp, %esi
-.byte 209
-.byte 203 # rorl $1 %ebx
+ addl %ebp, %eax
 addl %esi, %eax
- # 20_39 72
+ movl 32(%esp), %esi
- movl 40(%esp), %ebp
- xorl %ebp, %esi
- movl (%esp), %ebp
- xorl %ebp, %esi
- movl 20(%esp), %ebp
- xorl %ebp, %esi
 movl %ecx, %ebp
-.byte 209
-.byte 198 # roll $1 %esi
+ xorl 40(%esp), %esi
+ rorl $2, %ecx
+ xorl (%esp), %esi
 xorl %ebx, %ebp
- movl %esi, 32(%esp)
+ xorl 20(%esp), %esi
 xorl %edx, %ebp
+.byte 209
+.byte 198
+ movl %esi, 32(%esp)
 leal 3395469782(%esi,%edi,1),%esi
 movl %eax, %edi
 roll $5, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
- addl %ebp, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
+ addl %ebp, %esi
 addl %edi, %esi
- # 20_39 73
+ movl 36(%esp), %edi
- movl 44(%esp), %ebp
- xorl %ebp, %edi
- movl 4(%esp), %ebp
- xorl %ebp, %edi
- movl 24(%esp), %ebp
- xorl %ebp, %edi
 movl %eax, %ebp
-.byte 209
-.byte 199 # roll $1 %edi
+ xorl 44(%esp), %edi
+ rorl $2, %eax
+ xorl 4(%esp), %edi
 xorl %ecx, %ebp
- movl %edi, 36(%esp)
+ xorl 24(%esp), %edi
 xorl %ebx, %ebp
+.byte 209
+.byte 199
+ movl %edi, 36(%esp)
 leal 3395469782(%edi,%edx,1),%edi
 movl %esi, %edx
 roll $5, %edx
-.byte 209
-.byte 200 # rorl $1 %eax
- addl %ebp, %edx
-.byte 209
-.byte 200 # rorl $1 %eax
+ addl %ebp, %edi
 addl %edx, %edi
- # 20_39 74
+ movl 40(%esp), %edx
- movl 48(%esp), %ebp
- xorl %ebp, %edx
- movl 8(%esp), %ebp
- xorl %ebp, %edx
- movl 28(%esp), %ebp
- xorl %ebp, %edx
 movl %esi, %ebp
-.byte 209
-.byte 194 # roll $1 %edx
+ xorl 48(%esp), %edx
+ rorl $2, %esi
+ xorl 8(%esp), %edx
 xorl %eax, %ebp
- movl %edx, 40(%esp)
+ xorl 28(%esp), %edx
 xorl %ecx, %ebp
+.byte 209
+.byte 194
+ movl %edx, 40(%esp)
 leal 3395469782(%edx,%ebx,1),%edx
 movl %edi, %ebx
 roll $5, %ebx
-.byte 209
-.byte 206 # rorl $1 %esi
- addl %ebp, %ebx
-.byte 209
-.byte 206 # rorl $1 %esi
+ addl %ebp, %edx
 addl %ebx, %edx
- # 20_39 75
+ movl 44(%esp), %ebx
- movl 52(%esp), %ebp
- xorl %ebp, %ebx
- movl 12(%esp), %ebp
- xorl %ebp, %ebx
- movl 32(%esp), %ebp
- xorl %ebp, %ebx
 movl %edi, %ebp
-.byte 209
-.byte 195 # roll $1 %ebx
+ xorl 52(%esp), %ebx
+ rorl $2, %edi
+ xorl 12(%esp), %ebx
 xorl %esi, %ebp
- movl %ebx, 44(%esp)
+ xorl 32(%esp), %ebx
 xorl %eax, %ebp
+.byte 209
+.byte 195
+ movl %ebx, 44(%esp)
 leal 3395469782(%ebx,%ecx,1),%ebx
 movl %edx, %ecx
 roll $5, %ecx
-.byte 209
-.byte 207 # rorl $1 %edi
- addl %ebp, %ecx
-.byte 209
-.byte 207 # rorl $1 %edi
+ addl %ebp, %ebx
 addl %ecx, %ebx
- # 20_39 76
+ movl 48(%esp), %ecx
- movl 56(%esp), %ebp
- xorl %ebp, %ecx
- movl 16(%esp), %ebp
- xorl %ebp, %ecx
- movl 36(%esp), %ebp
- xorl %ebp, %ecx
 movl %edx, %ebp
-.byte 209
-.byte 193 # roll $1 %ecx
+ xorl 56(%esp), %ecx
+ rorl $2, %edx
+ xorl 16(%esp), %ecx
 xorl %edi, %ebp
- movl %ecx, 48(%esp)
+ xorl 36(%esp), %ecx
 xorl %esi, %ebp
+.byte 209
+.byte 193
+ movl %ecx, 48(%esp)
 leal 3395469782(%ecx,%eax,1),%ecx
 movl %ebx, %eax
 roll $5, %eax
-.byte 209
-.byte 202 # rorl $1 %edx
- addl %ebp, %eax
-.byte 209
-.byte 202 # rorl $1 %edx
+ addl %ebp, %ecx
 addl %eax, %ecx
- # 20_39 77
+ movl 52(%esp), %eax
- movl 60(%esp), %ebp
- xorl %ebp, %eax
- movl 20(%esp), %ebp
- xorl %ebp, %eax
- movl 40(%esp), %ebp
- xorl %ebp, %eax
 movl %ebx, %ebp
-.byte 209
-.byte 192 # roll $1 %eax
+ xorl 60(%esp), %eax
+ rorl $2, %ebx
+ xorl 20(%esp), %eax
 xorl %edx, %ebp
- movl %eax, 52(%esp)
+ xorl 40(%esp), %eax
 xorl %edi, %ebp
+.byte 209
+.byte 192
+ movl %eax, 52(%esp)
 leal 3395469782(%eax,%esi,1),%eax
 movl %ecx, %esi
 roll $5, %esi
-.byte 209
-.byte 203 # rorl $1 %ebx
- addl %ebp, %esi
-.byte 209
-.byte 203 # rorl $1 %ebx
+ addl %ebp, %eax
 addl %esi, %eax
- # 20_39 78
+ movl 56(%esp), %esi
- movl (%esp), %ebp
- xorl %ebp, %esi
- movl 24(%esp), %ebp
- xorl %ebp, %esi
- movl 44(%esp), %ebp
- xorl %ebp, %esi
 movl %ecx, %ebp
-.byte 209
-.byte 198 # roll $1 %esi
+ xorl (%esp), %esi
+ rorl $2, %ecx
+ xorl 24(%esp), %esi
 xorl %ebx, %ebp
- movl %esi, 56(%esp)
+ xorl 44(%esp), %esi
 xorl %edx, %ebp
+.byte 209
+.byte 198
+ movl %esi, 56(%esp)
 leal 3395469782(%esi,%edi,1),%esi
 movl %eax, %edi
 roll $5, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
- addl %ebp, %edi
-.byte 209
-.byte 201 # rorl $1 %ecx
+ addl %ebp, %esi
 addl %edi, %esi
- # 20_39 79
+ movl 60(%esp), %edi
- movl 4(%esp), %ebp
- xorl %ebp, %edi
- movl 28(%esp), %ebp
- xorl %ebp, %edi
- movl 48(%esp), %ebp
- xorl %ebp, %edi
 movl %eax, %ebp
-.byte 209
-.byte 199 # roll $1 %edi
+ xorl 4(%esp), %edi
+ rorl $2, %eax
+ xorl 28(%esp), %edi
 xorl %ecx, %ebp
- movl %edi, 60(%esp)
+ xorl 48(%esp), %edi
 xorl %ebx, %ebp
+.byte 209
+.byte 199
+ movl %edi, 60(%esp)
 leal 3395469782(%edi,%edx,1),%edi
 movl %esi, %edx
 roll $5, %edx
- addl %ebp, %edx
- movl 128(%esp), %ebp
-.byte 209
-.byte 200 # rorl $1 %eax
+ addl %ebp, %edi
 addl %edx, %edi
-.byte 209
-.byte 200 # rorl $1 %eax
- # End processing
+
+ movl 128(%esp), %ebp
 movl 12(%ebp), %edx
 addl %ecx, %edx
 movl 4(%ebp), %ecx
@@ -1921,7 +1498,7 @@ sha1_block_asm_host_order:
 movl 16(%ebp), %edi
 movl 8(%ebp), %ebx
 movl %ecx, 68(%esp)
- # First we need to setup the X array
+
 movl (%esi), %eax
 movl 4(%esi), %ecx
 movl %eax, (%esp)