Diffstat (limited to 'crypto/bn/asm')
-rw-r--r--  crypto/bn/asm/README                      |   27
-rw-r--r--  crypto/bn/asm/alpha.s                     | 3199
-rw-r--r--  crypto/bn/asm/alpha.s.works               |  533
-rw-r--r--  crypto/bn/asm/alpha.works/add.pl          |  119
-rw-r--r--  crypto/bn/asm/alpha.works/div.pl          |  144
-rw-r--r--  crypto/bn/asm/alpha.works/mul.pl          |  116
-rw-r--r--  crypto/bn/asm/alpha.works/mul_add.pl      |  120
-rw-r--r--  crypto/bn/asm/alpha.works/mul_c4.pl       |  213
-rw-r--r--  crypto/bn/asm/alpha.works/mul_c4.works.pl |   98
-rw-r--r--  crypto/bn/asm/alpha.works/mul_c8.pl       |  177
-rw-r--r--  crypto/bn/asm/alpha.works/sqr.pl          |  113
-rw-r--r--  crypto/bn/asm/alpha.works/sqr_c4.pl       |  109
-rw-r--r--  crypto/bn/asm/alpha.works/sqr_c8.pl       |  132
-rw-r--r--  crypto/bn/asm/alpha.works/sub.pl          |  108
-rw-r--r--  crypto/bn/asm/alpha/add.pl                |  118
-rw-r--r--  crypto/bn/asm/alpha/div.pl                |  144
-rw-r--r--  crypto/bn/asm/alpha/mul.pl                |  104
-rw-r--r--  crypto/bn/asm/alpha/mul_add.pl            |  123
-rw-r--r--  crypto/bn/asm/alpha/mul_c4.pl             |  215
-rw-r--r--  crypto/bn/asm/alpha/mul_c4.works.pl       |   98
-rw-r--r--  crypto/bn/asm/alpha/mul_c8.pl             |  177
-rw-r--r--  crypto/bn/asm/alpha/sqr.pl                |  113
-rw-r--r--  crypto/bn/asm/alpha/sqr_c4.pl             |  109
-rw-r--r--  crypto/bn/asm/alpha/sqr_c8.pl             |  132
-rw-r--r--  crypto/bn/asm/alpha/sub.pl                |  108
-rw-r--r--  crypto/bn/asm/bn-586.pl                   |  675
-rw-r--r--  crypto/bn/asm/bn-alpha.pl                 |  571
-rw-r--r--  crypto/bn/asm/ca.pl                       |   33
-rw-r--r--  crypto/bn/asm/co-586.pl                   |  286
-rw-r--r--  crypto/bn/asm/co-alpha.pl                 |  116
-rw-r--r--  crypto/bn/asm/ia64.S                      | 1560
-rw-r--r--  crypto/bn/asm/mips1.s                     |  539
-rw-r--r--  crypto/bn/asm/mips3.s                     | 2201
-rw-r--r--  crypto/bn/asm/pa-risc.s                   |  710
-rw-r--r--  crypto/bn/asm/pa-risc2.s                  | 1618
-rw-r--r--  crypto/bn/asm/pa-risc2W.s                 | 1605
-rw-r--r--  crypto/bn/asm/ppc.pl                      | 2078
-rw-r--r--  crypto/bn/asm/r3000.s                     |  646
-rw-r--r--  crypto/bn/asm/sparcv8.S                   | 1458
-rw-r--r--  crypto/bn/asm/sparcv8plus.S               | 1547
-rw-r--r--  crypto/bn/asm/x86.pl                      |   28
-rw-r--r--  crypto/bn/asm/x86/add.pl                  |   76
-rw-r--r--  crypto/bn/asm/x86/comba.pl                |  277
-rw-r--r--  crypto/bn/asm/x86/div.pl                  |   15
-rw-r--r--  crypto/bn/asm/x86/f                       |    3
-rw-r--r--  crypto/bn/asm/x86/mul.pl                  |   77
-rw-r--r--  crypto/bn/asm/x86/mul_add.pl              |   87
-rw-r--r--  crypto/bn/asm/x86/sqr.pl                  |   60
-rw-r--r--  crypto/bn/asm/x86/sub.pl                  |   76
-rw-r--r--  crypto/bn/asm/x86_64-gcc.c                |  597
50 files changed, 23588 insertions(+), 0 deletions(-)
diff --git a/crypto/bn/asm/README b/crypto/bn/asm/README
new file mode 100644
index 0000000..b0f3a68
--- /dev/null
+++ b/crypto/bn/asm/README
@@ -0,0 +1,27 @@
+<OBSOLETE>
+
+All the assembler in this directory is just a version of the file
+crypto/bn/bn_asm.c.
+
+Quite a few of these files are just the assembler output from gcc, since on
+quite a few machines it is about 2 times faster than the system compiler's.
+
+For the x86, I have hand-written assembler because of the bad job all
+compilers seem to do on it.  This normally gives a 2 times speedup in the
+RSA routines.
+
+For the DEC Alpha, I also hand-wrote the assembler (except the division,
+which is just the output from the C compiler pasted on the end of the file).
+On the two Alpha C compilers I had access to, it was not possible to do
+64b x 64b -> 128b calculations (both the long and the long long data types
+were 64 bits).  So the hand assembler gives access to the 128-bit result
+and a 2 times speedup :-).
+
+There are three versions of assembler for the HP PA-RISC.
+
+pa-risc.s is the original one, which works fine and was generated using gcc :-)
+
+pa-risc2W.s and pa-risc2.s are 64-bit and 32-bit PA-RISC 2.0 implementations,
+respectively, by Chris Ruemmler from HP (with some help from the HP C compiler).
+
+</OBSOLETE>
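The 64b x 64b -> 128b multiply-with-carry word loop described above is the
core of every file that follows.  As a point of reference, here is a minimal
C sketch of what bn_mul_add_words computes, assuming a 64-bit BN_ULONG and a
compiler with a 128-bit integer type (which, per the note above, the Alpha
compilers of the day lacked; the hand assembler gets the same effect from
the mulq/umulh pair).  The name bn_mul_add_words_ref is illustrative only:

    #include <stdint.h>

    typedef uint64_t BN_ULONG;

    /* Reference loop: rp[i] += ap[i] * w, rippling the carry through the
     * whole array; the final carry is returned.  On Alpha, mulq yields the
     * low 64 bits of the 128-bit product and umulh the high 64 bits. */
    BN_ULONG bn_mul_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
                                  int num, BN_ULONG w)
    {
        BN_ULONG carry = 0;

        while (num-- > 0) {
            /* 64x64->128 multiply-accumulate; the sum cannot overflow
             * 128 bits even with rp[i] and carry at their maxima. */
            unsigned __int128 t = (unsigned __int128)*ap++ * w + *rp + carry;
            *rp++ = (BN_ULONG)t;           /* low half back into r[] */
            carry = (BN_ULONG)(t >> 64);   /* high half carries onward */
        }
        return carry;
    }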
diff --git a/crypto/bn/asm/alpha.s b/crypto/bn/asm/alpha.s
new file mode 100644
index 0000000..555ff0b
--- /dev/null
+++ b/crypto/bn/asm/alpha.s
@@ -0,0 +1,3199 @@
+ # DEC Alpha assembler
+ # The bn_div_words is actually gcc output, but the other parts are hand done.
+ # Thanks to tzeruch@ceddec.com for sending me the gcc output for
+ # bn_div_words.
+ # I've gone back and re-done most of the routines.
+ # The key thing to remember for the 164 CPU is that while a
+ # multiply operation takes 8 cycles, another one can only be issued
+ # after 4 cycles have elapsed.  I've made modifications to help
+ # improve this.  Also, normally, the result of a ld instruction
+ # will not be available for about 3 cycles.
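+ #
+ # For example, two multiplies with independent operands can overlap:
+ #     mulq $20,$19,$5 # issues at cycle 0; $5 ready at cycle 8
+ #     mulq $21,$19,$6 # issues at cycle 4; $6 ready at cycle 12
+ # so the unrolled loops below interleave loads and adds between the
+ # multiplies to hide both latencies.
+ #
+ # bn_mul_add_words(rp, ap, num, w): rp[i] += low 64 bits of ap[i]*w,
+ # with carry propagation; returns the final carry.  Arguments arrive
+ # in $16..$19 per the Alpha calling convention.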
+ .file 1 "bn_asm.c"
+ .set noat
+gcc2_compiled.:
+__gnu_compiled_c:
+ .text
+ .align 3
+ .globl bn_mul_add_words
+ .ent bn_mul_add_words
+bn_mul_add_words:
+bn_mul_add_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+ .align 5
+ subq $18,4,$18
+ bis $31,$31,$0
+ blt $18,$43 # if we are -1, -2, -3 or -4 goto tail code
+ ldq $20,0($17) # 1 1
+ ldq $1,0($16) # 1 1
+ .align 3
+$42:
+ mulq $20,$19,$5 # 1 2 1 ######
+ ldq $21,8($17) # 2 1
+ ldq $2,8($16) # 2 1
+ umulh $20,$19,$20 # 1 2 ######
+ ldq $27,16($17) # 3 1
+ ldq $3,16($16) # 3 1
+ mulq $21,$19,$6 # 2 2 1 ######
+ ldq $28,24($17) # 4 1
+ addq $1,$5,$1 # 1 2 2
+ ldq $4,24($16) # 4 1
+ umulh $21,$19,$21 # 2 2 ######
+ cmpult $1,$5,$22 # 1 2 3 1
+ addq $20,$22,$20 # 1 3 1
+ addq $1,$0,$1 # 1 2 3 1
+ mulq $27,$19,$7 # 3 2 1 ######
+ cmpult $1,$0,$0 # 1 2 3 2
+ addq $2,$6,$2 # 2 2 2
+ addq $20,$0,$0 # 1 3 2
+ cmpult $2,$6,$23 # 2 2 3 1
+ addq $21,$23,$21 # 2 3 1
+ umulh $27,$19,$27 # 3 2 ######
+ addq $2,$0,$2 # 2 2 3 1
+ cmpult $2,$0,$0 # 2 2 3 2
+ subq $18,4,$18
+ mulq $28,$19,$8 # 4 2 1 ######
+ addq $21,$0,$0 # 2 3 2
+ addq $3,$7,$3 # 3 2 2
+ addq $16,32,$16
+ cmpult $3,$7,$24 # 3 2 3 1
+ stq $1,-32($16) # 1 2 4
+ umulh $28,$19,$28 # 4 2 ######
+ addq $27,$24,$27 # 3 3 1
+ addq $3,$0,$3 # 3 2 3 1
+ stq $2,-24($16) # 2 2 4
+ cmpult $3,$0,$0 # 3 2 3 2
+ stq $3,-16($16) # 3 2 4
+ addq $4,$8,$4 # 4 2 2
+ addq $27,$0,$0 # 3 3 2
+ cmpult $4,$8,$25 # 4 2 3 1
+ addq $17,32,$17
+ addq $28,$25,$28 # 4 3 1
+ addq $4,$0,$4 # 4 2 3 1
+ cmpult $4,$0,$0 # 4 2 3 2
+ stq $4,-8($16) # 4 2 4
+ addq $28,$0,$0 # 4 3 2
+ blt $18,$43
+
+ ldq $20,0($17) # 1 1
+ ldq $1,0($16) # 1 1
+
+ br $42
+
+ .align 4
+$45:
+ ldq $20,0($17) # 4 1
+ ldq $1,0($16) # 4 1
+ mulq $20,$19,$5 # 4 2 1
+ subq $18,1,$18
+ addq $16,8,$16
+ addq $17,8,$17
+ umulh $20,$19,$20 # 4 2
+ addq $1,$5,$1 # 4 2 2
+ cmpult $1,$5,$22 # 4 2 3 1
+ addq $20,$22,$20 # 4 3 1
+ addq $1,$0,$1 # 4 2 3 1
+ cmpult $1,$0,$0 # 4 2 3 2
+ addq $20,$0,$0 # 4 3 2
+ stq $1,-8($16) # 4 2 4
+ bgt $18,$45
+ ret $31,($26),1 # else exit
+
+ .align 4
+$43:
+ addq $18,4,$18
+ bgt $18,$45 # goto tail code
+ ret $31,($26),1 # else exit
+
+ .end bn_mul_add_words
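+
+ # bn_mul_words(rp, ap, num, w): rp[i] gets the low 64 bits of ap[i]*w
+ # plus the carry from the previous word; returns the final carry.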
+ .align 3
+ .globl bn_mul_words
+ .ent bn_mul_words
+bn_mul_words:
+bn_mul_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+ .align 5
+ subq $18,4,$18
+ bis $31,$31,$0
+ blt $18,$143 # if we are -1, -2, -3 or -4 goto tail code
+ ldq $20,0($17) # 1 1
+ .align 3
+$142:
+
+ mulq $20,$19,$5 # 1 2 1 #####
+ ldq $21,8($17) # 2 1
+ ldq $27,16($17) # 3 1
+ umulh $20,$19,$20 # 1 2 #####
+ ldq $28,24($17) # 4 1
+ mulq $21,$19,$6 # 2 2 1 #####
+ addq $5,$0,$5 # 1 2 3 1
+ subq $18,4,$18
+ cmpult $5,$0,$0 # 1 2 3 2
+ umulh $21,$19,$21 # 2 2 #####
+ addq $20,$0,$0 # 1 3 2
+ addq $17,32,$17
+ addq $6,$0,$6 # 2 2 3 1
+ mulq $27,$19,$7 # 3 2 1 #####
+ cmpult $6,$0,$0 # 2 2 3 2
+ addq $21,$0,$0 # 2 3 2
+ addq $16,32,$16
+ umulh $27,$19,$27 # 3 2 #####
+ stq $5,-32($16) # 1 2 4
+ mulq $28,$19,$8 # 4 2 1 #####
+ addq $7,$0,$7 # 3 2 3 1
+ stq $6,-24($16) # 2 2 4
+ cmpult $7,$0,$0 # 3 2 3 2
+ umulh $28,$19,$28 # 4 2 #####
+ addq $27,$0,$0 # 3 3 2
+ stq $7,-16($16) # 3 2 4
+ addq $8,$0,$8 # 4 2 3 1
+ cmpult $8,$0,$0 # 4 2 3 2
+
+ addq $28,$0,$0 # 4 3 2
+
+ stq $8,-8($16) # 4 2 4
+
+ blt $18,$143
+
+ ldq $20,0($17) # 1 1
+
+ br $142
+
+ .align 4
+$145:
+ ldq $20,0($17) # 4 1
+ mulq $20,$19,$5 # 4 2 1
+ subq $18,1,$18
+ umulh $20,$19,$20 # 4 2
+ addq $5,$0,$5 # 4 2 3 1
+ addq $16,8,$16
+ cmpult $5,$0,$0 # 4 2 3 2
+ addq $17,8,$17
+ addq $20,$0,$0 # 4 3 2
+ stq $5,-8($16) # 4 2 4
+
+ bgt $18,$145
+ ret $31,($26),1 # else exit
+
+ .align 4
+$143:
+ addq $18,4,$18
+ bgt $18,$145 # goto tail code
+ ret $31,($26),1 # else exit
+
+ .end bn_mul_words
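+
+ # bn_sqr_words(rp, ap, num): rp[2*i] and rp[2*i+1] get the low and
+ # high 64 bits of ap[i]*ap[i]; no carry propagates between words.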
+ .align 3
+ .globl bn_sqr_words
+ .ent bn_sqr_words
+bn_sqr_words:
+bn_sqr_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ subq $18,4,$18
+ blt $18,$543 # if we are -1, -2, -3 or -4 goto tail code
+ ldq $20,0($17) # 1 1
+ .align 3
+$542:
+ mulq $20,$20,$5 ######
+ ldq $21,8($17) # 1 1
+ subq $18,4,$18
+ umulh $20,$20,$1 ######
+ ldq $27,16($17) # 1 1
+ mulq $21,$21,$6 ######
+ ldq $28,24($17) # 1 1
+ stq $5,0($16) # r[0]
+ umulh $21,$21,$2 ######
+ stq $1,8($16) # r[1]
+ mulq $27,$27,$7 ######
+ stq $6,16($16) # r[2]
+ umulh $27,$27,$3 ######
+ stq $2,24($16) # r[3]
+ mulq $28,$28,$8 ######
+ stq $7,32($16) # r[4]
+ umulh $28,$28,$4 ######
+ stq $3,40($16) # r[5]
+
+ addq $16,64,$16
+ addq $17,32,$17
+ stq $8,-16($16) # r[6]
+ stq $4,-8($16) # r[7]
+
+ blt $18,$543
+ ldq $20,0($17) # 1 1
+ br $542
+
+$442:
+ ldq $20,0($17) # a[0]
+ mulq $20,$20,$5 # a[0]*a[0] low half
+ addq $16,16,$16
+ addq $17,8,$17
+ subq $18,1,$18
+ umulh $20,$20,$1 # a[0]*a[0] high half
+ stq $5,-16($16) # r[0]
+ stq $1,-8($16) # r[1]
+
+ bgt $18,$442
+ ret $31,($26),1 # else exit
+
+ .align 4
+$543:
+ addq $18,4,$18
+ bgt $18,$442 # goto tail code
+ ret $31,($26),1 # else exit
+ .end bn_sqr_words
+
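+ # bn_add_words(rp, ap, bp, num): rp[i] = ap[i] + bp[i] with carry
+ # propagation across the whole array; returns the final carry.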
+ .align 3
+ .globl bn_add_words
+ .ent bn_add_words
+bn_add_words:
+bn_add_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ subq $19,4,$19
+ bis $31,$31,$0 # carry = 0
+ blt $19,$900
+ ldq $5,0($17) # a[0]
+ ldq $1,0($18) # b[0]
+ .align 3
+$901:
+ addq $1,$5,$1 # r=a+b;
+ ldq $6,8($17) # a[1]
+ cmpult $1,$5,$22 # did we overflow?
+ ldq $2,8($18) # b[1]
+ addq $1,$0,$1 # c+= overflow
+ ldq $7,16($17) # a[2]
+ cmpult $1,$0,$0 # overflow?
+ ldq $3,16($18) # b[2]
+ addq $0,$22,$0
+ ldq $8,24($17) # a[3]
+ addq $2,$6,$2 # r=a+b;
+ ldq $4,24($18) # b[3]
+ cmpult $2,$6,$23 # did we overflow?
+ addq $3,$7,$3 # r=a+b;
+ addq $2,$0,$2 # c+= overflow
+ cmpult $3,$7,$24 # did we overflow?
+ cmpult $2,$0,$0 # overflow?
+ addq $4,$8,$4 # r=a+b;
+ addq $0,$23,$0
+ cmpult $4,$8,$25 # did we overflow?
+ addq $3,$0,$3 # c+= overflow
+ stq $1,0($16) # r[0]=c
+ cmpult $3,$0,$0 # overflow?
+ stq $2,8($16) # r[1]=c
+ addq $0,$24,$0
+ stq $3,16($16) # r[2]=c
+ addq $4,$0,$4 # c+= overflow
+ subq $19,4,$19 # loop--
+ cmpult $4,$0,$0 # overflow?
+ addq $17,32,$17 # a++
+ addq $0,$25,$0
+ stq $4,24($16) # r[3]=c
+ addq $18,32,$18 # b++
+ addq $16,32,$16 # r++
+
+ blt $19,$900
+ ldq $5,0($17) # a[0]
+ ldq $1,0($18) # b[0]
+ br $901
+ .align 4
+$945:
+ ldq $5,0($17) # a[0]
+ ldq $1,0($18) # b[0]
+ addq $1,$5,$1 # r=a+b;
+ subq $19,1,$19 # loop--
+ addq $1,$0,$1 # c+= overflow
+ addq $17,8,$17 # a++
+ cmpult $1,$5,$22 # did we overflow?
+ cmpult $1,$0,$0 # overflow?
+ addq $18,8,$18 # b++
+ stq $1,0($16) # r[0]=c
+ addq $0,$22,$0
+ addq $16,8,$16 # r++
+
+ bgt $19,$945
+ ret $31,($26),1 # else exit
+
+$900:
+ addq $19,4,$19
+ bgt $19,$945 # goto tail code
+ ret $31,($26),1 # else exit
+ .end bn_add_words
+
+ #
+ # What follows was taken directly from the C compiler with a few
+ # hacks to redo the labels.
+ #
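+ # bn_div_words(h, l, d) returns the 64-bit quotient of the 128-bit
+ # value h:l divided by d; the quotient is assembled 32 bits at a
+ # time (two passes, counted down in $12).
+ #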
+.text
+ .align 3
+ .globl bn_div_words
+ .ent bn_div_words
+bn_div_words:
+ ldgp $29,0($27)
+bn_div_words..ng:
+ lda $30,-48($30)
+ .frame $30,48,$26,0
+ stq $26,0($30)
+ stq $9,8($30)
+ stq $10,16($30)
+ stq $11,24($30)
+ stq $12,32($30)
+ stq $13,40($30)
+ .mask 0x4003e00,-48
+ .prologue 1
+ bis $16,$16,$9
+ bis $17,$17,$10
+ bis $18,$18,$11
+ bis $31,$31,$13
+ bis $31,2,$12
+ bne $11,$119
+ lda $0,-1
+ br $31,$136
+ .align 4
+$119:
+ bis $11,$11,$16
+ jsr $26,BN_num_bits_word
+ ldgp $29,0($26)
+ subq $0,64,$1
+ beq $1,$120
+ bis $31,1,$1
+ sll $1,$0,$1
+ cmpule $9,$1,$1
+ bne $1,$120
+ # lda $16,_IO_stderr_
+ # lda $17,$C32
+ # bis $0,$0,$18
+ # jsr $26,fprintf
+ # ldgp $29,0($26)
+ jsr $26,abort
+ ldgp $29,0($26)
+ .align 4
+$120:
+ bis $31,64,$3
+ cmpult $9,$11,$2
+ subq $3,$0,$1
+ addl $1,$31,$0
+ subq $9,$11,$1
+ cmoveq $2,$1,$9
+ beq $0,$122
+ zapnot $0,15,$2
+ subq $3,$0,$1
+ sll $11,$2,$11
+ sll $9,$2,$3
+ srl $10,$1,$1
+ sll $10,$2,$10
+ bis $3,$1,$9
+$122:
+ srl $11,32,$5
+ zapnot $11,15,$6
+ lda $7,-1
+ .align 5
+$123:
+ srl $9,32,$1
+ subq $1,$5,$1
+ bne $1,$126
+ zapnot $7,15,$27
+ br $31,$127
+ .align 4
+$126:
+ bis $9,$9,$24
+ bis $5,$5,$25
+ divqu $24,$25,$27
+$127:
+ srl $10,32,$4
+ .align 5
+$128:
+ mulq $27,$5,$1
+ subq $9,$1,$3
+ zapnot $3,240,$1
+ bne $1,$129
+ mulq $6,$27,$2
+ sll $3,32,$1
+ addq $1,$4,$1
+ cmpule $2,$1,$2
+ bne $2,$129
+ subq $27,1,$27
+ br $31,$128
+ .align 4
+$129:
+ mulq $27,$6,$1
+ mulq $27,$5,$4
+ srl $1,32,$3
+ sll $1,32,$1
+ addq $4,$3,$4
+ cmpult $10,$1,$2
+ subq $10,$1,$10
+ addq $2,$4,$2
+ cmpult $9,$2,$1
+ bis $2,$2,$4
+ beq $1,$134
+ addq $9,$11,$9
+ subq $27,1,$27
+$134:
+ subl $12,1,$12
+ subq $9,$4,$9
+ beq $12,$124
+ sll $27,32,$13
+ sll $9,32,$2
+ srl $10,32,$1
+ sll $10,32,$10
+ bis $2,$1,$9
+ br $31,$123
+ .align 4
+$124:
+ bis $13,$27,$0
+$136:
+ ldq $26,0($30)
+ ldq $9,8($30)
+ ldq $10,16($30)
+ ldq $11,24($30)
+ ldq $12,32($30)
+ ldq $13,40($30)
+ addq $30,48,$30
+ ret $31,($26),1
+ .end bn_div_words
+
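+ # bn_sub_words(rp, ap, bp, num): rp[i] = ap[i] - bp[i] with borrow
+ # propagation across the whole array; returns the final borrow.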
+ .set noat
+ .text
+ .align 3
+ .globl bn_sub_words
+ .ent bn_sub_words
+bn_sub_words:
+bn_sub_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ subq $19, 4, $19
+ bis $31, $31, $0
+ blt $19, $100
+ ldq $1, 0($17)
+ ldq $2, 0($18)
+$101:
+ ldq $3, 8($17)
+ cmpult $1, $2, $4
+ ldq $5, 8($18)
+ subq $1, $2, $1
+ ldq $6, 16($17)
+ cmpult $1, $0, $2
+ ldq $7, 16($18)
+ subq $1, $0, $23
+ ldq $8, 24($17)
+ addq $2, $4, $0
+ cmpult $3, $5, $24
+ subq $3, $5, $3
+ ldq $22, 24($18)
+ cmpult $3, $0, $5
+ subq $3, $0, $25
+ addq $5, $24, $0
+ cmpult $6, $7, $27
+ subq $6, $7, $6
+ stq $23, 0($16)
+ cmpult $6, $0, $7
+ subq $6, $0, $28
+ addq $7, $27, $0
+ cmpult $8, $22, $21
+ subq $8, $22, $8
+ stq $25, 8($16)
+ cmpult $8, $0, $22
+ subq $8, $0, $20
+ addq $22, $21, $0
+ stq $28, 16($16)
+ subq $19, 4, $19
+ stq $20, 24($16)
+ addq $17, 32, $17
+ addq $18, 32, $18
+ addq $16, 32, $16
+ blt $19, $100
+ ldq $1, 0($17)
+ ldq $2, 0($18)
+ br $101
+$102:
+ ldq $1, 0($17)
+ ldq $2, 0($18)
+ cmpult $1, $2, $27
+ subq $1, $2, $1
+ cmpult $1, $0, $2
+ subq $1, $0, $1
+ stq $1, 0($16)
+ addq $2, $27, $0
+ addq $17, 8, $17
+ addq $18, 8, $18
+ addq $16, 8, $16
+ subq $19, 1, $19
+ bgt $19, $102
+ ret $31,($26),1
+$100:
+ addq $19, 4, $19
+ bgt $19, $102
+$103:
+ ret $31,($26),1
+ .end bn_sub_words
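+
+ # bn_mul_comba4(r, a, b): Comba (column-by-column) multiplication of
+ # two 4-word numbers; the 8-word product goes to r[0..7].  Partial
+ # products come from mulq/umulh pairs, and carries are detected with
+ # cmpult after each addition.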
+ .text
+ .align 3
+ .globl bn_mul_comba4
+ .ent bn_mul_comba4
+bn_mul_comba4:
+bn_mul_comba4..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ ldq $0, 0($17)
+ ldq $1, 0($18)
+ ldq $2, 8($17)
+ ldq $3, 8($18)
+ ldq $4, 16($17)
+ ldq $5, 16($18)
+ ldq $6, 24($17)
+ ldq $7, 24($18)
+ bis $31, $31, $23
+ mulq $0, $1, $8
+ umulh $0, $1, $22
+ stq $8, 0($16)
+ bis $31, $31, $8
+ mulq $0, $3, $24
+ umulh $0, $3, $25
+ addq $22, $24, $22
+ cmpult $22, $24, $27
+ addq $27, $25, $25
+ addq $23, $25, $23
+ cmpult $23, $25, $28
+ addq $8, $28, $8
+ mulq $2, $1, $21
+ umulh $2, $1, $20
+ addq $22, $21, $22
+ cmpult $22, $21, $19
+ addq $19, $20, $20
+ addq $23, $20, $23
+ cmpult $23, $20, $17
+ addq $8, $17, $8
+ stq $22, 8($16)
+ bis $31, $31, $22
+ mulq $2, $3, $18
+ umulh $2, $3, $24
+ addq $23, $18, $23
+ cmpult $23, $18, $27
+ addq $27, $24, $24
+ addq $8, $24, $8
+ cmpult $8, $24, $25
+ addq $22, $25, $22
+ mulq $0, $5, $28
+ umulh $0, $5, $21
+ addq $23, $28, $23
+ cmpult $23, $28, $19
+ addq $19, $21, $21
+ addq $8, $21, $8
+ cmpult $8, $21, $20
+ addq $22, $20, $22
+ mulq $4, $1, $17
+ umulh $4, $1, $18
+ addq $23, $17, $23
+ cmpult $23, $17, $27
+ addq $27, $18, $18
+ addq $8, $18, $8
+ cmpult $8, $18, $24
+ addq $22, $24, $22
+ stq $23, 16($16)
+ bis $31, $31, $23
+ mulq $0, $7, $25
+ umulh $0, $7, $28
+ addq $8, $25, $8
+ cmpult $8, $25, $19
+ addq $19, $28, $28
+ addq $22, $28, $22
+ cmpult $22, $28, $21
+ addq $23, $21, $23
+ mulq $2, $5, $20
+ umulh $2, $5, $17
+ addq $8, $20, $8
+ cmpult $8, $20, $27
+ addq $27, $17, $17
+ addq $22, $17, $22
+ cmpult $22, $17, $18
+ addq $23, $18, $23
+ mulq $4, $3, $24
+ umulh $4, $3, $25
+ addq $8, $24, $8
+ cmpult $8, $24, $19
+ addq $19, $25, $25
+ addq $22, $25, $22
+ cmpult $22, $25, $28
+ addq $23, $28, $23
+ mulq $6, $1, $21
+ umulh $6, $1, $0
+ addq $8, $21, $8
+ cmpult $8, $21, $20
+ addq $20, $0, $0
+ addq $22, $0, $22
+ cmpult $22, $0, $27
+ addq $23, $27, $23
+ stq $8, 24($16)
+ bis $31, $31, $8
+ mulq $2, $7, $17
+ umulh $2, $7, $18
+ addq $22, $17, $22
+ cmpult $22, $17, $24
+ addq $24, $18, $18
+ addq $23, $18, $23
+ cmpult $23, $18, $19
+ addq $8, $19, $8
+ mulq $4, $5, $25
+ umulh $4, $5, $28
+ addq $22, $25, $22
+ cmpult $22, $25, $21
+ addq $21, $28, $28
+ addq $23, $28, $23
+ cmpult $23, $28, $20
+ addq $8, $20, $8
+ mulq $6, $3, $0
+ umulh $6, $3, $27
+ addq $22, $0, $22
+ cmpult $22, $0, $1
+ addq $1, $27, $27
+ addq $23, $27, $23
+ cmpult $23, $27, $17
+ addq $8, $17, $8
+ stq $22, 32($16)
+ bis $31, $31, $22
+ mulq $4, $7, $24
+ umulh $4, $7, $18
+ addq $23, $24, $23
+ cmpult $23, $24, $19
+ addq $19, $18, $18
+ addq $8, $18, $8
+ cmpult $8, $18, $2
+ addq $22, $2, $22
+ mulq $6, $5, $25
+ umulh $6, $5, $21
+ addq $23, $25, $23
+ cmpult $23, $25, $28
+ addq $28, $21, $21
+ addq $8, $21, $8
+ cmpult $8, $21, $20
+ addq $22, $20, $22
+ stq $23, 40($16)
+ bis $31, $31, $23
+ mulq $6, $7, $0
+ umulh $6, $7, $1
+ addq $8, $0, $8
+ cmpult $8, $0, $27
+ addq $27, $1, $1
+ addq $22, $1, $22
+ cmpult $22, $1, $17
+ addq $23, $17, $23
+ stq $8, 48($16)
+ stq $22, 56($16)
+ ret $31,($26),1
+ .end bn_mul_comba4
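+
+ # bn_mul_comba8(r, a, b): Comba multiplication of two 8-word numbers;
+ # the 16-word product goes to r[0..15].  This compiler-generated
+ # version splits each 64-bit word into 32-bit halves (zapnot/srl)
+ # and forms the partial products with four 32x32-bit mulq operations
+ # rather than a mulq/umulh pair.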
+ .text
+ .align 3
+ .globl bn_mul_comba8
+ .ent bn_mul_comba8
+bn_mul_comba8:
+bn_mul_comba8..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+ ldq $1, 0($17)
+ ldq $2, 0($18)
+ zapnot $1, 15, $7
+ srl $2, 32, $8
+ mulq $8, $7, $22
+ srl $1, 32, $6
+ zapnot $2, 15, $5
+ mulq $5, $6, $4
+ mulq $7, $5, $24
+ addq $22, $4, $22
+ cmpult $22, $4, $1
+ mulq $6, $8, $3
+ beq $1, $173
+ bis $31, 1, $1
+ sll $1, 32, $1
+ addq $3, $1, $3
+$173:
+ sll $22, 32, $4
+ addq $24, $4, $24
+ stq $24, 0($16)
+ ldq $2, 0($17)
+ ldq $1, 8($18)
+ zapnot $2, 15, $7
+ srl $1, 32, $8
+ mulq $8, $7, $25
+ zapnot $1, 15, $5
+ mulq $7, $5, $0
+ srl $2, 32, $6
+ mulq $5, $6, $23
+ mulq $6, $8, $6
+ srl $22, 32, $1
+ cmpult $24, $4, $2
+ addq $3, $1, $3
+ addq $2, $3, $22
+ addq $25, $23, $25
+ cmpult $25, $23, $1
+ bis $31, 1, $2
+ beq $1, $177
+ sll $2, 32, $1
+ addq $6, $1, $6
+$177:
+ sll $25, 32, $23
+ ldq $1, 0($18)
+ addq $0, $23, $0
+ bis $0, $0, $7
+ ldq $3, 8($17)
+ addq $22, $7, $22
+ srl $1, 32, $8
+ cmpult $22, $7, $4
+ zapnot $3, 15, $7
+ mulq $8, $7, $28
+ zapnot $1, 15, $5
+ mulq $7, $5, $21
+ srl $25, 32, $1
+ cmpult $0, $23, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $4, $6, $24
+ srl $3, 32, $6
+ mulq $5, $6, $2
+ mulq $6, $8, $6
+ addq $28, $2, $28
+ cmpult $28, $2, $1
+ bis $31, 1, $2
+ beq $1, $181
+ sll $2, 32, $1
+ addq $6, $1, $6
+$181:
+ sll $28, 32, $2
+ addq $21, $2, $21
+ bis $21, $21, $7
+ addq $22, $7, $22
+ stq $22, 8($16)
+ ldq $3, 16($17)
+ ldq $1, 0($18)
+ cmpult $22, $7, $4
+ zapnot $3, 15, $7
+ srl $1, 32, $8
+ mulq $8, $7, $22
+ zapnot $1, 15, $5
+ mulq $7, $5, $20
+ srl $28, 32, $1
+ cmpult $21, $2, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $4, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $23
+ srl $3, 32, $6
+ mulq $5, $6, $2
+ mulq $6, $8, $6
+ addq $22, $2, $22
+ cmpult $22, $2, $1
+ bis $31, 1, $2
+ beq $1, $185
+ sll $2, 32, $1
+ addq $6, $1, $6
+$185:
+ sll $22, 32, $2
+ ldq $1, 8($18)
+ addq $20, $2, $20
+ bis $20, $20, $7
+ ldq $4, 8($17)
+ addq $24, $7, $24
+ srl $1, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $25
+ zapnot $1, 15, $5
+ mulq $7, $5, $0
+ srl $22, 32, $1
+ cmpult $20, $2, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $22
+ srl $4, 32, $6
+ mulq $5, $6, $5
+ bis $31, 1, $21
+ addq $25, $5, $25
+ cmpult $25, $5, $1
+ mulq $6, $8, $6
+ beq $1, $189
+ sll $21, 32, $1
+ addq $6, $1, $6
+$189:
+ sll $25, 32, $5
+ ldq $2, 16($18)
+ addq $0, $5, $0
+ bis $0, $0, $7
+ ldq $4, 0($17)
+ addq $24, $7, $24
+ srl $2, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $25, 32, $1
+ addq $6, $1, $6
+ cmpult $0, $5, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $2
+ addq $1, $22, $22
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $193
+ sll $21, 32, $1
+ addq $6, $1, $6
+$193:
+ sll $28, 32, $25
+ addq $2, $25, $2
+ bis $2, $2, $7
+ addq $24, $7, $24
+ stq $24, 16($16)
+ ldq $4, 0($17)
+ ldq $5, 24($18)
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ srl $5, 32, $8
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ cmpult $2, $25, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ zapnot $5, 15, $5
+ mulq $5, $6, $24
+ mulq $7, $5, $2
+ addq $1, $22, $22
+ addq $0, $24, $0
+ cmpult $0, $24, $1
+ mulq $6, $8, $6
+ beq $1, $197
+ sll $21, 32, $1
+ addq $6, $1, $6
+$197:
+ sll $0, 32, $24
+ ldq $1, 16($18)
+ addq $2, $24, $2
+ bis $2, $2, $7
+ ldq $4, 8($17)
+ addq $23, $7, $23
+ srl $1, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $25
+ zapnot $1, 15, $5
+ mulq $7, $5, $21
+ srl $0, 32, $1
+ cmpult $2, $24, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $24
+ srl $4, 32, $6
+ mulq $5, $6, $5
+ bis $31, 1, $20
+ addq $25, $5, $25
+ cmpult $25, $5, $1
+ mulq $6, $8, $6
+ beq $1, $201
+ sll $20, 32, $1
+ addq $6, $1, $6
+$201:
+ sll $25, 32, $5
+ ldq $2, 8($18)
+ addq $21, $5, $21
+ bis $21, $21, $7
+ ldq $4, 16($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $25, 32, $1
+ addq $6, $1, $6
+ cmpult $21, $5, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $24, $24
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $205
+ sll $20, 32, $1
+ addq $6, $1, $6
+$205:
+ sll $28, 32, $25
+ ldq $2, 0($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 24($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $2
+ addq $1, $24, $24
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $209
+ sll $20, 32, $1
+ addq $6, $1, $6
+$209:
+ sll $0, 32, $25
+ addq $2, $25, $2
+ bis $2, $2, $7
+ addq $23, $7, $23
+ stq $23, 24($16)
+ ldq $4, 32($17)
+ ldq $5, 0($18)
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ srl $5, 32, $8
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ cmpult $2, $25, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ zapnot $5, 15, $5
+ mulq $5, $6, $23
+ mulq $7, $5, $2
+ addq $1, $24, $24
+ addq $28, $23, $28
+ cmpult $28, $23, $1
+ mulq $6, $8, $6
+ beq $1, $213
+ sll $20, 32, $1
+ addq $6, $1, $6
+$213:
+ sll $28, 32, $23
+ ldq $1, 8($18)
+ addq $2, $23, $2
+ bis $2, $2, $7
+ ldq $4, 24($17)
+ addq $22, $7, $22
+ srl $1, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $25
+ zapnot $1, 15, $5
+ mulq $7, $5, $0
+ srl $28, 32, $1
+ cmpult $2, $23, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $23
+ srl $4, 32, $6
+ mulq $5, $6, $5
+ bis $31, 1, $21
+ addq $25, $5, $25
+ cmpult $25, $5, $1
+ mulq $6, $8, $6
+ beq $1, $217
+ sll $21, 32, $1
+ addq $6, $1, $6
+$217:
+ sll $25, 32, $5
+ ldq $2, 16($18)
+ addq $0, $5, $0
+ bis $0, $0, $7
+ ldq $4, 16($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $25, 32, $1
+ addq $6, $1, $6
+ cmpult $0, $5, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $23, $23
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $221
+ sll $21, 32, $1
+ addq $6, $1, $6
+$221:
+ sll $28, 32, $25
+ ldq $2, 24($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 8($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $23, $23
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $225
+ sll $21, 32, $1
+ addq $6, $1, $6
+$225:
+ sll $0, 32, $25
+ ldq $2, 32($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 0($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $2
+ addq $1, $23, $23
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $229
+ sll $21, 32, $1
+ addq $6, $1, $6
+$229:
+ sll $28, 32, $25
+ addq $2, $25, $2
+ bis $2, $2, $7
+ addq $22, $7, $22
+ stq $22, 32($16)
+ ldq $4, 0($17)
+ ldq $5, 40($18)
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ srl $5, 32, $8
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ cmpult $2, $25, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ zapnot $5, 15, $5
+ mulq $5, $6, $22
+ mulq $7, $5, $2
+ addq $1, $23, $23
+ addq $0, $22, $0
+ cmpult $0, $22, $1
+ mulq $6, $8, $6
+ beq $1, $233
+ sll $21, 32, $1
+ addq $6, $1, $6
+$233:
+ sll $0, 32, $22
+ ldq $1, 32($18)
+ addq $2, $22, $2
+ bis $2, $2, $7
+ ldq $4, 8($17)
+ addq $24, $7, $24
+ srl $1, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $25
+ zapnot $1, 15, $5
+ mulq $7, $5, $21
+ srl $0, 32, $1
+ cmpult $2, $22, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $22
+ srl $4, 32, $6
+ mulq $5, $6, $5
+ bis $31, 1, $20
+ addq $25, $5, $25
+ cmpult $25, $5, $1
+ mulq $6, $8, $6
+ beq $1, $237
+ sll $20, 32, $1
+ addq $6, $1, $6
+$237:
+ sll $25, 32, $5
+ ldq $2, 24($18)
+ addq $21, $5, $21
+ bis $21, $21, $7
+ ldq $4, 16($17)
+ addq $24, $7, $24
+ srl $2, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $25, 32, $1
+ addq $6, $1, $6
+ cmpult $21, $5, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $22, $22
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $241
+ sll $20, 32, $1
+ addq $6, $1, $6
+$241:
+ sll $28, 32, $25
+ ldq $2, 16($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 24($17)
+ addq $24, $7, $24
+ srl $2, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $22, $22
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $245
+ sll $20, 32, $1
+ addq $6, $1, $6
+$245:
+ sll $0, 32, $25
+ ldq $2, 8($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 32($17)
+ addq $24, $7, $24
+ srl $2, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $22, $22
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $249
+ sll $20, 32, $1
+ addq $6, $1, $6
+$249:
+ sll $28, 32, $25
+ ldq $2, 0($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 40($17)
+ addq $24, $7, $24
+ srl $2, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $2
+ addq $1, $22, $22
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $253
+ sll $20, 32, $1
+ addq $6, $1, $6
+$253:
+ sll $0, 32, $25
+ addq $2, $25, $2
+ bis $2, $2, $7
+ addq $24, $7, $24
+ stq $24, 40($16)
+ ldq $4, 48($17)
+ ldq $5, 0($18)
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ srl $5, 32, $8
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ cmpult $2, $25, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ zapnot $5, 15, $5
+ mulq $5, $6, $24
+ mulq $7, $5, $2
+ addq $1, $22, $22
+ addq $28, $24, $28
+ cmpult $28, $24, $1
+ mulq $6, $8, $6
+ beq $1, $257
+ sll $20, 32, $1
+ addq $6, $1, $6
+$257:
+ sll $28, 32, $24
+ ldq $1, 8($18)
+ addq $2, $24, $2
+ bis $2, $2, $7
+ ldq $4, 40($17)
+ addq $23, $7, $23
+ srl $1, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $25
+ zapnot $1, 15, $5
+ mulq $7, $5, $0
+ srl $28, 32, $1
+ cmpult $2, $24, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $24
+ srl $4, 32, $6
+ mulq $5, $6, $5
+ bis $31, 1, $21
+ addq $25, $5, $25
+ cmpult $25, $5, $1
+ mulq $6, $8, $6
+ beq $1, $261
+ sll $21, 32, $1
+ addq $6, $1, $6
+$261:
+ sll $25, 32, $5
+ ldq $2, 16($18)
+ addq $0, $5, $0
+ bis $0, $0, $7
+ ldq $4, 32($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $25, 32, $1
+ addq $6, $1, $6
+ cmpult $0, $5, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $24, $24
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $265
+ sll $21, 32, $1
+ addq $6, $1, $6
+$265:
+ sll $28, 32, $25
+ ldq $2, 24($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 24($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $24, $24
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $269
+ sll $21, 32, $1
+ addq $6, $1, $6
+$269:
+ sll $0, 32, $25
+ ldq $2, 32($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 16($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $24, $24
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $273
+ sll $21, 32, $1
+ addq $6, $1, $6
+$273:
+ sll $28, 32, $25
+ ldq $2, 40($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 8($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $24, $24
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $277
+ sll $21, 32, $1
+ addq $6, $1, $6
+$277:
+ sll $0, 32, $25
+ ldq $2, 48($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 0($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $2
+ addq $1, $24, $24
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $281
+ sll $21, 32, $1
+ addq $6, $1, $6
+$281:
+ sll $28, 32, $25
+ addq $2, $25, $2
+ bis $2, $2, $7
+ addq $23, $7, $23
+ stq $23, 48($16)
+ ldq $4, 0($17)
+ ldq $5, 56($18)
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ srl $5, 32, $8
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ cmpult $2, $25, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ zapnot $5, 15, $5
+ mulq $5, $6, $23
+ mulq $7, $5, $2
+ addq $1, $24, $24
+ addq $0, $23, $0
+ cmpult $0, $23, $1
+ mulq $6, $8, $6
+ beq $1, $285
+ sll $21, 32, $1
+ addq $6, $1, $6
+$285:
+ sll $0, 32, $23
+ ldq $1, 48($18)
+ addq $2, $23, $2
+ bis $2, $2, $7
+ ldq $4, 8($17)
+ addq $22, $7, $22
+ srl $1, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $25
+ zapnot $1, 15, $5
+ mulq $7, $5, $21
+ srl $0, 32, $1
+ cmpult $2, $23, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $23
+ srl $4, 32, $6
+ mulq $5, $6, $5
+ bis $31, 1, $20
+ addq $25, $5, $25
+ cmpult $25, $5, $1
+ mulq $6, $8, $6
+ beq $1, $289
+ sll $20, 32, $1
+ addq $6, $1, $6
+$289:
+ sll $25, 32, $5
+ ldq $2, 40($18)
+ addq $21, $5, $21
+ bis $21, $21, $7
+ ldq $4, 16($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $25, 32, $1
+ addq $6, $1, $6
+ cmpult $21, $5, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $23, $23
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $293
+ sll $20, 32, $1
+ addq $6, $1, $6
+$293:
+ sll $28, 32, $25
+ ldq $2, 32($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 24($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $23, $23
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $297
+ sll $20, 32, $1
+ addq $6, $1, $6
+$297:
+ sll $0, 32, $25
+ ldq $2, 24($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 32($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $23, $23
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $301
+ sll $20, 32, $1
+ addq $6, $1, $6
+$301:
+ sll $28, 32, $25
+ ldq $2, 16($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 40($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $23, $23
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $305
+ sll $20, 32, $1
+ addq $6, $1, $6
+$305:
+ sll $0, 32, $25
+ ldq $2, 8($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 48($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $23, $23
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $309
+ sll $20, 32, $1
+ addq $6, $1, $6
+$309:
+ sll $28, 32, $25
+ ldq $2, 0($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 56($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $2
+ addq $1, $23, $23
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $313
+ sll $20, 32, $1
+ addq $6, $1, $6
+$313:
+ sll $0, 32, $25
+ addq $2, $25, $2
+ bis $2, $2, $7
+ addq $22, $7, $22
+ stq $22, 56($16)
+ ldq $4, 56($17)
+ ldq $5, 8($18)
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ srl $5, 32, $8
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ cmpult $2, $25, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ zapnot $5, 15, $5
+ mulq $5, $6, $22
+ mulq $7, $5, $2
+ addq $1, $23, $23
+ addq $28, $22, $28
+ cmpult $28, $22, $1
+ mulq $6, $8, $6
+ beq $1, $317
+ sll $20, 32, $1
+ addq $6, $1, $6
+$317:
+ sll $28, 32, $22
+ ldq $1, 16($18)
+ addq $2, $22, $2
+ bis $2, $2, $7
+ ldq $4, 48($17)
+ addq $24, $7, $24
+ srl $1, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $25
+ zapnot $1, 15, $5
+ mulq $7, $5, $0
+ srl $28, 32, $1
+ cmpult $2, $22, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $22
+ srl $4, 32, $6
+ mulq $5, $6, $5
+ bis $31, 1, $21
+ addq $25, $5, $25
+ cmpult $25, $5, $1
+ mulq $6, $8, $6
+ beq $1, $321
+ sll $21, 32, $1
+ addq $6, $1, $6
+$321:
+ sll $25, 32, $5
+ ldq $2, 24($18)
+ addq $0, $5, $0
+ bis $0, $0, $7
+ ldq $4, 40($17)
+ addq $24, $7, $24
+ srl $2, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $25, 32, $1
+ addq $6, $1, $6
+ cmpult $0, $5, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $22, $22
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $325
+ sll $21, 32, $1
+ addq $6, $1, $6
+$325:
+ sll $28, 32, $25
+ ldq $2, 32($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 32($17)
+ addq $24, $7, $24
+ srl $2, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $22, $22
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $329
+ sll $21, 32, $1
+ addq $6, $1, $6
+$329:
+ sll $0, 32, $25
+ ldq $2, 40($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 24($17)
+ addq $24, $7, $24
+ srl $2, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $22, $22
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $333
+ sll $21, 32, $1
+ addq $6, $1, $6
+$333:
+ sll $28, 32, $25
+ ldq $2, 48($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 16($17)
+ addq $24, $7, $24
+ srl $2, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $22, $22
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $337
+ sll $21, 32, $1
+ addq $6, $1, $6
+$337:
+ sll $0, 32, $25
+ ldq $2, 56($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 8($17)
+ addq $24, $7, $24
+ srl $2, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $2
+ addq $1, $22, $22
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $341
+ sll $21, 32, $1
+ addq $6, $1, $6
+$341:
+ sll $28, 32, $25
+ addq $2, $25, $2
+ bis $2, $2, $7
+ addq $24, $7, $24
+ stq $24, 64($16)
+ ldq $4, 16($17)
+ ldq $5, 56($18)
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ srl $5, 32, $8
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ cmpult $2, $25, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ zapnot $5, 15, $5
+ mulq $5, $6, $24
+ mulq $7, $5, $2
+ addq $1, $22, $22
+ addq $0, $24, $0
+ cmpult $0, $24, $1
+ mulq $6, $8, $6
+ beq $1, $345
+ sll $21, 32, $1
+ addq $6, $1, $6
+$345:
+ sll $0, 32, $24
+ ldq $1, 48($18)
+ addq $2, $24, $2
+ bis $2, $2, $7
+ ldq $4, 24($17)
+ addq $23, $7, $23
+ srl $1, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $25
+ zapnot $1, 15, $5
+ mulq $7, $5, $21
+ srl $0, 32, $1
+ cmpult $2, $24, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $24
+ srl $4, 32, $6
+ mulq $5, $6, $5
+ bis $31, 1, $20
+ addq $25, $5, $25
+ cmpult $25, $5, $1
+ mulq $6, $8, $6
+ beq $1, $349
+ sll $20, 32, $1
+ addq $6, $1, $6
+$349:
+ sll $25, 32, $5
+ ldq $2, 40($18)
+ addq $21, $5, $21
+ bis $21, $21, $7
+ ldq $4, 32($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $25, 32, $1
+ addq $6, $1, $6
+ cmpult $21, $5, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $24, $24
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $353
+ sll $20, 32, $1
+ addq $6, $1, $6
+$353:
+ sll $28, 32, $25
+ ldq $2, 32($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 40($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $24, $24
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $357
+ sll $20, 32, $1
+ addq $6, $1, $6
+$357:
+ sll $0, 32, $25
+ ldq $2, 24($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 48($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $24, $24
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $361
+ sll $20, 32, $1
+ addq $6, $1, $6
+$361:
+ sll $28, 32, $25
+ ldq $2, 16($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 56($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $2
+ addq $1, $24, $24
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $365
+ sll $20, 32, $1
+ addq $6, $1, $6
+$365:
+ sll $0, 32, $25
+ addq $2, $25, $2
+ bis $2, $2, $7
+ addq $23, $7, $23
+ stq $23, 72($16)
+ ldq $4, 56($17)
+ ldq $5, 24($18)
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ srl $5, 32, $8
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ cmpult $2, $25, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ zapnot $5, 15, $5
+ mulq $5, $6, $23
+ mulq $7, $5, $2
+ addq $1, $24, $24
+ addq $28, $23, $28
+ cmpult $28, $23, $1
+ mulq $6, $8, $6
+ beq $1, $369
+ sll $20, 32, $1
+ addq $6, $1, $6
+$369:
+ sll $28, 32, $23
+ ldq $1, 32($18)
+ addq $2, $23, $2
+ bis $2, $2, $7
+ ldq $4, 48($17)
+ addq $22, $7, $22
+ srl $1, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $25
+ zapnot $1, 15, $5
+ mulq $7, $5, $0
+ srl $28, 32, $1
+ cmpult $2, $23, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $23
+ srl $4, 32, $6
+ mulq $5, $6, $5
+ bis $31, 1, $21
+ addq $25, $5, $25
+ cmpult $25, $5, $1
+ mulq $6, $8, $6
+ beq $1, $373
+ sll $21, 32, $1
+ addq $6, $1, $6
+$373:
+ sll $25, 32, $5
+ ldq $2, 40($18)
+ addq $0, $5, $0
+ bis $0, $0, $7
+ ldq $4, 40($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $25, 32, $1
+ addq $6, $1, $6
+ cmpult $0, $5, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $23, $23
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $377
+ sll $21, 32, $1
+ addq $6, $1, $6
+$377:
+ sll $28, 32, $25
+ ldq $2, 48($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 32($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $23, $23
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $381
+ sll $21, 32, $1
+ addq $6, $1, $6
+$381:
+ sll $0, 32, $25
+ ldq $2, 56($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 24($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $2
+ addq $1, $23, $23
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $385
+ sll $21, 32, $1
+ addq $6, $1, $6
+$385:
+ sll $28, 32, $25
+ addq $2, $25, $2
+ bis $2, $2, $7
+ addq $22, $7, $22
+ stq $22, 80($16)
+ ldq $4, 32($17)
+ ldq $5, 56($18)
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ srl $5, 32, $8
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ cmpult $2, $25, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ zapnot $5, 15, $5
+ mulq $5, $6, $22
+ mulq $7, $5, $2
+ addq $1, $23, $23
+ addq $0, $22, $0
+ cmpult $0, $22, $1
+ mulq $6, $8, $6
+ beq $1, $389
+ sll $21, 32, $1
+ addq $6, $1, $6
+$389:
+ sll $0, 32, $22
+ ldq $1, 48($18)
+ addq $2, $22, $2
+ bis $2, $2, $7
+ ldq $4, 40($17)
+ addq $24, $7, $24
+ srl $1, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $25
+ zapnot $1, 15, $5
+ mulq $7, $5, $21
+ srl $0, 32, $1
+ cmpult $2, $22, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $22
+ srl $4, 32, $6
+ mulq $5, $6, $5
+ bis $31, 1, $20
+ addq $25, $5, $25
+ cmpult $25, $5, $1
+ mulq $6, $8, $6
+ beq $1, $393
+ sll $20, 32, $1
+ addq $6, $1, $6
+$393:
+ sll $25, 32, $5
+ ldq $2, 40($18)
+ addq $21, $5, $21
+ bis $21, $21, $7
+ ldq $4, 48($17)
+ addq $24, $7, $24
+ srl $2, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $25, 32, $1
+ addq $6, $1, $6
+ cmpult $21, $5, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $1, $22, $22
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $397
+ sll $20, 32, $1
+ addq $6, $1, $6
+$397:
+ sll $28, 32, $25
+ ldq $2, 32($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 56($17)
+ addq $24, $7, $24
+ srl $2, 32, $8
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $21
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $2
+ addq $1, $22, $22
+ addq $21, $25, $21
+ cmpult $21, $25, $1
+ mulq $6, $8, $6
+ beq $1, $401
+ sll $20, 32, $1
+ addq $6, $1, $6
+$401:
+ sll $21, 32, $25
+ addq $2, $25, $2
+ bis $2, $2, $7
+ addq $24, $7, $24
+ stq $24, 88($16)
+ ldq $4, 56($17)
+ ldq $5, 40($18)
+ cmpult $24, $7, $3
+ zapnot $4, 15, $7
+ srl $5, 32, $8
+ mulq $8, $7, $0
+ srl $21, 32, $1
+ cmpult $2, $25, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $23, $6, $23
+ cmpult $23, $6, $1
+ srl $4, 32, $6
+ zapnot $5, 15, $5
+ mulq $5, $6, $24
+ mulq $7, $5, $5
+ addq $1, $22, $22
+ addq $0, $24, $0
+ cmpult $0, $24, $1
+ mulq $6, $8, $6
+ beq $1, $405
+ sll $20, 32, $1
+ addq $6, $1, $6
+$405:
+ sll $0, 32, $24
+ ldq $2, 48($18)
+ addq $5, $24, $5
+ bis $5, $5, $7
+ ldq $4, 48($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $24, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $24
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $5
+ addq $28, $25, $28
+ cmpult $28, $25, $1
+ mulq $6, $8, $6
+ beq $1, $409
+ sll $20, 32, $1
+ addq $6, $1, $6
+$409:
+ sll $28, 32, $25
+ ldq $2, 56($18)
+ addq $5, $25, $5
+ bis $5, $5, $7
+ ldq $4, 40($17)
+ addq $23, $7, $23
+ srl $2, 32, $8
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $25, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $2
+ addq $1, $24, $24
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $413
+ sll $20, 32, $1
+ addq $6, $1, $6
+$413:
+ sll $0, 32, $25
+ addq $2, $25, $2
+ bis $2, $2, $7
+ addq $23, $7, $23
+ stq $23, 96($16)
+ ldq $4, 48($17)
+ ldq $5, 56($18)
+ cmpult $23, $7, $3
+ zapnot $4, 15, $7
+ srl $5, 32, $8
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ cmpult $2, $25, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $22, $6, $22
+ cmpult $22, $6, $1
+ srl $4, 32, $6
+ zapnot $5, 15, $5
+ mulq $5, $6, $23
+ mulq $7, $5, $5
+ addq $1, $24, $24
+ addq $28, $23, $28
+ cmpult $28, $23, $1
+ mulq $6, $8, $6
+ beq $1, $417
+ sll $20, 32, $1
+ addq $6, $1, $6
+$417:
+ sll $28, 32, $23
+ ldq $2, 48($18)
+ addq $5, $23, $5
+ bis $5, $5, $7
+ ldq $4, 56($17)
+ addq $22, $7, $22
+ srl $2, 32, $8
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ mulq $8, $7, $0
+ srl $28, 32, $1
+ addq $6, $1, $6
+ cmpult $5, $23, $1
+ zapnot $2, 15, $5
+ addq $1, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $23
+ srl $4, 32, $6
+ mulq $5, $6, $25
+ mulq $7, $5, $2
+ addq $0, $25, $0
+ cmpult $0, $25, $1
+ mulq $6, $8, $6
+ beq $1, $421
+ sll $20, 32, $1
+ addq $6, $1, $6
+$421:
+ sll $0, 32, $25
+ addq $2, $25, $2
+ bis $2, $2, $7
+ addq $22, $7, $22
+ stq $22, 104($16)
+ ldq $4, 56($17)
+ ldq $5, 56($18)
+ cmpult $22, $7, $3
+ zapnot $4, 15, $7
+ srl $5, 32, $8
+ mulq $8, $7, $28
+ srl $0, 32, $1
+ cmpult $2, $25, $2
+ addq $6, $1, $6
+ addq $2, $6, $6
+ addq $3, $6, $6
+ addq $24, $6, $24
+ cmpult $24, $6, $1
+ srl $4, 32, $6
+ zapnot $5, 15, $5
+ mulq $5, $6, $22
+ mulq $7, $5, $2
+ addq $1, $23, $23
+ addq $28, $22, $28
+ cmpult $28, $22, $1
+ mulq $6, $8, $3
+ beq $1, $425
+ sll $20, 32, $1
+ addq $3, $1, $3
+$425:
+ sll $28, 32, $22
+ srl $28, 32, $1
+ addq $2, $22, $2
+ addq $3, $1, $3
+ bis $2, $2, $7
+ addq $24, $7, $24
+ cmpult $7, $22, $1
+ cmpult $24, $7, $2
+ addq $1, $3, $6
+ addq $2, $6, $6
+ stq $24, 112($16)
+ addq $23, $6, $23
+ stq $23, 120($16)
+ ret $31, ($26), 1
+ .end bn_mul_comba8
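+
+ # bn_sqr_comba4(r, a): squares the 4-word a into the 8-word r[0..7].
+ # Off-diagonal products a[i]*a[j] appear twice in a square, so they
+ # are doubled; cmplt against $31 saves the top bit that the doubling
+ # addq would otherwise shift out.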
+ .text
+ .align 3
+ .globl bn_sqr_comba4
+ .ent bn_sqr_comba4
+bn_sqr_comba4:
+bn_sqr_comba4..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ ldq $0, 0($17)
+ ldq $1, 8($17)
+ ldq $2, 16($17)
+ ldq $3, 24($17)
+ bis $31, $31, $6
+ mulq $0, $0, $4
+ umulh $0, $0, $5
+ stq $4, 0($16)
+ bis $31, $31, $4
+ mulq $0, $1, $7
+ umulh $0, $1, $8
+ cmplt $7, $31, $22
+ cmplt $8, $31, $23
+ addq $7, $7, $7
+ addq $8, $8, $8
+ addq $8, $22, $8
+ addq $4, $23, $4
+ addq $5, $7, $5
+ addq $6, $8, $6
+ cmpult $5, $7, $24
+ cmpult $6, $8, $25
+ addq $6, $24, $6
+ addq $4, $25, $4
+ stq $5, 8($16)
+ bis $31, $31, $5
+ mulq $1, $1, $27
+ umulh $1, $1, $28
+ addq $6, $27, $6
+ addq $4, $28, $4
+ cmpult $6, $27, $21
+ cmpult $4, $28, $20
+ addq $4, $21, $4
+ addq $5, $20, $5
+ mulq $2, $0, $19
+ umulh $2, $0, $18
+ cmplt $19, $31, $17
+ cmplt $18, $31, $22
+ addq $19, $19, $19
+ addq $18, $18, $18
+ addq $18, $17, $18
+ addq $5, $22, $5
+ addq $6, $19, $6
+ addq $4, $18, $4
+ cmpult $6, $19, $23
+ cmpult $4, $18, $7
+ addq $4, $23, $4
+ addq $5, $7, $5
+ stq $6, 16($16)
+ bis $31, $31, $6
+ mulq $3, $0, $8
+ umulh $3, $0, $24
+ cmplt $8, $31, $25
+ cmplt $24, $31, $27
+ addq $8, $8, $8
+ addq $24, $24, $24
+ addq $24, $25, $24
+ addq $6, $27, $6
+ addq $4, $8, $4
+ addq $5, $24, $5
+ cmpult $4, $8, $28
+ cmpult $5, $24, $21
+ addq $5, $28, $5
+ addq $6, $21, $6
+ mulq $2, $1, $20
+ umulh $2, $1, $17
+ cmplt $20, $31, $22
+ cmplt $17, $31, $19
+ addq $20, $20, $20
+ addq $17, $17, $17
+ addq $17, $22, $17
+ addq $6, $19, $6
+ addq $4, $20, $4
+ addq $5, $17, $5
+ cmpult $4, $20, $18
+ cmpult $5, $17, $23
+ addq $5, $18, $5
+ addq $6, $23, $6
+ stq $4, 24($16)
+ bis $31, $31, $4
+ mulq $2, $2, $7
+ umulh $2, $2, $25
+ addq $5, $7, $5
+ addq $6, $25, $6
+ cmpult $5, $7, $27
+ cmpult $6, $25, $8
+ addq $6, $27, $6
+ addq $4, $8, $4
+ mulq $3, $1, $24
+ umulh $3, $1, $28
+ cmplt $24, $31, $21
+ cmplt $28, $31, $22
+ addq $24, $24, $24
+ addq $28, $28, $28
+ addq $28, $21, $28
+ addq $4, $22, $4
+ addq $5, $24, $5
+ addq $6, $28, $6
+ cmpult $5, $24, $19
+ cmpult $6, $28, $20
+ addq $6, $19, $6
+ addq $4, $20, $4
+ stq $5, 32($16)
+ bis $31, $31, $5
+ mulq $3, $2, $17
+ umulh $3, $2, $18
+ cmplt $17, $31, $23
+ cmplt $18, $31, $7
+ addq $17, $17, $17
+ addq $18, $18, $18
+ addq $18, $23, $18
+ addq $5, $7, $5
+ addq $6, $17, $6
+ addq $4, $18, $4
+ cmpult $6, $17, $25
+ cmpult $4, $18, $27
+ addq $4, $25, $4
+ addq $5, $27, $5
+ stq $6, 40($16)
+ bis $31, $31, $6
+ mulq $3, $3, $8
+ umulh $3, $3, $21
+ addq $4, $8, $4
+ addq $5, $21, $5
+ cmpult $4, $8, $22
+ cmpult $5, $21, $24
+ addq $5, $22, $5
+ addq $6, $24, $6
+ stq $4, 48($16)
+ stq $5, 56($16)
+ ret $31,($26),1
+ .end bn_sqr_comba4
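+
+ # bn_sqr_comba8(r, a): squares the 8-word a into the 16-word r[0..15]
+ # using the same doubling scheme as bn_sqr_comba4.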
+ .text
+ .align 3
+ .globl bn_sqr_comba8
+ .ent bn_sqr_comba8
+bn_sqr_comba8:
+bn_sqr_comba8..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ ldq $0, 0($17)
+ ldq $1, 8($17)
+ ldq $2, 16($17)
+ ldq $3, 24($17)
+ ldq $4, 32($17)
+ ldq $5, 40($17)
+ ldq $6, 48($17)
+ ldq $7, 56($17)
+ bis $31, $31, $23
+ mulq $0, $0, $8
+ umulh $0, $0, $22
+ stq $8, 0($16)
+ bis $31, $31, $8
+ mulq $1, $0, $24
+ umulh $1, $0, $25
+ cmplt $24, $31, $27
+ cmplt $25, $31, $28
+ addq $24, $24, $24
+ addq $25, $25, $25
+ addq $25, $27, $25
+ addq $8, $28, $8
+ addq $22, $24, $22
+ addq $23, $25, $23
+ cmpult $22, $24, $21
+ cmpult $23, $25, $20
+ addq $23, $21, $23
+ addq $8, $20, $8
+ stq $22, 8($16)
+ bis $31, $31, $22
+ mulq $1, $1, $19
+ umulh $1, $1, $18
+ addq $23, $19, $23
+ addq $8, $18, $8
+ cmpult $23, $19, $17
+ cmpult $8, $18, $27
+ addq $8, $17, $8
+ addq $22, $27, $22
+ mulq $2, $0, $28
+ umulh $2, $0, $24
+ cmplt $28, $31, $25
+ cmplt $24, $31, $21
+ addq $28, $28, $28
+ addq $24, $24, $24
+ addq $24, $25, $24
+ addq $22, $21, $22
+ addq $23, $28, $23
+ addq $8, $24, $8
+ cmpult $23, $28, $20
+ cmpult $8, $24, $19
+ addq $8, $20, $8
+ addq $22, $19, $22
+ stq $23, 16($16)
+ bis $31, $31, $23
+ mulq $2, $1, $18
+ umulh $2, $1, $17
+ cmplt $18, $31, $27
+ cmplt $17, $31, $25
+ addq $18, $18, $18
+ addq $17, $17, $17
+ addq $17, $27, $17
+ addq $23, $25, $23
+ addq $8, $18, $8
+ addq $22, $17, $22
+ cmpult $8, $18, $21
+ cmpult $22, $17, $28
+ addq $22, $21, $22
+ addq $23, $28, $23
+ mulq $3, $0, $24
+ umulh $3, $0, $20
+ cmplt $24, $31, $19
+ cmplt $20, $31, $27
+ addq $24, $24, $24
+ addq $20, $20, $20
+ addq $20, $19, $20
+ addq $23, $27, $23
+ addq $8, $24, $8
+ addq $22, $20, $22
+ cmpult $8, $24, $25
+ cmpult $22, $20, $18
+ addq $22, $25, $22
+ addq $23, $18, $23
+ stq $8, 24($16)
+ bis $31, $31, $8
+ mulq $2, $2, $17
+ umulh $2, $2, $21
+ addq $22, $17, $22
+ addq $23, $21, $23
+ cmpult $22, $17, $28
+ cmpult $23, $21, $19
+ addq $23, $28, $23
+ addq $8, $19, $8
+ mulq $3, $1, $27
+ umulh $3, $1, $24
+ cmplt $27, $31, $20
+ cmplt $24, $31, $25
+ addq $27, $27, $27
+ addq $24, $24, $24
+ addq $24, $20, $24
+ addq $8, $25, $8
+ addq $22, $27, $22
+ addq $23, $24, $23
+ cmpult $22, $27, $18
+ cmpult $23, $24, $17
+ addq $23, $18, $23
+ addq $8, $17, $8
+ mulq $4, $0, $21
+ umulh $4, $0, $28
+ cmplt $21, $31, $19
+ cmplt $28, $31, $20
+ addq $21, $21, $21
+ addq $28, $28, $28
+ addq $28, $19, $28
+ addq $8, $20, $8
+ addq $22, $21, $22
+ addq $23, $28, $23
+ cmpult $22, $21, $25
+ cmpult $23, $28, $27
+ addq $23, $25, $23
+ addq $8, $27, $8
+ stq $22, 32($16)
+ bis $31, $31, $22
+ mulq $3, $2, $24
+ umulh $3, $2, $18
+ cmplt $24, $31, $17
+ cmplt $18, $31, $19
+ addq $24, $24, $24
+ addq $18, $18, $18
+ addq $18, $17, $18
+ addq $22, $19, $22
+ addq $23, $24, $23
+ addq $8, $18, $8
+ cmpult $23, $24, $20
+ cmpult $8, $18, $21
+ addq $8, $20, $8
+ addq $22, $21, $22
+ mulq $4, $1, $28
+ umulh $4, $1, $25
+ cmplt $28, $31, $27
+ cmplt $25, $31, $17
+ addq $28, $28, $28
+ addq $25, $25, $25
+ addq $25, $27, $25
+ addq $22, $17, $22
+ addq $23, $28, $23
+ addq $8, $25, $8
+ cmpult $23, $28, $19
+ cmpult $8, $25, $24
+ addq $8, $19, $8
+ addq $22, $24, $22
+ mulq $5, $0, $18
+ umulh $5, $0, $20
+ cmplt $18, $31, $21
+ cmplt $20, $31, $27
+ addq $18, $18, $18
+ addq $20, $20, $20
+ addq $20, $21, $20
+ addq $22, $27, $22
+ addq $23, $18, $23
+ addq $8, $20, $8
+ cmpult $23, $18, $17
+ cmpult $8, $20, $28
+ addq $8, $17, $8
+ addq $22, $28, $22
+ stq $23, 40($16)
+ bis $31, $31, $23
+ mulq $3, $3, $25
+ umulh $3, $3, $19
+ addq $8, $25, $8
+ addq $22, $19, $22
+ cmpult $8, $25, $24
+ cmpult $22, $19, $21
+ addq $22, $24, $22
+ addq $23, $21, $23
+ mulq $4, $2, $27
+ umulh $4, $2, $18
+ cmplt $27, $31, $20
+ cmplt $18, $31, $17
+ addq $27, $27, $27
+ addq $18, $18, $18
+ addq $18, $20, $18
+ addq $23, $17, $23
+ addq $8, $27, $8
+ addq $22, $18, $22
+ cmpult $8, $27, $28
+ cmpult $22, $18, $25
+ addq $22, $28, $22
+ addq $23, $25, $23
+ mulq $5, $1, $19
+ umulh $5, $1, $24
+ cmplt $19, $31, $21
+ cmplt $24, $31, $20
+ addq $19, $19, $19
+ addq $24, $24, $24
+ addq $24, $21, $24
+ addq $23, $20, $23
+ addq $8, $19, $8
+ addq $22, $24, $22
+ cmpult $8, $19, $17
+ cmpult $22, $24, $27
+ addq $22, $17, $22
+ addq $23, $27, $23
+ mulq $6, $0, $18
+ umulh $6, $0, $28
+ cmplt $18, $31, $25
+ cmplt $28, $31, $21
+ addq $18, $18, $18
+ addq $28, $28, $28
+ addq $28, $25, $28
+ addq $23, $21, $23
+ addq $8, $18, $8
+ addq $22, $28, $22
+ cmpult $8, $18, $20
+ cmpult $22, $28, $19
+ addq $22, $20, $22
+ addq $23, $19, $23
+ stq $8, 48($16)
+ bis $31, $31, $8
+ mulq $4, $3, $24
+ umulh $4, $3, $17
+ cmplt $24, $31, $27
+ cmplt $17, $31, $25
+ addq $24, $24, $24
+ addq $17, $17, $17
+ addq $17, $27, $17
+ addq $8, $25, $8
+ addq $22, $24, $22
+ addq $23, $17, $23
+ cmpult $22, $24, $21
+ cmpult $23, $17, $18
+ addq $23, $21, $23
+ addq $8, $18, $8
+ mulq $5, $2, $28
+ umulh $5, $2, $20
+ cmplt $28, $31, $19
+ cmplt $20, $31, $27
+ addq $28, $28, $28
+ addq $20, $20, $20
+ addq $20, $19, $20
+ addq $8, $27, $8
+ addq $22, $28, $22
+ addq $23, $20, $23
+ cmpult $22, $28, $25
+ cmpult $23, $20, $24
+ addq $23, $25, $23
+ addq $8, $24, $8
+ mulq $6, $1, $17
+ umulh $6, $1, $21
+ cmplt $17, $31, $18
+ cmplt $21, $31, $19
+ addq $17, $17, $17
+ addq $21, $21, $21
+ addq $21, $18, $21
+ addq $8, $19, $8
+ addq $22, $17, $22
+ addq $23, $21, $23
+ cmpult $22, $17, $27
+ cmpult $23, $21, $28
+ addq $23, $27, $23
+ addq $8, $28, $8
+ mulq $7, $0, $20
+ umulh $7, $0, $25
+ cmplt $20, $31, $24
+ cmplt $25, $31, $18
+ addq $20, $20, $20
+ addq $25, $25, $25
+ addq $25, $24, $25
+ addq $8, $18, $8
+ addq $22, $20, $22
+ addq $23, $25, $23
+ cmpult $22, $20, $19
+ cmpult $23, $25, $17
+ addq $23, $19, $23
+ addq $8, $17, $8
+ stq $22, 56($16)
+ bis $31, $31, $22
+ mulq $4, $4, $21
+ umulh $4, $4, $27
+ addq $23, $21, $23
+ addq $8, $27, $8
+ cmpult $23, $21, $28
+ cmpult $8, $27, $24
+ addq $8, $28, $8
+ addq $22, $24, $22
+ mulq $5, $3, $18
+ umulh $5, $3, $20
+ cmplt $18, $31, $25
+ cmplt $20, $31, $19
+ addq $18, $18, $18
+ addq $20, $20, $20
+ addq $20, $25, $20
+ addq $22, $19, $22
+ addq $23, $18, $23
+ addq $8, $20, $8
+ cmpult $23, $18, $17
+ cmpult $8, $20, $21
+ addq $8, $17, $8
+ addq $22, $21, $22
+ mulq $6, $2, $27
+ umulh $6, $2, $28
+ cmplt $27, $31, $24
+ cmplt $28, $31, $25
+ addq $27, $27, $27
+ addq $28, $28, $28
+ addq $28, $24, $28
+ addq $22, $25, $22
+ addq $23, $27, $23
+ addq $8, $28, $8
+ cmpult $23, $27, $19
+ cmpult $8, $28, $18
+ addq $8, $19, $8
+ addq $22, $18, $22
+ mulq $7, $1, $20
+ umulh $7, $1, $17
+ cmplt $20, $31, $21
+ cmplt $17, $31, $24
+ addq $20, $20, $20
+ addq $17, $17, $17
+ addq $17, $21, $17
+ addq $22, $24, $22
+ addq $23, $20, $23
+ addq $8, $17, $8
+ cmpult $23, $20, $25
+ cmpult $8, $17, $27
+ addq $8, $25, $8
+ addq $22, $27, $22
+ stq $23, 64($16)
+ bis $31, $31, $23
+ mulq $5, $4, $28
+ umulh $5, $4, $19
+ cmplt $28, $31, $18
+ cmplt $19, $31, $21
+ addq $28, $28, $28
+ addq $19, $19, $19
+ addq $19, $18, $19
+ addq $23, $21, $23
+ addq $8, $28, $8
+ addq $22, $19, $22
+ cmpult $8, $28, $24
+ cmpult $22, $19, $20
+ addq $22, $24, $22
+ addq $23, $20, $23
+ mulq $6, $3, $17
+ umulh $6, $3, $25
+ cmplt $17, $31, $27
+ cmplt $25, $31, $18
+ addq $17, $17, $17
+ addq $25, $25, $25
+ addq $25, $27, $25
+ addq $23, $18, $23
+ addq $8, $17, $8
+ addq $22, $25, $22
+ cmpult $8, $17, $21
+ cmpult $22, $25, $28
+ addq $22, $21, $22
+ addq $23, $28, $23
+ mulq $7, $2, $19
+ umulh $7, $2, $24
+ cmplt $19, $31, $20
+ cmplt $24, $31, $27
+ addq $19, $19, $19
+ addq $24, $24, $24
+ addq $24, $20, $24
+ addq $23, $27, $23
+ addq $8, $19, $8
+ addq $22, $24, $22
+ cmpult $8, $19, $18
+ cmpult $22, $24, $17
+ addq $22, $18, $22
+ addq $23, $17, $23
+ stq $8, 72($16)
+ bis $31, $31, $8
+ mulq $5, $5, $25
+ umulh $5, $5, $21
+ addq $22, $25, $22
+ addq $23, $21, $23
+ cmpult $22, $25, $28
+ cmpult $23, $21, $20
+ addq $23, $28, $23
+ addq $8, $20, $8
+ mulq $6, $4, $27
+ umulh $6, $4, $19
+ cmplt $27, $31, $24
+ cmplt $19, $31, $18
+ addq $27, $27, $27
+ addq $19, $19, $19
+ addq $19, $24, $19
+ addq $8, $18, $8
+ addq $22, $27, $22
+ addq $23, $19, $23
+ cmpult $22, $27, $17
+ cmpult $23, $19, $25
+ addq $23, $17, $23
+ addq $8, $25, $8
+ mulq $7, $3, $21
+ umulh $7, $3, $28
+ cmplt $21, $31, $20
+ cmplt $28, $31, $24
+ addq $21, $21, $21
+ addq $28, $28, $28
+ addq $28, $20, $28
+ addq $8, $24, $8
+ addq $22, $21, $22
+ addq $23, $28, $23
+ cmpult $22, $21, $18
+ cmpult $23, $28, $27
+ addq $23, $18, $23
+ addq $8, $27, $8
+ stq $22, 80($16)
+ bis $31, $31, $22
+ mulq $6, $5, $19
+ umulh $6, $5, $17
+ cmplt $19, $31, $25
+ cmplt $17, $31, $20
+ addq $19, $19, $19
+ addq $17, $17, $17
+ addq $17, $25, $17
+ addq $22, $20, $22
+ addq $23, $19, $23
+ addq $8, $17, $8
+ cmpult $23, $19, $24
+ cmpult $8, $17, $21
+ addq $8, $24, $8
+ addq $22, $21, $22
+ mulq $7, $4, $28
+ umulh $7, $4, $18
+ cmplt $28, $31, $27
+ cmplt $18, $31, $25
+ addq $28, $28, $28
+ addq $18, $18, $18
+ addq $18, $27, $18
+ addq $22, $25, $22
+ addq $23, $28, $23
+ addq $8, $18, $8
+ cmpult $23, $28, $20
+ cmpult $8, $18, $19
+ addq $8, $20, $8
+ addq $22, $19, $22
+ stq $23, 88($16)
+ bis $31, $31, $23
+ mulq $6, $6, $17
+ umulh $6, $6, $24
+ addq $8, $17, $8
+ addq $22, $24, $22
+ cmpult $8, $17, $21
+ cmpult $22, $24, $27
+ addq $22, $21, $22
+ addq $23, $27, $23
+ mulq $7, $5, $25
+ umulh $7, $5, $28
+ cmplt $25, $31, $18
+ cmplt $28, $31, $20
+ addq $25, $25, $25
+ addq $28, $28, $28
+ addq $28, $18, $28
+ addq $23, $20, $23
+ addq $8, $25, $8
+ addq $22, $28, $22
+ cmpult $8, $25, $19
+ cmpult $22, $28, $17
+ addq $22, $19, $22
+ addq $23, $17, $23
+ stq $8, 96($16)
+ bis $31, $31, $8
+ mulq $7, $6, $24
+ umulh $7, $6, $21
+ cmplt $24, $31, $27
+ cmplt $21, $31, $18
+ addq $24, $24, $24
+ addq $21, $21, $21
+ addq $21, $27, $21
+ addq $8, $18, $8
+ addq $22, $24, $22
+ addq $23, $21, $23
+ cmpult $22, $24, $20
+ cmpult $23, $21, $25
+ addq $23, $20, $23
+ addq $8, $25, $8
+ stq $22, 104($16)
+ bis $31, $31, $22
+ mulq $7, $7, $28
+ umulh $7, $7, $19
+ addq $23, $28, $23
+ addq $8, $19, $8
+ cmpult $23, $28, $17
+ cmpult $8, $19, $27
+ addq $8, $17, $8
+ addq $22, $27, $22
+ stq $23, 112($16)
+ stq $8, 120($16)
+ ret $31,($26),1
+ .end bn_sqr_comba8
diff --git a/crypto/bn/asm/alpha.s.works b/crypto/bn/asm/alpha.s.works
new file mode 100644
index 0000000..ee6c587
--- /dev/null
+++ b/crypto/bn/asm/alpha.s.works
@@ -0,0 +1,533 @@
+
+ # DEC Alpha assembler
+ # The bn_div64 routine is actually gcc output, but the other parts are
+ # hand done.  Thanks to tzeruch@ceddec.com for sending me the gcc
+ # output for bn_div64.
+ # I've gone back and re-done most of the routines.
+ # The key thing to remember for the 164 CPU is that while a
+ # multiply operation takes 8 cycles, another one can only be issued
+ # after 4 cycles have elapsed.  I've made modifications to help
+ # improve this.  Also, normally, the result of a ld instruction is not
+ # available for about 3 cycles.
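+ #
+ # Illustration (editor's sketch, not part of the original source): the
+ # unrolled loops below hide those latencies by issuing independent
+ # loads between a mulq and its matching umulh, e.g.
+ #
+ #	mulq	$20,$19,$5	# start low 64 bits of a[0]*w
+ #	ldq	$21,8($17)	# independent load fills the 4-cycle gap
+ #	ldq	$2,8($16)	# another independent load
+ #	umulh	$20,$19,$20	# high 64 bits, issued 4+ cycles later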
+ .file 1 "bn_asm.c"
+ .set noat
+gcc2_compiled.:
+__gnu_compiled_c:
+ .text
+ .align 3
+ .globl bn_mul_add_words
+ .ent bn_mul_add_words
+bn_mul_add_words:
+bn_mul_add_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+ .align 5
+ subq $18,4,$18
+ bis $31,$31,$0
+ blt $18,$43 # if we are -1, -2, -3 or -4 goto tail code
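+	# Editor's note: each iteration computes, in effect,
+	#	r[i] = r[i] + a[i]*w + carry	(128-bit intermediate)
+	# storing the low 64 bits back to r[i] and carrying the high 64
+	# bits into the next word; the main loop is unrolled four times.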
+ ldq $20,0($17) # 1 1
+ ldq $1,0($16) # 1 1
+ .align 3
+$42:
+ mulq $20,$19,$5 # 1 2 1 ######
+ ldq $21,8($17) # 2 1
+ ldq $2,8($16) # 2 1
+ umulh $20,$19,$20 # 1 2 ######
+ ldq $27,16($17) # 3 1
+ ldq $3,16($16) # 3 1
+ mulq $21,$19,$6 # 2 2 1 ######
+ ldq $28,24($17) # 4 1
+ addq $1,$5,$1 # 1 2 2
+ ldq $4,24($16) # 4 1
+ umulh $21,$19,$21 # 2 2 ######
+ cmpult $1,$5,$22 # 1 2 3 1
+ addq $20,$22,$20 # 1 3 1
+ addq $1,$0,$1 # 1 2 3 1
+ mulq $27,$19,$7 # 3 2 1 ######
+ cmpult $1,$0,$0 # 1 2 3 2
+ addq $2,$6,$2 # 2 2 2
+ addq $20,$0,$0 # 1 3 2
+ cmpult $2,$6,$23 # 2 2 3 1
+ addq $21,$23,$21 # 2 3 1
+ umulh $27,$19,$27 # 3 2 ######
+ addq $2,$0,$2 # 2 2 3 1
+ cmpult $2,$0,$0 # 2 2 3 2
+ subq $18,4,$18
+ mulq $28,$19,$8 # 4 2 1 ######
+ addq $21,$0,$0 # 2 3 2
+ addq $3,$7,$3 # 3 2 2
+ addq $16,32,$16
+ cmpult $3,$7,$24 # 3 2 3 1
+ stq $1,-32($16) # 1 2 4
+ umulh $28,$19,$28 # 4 2 ######
+ addq $27,$24,$27 # 3 3 1
+ addq $3,$0,$3 # 3 2 3 1
+ stq $2,-24($16) # 2 2 4
+ cmpult $3,$0,$0 # 3 2 3 2
+ stq $3,-16($16) # 3 2 4
+ addq $4,$8,$4 # 4 2 2
+ addq $27,$0,$0 # 3 3 2
+ cmpult $4,$8,$25 # 4 2 3 1
+ addq $17,32,$17
+ addq $28,$25,$28 # 4 3 1
+ addq $4,$0,$4 # 4 2 3 1
+ cmpult $4,$0,$0 # 4 2 3 2
+ stq $4,-8($16) # 4 2 4
+ addq $28,$0,$0 # 4 3 2
+ blt $18,$43
+
+ ldq $20,0($17) # 1 1
+ ldq $1,0($16) # 1 1
+
+ br $42
+
+ .align 4
+$45:
+ ldq $20,0($17) # 4 1
+ ldq $1,0($16) # 4 1
+ mulq $20,$19,$5 # 4 2 1
+ subq $18,1,$18
+ addq $16,8,$16
+ addq $17,8,$17
+ umulh $20,$19,$20 # 4 2
+ addq $1,$5,$1 # 4 2 2
+ cmpult $1,$5,$22 # 4 2 3 1
+ addq $20,$22,$20 # 4 3 1
+ addq $1,$0,$1 # 4 2 3 1
+ cmpult $1,$0,$0 # 4 2 3 2
+ addq $20,$0,$0 # 4 3 2
+ stq $1,-8($16) # 4 2 4
+ bgt $18,$45
+ ret $31,($26),1 # else exit
+
+ .align 4
+$43:
+ addq $18,4,$18
+ bgt $18,$45 # goto tail code
+ ret $31,($26),1 # else exit
+
+ .end bn_mul_add_words
+ .align 3
+ .globl bn_mul_words
+ .ent bn_mul_words
+bn_mul_words:
+bn_mul_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+ .align 5
+ subq $18,4,$18
+ bis $31,$31,$0
+ blt $18,$143 # if we are -1, -2, -3 or -4 goto tail code
+ ldq $20,0($17) # 1 1
+ .align 3
+$142:
+
+ mulq $20,$19,$5 # 1 2 1 #####
+ ldq $21,8($17) # 2 1
+ ldq $27,16($17) # 3 1
+ umulh $20,$19,$20 # 1 2 #####
+ ldq $28,24($17) # 4 1
+ mulq $21,$19,$6 # 2 2 1 #####
+ addq $5,$0,$5 # 1 2 3 1
+ subq $18,4,$18
+ cmpult $5,$0,$0 # 1 2 3 2
+ umulh $21,$19,$21 # 2 2 #####
+ addq $20,$0,$0 # 1 3 2
+ addq $17,32,$17
+ addq $6,$0,$6 # 2 2 3 1
+ mulq $27,$19,$7 # 3 2 1 #####
+ cmpult $6,$0,$0 # 2 2 3 2
+ addq $21,$0,$0 # 2 3 2
+ addq $16,32,$16
+ umulh $27,$19,$27 # 3 2 #####
+ stq $5,-32($16) # 1 2 4
+ mulq $28,$19,$8 # 4 2 1 #####
+ addq $7,$0,$7 # 3 2 3 1
+ stq $6,-24($16) # 2 2 4
+ cmpult $7,$0,$0 # 3 2 3 2
+ umulh $28,$19,$28 # 4 2 #####
+ addq $27,$0,$0 # 3 3 2
+ stq $7,-16($16) # 3 2 4
+ addq $8,$0,$8 # 4 2 3 1
+ cmpult $8,$0,$0 # 4 2 3 2
+
+ addq $28,$0,$0 # 4 3 2
+
+ stq $8,-8($16) # 4 2 4
+
+ blt $18,$143
+
+ ldq $20,0($17) # 1 1
+
+ br $142
+
+ .align 4
+$145:
+ ldq $20,0($17) # 4 1
+ mulq $20,$19,$5 # 4 2 1
+ subq $18,1,$18
+ umulh $20,$19,$20 # 4 2
+ addq $5,$0,$5 # 4 2 3 1
+ addq $16,8,$16
+ cmpult $5,$0,$0 # 4 2 3 2
+ addq $17,8,$17
+ addq $20,$0,$0 # 4 3 2
+ stq $5,-8($16) # 4 2 4
+
+ bgt $18,$145
+ ret $31,($26),1 # else exit
+
+ .align 4
+$143:
+ addq $18,4,$18
+ bgt $18,$145 # goto tail code
+ ret $31,($26),1 # else exit
+
+ .end bn_mul_words
+ .align 3
+ .globl bn_sqr_words
+ .ent bn_sqr_words
+bn_sqr_words:
+bn_sqr_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ subq $18,4,$18
+ blt $18,$543 # if we are -1, -2, -3 or -4 goto tail code
+ ldq $20,0($17) # 1 1
+ .align 3
+$542:
+ mulq $20,$20,$5 ######
+ ldq $21,8($17) # 1 1
+	subq	$18,4,$18
+ umulh $20,$20,$1 ######
+ ldq $27,16($17) # 1 1
+ mulq $21,$21,$6 ######
+ ldq $28,24($17) # 1 1
+ stq $5,0($16) # r[0]
+ umulh $21,$21,$2 ######
+ stq $1,8($16) # r[1]
+ mulq $27,$27,$7 ######
+	stq	$6,16($16)	# r[2]
+	umulh	$27,$27,$3	######
+	stq	$2,24($16)	# r[3]
+	mulq	$28,$28,$8	######
+	stq	$7,32($16)	# r[4]
+	umulh	$28,$28,$4	######
+	stq	$3,40($16)	# r[5]
+
+	addq	$16,64,$16
+	addq	$17,32,$17
+	stq	$8,-16($16)	# r[6]
+	stq	$4,-8($16)	# r[7]
+
+ blt $18,$543
+ ldq $20,0($17) # 1 1
+ br $542
+
+$442:
+ ldq $20,0($17) # a[0]
+	mulq	$20,$20,$5	# a[0]*a[0] low part
+ addq $16,16,$16
+ addq $17,8,$17
+ subq $18,1,$18
+	umulh	$20,$20,$1	# a[0]*a[0] high part
+ stq $5,-16($16) # r[0]
+ stq $1,-8($16) # r[1]
+
+ bgt $18,$442
+ ret $31,($26),1 # else exit
+
+ .align 4
+$543:
+ addq $18,4,$18
+ bgt $18,$442 # goto tail code
+ ret $31,($26),1 # else exit
+ .end bn_sqr_words
+
+ .align 3
+ .globl bn_add_words
+ .ent bn_add_words
+bn_add_words:
+bn_add_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ subq $19,4,$19
+ bis $31,$31,$0 # carry = 0
+ blt $19,$900
+ ldq $5,0($17) # a[0]
+	ldq	$1,0($18)	# b[0]
+ .align 3
+$901:
+ addq $1,$5,$1 # r=a+b;
+ ldq $6,8($17) # a[1]
+ cmpult $1,$5,$22 # did we overflow?
+ ldq $2,8($18) # b[1]
+ addq $1,$0,$1 # c+= overflow
+ ldq $7,16($17) # a[2]
+ cmpult $1,$0,$0 # overflow?
+ ldq $3,16($18) # b[2]
+ addq $0,$22,$0
+ ldq $8,24($17) # a[3]
+ addq $2,$6,$2 # r=a+b;
+ ldq $4,24($18) # b[3]
+ cmpult $2,$6,$23 # did we overflow?
+ addq $3,$7,$3 # r=a+b;
+ addq $2,$0,$2 # c+= overflow
+ cmpult $3,$7,$24 # did we overflow?
+ cmpult $2,$0,$0 # overflow?
+ addq $4,$8,$4 # r=a+b;
+ addq $0,$23,$0
+ cmpult $4,$8,$25 # did we overflow?
+ addq $3,$0,$3 # c+= overflow
+ stq $1,0($16) # r[0]=c
+ cmpult $3,$0,$0 # overflow?
+ stq $2,8($16) # r[1]=c
+ addq $0,$24,$0
+ stq $3,16($16) # r[2]=c
+ addq $4,$0,$4 # c+= overflow
+ subq $19,4,$19 # loop--
+ cmpult $4,$0,$0 # overflow?
+ addq $17,32,$17 # a++
+ addq $0,$25,$0
+ stq $4,24($16) # r[3]=c
+ addq $18,32,$18 # b++
+ addq $16,32,$16 # r++
+
+ blt $19,$900
+ ldq $5,0($17) # a[0]
+	ldq	$1,0($18)	# b[0]
+ br $901
+ .align 4
+$945:
+ ldq $5,0($17) # a[0]
+	ldq	$1,0($18)	# b[0]
+ addq $1,$5,$1 # r=a+b;
+ subq $19,1,$19 # loop--
+ addq $1,$0,$1 # c+= overflow
+ addq $17,8,$17 # a++
+ cmpult $1,$5,$22 # did we overflow?
+ cmpult $1,$0,$0 # overflow?
+ addq $18,8,$18 # b++
+ stq $1,0($16) # r[0]=c
+ addq $0,$22,$0
+ addq $16,8,$16 # r++
+
+ bgt $19,$945
+ ret $31,($26),1 # else exit
+
+$900:
+ addq $19,4,$19
+ bgt $19,$945 # goto tail code
+ ret $31,($26),1 # else exit
+ .end bn_add_words
+
+ #
+ # What follows was taken directly from the C compiler with a few
+ # hacks to redo the labels.
+ #
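+ # Editor's note: bn_div64 divides the 128-bit quantity h:l by d and
+ # returns the 64-bit quotient, one 32-bit digit at a time (the $123
+ # loop runs twice).  Roughly, as a C prototype (my paraphrase):
+ #
+ #	BN_ULONG bn_div64(BN_ULONG h, BN_ULONG l, BN_ULONG d);
+ #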
+.text
+ .align 3
+ .globl bn_div64
+ .ent bn_div64
+bn_div64:
+ ldgp $29,0($27)
+bn_div64..ng:
+ lda $30,-48($30)
+ .frame $30,48,$26,0
+ stq $26,0($30)
+ stq $9,8($30)
+ stq $10,16($30)
+ stq $11,24($30)
+ stq $12,32($30)
+ stq $13,40($30)
+ .mask 0x4003e00,-48
+ .prologue 1
+ bis $16,$16,$9
+ bis $17,$17,$10
+ bis $18,$18,$11
+ bis $31,$31,$13
+ bis $31,2,$12
+ bne $11,$119
+ lda $0,-1
+ br $31,$136
+ .align 4
+$119:
+ bis $11,$11,$16
+ jsr $26,BN_num_bits_word
+ ldgp $29,0($26)
+ subq $0,64,$1
+ beq $1,$120
+ bis $31,1,$1
+ sll $1,$0,$1
+ cmpule $9,$1,$1
+ bne $1,$120
+ # lda $16,_IO_stderr_
+ # lda $17,$C32
+ # bis $0,$0,$18
+ # jsr $26,fprintf
+ # ldgp $29,0($26)
+ jsr $26,abort
+ ldgp $29,0($26)
+ .align 4
+$120:
+ bis $31,64,$3
+ cmpult $9,$11,$2
+ subq $3,$0,$1
+ addl $1,$31,$0
+ subq $9,$11,$1
+ cmoveq $2,$1,$9
+ beq $0,$122
+ zapnot $0,15,$2
+ subq $3,$0,$1
+ sll $11,$2,$11
+ sll $9,$2,$3
+ srl $10,$1,$1
+ sll $10,$2,$10
+ bis $3,$1,$9
+$122:
+ srl $11,32,$5
+ zapnot $11,15,$6
+ lda $7,-1
+ .align 5
+$123:
+ srl $9,32,$1
+ subq $1,$5,$1
+ bne $1,$126
+ zapnot $7,15,$27
+ br $31,$127
+ .align 4
+$126:
+ bis $9,$9,$24
+ bis $5,$5,$25
+ divqu $24,$25,$27
+$127:
+ srl $10,32,$4
+ .align 5
+$128:
+ mulq $27,$5,$1
+ subq $9,$1,$3
+ zapnot $3,240,$1
+ bne $1,$129
+ mulq $6,$27,$2
+ sll $3,32,$1
+ addq $1,$4,$1
+ cmpule $2,$1,$2
+ bne $2,$129
+ subq $27,1,$27
+ br $31,$128
+ .align 4
+$129:
+ mulq $27,$6,$1
+ mulq $27,$5,$4
+ srl $1,32,$3
+ sll $1,32,$1
+ addq $4,$3,$4
+ cmpult $10,$1,$2
+ subq $10,$1,$10
+ addq $2,$4,$2
+ cmpult $9,$2,$1
+ bis $2,$2,$4
+ beq $1,$134
+ addq $9,$11,$9
+ subq $27,1,$27
+$134:
+ subl $12,1,$12
+ subq $9,$4,$9
+ beq $12,$124
+ sll $27,32,$13
+ sll $9,32,$2
+ srl $10,32,$1
+ sll $10,32,$10
+ bis $2,$1,$9
+ br $31,$123
+ .align 4
+$124:
+ bis $13,$27,$0
+$136:
+ ldq $26,0($30)
+ ldq $9,8($30)
+ ldq $10,16($30)
+ ldq $11,24($30)
+ ldq $12,32($30)
+ ldq $13,40($30)
+ addq $30,48,$30
+ ret $31,($26),1
+ .end bn_div64
+
+ .set noat
+ .text
+ .align 3
+ .globl bn_sub_words
+ .ent bn_sub_words
+bn_sub_words:
+bn_sub_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ subq $19, 4, $19
+ bis $31, $31, $0
+ blt $19, $100
+ ldq $1, 0($17)
+ ldq $2, 0($18)
+$101:
+ ldq $3, 8($17)
+ cmpult $1, $2, $4
+ ldq $5, 8($18)
+ subq $1, $2, $1
+ ldq $6, 16($17)
+ cmpult $1, $0, $2
+ ldq $7, 16($18)
+ subq $1, $0, $23
+ ldq $8, 24($17)
+ addq $2, $4, $0
+ cmpult $3, $5, $24
+ subq $3, $5, $3
+ ldq $22, 24($18)
+ cmpult $3, $0, $5
+ subq $3, $0, $25
+ addq $5, $24, $0
+ cmpult $6, $7, $27
+ subq $6, $7, $6
+ stq $23, 0($16)
+ cmpult $6, $0, $7
+ subq $6, $0, $28
+ addq $7, $27, $0
+ cmpult $8, $22, $21
+ subq $8, $22, $8
+ stq $25, 8($16)
+ cmpult $8, $0, $22
+ subq $8, $0, $20
+ addq $22, $21, $0
+ stq $28, 16($16)
+ subq $19, 4, $19
+ stq $20, 24($16)
+ addq $17, 32, $17
+ addq $18, 32, $18
+ addq $16, 32, $16
+ blt $19, $100
+ ldq $1, 0($17)
+ ldq $2, 0($18)
+ br $101
+$102:
+ ldq $1, 0($17)
+ ldq $2, 0($18)
+ cmpult $1, $2, $27
+ subq $1, $2, $1
+ cmpult $1, $0, $2
+ subq $1, $0, $1
+ stq $1, 0($16)
+ addq $2, $27, $0
+ addq $17, 8, $17
+ addq $18, 8, $18
+ addq $16, 8, $16
+ subq $19, 1, $19
+ bgt $19, $102
+ ret $31,($26),1
+$100:
+ addq $19, 4, $19
+ bgt $19, $102
+$103:
+ ret $31,($26),1
+ .end bn_sub_words
diff --git a/crypto/bn/asm/alpha.works/add.pl b/crypto/bn/asm/alpha.works/add.pl
new file mode 100644
index 0000000..4dc76e6
--- /dev/null
+++ b/crypto/bn/asm/alpha.works/add.pl
@@ -0,0 +1,119 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_add_words
+ {
+ local($name)=@_;
+ local($cc,$a,$b,$r);
+
+ &init_pool(4);
+ ($cc)=GR("r0");
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $bp=&wparam(2);
+ $count=&wparam(3);
+
+ &function_begin($name,"");
+
+ &comment("");
+ &sub($count,4,$count);
+ &mov("zero",$cc);
+ &br(&label("finish"));
+ &blt($count,&label("finish"));
+
+ ($a0,$b0)=&NR(2);
+ &ld($a0,&QWPw(0,$ap));
+ &ld($b0,&QWPw(0,$bp));
+
+##########################################################
+ &set_label("loop");
+
+ ($a1)=&NR(1); &ld($a1,&QWPw(1,$ap));
+ ($b1)=&NR(1); &ld($b1,&QWPw(1,$bp));
+ ($a2)=&NR(1); &ld($a2,&QWPw(2,$ap));
+ ($b2)=&NR(1); &ld($b2,&QWPw(2,$bp));
+ ($a3)=&NR(1); &ld($a3,&QWPw(3,$ap));
+ ($b3)=&NR(1); &ld($b3,&QWPw(3,$bp));
+
+ ($o0,$t0)=&NR(2);
+ &add($a0,$b0,$o0);
+ &cmpult($o0,$b0,$t0);
+ &add($o0,$cc,$o0);
+ &cmpult($o0,$cc,$cc);
+ &add($cc,$t0,$cc); &FR($t0);
+
+ ($t1,$o1)=&NR(2);
+
+ &add($a1,$b1,$o1); &FR($a1);
+ &cmpult($o1,$b1,$t1); &FR($b1);
+ &add($o1,$cc,$o1);
+ &cmpult($o1,$cc,$cc);
+ &add($cc,$t1,$cc); &FR($t1);
+
+ ($t2,$o2)=&NR(2);
+
+ &add($a2,$b2,$o2); &FR($a2);
+ &cmpult($o2,$b2,$t2); &FR($b2);
+ &add($o2,$cc,$o2);
+ &cmpult($o2,$cc,$cc);
+ &add($cc,$t2,$cc); &FR($t2);
+
+ ($t3,$o3)=&NR(2);
+
+ &add($a3,$b3,$o3); &FR($a3);
+ &cmpult($o3,$b3,$t3); &FR($b3);
+ &add($o3,$cc,$o3);
+ &cmpult($o3,$cc,$cc);
+ &add($cc,$t3,$cc); &FR($t3);
+
+ &st($o0,&QWPw(0,$rp)); &FR($o0);
+	&st($o1,&QWPw(1,$rp)); &FR($o1);
+	&st($o2,&QWPw(2,$rp)); &FR($o2);
+	&st($o3,&QWPw(3,$rp)); &FR($o3);
+
+ &sub($count,4,$count); # count-=4
+	&add($ap,4*$QWS,$ap);			# ap+=4 words
+	&add($bp,4*$QWS,$bp);			# bp+=4 words
+	&add($rp,4*$QWS,$rp);			# rp+=4 words
+
+ &blt($count,&label("finish"));
+ &ld($a0,&QWPw(0,$ap));
+ &ld($b0,&QWPw(0,$bp));
+ &br(&label("loop"));
+##################################################
+ # Do the last 0..3 words
+
+ ($t0,$o0)=&NR(2);
+ &set_label("last_loop");
+
+ &ld($a0,&QWPw(0,$ap)); # get a
+ &ld($b0,&QWPw(0,$bp)); # get b
+
+ &add($a0,$b0,$o0);
+	&cmpult($o0,$b0,$t0);		# did we carry?
+	&add($o0,$cc,$o0);		# add the carry in
+	&cmpult($o0,$cc,$cc);		# did we carry?
+	&add($cc,$t0,$cc);		# add the carries
+ &st($o0,&QWPw(0,$rp)); # save
+
+ &add($ap,$QWS,$ap);
+ &add($bp,$QWS,$bp);
+ &add($rp,$QWS,$rp);
+ &sub($count,1,$count);
+ &bgt($count,&label("last_loop"));
+ &function_end_A($name);
+
+######################################################
+ &set_label("finish");
+ &add($count,4,$count);
+ &bgt($count,&label("last_loop"));
+
+ &FR($o0,$t0,$a0,$b0);
+ &set_label("end");
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha.works/div.pl b/crypto/bn/asm/alpha.works/div.pl
new file mode 100644
index 0000000..7ec1443
--- /dev/null
+++ b/crypto/bn/asm/alpha.works/div.pl
@@ -0,0 +1,144 @@
+#!/usr/local/bin/perl
+
+sub bn_div64
+ {
+ local($data)=<<'EOF';
+ #
+ # What follows was taken directly from the C compiler with a few
+ # hacks to redo the labels.
+ #
+.text
+ .set noreorder
+ .set volatile
+ .align 3
+ .globl bn_div64
+ .ent bn_div64
+bn_div64:
+ ldgp $29,0($27)
+bn_div64..ng:
+ lda $30,-48($30)
+ .frame $30,48,$26,0
+ stq $26,0($30)
+ stq $9,8($30)
+ stq $10,16($30)
+ stq $11,24($30)
+ stq $12,32($30)
+ stq $13,40($30)
+ .mask 0x4003e00,-48
+ .prologue 1
+ bis $16,$16,$9
+ bis $17,$17,$10
+ bis $18,$18,$11
+ bis $31,$31,$13
+ bis $31,2,$12
+ bne $11,$9119
+ lda $0,-1
+ br $31,$9136
+ .align 4
+$9119:
+ bis $11,$11,$16
+ jsr $26,BN_num_bits_word
+ ldgp $29,0($26)
+ subq $0,64,$1
+ beq $1,$9120
+ bis $31,1,$1
+ sll $1,$0,$1
+ cmpule $9,$1,$1
+ bne $1,$9120
+ # lda $16,_IO_stderr_
+ # lda $17,$C32
+ # bis $0,$0,$18
+ # jsr $26,fprintf
+ # ldgp $29,0($26)
+ jsr $26,abort
+ ldgp $29,0($26)
+ .align 4
+$9120:
+ bis $31,64,$3
+ cmpult $9,$11,$2
+ subq $3,$0,$1
+ addl $1,$31,$0
+ subq $9,$11,$1
+ cmoveq $2,$1,$9
+ beq $0,$9122
+ zapnot $0,15,$2
+ subq $3,$0,$1
+ sll $11,$2,$11
+ sll $9,$2,$3
+ srl $10,$1,$1
+ sll $10,$2,$10
+ bis $3,$1,$9
+$9122:
+ srl $11,32,$5
+ zapnot $11,15,$6
+ lda $7,-1
+ .align 5
+$9123:
+ srl $9,32,$1
+ subq $1,$5,$1
+ bne $1,$9126
+ zapnot $7,15,$27
+ br $31,$9127
+ .align 4
+$9126:
+ bis $9,$9,$24
+ bis $5,$5,$25
+ divqu $24,$25,$27
+$9127:
+ srl $10,32,$4
+ .align 5
+$9128:
+ mulq $27,$5,$1
+ subq $9,$1,$3
+ zapnot $3,240,$1
+ bne $1,$9129
+ mulq $6,$27,$2
+ sll $3,32,$1
+ addq $1,$4,$1
+ cmpule $2,$1,$2
+ bne $2,$9129
+ subq $27,1,$27
+ br $31,$9128
+ .align 4
+$9129:
+ mulq $27,$6,$1
+ mulq $27,$5,$4
+ srl $1,32,$3
+ sll $1,32,$1
+ addq $4,$3,$4
+ cmpult $10,$1,$2
+ subq $10,$1,$10
+ addq $2,$4,$2
+ cmpult $9,$2,$1
+ bis $2,$2,$4
+ beq $1,$9134
+ addq $9,$11,$9
+ subq $27,1,$27
+$9134:
+ subl $12,1,$12
+ subq $9,$4,$9
+ beq $12,$9124
+ sll $27,32,$13
+ sll $9,32,$2
+ srl $10,32,$1
+ sll $10,32,$10
+ bis $2,$1,$9
+ br $31,$9123
+ .align 4
+$9124:
+ bis $13,$27,$0
+$9136:
+ ldq $26,0($30)
+ ldq $9,8($30)
+ ldq $10,16($30)
+ ldq $11,24($30)
+ ldq $12,32($30)
+ ldq $13,40($30)
+ addq $30,48,$30
+ ret $31,($26),1
+ .end bn_div64
+EOF
+ &asm_add($data);
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha.works/mul.pl b/crypto/bn/asm/alpha.works/mul.pl
new file mode 100644
index 0000000..b182bae
--- /dev/null
+++ b/crypto/bn/asm/alpha.works/mul.pl
@@ -0,0 +1,116 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_mul_words
+ {
+ local($name)=@_;
+	local($cc,$a,$b,$r,$count);
+
+ &init_pool(4);
+ ($cc)=GR("r0");
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $count=&wparam(2);
+ $word=&wparam(3);
+
+ &function_begin($name,"");
+
+ &comment("");
+ &sub($count,4,$count);
+ &mov("zero",$cc);
+ &br(&label("finish"));
+ &blt($count,&label("finish"));
+
+ ($a0,$r0)=&NR(2);
+ &ld($a0,&QWPw(0,$ap));
+ &ld($r0,&QWPw(0,$rp));
+
+$a=<<'EOF';	# disabled unrolled loop, kept for reference only; never emitted
+##########################################################
+ &set_label("loop");
+
+ ($a1)=&NR(1); &ld($a1,&QWPw(1,$ap));
+ ($b1)=&NR(1); &ld($b1,&QWPw(1,$bp));
+ ($a2)=&NR(1); &ld($a2,&QWPw(2,$ap));
+ ($b2)=&NR(1); &ld($b2,&QWPw(2,$bp));
+ ($a3)=&NR(1); &ld($a3,&QWPw(3,$ap));
+ ($b3)=&NR(1); &ld($b3,&QWPw(3,$bp));
+
+ ($o0,$t0)=&NR(2);
+ &add($a0,$b0,$o0);
+ &cmpult($o0,$b0,$t0);
+ &add($o0,$cc,$o0);
+ &cmpult($o0,$cc,$cc);
+ &add($cc,$t0,$cc); &FR($t0);
+
+ ($t1,$o1)=&NR(2);
+
+ &add($a1,$b1,$o1); &FR($a1);
+ &cmpult($o1,$b1,$t1); &FR($b1);
+ &add($o1,$cc,$o1);
+ &cmpult($o1,$cc,$cc);
+ &add($cc,$t1,$cc); &FR($t1);
+
+ ($t2,$o2)=&NR(2);
+
+ &add($a2,$b2,$o2); &FR($a2);
+ &cmpult($o2,$b2,$t2); &FR($b2);
+ &add($o2,$cc,$o2);
+ &cmpult($o2,$cc,$cc);
+ &add($cc,$t2,$cc); &FR($t2);
+
+ ($t3,$o3)=&NR(2);
+
+ &add($a3,$b3,$o3); &FR($a3);
+ &cmpult($o3,$b3,$t3); &FR($b3);
+ &add($o3,$cc,$o3);
+ &cmpult($o3,$cc,$cc);
+ &add($cc,$t3,$cc); &FR($t3);
+
+ &st($o0,&QWPw(0,$rp)); &FR($o0);
+ &st($o1,&QWPw(0,$rp)); &FR($o1);
+ &st($o2,&QWPw(0,$rp)); &FR($o2);
+ &st($o3,&QWPw(0,$rp)); &FR($o3);
+
+ &sub($count,4,$count); # count-=4
+ &add($ap,4*$QWS,$ap); # count+=4
+ &add($bp,4*$QWS,$bp); # count+=4
+ &add($rp,4*$QWS,$rp); # count+=4
+
+ &blt($count,&label("finish"));
+ &ld($a0,&QWPw(0,$ap));
+ &ld($b0,&QWPw(0,$bp));
+ &br(&label("loop"));
+EOF
+##################################################
+ # Do the last 0..3 words
+
+ &set_label("last_loop");
+
+ &ld(($a0)=&NR(1),&QWPw(0,$ap)); # get a
+ &mul($a0,$word,($l0)=&NR(1));
+ &add($ap,$QWS,$ap);
+ &muh($a0,$word,($h0)=&NR(1)); &FR($a0);
+ &add($l0,$cc,$l0);
+ &add($rp,$QWS,$rp);
+ &sub($count,1,$count);
+ &cmpult($l0,$cc,$cc);
+ &st($l0,&QWPw(-1,$rp)); &FR($l0);
+ &add($h0,$cc,$cc); &FR($h0);
+
+ &bgt($count,&label("last_loop"));
+ &function_end_A($name);
+
+######################################################
+ &set_label("finish");
+ &add($count,4,$count);
+ &bgt($count,&label("last_loop"));
+
+ &set_label("end");
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha.works/mul_add.pl b/crypto/bn/asm/alpha.works/mul_add.pl
new file mode 100644
index 0000000..e37f631
--- /dev/null
+++ b/crypto/bn/asm/alpha.works/mul_add.pl
@@ -0,0 +1,120 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_mul_add_words
+ {
+ local($name)=@_;
+	local($cc,$a,$b,$r,$count);
+
+ &init_pool(4);
+ ($cc)=GR("r0");
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $count=&wparam(2);
+ $word=&wparam(3);
+
+ &function_begin($name,"");
+
+ &comment("");
+ &sub($count,4,$count);
+ &mov("zero",$cc);
+ &br(&label("finish"));
+ &blt($count,&label("finish"));
+
+ ($a0,$r0)=&NR(2);
+ &ld($a0,&QWPw(0,$ap));
+ &ld($r0,&QWPw(0,$rp));
+
+$a=<<'EOF';	# disabled unrolled loop, kept for reference only; never emitted
+##########################################################
+ &set_label("loop");
+
+ ($a1)=&NR(1); &ld($a1,&QWPw(1,$ap));
+ ($b1)=&NR(1); &ld($b1,&QWPw(1,$bp));
+ ($a2)=&NR(1); &ld($a2,&QWPw(2,$ap));
+ ($b2)=&NR(1); &ld($b2,&QWPw(2,$bp));
+ ($a3)=&NR(1); &ld($a3,&QWPw(3,$ap));
+ ($b3)=&NR(1); &ld($b3,&QWPw(3,$bp));
+
+ ($o0,$t0)=&NR(2);
+ &add($a0,$b0,$o0);
+ &cmpult($o0,$b0,$t0);
+ &add($o0,$cc,$o0);
+ &cmpult($o0,$cc,$cc);
+ &add($cc,$t0,$cc); &FR($t0);
+
+ ($t1,$o1)=&NR(2);
+
+ &add($a1,$b1,$o1); &FR($a1);
+ &cmpult($o1,$b1,$t1); &FR($b1);
+ &add($o1,$cc,$o1);
+ &cmpult($o1,$cc,$cc);
+ &add($cc,$t1,$cc); &FR($t1);
+
+ ($t2,$o2)=&NR(2);
+
+ &add($a2,$b2,$o2); &FR($a2);
+ &cmpult($o2,$b2,$t2); &FR($b2);
+ &add($o2,$cc,$o2);
+ &cmpult($o2,$cc,$cc);
+ &add($cc,$t2,$cc); &FR($t2);
+
+ ($t3,$o3)=&NR(2);
+
+ &add($a3,$b3,$o3); &FR($a3);
+ &cmpult($o3,$b3,$t3); &FR($b3);
+ &add($o3,$cc,$o3);
+ &cmpult($o3,$cc,$cc);
+ &add($cc,$t3,$cc); &FR($t3);
+
+ &st($o0,&QWPw(0,$rp)); &FR($o0);
+ &st($o1,&QWPw(0,$rp)); &FR($o1);
+ &st($o2,&QWPw(0,$rp)); &FR($o2);
+ &st($o3,&QWPw(0,$rp)); &FR($o3);
+
+ &sub($count,4,$count); # count-=4
+ &add($ap,4*$QWS,$ap); # count+=4
+ &add($bp,4*$QWS,$bp); # count+=4
+ &add($rp,4*$QWS,$rp); # count+=4
+
+ &blt($count,&label("finish"));
+ &ld($a0,&QWPw(0,$ap));
+ &ld($b0,&QWPw(0,$bp));
+ &br(&label("loop"));
+EOF
+##################################################
+ # Do the last 0..3 words
+
+ &set_label("last_loop");
+
+ &ld(($a0)=&NR(1),&QWPw(0,$ap)); # get a
+	&ld(($r0)=&NR(1),&QWPw(0,$rp));	# get r
+ &mul($a0,$word,($l0)=&NR(1));
+ &sub($count,1,$count);
+ &add($ap,$QWS,$ap);
+ &muh($a0,$word,($h0)=&NR(1)); &FR($a0);
+ &add($r0,$l0,$r0);
+ &add($rp,$QWS,$rp);
+ &cmpult($r0,$l0,($t0)=&NR(1)); &FR($l0);
+ &add($r0,$cc,$r0);
+ &add($h0,$t0,$h0); &FR($t0);
+ &cmpult($r0,$cc,$cc);
+ &st($r0,&QWPw(-1,$rp)); &FR($r0);
+ &add($h0,$cc,$cc); &FR($h0);
+
+ &bgt($count,&label("last_loop"));
+ &function_end_A($name);
+
+######################################################
+ &set_label("finish");
+ &add($count,4,$count);
+ &bgt($count,&label("last_loop"));
+
+ &set_label("end");
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha.works/mul_c4.pl b/crypto/bn/asm/alpha.works/mul_c4.pl
new file mode 100644
index 0000000..5efd201
--- /dev/null
+++ b/crypto/bn/asm/alpha.works/mul_c4.pl
@@ -0,0 +1,213 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub mul_add_c
+ {
+ local($a,$b,$c0,$c1,$c2)=@_;
+ local($l1,$h1,$t1,$t2);
+
+ &mul($a,$b,($l1)=&NR(1));
+ &muh($a,$b,($h1)=&NR(1));
+ &add($c0,$l1,$c0);
+ &cmpult($c0,$l1,($t1)=&NR(1)); &FR($l1);
+ &add($t1,$h1,$h1); &FR($t1);
+ &add($c1,$h1,$c1);
+ &cmpult($c1,$h1,($t2)=&NR(1)); &FR($h1);
+ &add($c2,$t2,$c2); &FR($t2);
+ }
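+
+# Editor's sketch (not in the original): mul_add_c emits the Comba
+# multiply-accumulate primitive.  Roughly, in C, writing lo()/hi() for
+# the low/high 64 bits of the 128-bit product (notation only):
+#
+#	c0 += lo(a*b);  carry = (c0 < lo(a*b));	/* mul, add, cmpult */
+#	h   = hi(a*b) + carry;			/* muh; cannot wrap */
+#	c1 += h;  c2 += (c1 < h);		/* propagate carry  */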
+
+sub bn_mul_comba4
+ {
+ local($name)=@_;
+ local(@a,@b,$r,$c0,$c1,$c2);
+
+ $cnt=1;
+ &init_pool(3);
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $bp=&wparam(2);
+
+ &function_begin($name,"");
+
+ &comment("");
+
+ &ld(($a[0])=&NR(1),&QWPw(0,$ap));
+ &ld(($b[0])=&NR(1),&QWPw(0,$bp));
+ &ld(($a[1])=&NR(1),&QWPw(1,$ap));
+ &ld(($b[1])=&NR(1),&QWPw(1,$bp));
+ &mul($a[0],$b[0],($r00)=&NR(1));
+ &ld(($a[2])=&NR(1),&QWPw(2,$ap));
+ &ld(($b[2])=&NR(1),&QWPw(2,$bp));
+ &muh($a[0],$b[0],($r01)=&NR(1));
+ &FR($ap); &ld(($a[3])=&NR(1),&QWPw(3,$ap));
+ &FR($bp); &ld(($b[3])=&NR(1),&QWPw(3,$bp));
+ &mul($a[0],$b[1],($r02)=&NR(1));
+
+ ($R,$H1,$H2)=&NR(3);
+
+ &st($r00,&QWPw(0,$rp)); &FR($r00);
+
+ &mov("zero",$R);
+ &mul($a[1],$b[0],($r03)=&NR(1));
+
+ &mov("zero",$H1);
+ &mov("zero",$H0);
+ &add($R,$r01,$R);
+ &muh($a[0],$b[1],($r04)=&NR(1));
+ &cmpult($R,$r01,($t01)=&NR(1)); &FR($r01);
+ &add($R,$r02,$R);
+	&add($H1,$t01,$H1); &FR($t01);
+ &muh($a[1],$b[0],($r05)=&NR(1));
+ &cmpult($R,$r02,($t02)=&NR(1)); &FR($r02);
+ &add($R,$r03,$R);
+	&add($H2,$t02,$H2); &FR($t02);
+ &mul($a[0],$b[2],($r06)=&NR(1));
+ &cmpult($R,$r03,($t03)=&NR(1)); &FR($r03);
+	&add($H1,$t03,$H1); &FR($t03);
+ &st($R,&QWPw(1,$rp));
+ &add($H1,$H2,$R);
+
+ &mov("zero",$H1);
+ &add($R,$r04,$R);
+ &mov("zero",$H2);
+ &mul($a[1],$b[1],($r07)=&NR(1));
+ &cmpult($R,$r04,($t04)=&NR(1)); &FR($r04);
+ &add($R,$r05,$R);
+	&add($H1,$t04,$H1); &FR($t04);
+ &mul($a[2],$b[0],($r08)=&NR(1));
+ &cmpult($R,$r05,($t05)=&NR(1)); &FR($r05);
+	&add($R,$r06,$R);
+	&add($H2,$t05,$H2); &FR($t05);
+ &muh($a[0],$b[2],($r09)=&NR(1));
+ &cmpult($R,$r06,($t06)=&NR(1)); &FR($r06);
+ &add($R,$r07,$R);
+	&add($H1,$t06,$H1); &FR($t06);
+ &muh($a[1],$b[1],($r10)=&NR(1));
+ &cmpult($R,$r07,($t07)=&NR(1)); &FR($r07);
+ &add($R,$r08,$R);
+	&add($H2,$t07,$H2); &FR($t07);
+ &muh($a[2],$b[0],($r11)=&NR(1));
+ &cmpult($R,$r08,($t08)=&NR(1)); &FR($r08);
+	&add($H1,$t08,$H1); &FR($t08);
+ &st($R,&QWPw(2,$rp));
+ &add($H1,$H2,$R);
+
+ &mov("zero",$H1);
+ &add($R,$r09,$R);
+ &mov("zero",$H2);
+ &mul($a[0],$b[3],($r12)=&NR(1));
+ &cmpult($R,$r09,($t09)=&NR(1)); &FR($r09);
+ &add($R,$r10,$R);
+	&add($H1,$t09,$H1); &FR($t09);
+ &mul($a[1],$b[2],($r13)=&NR(1));
+ &cmpult($R,$r10,($t10)=&NR(1)); &FR($r10);
+ &add($R,$r11,$R);
+	&add($H1,$t10,$H1); &FR($t10);
+ &mul($a[2],$b[1],($r14)=&NR(1));
+ &cmpult($R,$r11,($t11)=&NR(1)); &FR($r11);
+ &add($R,$r12,$R);
+	&add($H1,$t11,$H1); &FR($t11);
+ &mul($a[3],$b[0],($r15)=&NR(1));
+ &cmpult($R,$r12,($t12)=&NR(1)); &FR($r12);
+ &add($R,$r13,$R);
+	&add($H1,$t12,$H1); &FR($t12);
+ &muh($a[0],$b[3],($r16)=&NR(1));
+ &cmpult($R,$r13,($t13)=&NR(1)); &FR($r13);
+ &add($R,$r14,$R);
+	&add($H1,$t13,$H1); &FR($t13);
+ &muh($a[1],$b[2],($r17)=&NR(1));
+ &cmpult($R,$r14,($t14)=&NR(1)); &FR($r14);
+ &add($R,$r15,$R);
+	&add($H1,$t14,$H1); &FR($t14);
+ &muh($a[2],$b[1],($r18)=&NR(1));
+ &cmpult($R,$r15,($t15)=&NR(1)); &FR($r15);
+	&add($H1,$t15,$H1); &FR($t15);
+ &st($R,&QWPw(3,$rp));
+ &add($H1,$H2,$R);
+
+ &mov("zero",$H1);
+ &add($R,$r16,$R);
+ &mov("zero",$H2);
+ &muh($a[3],$b[0],($r19)=&NR(1));
+ &cmpult($R,$r16,($t16)=&NR(1)); &FR($r16);
+ &add($R,$r17,$R);
+	&add($H1,$t16,$H1); &FR($t16);
+ &mul($a[1],$b[3],($r20)=&NR(1));
+ &cmpult($R,$r17,($t17)=&NR(1)); &FR($r17);
+ &add($R,$r18,$R);
+	&add($H1,$t17,$H1); &FR($t17);
+ &mul($a[2],$b[2],($r21)=&NR(1));
+ &cmpult($R,$r18,($t18)=&NR(1)); &FR($r18);
+ &add($R,$r19,$R);
+	&add($H1,$t18,$H1); &FR($t18);
+ &mul($a[3],$b[1],($r22)=&NR(1));
+ &cmpult($R,$r19,($t19)=&NR(1)); &FR($r19);
+ &add($R,$r20,$R);
+	&add($H1,$t19,$H1); &FR($t19);
+ &muh($a[1],$b[3],($r23)=&NR(1));
+ &cmpult($R,$r20,($t20)=&NR(1)); &FR($r20);
+ &add($R,$r21,$R);
+	&add($H1,$t20,$H1); &FR($t20);
+ &muh($a[2],$b[2],($r24)=&NR(1));
+ &cmpult($R,$r21,($t21)=&NR(1)); &FR($r21);
+ &add($R,$r22,$R);
+	&add($H1,$t21,$H1); &FR($t21);
+ &muh($a[3],$b[1],($r25)=&NR(1));
+ &cmpult($R,$r22,($t22)=&NR(1)); &FR($r22);
+	&add($H1,$t22,$H1); &FR($t22);
+ &st($R,&QWPw(4,$rp));
+ &add($H1,$H2,$R);
+
+ &mov("zero",$H1);
+ &add($R,$r23,$R);
+ &mov("zero",$H2);
+ &mul($a[2],$b[3],($r26)=&NR(1));
+ &cmpult($R,$r23,($t23)=&NR(1)); &FR($r23);
+ &add($R,$r24,$R);
+	&add($H1,$t23,$H1); &FR($t23);
+ &mul($a[3],$b[2],($r27)=&NR(1));
+ &cmpult($R,$r24,($t24)=&NR(1)); &FR($r24);
+ &add($R,$r25,$R);
+	&add($H1,$t24,$H1); &FR($t24);
+ &muh($a[2],$b[3],($r28)=&NR(1));
+ &cmpult($R,$r25,($t25)=&NR(1)); &FR($r25);
+ &add($R,$r26,$R);
+	&add($H1,$t25,$H1); &FR($t25);
+ &muh($a[3],$b[2],($r29)=&NR(1));
+ &cmpult($R,$r26,($t26)=&NR(1)); &FR($r26);
+ &add($R,$r27,$R);
+	&add($H1,$t26,$H1); &FR($t26);
+ &mul($a[3],$b[3],($r30)=&NR(1));
+ &cmpult($R,$r27,($t27)=&NR(1)); &FR($r27);
+	&add($H1,$t27,$H1); &FR($t27);
+ &st($R,&QWPw(5,$rp));
+ &add($H1,$H2,$R);
+
+ &mov("zero",$H1);
+ &add($R,$r28,$R);
+ &mov("zero",$H2);
+ &muh($a[3],$b[3],($r31)=&NR(1));
+ &cmpult($R,$r28,($t28)=&NR(1)); &FR($r28);
+ &add($R,$r29,$R);
+	&add($H1,$t28,$H1); &FR($t28);
+ ############
+ &cmpult($R,$r29,($t29)=&NR(1)); &FR($r29);
+ &add($R,$r30,$R);
+	&add($H1,$t29,$H1); &FR($t29);
+ ############
+ &cmpult($R,$r30,($t30)=&NR(1)); &FR($r30);
+	&add($H1,$t30,$H1); &FR($t30);
+ &st($R,&QWPw(6,$rp));
+ &add($H1,$H2,$R);
+
+ &add($R,$r31,$R); &FR($r31);
+ &st($R,&QWPw(7,$rp));
+
+ &FR($R,$H1,$H2);
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha.works/mul_c4.works.pl b/crypto/bn/asm/alpha.works/mul_c4.works.pl
new file mode 100644
index 0000000..79d86dd
--- /dev/null
+++ b/crypto/bn/asm/alpha.works/mul_c4.works.pl
@@ -0,0 +1,98 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub mul_add_c
+ {
+ local($a,$b,$c0,$c1,$c2)=@_;
+ local($l1,$h1,$t1,$t2);
+
+#	print STDERR "count=$cnt\n"; $cnt++;	# debug trace (disabled)
+ &mul($a,$b,($l1)=&NR(1));
+ &muh($a,$b,($h1)=&NR(1));
+ &add($c0,$l1,$c0);
+ &cmpult($c0,$l1,($t1)=&NR(1)); &FR($l1);
+ &add($t1,$h1,$h1); &FR($t1);
+ &add($c1,$h1,$c1);
+ &cmpult($c1,$h1,($t2)=&NR(1)); &FR($h1);
+ &add($c2,$t2,$c2); &FR($t2);
+ }
+
+sub bn_mul_comba4
+ {
+ local($name)=@_;
+ local(@a,@b,$r,$c0,$c1,$c2);
+
+ $cnt=1;
+ &init_pool(3);
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $bp=&wparam(2);
+
+ &function_begin($name,"");
+
+ &comment("");
+
+ &ld(($a[0])=&NR(1),&QWPw(0,$ap));
+ &ld(($b[0])=&NR(1),&QWPw(0,$bp));
+ &ld(($a[1])=&NR(1),&QWPw(1,$ap));
+ &ld(($b[1])=&NR(1),&QWPw(1,$bp));
+ &ld(($a[2])=&NR(1),&QWPw(2,$ap));
+ &ld(($b[2])=&NR(1),&QWPw(2,$bp));
+ &ld(($a[3])=&NR(1),&QWPw(3,$ap)); &FR($ap);
+ &ld(($b[3])=&NR(1),&QWPw(3,$bp)); &FR($bp);
+
+ ($c0,$c1,$c2)=&NR(3);
+ &mov("zero",$c2);
+ &mul($a[0],$b[0],$c0);
+ &muh($a[0],$b[0],$c1);
+	&st($c0,&QWPw(0,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[0],$c0,$c1,$c2);
+	&st($c0,&QWPw(1,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[1],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[0],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[0],$c0,$c1,$c2);
+	&st($c0,&QWPw(2,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[3],$c0,$c1,$c2); &FR($a[0]);
+ &mul_add_c($a[1],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[0],$c0,$c1,$c2); &FR($b[0]);
+	&st($c0,&QWPw(3,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[1],$b[3],$c0,$c1,$c2); &FR($a[1]);
+ &mul_add_c($a[2],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[1],$c0,$c1,$c2); &FR($b[1]);
+	&st($c0,&QWPw(4,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[2],$b[3],$c0,$c1,$c2); &FR($a[2]);
+ &mul_add_c($a[3],$b[2],$c0,$c1,$c2); &FR($b[2]);
+	&st($c0,&QWPw(5,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[3],$b[3],$c0,$c1,$c2); &FR($a[3],$b[3]);
+ &st($c0,&QWPw(6,$rp));
+ &st($c1,&QWPw(7,$rp));
+
+ &FR($c0,$c1,$c2);
+
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha.works/mul_c8.pl b/crypto/bn/asm/alpha.works/mul_c8.pl
new file mode 100644
index 0000000..525ca74
--- /dev/null
+++ b/crypto/bn/asm/alpha.works/mul_c8.pl
@@ -0,0 +1,177 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_mul_comba8
+ {
+ local($name)=@_;
+ local(@a,@b,$r,$c0,$c1,$c2);
+
+ $cnt=1;
+ &init_pool(3);
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $bp=&wparam(2);
+
+ &function_begin($name,"");
+
+ &comment("");
+
+ &stack_push(2);
+ &ld(($a[0])=&NR(1),&QWPw(0,$ap));
+ &ld(($b[0])=&NR(1),&QWPw(0,$bp));
+ &st($reg_s0,&swtmp(0)); &FR($reg_s0);
+ &st($reg_s1,&swtmp(1)); &FR($reg_s1);
+ &ld(($a[1])=&NR(1),&QWPw(1,$ap));
+ &ld(($b[1])=&NR(1),&QWPw(1,$bp));
+ &ld(($a[2])=&NR(1),&QWPw(2,$ap));
+ &ld(($b[2])=&NR(1),&QWPw(2,$bp));
+ &ld(($a[3])=&NR(1),&QWPw(3,$ap));
+ &ld(($b[3])=&NR(1),&QWPw(3,$bp));
+	&ld(($a[4])=&NR(1),&QWPw(4,$ap));
+	&ld(($b[4])=&NR(1),&QWPw(4,$bp));
+	&ld(($a[5])=&NR(1),&QWPw(5,$ap));
+	&ld(($b[5])=&NR(1),&QWPw(5,$bp));
+	&ld(($a[6])=&NR(1),&QWPw(6,$ap));
+	&ld(($b[6])=&NR(1),&QWPw(6,$bp));
+	&ld(($a[7])=&NR(1),&QWPw(7,$ap)); &FR($ap);
+	&ld(($b[7])=&NR(1),&QWPw(7,$bp)); &FR($bp);
+
+ ($c0,$c1,$c2)=&NR(3);
+ &mov("zero",$c2);
+ &mul($a[0],$b[0],$c0);
+ &muh($a[0],$b[0],$c1);
+ &st($c0,&QWPw(0,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(1,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(2,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(3,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[4],$b[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(4,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[4],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[5],$b[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(5,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[4],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[5],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[6],$b[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(6,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[7],$c0,$c1,$c2); &FR($a[0]);
+ &mul_add_c($a[1],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[4],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[5],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[6],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[7],$b[0],$c0,$c1,$c2); &FR($b[0]);
+ &st($c0,&QWPw(7,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[1],$b[7],$c0,$c1,$c2); &FR($a[1]);
+ &mul_add_c($a[2],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[4],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[5],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[6],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[7],$b[1],$c0,$c1,$c2); &FR($b[1]);
+ &st($c0,&QWPw(8,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[2],$b[7],$c0,$c1,$c2); &FR($a[2]);
+ &mul_add_c($a[3],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[4],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[5],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[6],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[7],$b[2],$c0,$c1,$c2); &FR($b[2]);
+ &st($c0,&QWPw(9,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[3],$b[7],$c0,$c1,$c2); &FR($a[3]);
+ &mul_add_c($a[4],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[5],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[6],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[7],$b[3],$c0,$c1,$c2); &FR($b[3]);
+ &st($c0,&QWPw(10,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[4],$b[7],$c0,$c1,$c2); &FR($a[4]);
+ &mul_add_c($a[5],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[6],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[7],$b[4],$c0,$c1,$c2); &FR($b[4]);
+ &st($c0,&QWPw(11,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[5],$b[7],$c0,$c1,$c2); &FR($a[5]);
+ &mul_add_c($a[6],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[7],$b[5],$c0,$c1,$c2); &FR($b[5]);
+ &st($c0,&QWPw(12,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[6],$b[7],$c0,$c1,$c2); &FR($a[6]);
+ &mul_add_c($a[7],$b[6],$c0,$c1,$c2); &FR($b[6]);
+ &st($c0,&QWPw(13,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[7],$b[7],$c0,$c1,$c2); &FR($a[7],$b[7]);
+ &st($c0,&QWPw(14,$rp));
+ &st($c1,&QWPw(15,$rp));
+
+ &FR($c0,$c1,$c2);
+
+ &ld($reg_s0,&swtmp(0));
+ &ld($reg_s1,&swtmp(1));
+ &stack_pop(2);
+
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha.works/sqr.pl b/crypto/bn/asm/alpha.works/sqr.pl
new file mode 100644
index 0000000..a55b696
--- /dev/null
+++ b/crypto/bn/asm/alpha.works/sqr.pl
@@ -0,0 +1,113 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_sqr_words
+ {
+ local($name)=@_;
+	local($cc,$a,$b,$r,$count);
+
+ &init_pool(3);
+ ($cc)=GR("r0");
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $count=&wparam(2);
+
+ &function_begin($name,"");
+
+ &comment("");
+ &sub($count,4,$count);
+ &mov("zero",$cc);
+ &br(&label("finish"));
+ &blt($count,&label("finish"));
+
+ ($a0,$r0)=&NR(2);
+ &ld($a0,&QWPw(0,$ap));
+ &ld($r0,&QWPw(0,$rp));
+
+$a=<<'EOF';	# disabled unrolled loop, kept for reference only; never emitted
+##########################################################
+ &set_label("loop");
+
+ ($a1)=&NR(1); &ld($a1,&QWPw(1,$ap));
+ ($b1)=&NR(1); &ld($b1,&QWPw(1,$bp));
+ ($a2)=&NR(1); &ld($a2,&QWPw(2,$ap));
+ ($b2)=&NR(1); &ld($b2,&QWPw(2,$bp));
+ ($a3)=&NR(1); &ld($a3,&QWPw(3,$ap));
+ ($b3)=&NR(1); &ld($b3,&QWPw(3,$bp));
+
+ ($o0,$t0)=&NR(2);
+ &add($a0,$b0,$o0);
+ &cmpult($o0,$b0,$t0);
+ &add($o0,$cc,$o0);
+ &cmpult($o0,$cc,$cc);
+ &add($cc,$t0,$cc); &FR($t0);
+
+ ($t1,$o1)=&NR(2);
+
+ &add($a1,$b1,$o1); &FR($a1);
+ &cmpult($o1,$b1,$t1); &FR($b1);
+ &add($o1,$cc,$o1);
+ &cmpult($o1,$cc,$cc);
+ &add($cc,$t1,$cc); &FR($t1);
+
+ ($t2,$o2)=&NR(2);
+
+ &add($a2,$b2,$o2); &FR($a2);
+ &cmpult($o2,$b2,$t2); &FR($b2);
+ &add($o2,$cc,$o2);
+ &cmpult($o2,$cc,$cc);
+ &add($cc,$t2,$cc); &FR($t2);
+
+ ($t3,$o3)=&NR(2);
+
+ &add($a3,$b3,$o3); &FR($a3);
+ &cmpult($o3,$b3,$t3); &FR($b3);
+ &add($o3,$cc,$o3);
+ &cmpult($o3,$cc,$cc);
+ &add($cc,$t3,$cc); &FR($t3);
+
+ &st($o0,&QWPw(0,$rp)); &FR($o0);
+ &st($o1,&QWPw(0,$rp)); &FR($o1);
+ &st($o2,&QWPw(0,$rp)); &FR($o2);
+ &st($o3,&QWPw(0,$rp)); &FR($o3);
+
+ &sub($count,4,$count); # count-=4
+ &add($ap,4*$QWS,$ap); # count+=4
+ &add($bp,4*$QWS,$bp); # count+=4
+ &add($rp,4*$QWS,$rp); # count+=4
+
+ &blt($count,&label("finish"));
+ &ld($a0,&QWPw(0,$ap));
+ &ld($b0,&QWPw(0,$bp));
+ &br(&label("loop"));
+EOF
+##################################################
+ # Do the last 0..3 words
+
+ &set_label("last_loop");
+
+ &ld(($a0)=&NR(1),&QWPw(0,$ap)); # get a
+ &mul($a0,$a0,($l0)=&NR(1));
+ &add($ap,$QWS,$ap);
+ &add($rp,2*$QWS,$rp);
+ &sub($count,1,$count);
+ &muh($a0,$a0,($h0)=&NR(1)); &FR($a0);
+ &st($l0,&QWPw(-2,$rp)); &FR($l0);
+ &st($h0,&QWPw(-1,$rp)); &FR($h0);
+
+ &bgt($count,&label("last_loop"));
+ &function_end_A($name);
+
+######################################################
+ &set_label("finish");
+ &add($count,4,$count);
+ &bgt($count,&label("last_loop"));
+
+ &set_label("end");
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha.works/sqr_c4.pl b/crypto/bn/asm/alpha.works/sqr_c4.pl
new file mode 100644
index 0000000..bf33f5b
--- /dev/null
+++ b/crypto/bn/asm/alpha.works/sqr_c4.pl
@@ -0,0 +1,109 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub sqr_add_c
+ {
+ local($a,$c0,$c1,$c2)=@_;
+ local($l1,$h1,$t1,$t2);
+
+ &mul($a,$a,($l1)=&NR(1));
+ &muh($a,$a,($h1)=&NR(1));
+ &add($c0,$l1,$c0);
+ &add($c1,$h1,$c1);
+ &cmpult($c0,$l1,($t1)=&NR(1)); &FR($l1);
+ &cmpult($c1,$h1,($t2)=&NR(1)); &FR($h1);
+ &add($c1,$t1,$c1); &FR($t1);
+ &add($c2,$t2,$c2); &FR($t2);
+ }
+
+sub sqr_add_c2
+ {
+ local($a,$b,$c0,$c1,$c2)=@_;
+	local($l1,$h1,$lc1,$hc1);
+
+ &mul($a,$b,($l1)=&NR(1));
+ &muh($a,$b,($h1)=&NR(1));
+ &cmplt($l1,"zero",($lc1)=&NR(1));
+ &cmplt($h1,"zero",($hc1)=&NR(1));
+ &add($l1,$l1,$l1);
+ &add($h1,$h1,$h1);
+ &add($h1,$lc1,$h1); &FR($lc1);
+ &add($c2,$hc1,$c2); &FR($hc1);
+
+ &add($c0,$l1,$c0);
+ &add($c1,$h1,$c1);
+ &cmpult($c0,$l1,($lc1)=&NR(1)); &FR($l1);
+ &cmpult($c1,$h1,($hc1)=&NR(1)); &FR($h1);
+
+ &add($c1,$lc1,$c1); &FR($lc1);
+ &add($c2,$hc1,$c2); &FR($hc1);
+ }
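+
+# Editor's sketch (not in the original): sqr_add_c2 handles the cross
+# terms of a square, which occur twice (a[i]*a[j] and a[j]*a[i]), so the
+# 128-bit product is doubled before it is accumulated.  The cmplt
+# against zero reads each half's top (sign) bit so nothing is lost in
+# the doubling.  Roughly, in C, with lo()/hi() as the low/high 64 bits
+# of the 128-bit product t = a*b (notation only):
+#
+#	c2 += top bit of t;		/* lost when t is doubled */
+#	t <<= 1;
+#	c0 += lo(t);  carry0 = (c0 < lo(t));
+#	c1 += hi(t);  carry1 = (c1 < hi(t));
+#	c1 += carry0; c2 += carry1;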
+
+
+sub bn_sqr_comba4
+ {
+ local($name)=@_;
+ local(@a,@b,$r,$c0,$c1,$c2);
+
+ $cnt=1;
+ &init_pool(2);
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+
+ &function_begin($name,"");
+
+ &comment("");
+
+ &ld(($a[0])=&NR(1),&QWPw(0,$ap));
+ &ld(($a[1])=&NR(1),&QWPw(1,$ap));
+ &ld(($a[2])=&NR(1),&QWPw(2,$ap));
+ &ld(($a[3])=&NR(1),&QWPw(3,$ap)); &FR($ap);
+
+ ($c0,$c1,$c2)=&NR(3);
+
+ &mov("zero",$c2);
+ &mul($a[0],$a[0],$c0);
+ &muh($a[0],$a[0],$c1);
+ &st($c0,&QWPw(0,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[0],$a[1],$c0,$c1,$c2);
+ &st($c0,&QWPw(1,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[2],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(2,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[3],$a[0],$c0,$c1,$c2);
+ &sqr_add_c2($a[2],$a[1],$c0,$c1,$c2);
+ &st($c0,&QWPw(3,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[2],$c0,$c1,$c2);
+ &sqr_add_c2($a[3],$a[1],$c0,$c1,$c2);
+ &st($c0,&QWPw(4,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[3],$a[2],$c0,$c1,$c2);
+ &st($c0,&QWPw(5,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[3],$c0,$c1,$c2);
+ &st($c0,&QWPw(6,$rp));
+ &st($c1,&QWPw(7,$rp));
+
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha.works/sqr_c8.pl b/crypto/bn/asm/alpha.works/sqr_c8.pl
new file mode 100644
index 0000000..b4afe08
--- /dev/null
+++ b/crypto/bn/asm/alpha.works/sqr_c8.pl
@@ -0,0 +1,132 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_sqr_comba8
+ {
+ local($name)=@_;
+ local(@a,@b,$r,$c0,$c1,$c2);
+
+ $cnt=1;
+ &init_pool(2);
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+
+ &function_begin($name,"");
+
+ &comment("");
+
+ &ld(($a[0])=&NR(1),&QWPw(0,$ap));
+ &ld(($a[1])=&NR(1),&QWPw(1,$ap));
+ &ld(($a[2])=&NR(1),&QWPw(2,$ap));
+ &ld(($a[3])=&NR(1),&QWPw(3,$ap));
+ &ld(($a[4])=&NR(1),&QWPw(4,$ap));
+ &ld(($a[5])=&NR(1),&QWPw(5,$ap));
+ &ld(($a[6])=&NR(1),&QWPw(6,$ap));
+ &ld(($a[7])=&NR(1),&QWPw(7,$ap)); &FR($ap);
+
+ ($c0,$c1,$c2)=&NR(3);
+
+ &mov("zero",$c2);
+ &mul($a[0],$a[0],$c0);
+ &muh($a[0],$a[0],$c1);
+ &st($c0,&QWPw(0,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[1],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(1,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[2],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(2,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[2],$a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[3],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(3,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[2],$c0,$c1,$c2);
+ &sqr_add_c2($a[3],$a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[4],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(4,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[3],$a[2],$c0,$c1,$c2);
+ &sqr_add_c2($a[4],$a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[5],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(5,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[3],$c0,$c1,$c2);
+ &sqr_add_c2($a[4],$a[2],$c0,$c1,$c2);
+ &sqr_add_c2($a[5],$a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[6],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(6,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[4],$a[3],$c0,$c1,$c2);
+ &sqr_add_c2($a[5],$a[2],$c0,$c1,$c2);
+ &sqr_add_c2($a[6],$a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[7],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(7,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[4],$c0,$c1,$c2);
+ &sqr_add_c2($a[5],$a[3],$c0,$c1,$c2);
+ &sqr_add_c2($a[6],$a[2],$c0,$c1,$c2);
+ &sqr_add_c2($a[7],$a[1],$c0,$c1,$c2);
+ &st($c0,&QWPw(8,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[5],$a[4],$c0,$c1,$c2);
+ &sqr_add_c2($a[6],$a[3],$c0,$c1,$c2);
+ &sqr_add_c2($a[7],$a[2],$c0,$c1,$c2);
+ &st($c0,&QWPw(9,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[5],$c0,$c1,$c2);
+ &sqr_add_c2($a[6],$a[4],$c0,$c1,$c2);
+ &sqr_add_c2($a[7],$a[3],$c0,$c1,$c2);
+ &st($c0,&QWPw(10,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[6],$a[5],$c0,$c1,$c2);
+ &sqr_add_c2($a[7],$a[4],$c0,$c1,$c2);
+ &st($c0,&QWPw(11,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[6],$c0,$c1,$c2);
+ &sqr_add_c2($a[7],$a[5],$c0,$c1,$c2);
+ &st($c0,&QWPw(12,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[7],$a[6],$c0,$c1,$c2);
+ &st($c0,&QWPw(13,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[7],$c0,$c1,$c2);
+ &st($c0,&QWPw(14,$rp));
+ &st($c1,&QWPw(15,$rp));
+
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha.works/sub.pl b/crypto/bn/asm/alpha.works/sub.pl
new file mode 100644
index 0000000..d998da5
--- /dev/null
+++ b/crypto/bn/asm/alpha.works/sub.pl
@@ -0,0 +1,108 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_sub_words
+ {
+ local($name)=@_;
+ local($cc,$a,$b,$r);
+
+ &init_pool(4);
+ ($cc)=GR("r0");
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $bp=&wparam(2);
+ $count=&wparam(3);
+
+ &function_begin($name,"");
+
+ &comment("");
+ &sub($count,4,$count);
+ &mov("zero",$cc);
+ &blt($count,&label("finish"));
+
+ ($a0,$b0)=&NR(2);
+ &ld($a0,&QWPw(0,$ap));
+ &ld($b0,&QWPw(0,$bp));
+
+##########################################################
+ &set_label("loop");
+
+ ($a1,$tmp,$b1,$a2,$b2,$a3,$b3,$o0)=&NR(8);
+ &ld($a1,&QWPw(1,$ap));
+ &cmpult($a0,$b0,$tmp); # will we borrow?
+ &ld($b1,&QWPw(1,$bp));
+ &sub($a0,$b0,$a0); # do the subtract
+ &ld($a2,&QWPw(2,$ap));
+ &cmpult($a0,$cc,$b0); # will we borrow?
+ &ld($b2,&QWPw(2,$bp));
+	&sub($a0,$cc,$o0);		# subtract the borrow
+ &ld($a3,&QWPw(3,$ap));
+ &add($b0,$tmp,$cc); ($t1,$o1)=&NR(2); &FR($tmp);
+
+ &cmpult($a1,$b1,$t1); # will we borrow?
+ &sub($a1,$b1,$a1); # do the subtract
+ &ld($b3,&QWPw(3,$bp));
+ &cmpult($a1,$cc,$b1); # will we borrow?
+	&sub($a1,$cc,$o1);		# subtract the borrow
+ &add($b1,$t1,$cc); ($tmp,$o2)=&NR(2); &FR($t1,$a1,$b1);
+
+ &cmpult($a2,$b2,$tmp); # will we borrow?
+ &sub($a2,$b2,$a2); # do the subtract
+ &st($o0,&QWPw(0,$rp)); &FR($o0); # save
+ &cmpult($a2,$cc,$b2); # will we borrow?
+	&sub($a2,$cc,$o2);		# subtract the borrow
+ &add($b2,$tmp,$cc); ($t3,$o3)=&NR(2); &FR($tmp,$a2,$b2);
+
+ &cmpult($a3,$b3,$t3); # will we borrow?
+ &sub($a3,$b3,$a3); # do the subtract
+ &st($o1,&QWPw(1,$rp)); &FR($o1);
+ &cmpult($a3,$cc,$b3); # will we borrow?
+	&sub($a3,$cc,$o3);		# subtract the borrow
+ &add($b3,$t3,$cc); &FR($t3,$a3,$b3);
+
+ &st($o2,&QWPw(2,$rp)); &FR($o2);
+ &sub($count,4,$count); # count-=4
+ &st($o3,&QWPw(3,$rp)); &FR($o3);
+	&add($ap,4*$QWS,$ap);		# ap+=4 words
+	&add($bp,4*$QWS,$bp);		# bp+=4 words
+	&add($rp,4*$QWS,$rp);		# rp+=4 words
+
+ &blt($count,&label("finish"));
+ &ld($a0,&QWPw(0,$ap));
+ &ld($b0,&QWPw(0,$bp));
+ &br(&label("loop"));
+##################################################
+ # Do the last 0..3 words
+
+ &set_label("last_loop");
+
+ &ld($a0,&QWPw(0,$ap)); # get a
+ &ld($b0,&QWPw(0,$bp)); # get b
+ &cmpult($a0,$b0,$tmp); # will we borrow?
+ &sub($a0,$b0,$a0); # do the subtract
+ &cmpult($a0,$cc,$b0); # will we borrow?
+	&sub($a0,$cc,$a0);		# subtract the borrow
+ &st($a0,&QWPw(0,$rp)); # save
+ &add($b0,$tmp,$cc); # add the borrows
+
+ &add($ap,$QWS,$ap);
+ &add($bp,$QWS,$bp);
+ &add($rp,$QWS,$rp);
+ &sub($count,1,$count);
+ &bgt($count,&label("last_loop"));
+ &function_end_A($name);
+
+######################################################
+ &set_label("finish");
+ &add($count,4,$count);
+ &bgt($count,&label("last_loop"));
+
+ &FR($a0,$b0);
+ &set_label("end");
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha/add.pl b/crypto/bn/asm/alpha/add.pl
new file mode 100644
index 0000000..13bf516
--- /dev/null
+++ b/crypto/bn/asm/alpha/add.pl
@@ -0,0 +1,118 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_add_words
+ {
+ local($name)=@_;
+ local($cc,$a,$b,$r);
+
+ &init_pool(4);
+ ($cc)=GR("r0");
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $bp=&wparam(2);
+ $count=&wparam(3);
+
+ &function_begin($name,"");
+
+ &comment("");
+ &sub($count,4,$count);
+ &mov("zero",$cc);
+ &blt($count,&label("finish"));
+
+ ($a0,$b0)=&NR(2);
+
+##########################################################
+ &set_label("loop");
+
+ &ld(($a0)=&NR(1),&QWPw(0,$ap));
+ &ld(($b0)=&NR(1),&QWPw(0,$bp));
+ &ld(($a1)=&NR(1),&QWPw(1,$ap));
+ &ld(($b1)=&NR(1),&QWPw(1,$bp));
+
+ ($o0,$t0)=&NR(2);
+ &add($a0,$b0,$o0);
+ &ld(($a2)=&NR(1),&QWPw(2,$ap));
+ &cmpult($o0,$b0,$t0);
+ &add($o0,$cc,$o0);
+ &cmpult($o0,$cc,$cc);
+ &ld(($b2)=&NR(1),&QWPw(2,$bp));
+ &add($cc,$t0,$cc); &FR($t0);
+
+ ($t1,$o1)=&NR(2);
+
+ &add($a1,$b1,$o1); &FR($a1);
+ &cmpult($o1,$b1,$t1); &FR($b1);
+ &add($o1,$cc,$o1);
+ &cmpult($o1,$cc,$cc);
+ &ld(($a3)=&NR(1),&QWPw(3,$ap));
+ &add($cc,$t1,$cc); &FR($t1);
+
+ ($t2,$o2)=&NR(2);
+
+ &add($a2,$b2,$o2); &FR($a2);
+ &cmpult($o2,$b2,$t2); &FR($b2);
+ &add($o2,$cc,$o2);
+ &cmpult($o2,$cc,$cc);
+ &ld(($b3)=&NR(1),&QWPw(3,$bp));
+ &st($o0,&QWPw(0,$rp)); &FR($o0);
+ &add($cc,$t2,$cc); &FR($t2);
+
+ ($t3,$o3)=&NR(2);
+
+	&st($o1,&QWPw(1,$rp)); &FR($o1);
+ &add($a3,$b3,$o3); &FR($a3);
+ &cmpult($o3,$b3,$t3); &FR($b3);
+ &add($o3,$cc,$o3);
+	&st($o2,&QWPw(2,$rp)); &FR($o2);
+ &cmpult($o3,$cc,$cc);
+	&st($o3,&QWPw(3,$rp)); &FR($o3);
+ &add($cc,$t3,$cc); &FR($t3);
+
+
+ &sub($count,4,$count); # count-=4
+	&add($ap,4*$QWS,$ap);		# ap+=4 words
+	&add($bp,4*$QWS,$bp);		# bp+=4 words
+	&add($rp,4*$QWS,$rp);		# rp+=4 words
+
+ ###
+ &bge($count,&label("loop"));
+ ###
+ &br(&label("finish"));
+##################################################
+ # Do the last 0..3 words
+
+ ($t0,$o0)=&NR(2);
+ &set_label("last_loop");
+
+ &ld($a0,&QWPw(0,$ap)); # get a
+ &ld($b0,&QWPw(0,$bp)); # get b
+ &add($ap,$QWS,$ap);
+ &add($bp,$QWS,$bp);
+ &add($a0,$b0,$o0);
+ &sub($count,1,$count);
+	&cmpult($o0,$b0,$t0);	# did we carry?
+	&add($o0,$cc,$o0);	# add the carry in
+	&cmpult($o0,$cc,$cc);	# did we carry?
+ &add($rp,$QWS,$rp);
+ &st($o0,&QWPw(-1,$rp)); # save
+ &add($cc,$t0,$cc); # add the borrows
+
+ ###
+ &bgt($count,&label("last_loop"));
+ &function_end_A($name);
+
+######################################################
+ &set_label("finish");
+ &add($count,4,$count);
+ &bgt($count,&label("last_loop"));
+
+ &FR($o0,$t0,$a0,$b0);
+ &set_label("end");
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
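The carry chain for addition is the mirror image: a cmpult after each add recovers the carry bit. A minimal C sketch (hypothetical bn_add_words_ref, 64-bit BN_ULONG assumed):

	typedef unsigned long BN_ULONG;

	BN_ULONG bn_add_words_ref(BN_ULONG *r, const BN_ULONG *a,
				  const BN_ULONG *b, int n)
		{
		BN_ULONG cc = 0;		/* running carry, 0 or 1 */

		while (n-- > 0)
			{
			BN_ULONG o = *a + *b;
			BN_ULONG t = (o < *b);	/* cmpult: carry out of a+b */
			o += cc;		/* add the old carry */
			cc = (o < cc) + t;	/* the two carries cannot both be set */
			*r++ = o; a++; b++;
			}
		return (cc);
		}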
diff --git a/crypto/bn/asm/alpha/div.pl b/crypto/bn/asm/alpha/div.pl
new file mode 100644
index 0000000..e9e6808
--- /dev/null
+++ b/crypto/bn/asm/alpha/div.pl
@@ -0,0 +1,144 @@
+#!/usr/local/bin/perl
+
+sub bn_div_words
+ {
+ local($data)=<<'EOF';
+ #
+ # What follows was taken directly from the C compiler with a few
+	# hacks to redo the labels.
+ #
+.text
+ .set noreorder
+ .set volatile
+ .align 3
+ .globl bn_div_words
+ .ent bn_div_words
+bn_div_words:
+ ldgp $29,0($27)
+bn_div_words.ng:
+ lda $30,-48($30)
+ .frame $30,48,$26,0
+ stq $26,0($30)
+ stq $9,8($30)
+ stq $10,16($30)
+ stq $11,24($30)
+ stq $12,32($30)
+ stq $13,40($30)
+ .mask 0x4003e00,-48
+ .prologue 1
+ bis $16,$16,$9
+ bis $17,$17,$10
+ bis $18,$18,$11
+ bis $31,$31,$13
+ bis $31,2,$12
+ bne $11,$9119
+ lda $0,-1
+ br $31,$9136
+ .align 4
+$9119:
+ bis $11,$11,$16
+ jsr $26,BN_num_bits_word
+ ldgp $29,0($26)
+ subq $0,64,$1
+ beq $1,$9120
+ bis $31,1,$1
+ sll $1,$0,$1
+ cmpule $9,$1,$1
+ bne $1,$9120
+ # lda $16,_IO_stderr_
+ # lda $17,$C32
+ # bis $0,$0,$18
+ # jsr $26,fprintf
+ # ldgp $29,0($26)
+ jsr $26,abort
+ ldgp $29,0($26)
+ .align 4
+$9120:
+ bis $31,64,$3
+ cmpult $9,$11,$2
+ subq $3,$0,$1
+ addl $1,$31,$0
+ subq $9,$11,$1
+ cmoveq $2,$1,$9
+ beq $0,$9122
+ zapnot $0,15,$2
+ subq $3,$0,$1
+ sll $11,$2,$11
+ sll $9,$2,$3
+ srl $10,$1,$1
+ sll $10,$2,$10
+ bis $3,$1,$9
+$9122:
+ srl $11,32,$5
+ zapnot $11,15,$6
+ lda $7,-1
+ .align 5
+$9123:
+ srl $9,32,$1
+ subq $1,$5,$1
+ bne $1,$9126
+ zapnot $7,15,$27
+ br $31,$9127
+ .align 4
+$9126:
+ bis $9,$9,$24
+ bis $5,$5,$25
+ divqu $24,$25,$27
+$9127:
+ srl $10,32,$4
+ .align 5
+$9128:
+ mulq $27,$5,$1
+ subq $9,$1,$3
+ zapnot $3,240,$1
+ bne $1,$9129
+ mulq $6,$27,$2
+ sll $3,32,$1
+ addq $1,$4,$1
+ cmpule $2,$1,$2
+ bne $2,$9129
+ subq $27,1,$27
+ br $31,$9128
+ .align 4
+$9129:
+ mulq $27,$6,$1
+ mulq $27,$5,$4
+ srl $1,32,$3
+ sll $1,32,$1
+ addq $4,$3,$4
+ cmpult $10,$1,$2
+ subq $10,$1,$10
+ addq $2,$4,$2
+ cmpult $9,$2,$1
+ bis $2,$2,$4
+ beq $1,$9134
+ addq $9,$11,$9
+ subq $27,1,$27
+$9134:
+ subl $12,1,$12
+ subq $9,$4,$9
+ beq $12,$9124
+ sll $27,32,$13
+ sll $9,32,$2
+ srl $10,32,$1
+ sll $10,32,$10
+ bis $2,$1,$9
+ br $31,$9123
+ .align 4
+$9124:
+ bis $13,$27,$0
+$9136:
+ ldq $26,0($30)
+ ldq $9,8($30)
+ ldq $10,16($30)
+ ldq $11,24($30)
+ ldq $12,32($30)
+ ldq $13,40($30)
+ addq $30,48,$30
+ ret $31,($26),1
+ .end bn_div_words
+EOF
+ &asm_add($data);
+ }
+
+1;
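Functionally the compiler output above is a 128/64-bit divide with a one-word quotient; it normalizes the divisor via BN_num_bits_word and then iterates in 32-bit halves. The contract can be sketched in C as follows (bn_div_words_ref is a hypothetical name; GCC-style __int128 and h < d are assumed so the quotient fits in 64 bits):

	typedef unsigned long BN_ULONG;

	BN_ULONG bn_div_words_ref(BN_ULONG h, BN_ULONG l, BN_ULONG d)
		{
		/* assumes h < d, so the quotient fits in one word */
		unsigned __int128 n = ((unsigned __int128)h << 64) | l;

		return ((BN_ULONG)(n / d));
		}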
diff --git a/crypto/bn/asm/alpha/mul.pl b/crypto/bn/asm/alpha/mul.pl
new file mode 100644
index 0000000..76c9265
--- /dev/null
+++ b/crypto/bn/asm/alpha/mul.pl
@@ -0,0 +1,104 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_mul_words
+ {
+ local($name)=@_;
+	local($cc,$a,$b,$r,$count);
+
+ &init_pool(4);
+ ($cc)=GR("r0");
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $count=&wparam(2);
+ $word=&wparam(3);
+
+ &function_begin($name,"");
+
+ &comment("");
+ &sub($count,4,$count);
+ &mov("zero",$cc);
+ ###
+ &blt($count,&label("finish"));
+
+ ($a0)=&NR(1); &ld($a0,&QWPw(0,$ap));
+
+ &set_label("loop");
+
+ ($a1)=&NR(1); &ld($a1,&QWPw(1,$ap));
+ ($a2)=&NR(1); &ld($a2,&QWPw(2,$ap));
+
+	&muh($a0,$word,($h0)=&NR(1));
+	($a3)=&NR(1); &ld($a3,&QWPw(3,$ap));
+	### wait 8
+	&mul($a0,$word,($l0)=&NR(1)); &FR($a0);
+	### wait 8
+	&muh($a1,$word,($h1)=&NR(1));
+	&add($l0,$cc,$l0);	### wait 8
+	&mul($a1,$word,($l1)=&NR(1)); &FR($a1);
+	&cmpult($l0,$cc,$cc);	### wait 8
+	&muh($a2,$word,($h2)=&NR(1));
+	&add($h0,$cc,$cc); &FR($h0);	### wait 8
+	&mul($a2,$word,($l2)=&NR(1)); &FR($a2);
+	&add($l1,$cc,$l1);	### wait 8
+	&st($l0,&QWPw(0,$rp)); &FR($l0);
+	&cmpult($l1,$cc,$cc);	### wait 8
+	&muh($a3,$word,($h3)=&NR(1));
+	&add($h1,$cc,$cc); &FR($h1);
+	&mul($a3,$word,($l3)=&NR(1)); &FR($a3);
+ &add($l2,$cc,$l2);
+ &st($l1,&QWPw(1,$rp)); &FR($l1);
+ &cmpult($l2,$cc,$cc);
+ &add($h2,$cc,$cc); &FR($h2);
+ &sub($count,4,$count); # count-=4
+ &st($l2,&QWPw(2,$rp)); &FR($l2);
+ &add($l3,$cc,$l3);
+ &cmpult($l3,$cc,$cc);
+	&add($h3,$cc,$cc); &FR($h3);
+	&add($ap,4*$QWS,$ap);	# ap+=4
+	&st($l3,&QWPw(3,$rp)); &FR($l3);
+	&add($rp,4*$QWS,$rp);	# rp+=4
+ ###
+ &blt($count,&label("finish"));
+ ($a0)=&NR(1); &ld($a0,&QWPw(0,$ap));
+	&br(&label("loop"));
+##################################################
+
+##################################################
+ # Do the last 0..3 words
+
+ &set_label("last_loop");
+
+ &ld(($a0)=&NR(1),&QWPw(0,$ap)); # get a
+ ###
+ ###
+ ###
+ &muh($a0,$word,($h0)=&NR(1));
+ ### Wait 8 for next mul issue
+	&mul($a0,$word,($l0)=&NR(1)); &FR($a0);
+	&add($ap,$QWS,$ap);
+	### Lose 12 cycles until the result is available
+ &add($rp,$QWS,$rp);
+ &sub($count,1,$count);
+ &add($l0,$cc,$l0);
+ ###
+	&cmpult($l0,$cc,$cc);
+	&st($l0,&QWPw(-1,$rp)); &FR($l0);
+ &add($h0,$cc,$cc); &FR($h0);
+ &bgt($count,&label("last_loop"));
+ &function_end_A($name);
+
+######################################################
+ &set_label("finish");
+ &add($count,4,$count);
+ &bgt($count,&label("last_loop"));
+
+ &set_label("end");
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
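The mul/muh pair above yields the full 128-bit product of two quadwords. As a C sketch of what the loop computes (hypothetical bn_mul_words_ref, 64-bit BN_ULONG and GCC-style __int128 assumed):

	typedef unsigned long BN_ULONG;

	BN_ULONG bn_mul_words_ref(BN_ULONG *r, const BN_ULONG *a,
				  int n, BN_ULONG w)
		{
		BN_ULONG cc = 0;	/* high word carried between iterations */

		while (n-- > 0)
			{
			unsigned __int128 t = (unsigned __int128)*a++ * w + cc;

			*r++ = (BN_ULONG)t;		/* mul: low 64 bits */
			cc = (BN_ULONG)(t >> 64);	/* muh: high 64 bits */
			}
		return (cc);
		}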
diff --git a/crypto/bn/asm/alpha/mul_add.pl b/crypto/bn/asm/alpha/mul_add.pl
new file mode 100644
index 0000000..0d6df69
--- /dev/null
+++ b/crypto/bn/asm/alpha/mul_add.pl
@@ -0,0 +1,123 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_mul_add_words
+ {
+ local($name)=@_;
+	local($cc,$a,$b,$r,$count);
+
+ &init_pool(4);
+ ($cc)=GR("r0");
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $count=&wparam(2);
+ $word=&wparam(3);
+
+ &function_begin($name,"");
+
+ &comment("");
+ &sub($count,4,$count);
+ &mov("zero",$cc);
+ ###
+ &blt($count,&label("finish"));
+
+ &ld(($a0)=&NR(1),&QWPw(0,$ap));
+
+$a=<<'EOF';	# quad-unrolled main loop parked in a string; never emitted
+##########################################################
+ &set_label("loop");
+
+ &ld(($r0)=&NR(1),&QWPw(0,$rp));
+ &ld(($a1)=&NR(1),&QWPw(1,$ap));
+ &muh($a0,$word,($h0)=&NR(1));
+ &ld(($r1)=&NR(1),&QWPw(1,$rp));
+ &ld(($a2)=&NR(1),&QWPw(2,$ap));
+ ###
+ &mul($a0,$word,($l0)=&NR(1)); &FR($a0);
+ &ld(($r2)=&NR(1),&QWPw(2,$rp));
+ &muh($a1,$word,($h1)=&NR(1));
+ &ld(($a3)=&NR(1),&QWPw(3,$ap));
+ &mul($a1,$word,($l1)=&NR(1)); &FR($a1);
+ &ld(($r3)=&NR(1),&QWPw(3,$rp));
+ &add($r0,$l0,$r0);
+ &add($r1,$l1,$r1);
+ &cmpult($r0,$l0,($t0)=&NR(1)); &FR($l0);
+ &cmpult($r1,$l1,($t1)=&NR(1)); &FR($l1);
+ &muh($a2,$word,($h2)=&NR(1));
+ &add($r0,$cc,$r0);
+ &add($h0,$t0,$h0); &FR($t0);
+ &cmpult($r0,$cc,$cc);
+ &add($h1,$t1,$h1); &FR($t1);
+ &add($h0,$cc,$cc); &FR($h0);
+ &mul($a2,$word,($l2)=&NR(1)); &FR($a2);
+ &add($r1,$cc,$r1);
+ &cmpult($r1,$cc,$cc);
+ &add($r2,$l2,$r2);
+ &add($h1,$cc,$cc); &FR($h1);
+ &cmpult($r2,$l2,($t2)=&NR(1)); &FR($l2);
+ &muh($a3,$word,($h3)=&NR(1));
+ &add($r2,$cc,$r2);
+ &st($r0,&QWPw(0,$rp)); &FR($r0);
+ &add($h2,$t2,$h2); &FR($t2);
+ &st($r1,&QWPw(1,$rp)); &FR($r1);
+ &cmpult($r2,$cc,$cc);
+ &mul($a3,$word,($l3)=&NR(1)); &FR($a3);
+ &add($h2,$cc,$cc); &FR($h2);
+ &st($r2,&QWPw(2,$rp)); &FR($r2);
+ &sub($count,4,$count); # count-=4
+	&add($rp,4*$QWS,$rp);	# rp+=4
+	&add($r3,$l3,$r3);
+	&add($ap,4*$QWS,$ap);	# ap+=4
+ &cmpult($r3,$l3,($t3)=&NR(1)); &FR($l3);
+ &add($r3,$cc,$r3);
+ &add($h3,$t3,$h3); &FR($t3);
+ &cmpult($r3,$cc,$cc);
+ &st($r3,&QWPw(-1,$rp)); &FR($r3);
+ &add($h3,$cc,$cc); &FR($h3);
+
+ ###
+ &blt($count,&label("finish"));
+ &ld(($a0)=&NR(1),&QWPw(0,$ap));
+ &br(&label("loop"));
+EOF
+##################################################
+ # Do the last 0..3 words
+
+ &set_label("last_loop");
+
+ &ld(($a0)=&NR(1),&QWPw(0,$ap)); # get a
+	&ld(($r0)=&NR(1),&QWPw(0,$rp));	# get r
+ ###
+ ###
+	&muh($a0,$word,($h0)=&NR(1));
+ ### wait 8
+ &mul($a0,$word,($l0)=&NR(1)); &FR($a0);
+ &add($rp,$QWS,$rp);
+ &add($ap,$QWS,$ap);
+ &sub($count,1,$count);
+ ### wait 3 until l0 is available
+ &add($r0,$l0,$r0);
+ ###
+ &cmpult($r0,$l0,($t0)=&NR(1)); &FR($l0);
+ &add($r0,$cc,$r0);
+ &add($h0,$t0,$h0); &FR($t0);
+ &cmpult($r0,$cc,$cc);
+ &add($h0,$cc,$cc); &FR($h0);
+
+ &st($r0,&QWPw(-1,$rp)); &FR($r0);
+ &bgt($count,&label("last_loop"));
+ &function_end_A($name);
+
+######################################################
+ &set_label("finish");
+ &add($count,4,$count);
+ &bgt($count,&label("last_loop"));
+
+ &set_label("end");
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
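bn_mul_add_words differs from bn_mul_words only in also folding the existing r[] word into each product; note that a*w + r + cc still fits in 128 bits. A sketch under the same assumptions (hypothetical bn_mul_add_words_ref):

	typedef unsigned long BN_ULONG;

	BN_ULONG bn_mul_add_words_ref(BN_ULONG *r, const BN_ULONG *a,
				      int n, BN_ULONG w)
		{
		BN_ULONG cc = 0;

		while (n-- > 0)
			{
			unsigned __int128 t =
				(unsigned __int128)*a++ * w + *r + cc;

			*r++ = (BN_ULONG)t;
			cc = (BN_ULONG)(t >> 64);
			}
		return (cc);
		}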
diff --git a/crypto/bn/asm/alpha/mul_c4.pl b/crypto/bn/asm/alpha/mul_c4.pl
new file mode 100644
index 0000000..9cc876d
--- /dev/null
+++ b/crypto/bn/asm/alpha/mul_c4.pl
@@ -0,0 +1,215 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+# up-to-4-word Comba multiplication
+
+sub mul_add_c
+ {
+ local($a,$b,$c0,$c1,$c2)=@_;
+ local($l1,$h1,$t1,$t2);
+
+ &mul($a,$b,($l1)=&NR(1));
+ &muh($a,$b,($h1)=&NR(1));
+ &add($c0,$l1,$c0);
+ &cmpult($c0,$l1,($t1)=&NR(1)); &FR($l1);
+ &add($t1,$h1,$h1); &FR($t1);
+ &add($c1,$h1,$c1);
+ &cmpult($c1,$h1,($t2)=&NR(1)); &FR($h1);
+ &add($c2,$t2,$c2); &FR($t2);
+ }
+
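In C terms one mul_add_c step adds a full 128-bit product into a three-word (192-bit) accumulator, with each cmpult supplying a carry bit. A sketch (hypothetical mul_add_c_ref macro; BN_ULONG is assumed to be a 64-bit type and __int128 a GCC extension):

	#define mul_add_c_ref(a, b, c0, c1, c2)				\
		do {							\
		unsigned __int128 t = (unsigned __int128)(a) * (b);	\
		BN_ULONG lo = (BN_ULONG)t;				\
		BN_ULONG hi = (BN_ULONG)(t >> 64);			\
		(c0) += lo; hi += ((c0) < lo);	/* hi <= 2^64-2, cannot wrap */ \
		(c1) += hi; (c2) += ((c1) < hi);			\
		} while (0)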
+sub bn_mul_comba4
+ {
+ local($name)=@_;
+ local(@a,@b,$r,$c0,$c1,$c2);
+
+ $cnt=1;
+ &init_pool(3);
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $bp=&wparam(2);
+
+ &function_begin($name,"");
+
+ &comment("");
+
+ &ld(($a[0])=&NR(1),&QWPw(0,$ap));
+ &ld(($b[0])=&NR(1),&QWPw(0,$bp));
+ &ld(($a[1])=&NR(1),&QWPw(1,$ap));
+ &ld(($b[1])=&NR(1),&QWPw(1,$bp));
+ &mul($a[0],$b[0],($r00)=&NR(1));
+ &ld(($a[2])=&NR(1),&QWPw(2,$ap));
+ &ld(($b[2])=&NR(1),&QWPw(2,$bp));
+ &muh($a[0],$b[0],($r01)=&NR(1));
+	&ld(($a[3])=&NR(1),&QWPw(3,$ap)); &FR($ap);
+	&ld(($b[3])=&NR(1),&QWPw(3,$bp)); &FR($bp);
+ &mul($a[0],$b[1],($r02)=&NR(1));
+
+ ($R,$H1,$H2)=&NR(3);
+
+ &st($r00,&QWPw(0,$rp)); &FR($r00);
+
+ &mov("zero",$R);
+ &mul($a[1],$b[0],($r03)=&NR(1));
+
+ &mov("zero",$H1);
+	&mov("zero",$H2);
+ &add($R,$r01,$R);
+ &muh($a[0],$b[1],($r04)=&NR(1));
+ &cmpult($R,$r01,($t01)=&NR(1)); &FR($r01);
+ &add($R,$r02,$R);
+	&add($H1,$t01,$H1); &FR($t01);
+ &muh($a[1],$b[0],($r05)=&NR(1));
+ &cmpult($R,$r02,($t02)=&NR(1)); &FR($r02);
+ &add($R,$r03,$R);
+	&add($H2,$t02,$H2); &FR($t02);
+ &mul($a[0],$b[2],($r06)=&NR(1));
+ &cmpult($R,$r03,($t03)=&NR(1)); &FR($r03);
+	&add($H1,$t03,$H1); &FR($t03);
+ &st($R,&QWPw(1,$rp));
+ &add($H1,$H2,$R);
+
+ &mov("zero",$H1);
+ &add($R,$r04,$R);
+ &mov("zero",$H2);
+ &mul($a[1],$b[1],($r07)=&NR(1));
+ &cmpult($R,$r04,($t04)=&NR(1)); &FR($r04);
+ &add($R,$r05,$R);
+	&add($H1,$t04,$H1); &FR($t04);
+ &mul($a[2],$b[0],($r08)=&NR(1));
+ &cmpult($R,$r05,($t05)=&NR(1)); &FR($r05);
+	&add($R,$r06,$R);
+	&add($H2,$t05,$H2); &FR($t05);
+ &muh($a[0],$b[2],($r09)=&NR(1));
+ &cmpult($R,$r06,($t06)=&NR(1)); &FR($r06);
+ &add($R,$r07,$R);
+	&add($H1,$t06,$H1); &FR($t06);
+ &muh($a[1],$b[1],($r10)=&NR(1));
+ &cmpult($R,$r07,($t07)=&NR(1)); &FR($r07);
+ &add($R,$r08,$R);
+	&add($H2,$t07,$H2); &FR($t07);
+ &muh($a[2],$b[0],($r11)=&NR(1));
+ &cmpult($R,$r08,($t08)=&NR(1)); &FR($r08);
+	&add($H1,$t08,$H1); &FR($t08);
+ &st($R,&QWPw(2,$rp));
+ &add($H1,$H2,$R);
+
+ &mov("zero",$H1);
+ &add($R,$r09,$R);
+ &mov("zero",$H2);
+ &mul($a[0],$b[3],($r12)=&NR(1));
+ &cmpult($R,$r09,($t09)=&NR(1)); &FR($r09);
+ &add($R,$r10,$R);
+	&add($H1,$t09,$H1); &FR($t09);
+ &mul($a[1],$b[2],($r13)=&NR(1));
+ &cmpult($R,$r10,($t10)=&NR(1)); &FR($r10);
+ &add($R,$r11,$R);
+	&add($H1,$t10,$H1); &FR($t10);
+ &mul($a[2],$b[1],($r14)=&NR(1));
+ &cmpult($R,$r11,($t11)=&NR(1)); &FR($r11);
+ &add($R,$r12,$R);
+	&add($H1,$t11,$H1); &FR($t11);
+ &mul($a[3],$b[0],($r15)=&NR(1));
+ &cmpult($R,$r12,($t12)=&NR(1)); &FR($r12);
+ &add($R,$r13,$R);
+	&add($H1,$t12,$H1); &FR($t12);
+ &muh($a[0],$b[3],($r16)=&NR(1));
+ &cmpult($R,$r13,($t13)=&NR(1)); &FR($r13);
+ &add($R,$r14,$R);
+	&add($H1,$t13,$H1); &FR($t13);
+ &muh($a[1],$b[2],($r17)=&NR(1));
+ &cmpult($R,$r14,($t14)=&NR(1)); &FR($r14);
+ &add($R,$r15,$R);
+	&add($H1,$t14,$H1); &FR($t14);
+ &muh($a[2],$b[1],($r18)=&NR(1));
+ &cmpult($R,$r15,($t15)=&NR(1)); &FR($r15);
+	&add($H1,$t15,$H1); &FR($t15);
+ &st($R,&QWPw(3,$rp));
+ &add($H1,$H2,$R);
+
+ &mov("zero",$H1);
+ &add($R,$r16,$R);
+ &mov("zero",$H2);
+ &muh($a[3],$b[0],($r19)=&NR(1));
+ &cmpult($R,$r16,($t16)=&NR(1)); &FR($r16);
+ &add($R,$r17,$R);
+	&add($H1,$t16,$H1); &FR($t16);
+ &mul($a[1],$b[3],($r20)=&NR(1));
+ &cmpult($R,$r17,($t17)=&NR(1)); &FR($r17);
+ &add($R,$r18,$R);
+	&add($H1,$t17,$H1); &FR($t17);
+ &mul($a[2],$b[2],($r21)=&NR(1));
+ &cmpult($R,$r18,($t18)=&NR(1)); &FR($r18);
+ &add($R,$r19,$R);
+	&add($H1,$t18,$H1); &FR($t18);
+ &mul($a[3],$b[1],($r22)=&NR(1));
+ &cmpult($R,$r19,($t19)=&NR(1)); &FR($r19);
+ &add($R,$r20,$R);
+	&add($H1,$t19,$H1); &FR($t19);
+ &muh($a[1],$b[3],($r23)=&NR(1));
+ &cmpult($R,$r20,($t20)=&NR(1)); &FR($r20);
+ &add($R,$r21,$R);
+	&add($H1,$t20,$H1); &FR($t20);
+ &muh($a[2],$b[2],($r24)=&NR(1));
+ &cmpult($R,$r21,($t21)=&NR(1)); &FR($r21);
+ &add($R,$r22,$R);
+	&add($H1,$t21,$H1); &FR($t21);
+ &muh($a[3],$b[1],($r25)=&NR(1));
+ &cmpult($R,$r22,($t22)=&NR(1)); &FR($r22);
+	&add($H1,$t22,$H1); &FR($t22);
+ &st($R,&QWPw(4,$rp));
+ &add($H1,$H2,$R);
+
+ &mov("zero",$H1);
+ &add($R,$r23,$R);
+ &mov("zero",$H2);
+ &mul($a[2],$b[3],($r26)=&NR(1));
+ &cmpult($R,$r23,($t23)=&NR(1)); &FR($r23);
+ &add($R,$r24,$R);
+	&add($H1,$t23,$H1); &FR($t23);
+ &mul($a[3],$b[2],($r27)=&NR(1));
+ &cmpult($R,$r24,($t24)=&NR(1)); &FR($r24);
+ &add($R,$r25,$R);
+	&add($H1,$t24,$H1); &FR($t24);
+ &muh($a[2],$b[3],($r28)=&NR(1));
+ &cmpult($R,$r25,($t25)=&NR(1)); &FR($r25);
+ &add($R,$r26,$R);
+	&add($H1,$t25,$H1); &FR($t25);
+ &muh($a[3],$b[2],($r29)=&NR(1));
+ &cmpult($R,$r26,($t26)=&NR(1)); &FR($r26);
+ &add($R,$r27,$R);
+	&add($H1,$t26,$H1); &FR($t26);
+ &mul($a[3],$b[3],($r30)=&NR(1));
+ &cmpult($R,$r27,($t27)=&NR(1)); &FR($r27);
+	&add($H1,$t27,$H1); &FR($t27);
+ &st($R,&QWPw(5,$rp));
+ &add($H1,$H2,$R);
+
+ &mov("zero",$H1);
+ &add($R,$r28,$R);
+ &mov("zero",$H2);
+ &muh($a[3],$b[3],($r31)=&NR(1));
+ &cmpult($R,$r28,($t28)=&NR(1)); &FR($r28);
+ &add($R,$r29,$R);
+	&add($H1,$t28,$H1); &FR($t28);
+ ############
+ &cmpult($R,$r29,($t29)=&NR(1)); &FR($r29);
+ &add($R,$r30,$R);
+	&add($H1,$t29,$H1); &FR($t29);
+ ############
+ &cmpult($R,$r30,($t30)=&NR(1)); &FR($r30);
+	&add($H1,$t30,$H1); &FR($t30);
+ &st($R,&QWPw(6,$rp));
+ &add($H1,$H2,$R);
+
+ &add($R,$r31,$R); &FR($r31);
+ &st($R,&QWPw(7,$rp));
+
+ &FR($R,$H1,$H2);
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha/mul_c4.works.pl b/crypto/bn/asm/alpha/mul_c4.works.pl
new file mode 100644
index 0000000..79d86dd
--- /dev/null
+++ b/crypto/bn/asm/alpha/mul_c4.works.pl
@@ -0,0 +1,98 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub mul_add_c
+ {
+ local($a,$b,$c0,$c1,$c2)=@_;
+ local($l1,$h1,$t1,$t2);
+
+print STDERR "count=$cnt\n"; $cnt++;
+ &mul($a,$b,($l1)=&NR(1));
+ &muh($a,$b,($h1)=&NR(1));
+ &add($c0,$l1,$c0);
+ &cmpult($c0,$l1,($t1)=&NR(1)); &FR($l1);
+ &add($t1,$h1,$h1); &FR($t1);
+ &add($c1,$h1,$c1);
+ &cmpult($c1,$h1,($t2)=&NR(1)); &FR($h1);
+ &add($c2,$t2,$c2); &FR($t2);
+ }
+
+sub bn_mul_comba4
+ {
+ local($name)=@_;
+ local(@a,@b,$r,$c0,$c1,$c2);
+
+ $cnt=1;
+ &init_pool(3);
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $bp=&wparam(2);
+
+ &function_begin($name,"");
+
+ &comment("");
+
+ &ld(($a[0])=&NR(1),&QWPw(0,$ap));
+ &ld(($b[0])=&NR(1),&QWPw(0,$bp));
+ &ld(($a[1])=&NR(1),&QWPw(1,$ap));
+ &ld(($b[1])=&NR(1),&QWPw(1,$bp));
+ &ld(($a[2])=&NR(1),&QWPw(2,$ap));
+ &ld(($b[2])=&NR(1),&QWPw(2,$bp));
+ &ld(($a[3])=&NR(1),&QWPw(3,$ap)); &FR($ap);
+ &ld(($b[3])=&NR(1),&QWPw(3,$bp)); &FR($bp);
+
+ ($c0,$c1,$c2)=&NR(3);
+ &mov("zero",$c2);
+ &mul($a[0],$b[0],$c0);
+ &muh($a[0],$b[0],$c1);
+	&st($c0,&QWPw(0,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[0],$c0,$c1,$c2);
+	&st($c0,&QWPw(1,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[1],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[0],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[0],$c0,$c1,$c2);
+	&st($c0,&QWPw(2,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[3],$c0,$c1,$c2); &FR($a[0]);
+ &mul_add_c($a[1],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[0],$c0,$c1,$c2); &FR($b[0]);
+	&st($c0,&QWPw(3,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[1],$b[3],$c0,$c1,$c2); &FR($a[1]);
+ &mul_add_c($a[2],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[1],$c0,$c1,$c2); &FR($b[1]);
+	&st($c0,&QWPw(4,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[2],$b[3],$c0,$c1,$c2); &FR($a[2]);
+ &mul_add_c($a[3],$b[2],$c0,$c1,$c2); &FR($b[2]);
+	&st($c0,&QWPw(5,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[3],$b[3],$c0,$c1,$c2); &FR($a[3],$b[3]);
+ &st($c0,&QWPw(6,$rp));
+ &st($c1,&QWPw(7,$rp));
+
+ &FR($c0,$c1,$c2);
+
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha/mul_c8.pl b/crypto/bn/asm/alpha/mul_c8.pl
new file mode 100644
index 0000000..525ca74
--- /dev/null
+++ b/crypto/bn/asm/alpha/mul_c8.pl
@@ -0,0 +1,177 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_mul_comba8
+ {
+ local($name)=@_;
+ local(@a,@b,$r,$c0,$c1,$c2);
+
+ $cnt=1;
+ &init_pool(3);
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $bp=&wparam(2);
+
+ &function_begin($name,"");
+
+ &comment("");
+
+ &stack_push(2);
+ &ld(($a[0])=&NR(1),&QWPw(0,$ap));
+ &ld(($b[0])=&NR(1),&QWPw(0,$bp));
+ &st($reg_s0,&swtmp(0)); &FR($reg_s0);
+ &st($reg_s1,&swtmp(1)); &FR($reg_s1);
+ &ld(($a[1])=&NR(1),&QWPw(1,$ap));
+ &ld(($b[1])=&NR(1),&QWPw(1,$bp));
+ &ld(($a[2])=&NR(1),&QWPw(2,$ap));
+ &ld(($b[2])=&NR(1),&QWPw(2,$bp));
+ &ld(($a[3])=&NR(1),&QWPw(3,$ap));
+ &ld(($b[3])=&NR(1),&QWPw(3,$bp));
+	&ld(($a[4])=&NR(1),&QWPw(4,$ap));
+	&ld(($b[4])=&NR(1),&QWPw(4,$bp));
+	&ld(($a[5])=&NR(1),&QWPw(5,$ap));
+	&ld(($b[5])=&NR(1),&QWPw(5,$bp));
+	&ld(($a[6])=&NR(1),&QWPw(6,$ap));
+	&ld(($b[6])=&NR(1),&QWPw(6,$bp));
+	&ld(($a[7])=&NR(1),&QWPw(7,$ap)); &FR($ap);
+	&ld(($b[7])=&NR(1),&QWPw(7,$bp)); &FR($bp);
+
+ ($c0,$c1,$c2)=&NR(3);
+ &mov("zero",$c2);
+ &mul($a[0],$b[0],$c0);
+ &muh($a[0],$b[0],$c1);
+ &st($c0,&QWPw(0,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(1,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(2,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(3,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[4],$b[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(4,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[4],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[5],$b[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(5,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[1],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[4],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[5],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[6],$b[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(6,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[0],$b[7],$c0,$c1,$c2); &FR($a[0]);
+ &mul_add_c($a[1],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[2],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[4],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[5],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[6],$b[1],$c0,$c1,$c2);
+ &mul_add_c($a[7],$b[0],$c0,$c1,$c2); &FR($b[0]);
+ &st($c0,&QWPw(7,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[1],$b[7],$c0,$c1,$c2); &FR($a[1]);
+ &mul_add_c($a[2],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[3],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[4],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[5],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[6],$b[2],$c0,$c1,$c2);
+ &mul_add_c($a[7],$b[1],$c0,$c1,$c2); &FR($b[1]);
+ &st($c0,&QWPw(8,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[2],$b[7],$c0,$c1,$c2); &FR($a[2]);
+ &mul_add_c($a[3],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[4],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[5],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[6],$b[3],$c0,$c1,$c2);
+ &mul_add_c($a[7],$b[2],$c0,$c1,$c2); &FR($b[2]);
+ &st($c0,&QWPw(9,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[3],$b[7],$c0,$c1,$c2); &FR($a[3]);
+ &mul_add_c($a[4],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[5],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[6],$b[4],$c0,$c1,$c2);
+ &mul_add_c($a[7],$b[3],$c0,$c1,$c2); &FR($b[3]);
+ &st($c0,&QWPw(10,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[4],$b[7],$c0,$c1,$c2); &FR($a[4]);
+ &mul_add_c($a[5],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[6],$b[5],$c0,$c1,$c2);
+ &mul_add_c($a[7],$b[4],$c0,$c1,$c2); &FR($b[4]);
+ &st($c0,&QWPw(11,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[5],$b[7],$c0,$c1,$c2); &FR($a[5]);
+ &mul_add_c($a[6],$b[6],$c0,$c1,$c2);
+ &mul_add_c($a[7],$b[5],$c0,$c1,$c2); &FR($b[5]);
+ &st($c0,&QWPw(12,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[6],$b[7],$c0,$c1,$c2); &FR($a[6]);
+ &mul_add_c($a[7],$b[6],$c0,$c1,$c2); &FR($b[6]);
+ &st($c0,&QWPw(13,$rp)); &FR($c0); ($c0)=&NR(1);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &mul_add_c($a[7],$b[7],$c0,$c1,$c2); &FR($a[7],$b[7]);
+ &st($c0,&QWPw(14,$rp));
+ &st($c1,&QWPw(15,$rp));
+
+ &FR($c0,$c1,$c2);
+
+ &ld($reg_s0,&swtmp(0));
+ &ld($reg_s1,&swtmp(1));
+ &stack_pop(2);
+
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha/sqr.pl b/crypto/bn/asm/alpha/sqr.pl
new file mode 100644
index 0000000..a55b696
--- /dev/null
+++ b/crypto/bn/asm/alpha/sqr.pl
@@ -0,0 +1,113 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_sqr_words
+ {
+ local($name)=@_;
+	local($cc,$a,$b,$r,$count);
+
+ &init_pool(3);
+ ($cc)=GR("r0");
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $count=&wparam(2);
+
+ &function_begin($name,"");
+
+ &comment("");
+ &sub($count,4,$count);
+ &mov("zero",$cc);
+	&br(&label("finish"));	# skip the disabled main loop; always use last_loop
+ &blt($count,&label("finish"));
+
+ ($a0,$r0)=&NR(2);
+ &ld($a0,&QWPw(0,$ap));
+ &ld($r0,&QWPw(0,$rp));
+
+$a=<<'EOF';	# disabled main loop (an add.pl copy) parked in a string; never emitted
+##########################################################
+ &set_label("loop");
+
+ ($a1)=&NR(1); &ld($a1,&QWPw(1,$ap));
+ ($b1)=&NR(1); &ld($b1,&QWPw(1,$bp));
+ ($a2)=&NR(1); &ld($a2,&QWPw(2,$ap));
+ ($b2)=&NR(1); &ld($b2,&QWPw(2,$bp));
+ ($a3)=&NR(1); &ld($a3,&QWPw(3,$ap));
+ ($b3)=&NR(1); &ld($b3,&QWPw(3,$bp));
+
+ ($o0,$t0)=&NR(2);
+ &add($a0,$b0,$o0);
+ &cmpult($o0,$b0,$t0);
+ &add($o0,$cc,$o0);
+ &cmpult($o0,$cc,$cc);
+ &add($cc,$t0,$cc); &FR($t0);
+
+ ($t1,$o1)=&NR(2);
+
+ &add($a1,$b1,$o1); &FR($a1);
+ &cmpult($o1,$b1,$t1); &FR($b1);
+ &add($o1,$cc,$o1);
+ &cmpult($o1,$cc,$cc);
+ &add($cc,$t1,$cc); &FR($t1);
+
+ ($t2,$o2)=&NR(2);
+
+ &add($a2,$b2,$o2); &FR($a2);
+ &cmpult($o2,$b2,$t2); &FR($b2);
+ &add($o2,$cc,$o2);
+ &cmpult($o2,$cc,$cc);
+ &add($cc,$t2,$cc); &FR($t2);
+
+ ($t3,$o3)=&NR(2);
+
+ &add($a3,$b3,$o3); &FR($a3);
+ &cmpult($o3,$b3,$t3); &FR($b3);
+ &add($o3,$cc,$o3);
+ &cmpult($o3,$cc,$cc);
+ &add($cc,$t3,$cc); &FR($t3);
+
+	&st($o0,&QWPw(0,$rp)); &FR($o0);
+	&st($o1,&QWPw(1,$rp)); &FR($o1);
+	&st($o2,&QWPw(2,$rp)); &FR($o2);
+	&st($o3,&QWPw(3,$rp)); &FR($o3);
+
+	&sub($count,4,$count);	# count-=4
+	&add($ap,4*$QWS,$ap);	# ap+=4
+	&add($bp,4*$QWS,$bp);	# bp+=4
+	&add($rp,4*$QWS,$rp);	# rp+=4
+
+ &blt($count,&label("finish"));
+ &ld($a0,&QWPw(0,$ap));
+ &ld($b0,&QWPw(0,$bp));
+ &br(&label("loop"));
+EOF
+##################################################
+ # Do the last 0..3 words
+
+ &set_label("last_loop");
+
+ &ld(($a0)=&NR(1),&QWPw(0,$ap)); # get a
+ &mul($a0,$a0,($l0)=&NR(1));
+ &add($ap,$QWS,$ap);
+ &add($rp,2*$QWS,$rp);
+ &sub($count,1,$count);
+ &muh($a0,$a0,($h0)=&NR(1)); &FR($a0);
+ &st($l0,&QWPw(-2,$rp)); &FR($l0);
+ &st($h0,&QWPw(-1,$rp)); &FR($h0);
+
+ &bgt($count,&label("last_loop"));
+ &function_end_A($name);
+
+######################################################
+ &set_label("finish");
+ &add($count,4,$count);
+ &bgt($count,&label("last_loop"));
+
+ &set_label("end");
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
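bn_sqr_words writes two output words per input word, which is why rp advances twice as fast as ap in the tail loop. A sketch (hypothetical bn_sqr_words_ref, 64-bit BN_ULONG and __int128 assumed):

	typedef unsigned long BN_ULONG;

	void bn_sqr_words_ref(BN_ULONG *r, const BN_ULONG *a, int n)
		{
		while (n-- > 0)
			{
			unsigned __int128 t = (unsigned __int128)*a * *a;

			r[0] = (BN_ULONG)t;		/* mul: low half */
			r[1] = (BN_ULONG)(t >> 64);	/* muh: high half */
			a++; r += 2;
			}
		}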
diff --git a/crypto/bn/asm/alpha/sqr_c4.pl b/crypto/bn/asm/alpha/sqr_c4.pl
new file mode 100644
index 0000000..bf33f5b
--- /dev/null
+++ b/crypto/bn/asm/alpha/sqr_c4.pl
@@ -0,0 +1,109 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub sqr_add_c
+ {
+ local($a,$c0,$c1,$c2)=@_;
+ local($l1,$h1,$t1,$t2);
+
+ &mul($a,$a,($l1)=&NR(1));
+ &muh($a,$a,($h1)=&NR(1));
+ &add($c0,$l1,$c0);
+ &add($c1,$h1,$c1);
+ &cmpult($c0,$l1,($t1)=&NR(1)); &FR($l1);
+ &cmpult($c1,$h1,($t2)=&NR(1)); &FR($h1);
+ &add($c1,$t1,$c1); &FR($t1);
+ &add($c2,$t2,$c2); &FR($t2);
+ }
+
+sub sqr_add_c2
+ {
+ local($a,$b,$c0,$c1,$c2)=@_;
+ local($l1,$h1,$t1,$t2);
+
+ &mul($a,$b,($l1)=&NR(1));
+ &muh($a,$b,($h1)=&NR(1));
+ &cmplt($l1,"zero",($lc1)=&NR(1));
+ &cmplt($h1,"zero",($hc1)=&NR(1));
+ &add($l1,$l1,$l1);
+ &add($h1,$h1,$h1);
+ &add($h1,$lc1,$h1); &FR($lc1);
+ &add($c2,$hc1,$c2); &FR($hc1);
+
+ &add($c0,$l1,$c0);
+ &add($c1,$h1,$c1);
+ &cmpult($c0,$l1,($lc1)=&NR(1)); &FR($l1);
+ &cmpult($c1,$h1,($hc1)=&NR(1)); &FR($h1);
+
+ &add($c1,$lc1,$c1); &FR($lc1);
+ &add($c2,$hc1,$c2); &FR($hc1);
+ }
+
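sqr_add_c2 folds the doubled cross product 2*a*b into the accumulator; the two cmplt tests against "zero" recover the bits shifted out when l1 and h1 are doubled. An equivalent C sketch (hypothetical sqr_add_c2_ref macro, 64-bit BN_ULONG and __int128 assumed):

	#define sqr_add_c2_ref(a, b, c0, c1, c2)			\
		do {							\
		unsigned __int128 t = (unsigned __int128)(a) * (b);	\
		BN_ULONG lo = (BN_ULONG)t;				\
		BN_ULONG hi = (BN_ULONG)(t >> 64);			\
		(c2) += hi >> 63;		/* bit lost doubling hi */ \
		hi = (hi << 1) | (lo >> 63);	/* 2*product, high word */ \
		lo <<= 1;			/* 2*product, low word */ \
		(c0) += lo; hi += ((c0) < lo);				\
		(c1) += hi; (c2) += ((c1) < hi);			\
		} while (0)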
+
+sub bn_sqr_comba4
+ {
+ local($name)=@_;
+ local(@a,@b,$r,$c0,$c1,$c2);
+
+ $cnt=1;
+ &init_pool(2);
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+
+ &function_begin($name,"");
+
+ &comment("");
+
+ &ld(($a[0])=&NR(1),&QWPw(0,$ap));
+ &ld(($a[1])=&NR(1),&QWPw(1,$ap));
+ &ld(($a[2])=&NR(1),&QWPw(2,$ap));
+ &ld(($a[3])=&NR(1),&QWPw(3,$ap)); &FR($ap);
+
+ ($c0,$c1,$c2)=&NR(3);
+
+ &mov("zero",$c2);
+ &mul($a[0],$a[0],$c0);
+ &muh($a[0],$a[0],$c1);
+ &st($c0,&QWPw(0,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[0],$a[1],$c0,$c1,$c2);
+ &st($c0,&QWPw(1,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[2],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(2,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[3],$a[0],$c0,$c1,$c2);
+ &sqr_add_c2($a[2],$a[1],$c0,$c1,$c2);
+ &st($c0,&QWPw(3,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[2],$c0,$c1,$c2);
+ &sqr_add_c2($a[3],$a[1],$c0,$c1,$c2);
+ &st($c0,&QWPw(4,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[3],$a[2],$c0,$c1,$c2);
+ &st($c0,&QWPw(5,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[3],$c0,$c1,$c2);
+ &st($c0,&QWPw(6,$rp));
+ &st($c1,&QWPw(7,$rp));
+
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha/sqr_c8.pl b/crypto/bn/asm/alpha/sqr_c8.pl
new file mode 100644
index 0000000..b4afe08
--- /dev/null
+++ b/crypto/bn/asm/alpha/sqr_c8.pl
@@ -0,0 +1,132 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_sqr_comba8
+ {
+ local($name)=@_;
+ local(@a,@b,$r,$c0,$c1,$c2);
+
+ $cnt=1;
+ &init_pool(2);
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+
+ &function_begin($name,"");
+
+ &comment("");
+
+ &ld(($a[0])=&NR(1),&QWPw(0,$ap));
+ &ld(($a[1])=&NR(1),&QWPw(1,$ap));
+ &ld(($a[2])=&NR(1),&QWPw(2,$ap));
+ &ld(($a[3])=&NR(1),&QWPw(3,$ap));
+ &ld(($a[4])=&NR(1),&QWPw(4,$ap));
+ &ld(($a[5])=&NR(1),&QWPw(5,$ap));
+ &ld(($a[6])=&NR(1),&QWPw(6,$ap));
+ &ld(($a[7])=&NR(1),&QWPw(7,$ap)); &FR($ap);
+
+ ($c0,$c1,$c2)=&NR(3);
+
+ &mov("zero",$c2);
+ &mul($a[0],$a[0],$c0);
+ &muh($a[0],$a[0],$c1);
+ &st($c0,&QWPw(0,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[1],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(1,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[2],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(2,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[2],$a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[3],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(3,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[2],$c0,$c1,$c2);
+ &sqr_add_c2($a[3],$a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[4],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(4,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[3],$a[2],$c0,$c1,$c2);
+ &sqr_add_c2($a[4],$a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[5],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(5,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[3],$c0,$c1,$c2);
+ &sqr_add_c2($a[4],$a[2],$c0,$c1,$c2);
+ &sqr_add_c2($a[5],$a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[6],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(6,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[4],$a[3],$c0,$c1,$c2);
+ &sqr_add_c2($a[5],$a[2],$c0,$c1,$c2);
+ &sqr_add_c2($a[6],$a[1],$c0,$c1,$c2);
+ &sqr_add_c2($a[7],$a[0],$c0,$c1,$c2);
+ &st($c0,&QWPw(7,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[4],$c0,$c1,$c2);
+ &sqr_add_c2($a[5],$a[3],$c0,$c1,$c2);
+ &sqr_add_c2($a[6],$a[2],$c0,$c1,$c2);
+ &sqr_add_c2($a[7],$a[1],$c0,$c1,$c2);
+ &st($c0,&QWPw(8,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[5],$a[4],$c0,$c1,$c2);
+ &sqr_add_c2($a[6],$a[3],$c0,$c1,$c2);
+ &sqr_add_c2($a[7],$a[2],$c0,$c1,$c2);
+ &st($c0,&QWPw(9,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[5],$c0,$c1,$c2);
+ &sqr_add_c2($a[6],$a[4],$c0,$c1,$c2);
+ &sqr_add_c2($a[7],$a[3],$c0,$c1,$c2);
+ &st($c0,&QWPw(10,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[6],$a[5],$c0,$c1,$c2);
+ &sqr_add_c2($a[7],$a[4],$c0,$c1,$c2);
+ &st($c0,&QWPw(11,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[6],$c0,$c1,$c2);
+ &sqr_add_c2($a[7],$a[5],$c0,$c1,$c2);
+ &st($c0,&QWPw(12,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c2($a[7],$a[6],$c0,$c1,$c2);
+ &st($c0,&QWPw(13,$rp));
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ &mov("zero",$c2);
+
+ &sqr_add_c($a[7],$c0,$c1,$c2);
+ &st($c0,&QWPw(14,$rp));
+ &st($c1,&QWPw(15,$rp));
+
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/alpha/sub.pl b/crypto/bn/asm/alpha/sub.pl
new file mode 100644
index 0000000..d998da5
--- /dev/null
+++ b/crypto/bn/asm/alpha/sub.pl
@@ -0,0 +1,108 @@
+#!/usr/local/bin/perl
+# alpha assembler
+
+sub bn_sub_words
+ {
+ local($name)=@_;
+ local($cc,$a,$b,$r);
+
+ &init_pool(4);
+ ($cc)=GR("r0");
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $bp=&wparam(2);
+ $count=&wparam(3);
+
+ &function_begin($name,"");
+
+ &comment("");
+ &sub($count,4,$count);
+ &mov("zero",$cc);
+ &blt($count,&label("finish"));
+
+ ($a0,$b0)=&NR(2);
+ &ld($a0,&QWPw(0,$ap));
+ &ld($b0,&QWPw(0,$bp));
+
+##########################################################
+ &set_label("loop");
+
+ ($a1,$tmp,$b1,$a2,$b2,$a3,$b3,$o0)=&NR(8);
+ &ld($a1,&QWPw(1,$ap));
+ &cmpult($a0,$b0,$tmp); # will we borrow?
+ &ld($b1,&QWPw(1,$bp));
+ &sub($a0,$b0,$a0); # do the subtract
+ &ld($a2,&QWPw(2,$ap));
+ &cmpult($a0,$cc,$b0); # will we borrow?
+ &ld($b2,&QWPw(2,$bp));
+	&sub($a0,$cc,$o0);	# subtract the borrow
+ &ld($a3,&QWPw(3,$ap));
+ &add($b0,$tmp,$cc); ($t1,$o1)=&NR(2); &FR($tmp);
+
+ &cmpult($a1,$b1,$t1); # will we borrow?
+ &sub($a1,$b1,$a1); # do the subtract
+ &ld($b3,&QWPw(3,$bp));
+ &cmpult($a1,$cc,$b1); # will we borrow?
+	&sub($a1,$cc,$o1);	# subtract the borrow
+ &add($b1,$t1,$cc); ($tmp,$o2)=&NR(2); &FR($t1,$a1,$b1);
+
+ &cmpult($a2,$b2,$tmp); # will we borrow?
+ &sub($a2,$b2,$a2); # do the subtract
+ &st($o0,&QWPw(0,$rp)); &FR($o0); # save
+ &cmpult($a2,$cc,$b2); # will we borrow?
+	&sub($a2,$cc,$o2);	# subtract the borrow
+ &add($b2,$tmp,$cc); ($t3,$o3)=&NR(2); &FR($tmp,$a2,$b2);
+
+ &cmpult($a3,$b3,$t3); # will we borrow?
+ &sub($a3,$b3,$a3); # do the subtract
+ &st($o1,&QWPw(1,$rp)); &FR($o1);
+ &cmpult($a3,$cc,$b3); # will we borrow?
+	&sub($a3,$cc,$o3);	# subtract the borrow
+ &add($b3,$t3,$cc); &FR($t3,$a3,$b3);
+
+ &st($o2,&QWPw(2,$rp)); &FR($o2);
+ &sub($count,4,$count); # count-=4
+ &st($o3,&QWPw(3,$rp)); &FR($o3);
+	&add($ap,4*$QWS,$ap);	# ap+=4
+	&add($bp,4*$QWS,$bp);	# bp+=4
+	&add($rp,4*$QWS,$rp);	# rp+=4
+
+ &blt($count,&label("finish"));
+ &ld($a0,&QWPw(0,$ap));
+ &ld($b0,&QWPw(0,$bp));
+ &br(&label("loop"));
+##################################################
+ # Do the last 0..3 words
+
+ &set_label("last_loop");
+
+ &ld($a0,&QWPw(0,$ap)); # get a
+ &ld($b0,&QWPw(0,$bp)); # get b
+ &cmpult($a0,$b0,$tmp); # will we borrow?
+ &sub($a0,$b0,$a0); # do the subtract
+ &cmpult($a0,$cc,$b0); # will we borrow?
+	&sub($a0,$cc,$a0);	# subtract the borrow
+ &st($a0,&QWPw(0,$rp)); # save
+ &add($b0,$tmp,$cc); # add the borrows
+
+ &add($ap,$QWS,$ap);
+ &add($bp,$QWS,$bp);
+ &add($rp,$QWS,$rp);
+ &sub($count,1,$count);
+ &bgt($count,&label("last_loop"));
+ &function_end_A($name);
+
+######################################################
+ &set_label("finish");
+ &add($count,4,$count);
+ &bgt($count,&label("last_loop"));
+
+ &FR($a0,$b0);
+ &set_label("end");
+ &function_end($name);
+
+ &fin_pool;
+ }
+
+1;
diff --git a/crypto/bn/asm/bn-586.pl b/crypto/bn/asm/bn-586.pl
new file mode 100644
index 0000000..26c2685
--- /dev/null
+++ b/crypto/bn/asm/bn-586.pl
@@ -0,0 +1,675 @@
+#!/usr/local/bin/perl
+
+push(@INC,"perlasm","../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0);
+
+$sse2=0;
+for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+&external_label("OPENSSL_ia32cap_P") if ($sse2);
+
+&bn_mul_add_words("bn_mul_add_words");
+&bn_mul_words("bn_mul_words");
+&bn_sqr_words("bn_sqr_words");
+&bn_div_words("bn_div_words");
+&bn_add_words("bn_add_words");
+&bn_sub_words("bn_sub_words");
+&bn_sub_part_words("bn_sub_part_words");
+
+&asm_finish();
+
+sub bn_mul_add_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
+
+ &comment("");
+ $Low="eax";
+ $High="edx";
+ $a="ebx";
+ $w="ebp";
+ $r="edi";
+ $c="esi";
+
+ &xor($c,$c); # clear carry
+	&mov($r,&wparam(0));	# get r
+
+	&mov("ecx",&wparam(2));	# get num
+	&mov($a,&wparam(1));	# get a
+
+	&and("ecx",0xfffffff8);	# num / 8
+	&mov($w,&wparam(3));	# get w
+
+	&push("ecx");		# reserve a stack slot for a tmp variable
+
+ &jz(&label("maw_finish"));
+
+ if ($sse2) {
+ &picmeup("eax","OPENSSL_ia32cap_P");
+ &bt(&DWP(0,"eax"),26);
+ &jnc(&label("maw_loop"));
+
+ &movd("mm0",$w); # mm0 = w
+ &pxor("mm1","mm1"); # mm1 = carry_in
+
+ &set_label("maw_sse2_loop",0);
+ &movd("mm3",&DWP(0,$r,"",0)); # mm3 = r[0]
+ &paddq("mm1","mm3"); # mm1 = carry_in + r[0]
+ &movd("mm2",&DWP(0,$a,"",0)); # mm2 = a[0]
+ &pmuludq("mm2","mm0"); # mm2 = w*a[0]
+ &movd("mm4",&DWP(4,$a,"",0)); # mm4 = a[1]
+ &pmuludq("mm4","mm0"); # mm4 = w*a[1]
+ &movd("mm6",&DWP(8,$a,"",0)); # mm6 = a[2]
+ &pmuludq("mm6","mm0"); # mm6 = w*a[2]
+ &movd("mm7",&DWP(12,$a,"",0)); # mm7 = a[3]
+ &pmuludq("mm7","mm0"); # mm7 = w*a[3]
+ &paddq("mm1","mm2"); # mm1 = carry_in + r[0] + w*a[0]
+ &movd("mm3",&DWP(4,$r,"",0)); # mm3 = r[1]
+ &paddq("mm3","mm4"); # mm3 = r[1] + w*a[1]
+ &movd("mm5",&DWP(8,$r,"",0)); # mm5 = r[2]
+ &paddq("mm5","mm6"); # mm5 = r[2] + w*a[2]
+ &movd("mm4",&DWP(12,$r,"",0)); # mm4 = r[3]
+ &paddq("mm7","mm4"); # mm7 = r[3] + w*a[3]
+ &movd(&DWP(0,$r,"",0),"mm1");
+ &movd("mm2",&DWP(16,$a,"",0)); # mm2 = a[4]
+ &pmuludq("mm2","mm0"); # mm2 = w*a[4]
+ &psrlq("mm1",32); # mm1 = carry0
+ &movd("mm4",&DWP(20,$a,"",0)); # mm4 = a[5]
+ &pmuludq("mm4","mm0"); # mm4 = w*a[5]
+ &paddq("mm1","mm3"); # mm1 = carry0 + r[1] + w*a[1]
+ &movd("mm6",&DWP(24,$a,"",0)); # mm6 = a[6]
+ &pmuludq("mm6","mm0"); # mm6 = w*a[6]
+ &movd(&DWP(4,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry1
+ &movd("mm3",&DWP(28,$a,"",0)); # mm3 = a[7]
+ &add($a,32);
+ &pmuludq("mm3","mm0"); # mm3 = w*a[7]
+ &paddq("mm1","mm5"); # mm1 = carry1 + r[2] + w*a[2]
+ &movd("mm5",&DWP(16,$r,"",0)); # mm5 = r[4]
+ &paddq("mm2","mm5"); # mm2 = r[4] + w*a[4]
+ &movd(&DWP(8,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry2
+ &paddq("mm1","mm7"); # mm1 = carry2 + r[3] + w*a[3]
+ &movd("mm5",&DWP(20,$r,"",0)); # mm5 = r[5]
+ &paddq("mm4","mm5"); # mm4 = r[5] + w*a[5]
+ &movd(&DWP(12,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry3
+ &paddq("mm1","mm2"); # mm1 = carry3 + r[4] + w*a[4]
+ &movd("mm5",&DWP(24,$r,"",0)); # mm5 = r[6]
+ &paddq("mm6","mm5"); # mm6 = r[6] + w*a[6]
+ &movd(&DWP(16,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry4
+ &paddq("mm1","mm4"); # mm1 = carry4 + r[5] + w*a[5]
+ &movd("mm5",&DWP(28,$r,"",0)); # mm5 = r[7]
+ &paddq("mm3","mm5"); # mm3 = r[7] + w*a[7]
+ &movd(&DWP(20,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry5
+ &paddq("mm1","mm6"); # mm1 = carry5 + r[6] + w*a[6]
+ &movd(&DWP(24,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry6
+ &paddq("mm1","mm3"); # mm1 = carry6 + r[7] + w*a[7]
+ &movd(&DWP(28,$r,"",0),"mm1");
+ &add($r,32);
+ &psrlq("mm1",32); # mm1 = carry_out
+
+ &sub("ecx",8);
+ &jnz(&label("maw_sse2_loop"));
+
+ &movd($c,"mm1"); # c = carry_out
+ &emms();
+
+ &jmp(&label("maw_finish"));
+ }
+
+ &set_label("maw_loop",0);
+
+ &mov(&swtmp(0),"ecx"); #
+
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ &mul($w); # *a * w
+		&add("eax",$c);		# L(t)+=c
+		&mov($c,&DWP($i,$r,"",0)); # c= *r
+		&adc("edx",0);		# H(t)+=carry
+		&add("eax",$c);		# L(t)+= *r
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ }
+
+ &comment("");
+ &mov("ecx",&swtmp(0)); #
+ &add($a,32);
+ &add($r,32);
+ &sub("ecx",8);
+ &jnz(&label("maw_loop"));
+
+ &set_label("maw_finish",0);
+ &mov("ecx",&wparam(2)); # get num
+ &and("ecx",7);
+ &jnz(&label("maw_finish2")); # helps branch prediction
+ &jmp(&label("maw_end"));
+
+ &set_label("maw_finish2",1);
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0));# *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+	&mov($c,&DWP($i*4,$r,"",0));	# c= *r
+	&adc("edx",0);			# H(t)+=carry
+	&add("eax",$c);			# L(t)+= *r
+ &adc("edx",0); # H(t)+=carry
+ &dec("ecx") if ($i != 7-1);
+ &mov(&DWP($i*4,$r,"",0),"eax"); # *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ &jz(&label("maw_end")) if ($i != 7-1);
+ }
+ &set_label("maw_end",0);
+ &mov("eax",$c);
+
+	&pop("ecx");		# remove the tmp variable from the stack
+
+ &function_end($name);
+ }
+
+sub bn_mul_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $Low="eax";
+ $High="edx";
+ $a="ebx";
+ $w="ecx";
+ $r="edi";
+ $c="esi";
+ $num="ebp";
+
+ &xor($c,$c); # clear carry
+	&mov($r,&wparam(0));	# get r
+	&mov($a,&wparam(1));	# get a
+	&mov($num,&wparam(2));	# get num
+	&mov($w,&wparam(3));	# get w
+
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("mw_finish"));
+
+ &set_label("mw_loop",0);
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ # XXX
+
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t);
+
+ &mov($c,"edx"); # c= H(t);
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,32);
+ &sub($num,8);
+ &jz(&label("mw_finish"));
+ &jmp(&label("mw_loop"));
+
+ &set_label("mw_finish",0);
+ &mov($num,&wparam(2)); # get num
+ &and($num,7);
+ &jnz(&label("mw_finish2"));
+ &jmp(&label("mw_end"));
+
+ &set_label("mw_finish2",1);
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0));# *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ # XXX
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i*4,$r,"",0),"eax");# *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ &dec($num) if ($i != 7-1);
+ &jz(&label("mw_end")) if ($i != 7-1);
+ }
+ &set_label("mw_end",0);
+ &mov("eax",$c);
+
+ &function_end($name);
+ }
+
+sub bn_sqr_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $r="esi";
+ $a="edi";
+ $num="ebx";
+
+	&mov($r,&wparam(0));	# get r
+	&mov($a,&wparam(1));	# get a
+	&mov($num,&wparam(2));	# get num
+
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("sw_finish"));
+
+ &set_label("sw_loop",0);
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ # XXX
+ &mul("eax"); # *a * *a
+ &mov(&DWP($i*2,$r,"",0),"eax"); #
+ &mov(&DWP($i*2+4,$r,"",0),"edx");#
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,64);
+ &sub($num,8);
+ &jnz(&label("sw_loop"));
+
+ &set_label("sw_finish",0);
+ &mov($num,&wparam(2)); # get num
+ &and($num,7);
+ &jz(&label("sw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0)); # *a
+ # XXX
+ &mul("eax"); # *a * *a
+ &mov(&DWP($i*8,$r,"",0),"eax"); #
+ &dec($num) if ($i != 7-1);
+ &mov(&DWP($i*8+4,$r,"",0),"edx");
+ &jz(&label("sw_end")) if ($i != 7-1);
+ }
+ &set_label("sw_end",0);
+
+ &function_end($name);
+ }
+
+sub bn_div_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+	&mov("edx",&wparam(0));	# get h
+	&mov("eax",&wparam(1));	# get l
+	&mov("ebx",&wparam(2));	# get d
+	&div("ebx");		# eax = edx:eax / ebx
+ &function_end($name);
+ }
+
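Here the 32-bit DIV instruction does all the work: it divides the 64-bit value in edx:eax by the source operand and leaves the quotient in eax. In C terms (sketch only, hypothetical bn_div_words_ref32; note DIV faults unless h < d, i.e. unless the quotient fits in 32 bits):

	unsigned int bn_div_words_ref32(unsigned int h, unsigned int l,
					unsigned int d)
		{
		unsigned long long n = ((unsigned long long)h << 32) | l;

		return ((unsigned int)(n / d));
		}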
+sub bn_add_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+ &and($num,0xfffffff8); # num / 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &add($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &add($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &add($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &add($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
+
+sub bn_sub_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+ &and($num,0xfffffff8); # num / 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
+
+sub bn_sub_part_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+ &and($num,0xfffffff8); # num / 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP(0,$a,"",0)); # *a
+ &mov($tmp2,&DWP(0,$b,"",0));# *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP(0,$r,"",0),$tmp1); # *r
+ &add($a, 4);
+ &add($b, 4);
+ &add($r, 4);
+ &dec($num) if ($i != 6);
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+ &cmp(&wparam(4),0);
+ &je(&label("pw_end"));
+
+ &mov($num,&wparam(4)); # get dl
+ &cmp($num,0);
+ &je(&label("pw_end"));
+ &jge(&label("pw_pos"));
+
+ &comment("pw_neg");
+ &mov($tmp2,0);
+ &sub($tmp2,$num);
+ &mov($num,$tmp2);
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("pw_neg_finish"));
+
+ &set_label("pw_neg_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("dl<0 Round $i");
+
+ &mov($tmp1,0);
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("pw_neg_loop"));
+
+ &set_label("pw_neg_finish",0);
+ &mov($tmp2,&wparam(4)); # get dl
+ &mov($num,0);
+ &sub($num,$tmp2);
+ &and($num,7);
+ &jz(&label("pw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("dl<0 Tail Round $i");
+ &mov($tmp1,0);
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jz(&label("pw_end")) if ($i != 6);
+ }
+
+ &jmp(&label("pw_end"));
+
+ &set_label("pw_pos",0);
+
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("pw_pos_finish"));
+
+ &set_label("pw_pos_loop",0);
+
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("dl>0 Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &sub($tmp1,$c);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jnc(&label("pw_nc".$i));
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("pw_pos_loop"));
+
+ &set_label("pw_pos_finish",0);
+ &mov($num,&wparam(4)); # get dl
+ &and($num,7);
+ &jz(&label("pw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("dl>0 Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &sub($tmp1,$c);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jnc(&label("pw_tail_nc".$i));
+ &dec($num) if ($i != 6);
+ &jz(&label("pw_end")) if ($i != 6);
+ }
+ &mov($c,1);
+ &jmp(&label("pw_end"));
+
+ &set_label("pw_nc_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &set_label("pw_nc".$i,0);
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("pw_nc_loop"));
+
+ &mov($num,&wparam(4)); # get dl
+ &and($num,7);
+ &jz(&label("pw_nc_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &set_label("pw_tail_nc".$i,0);
+ &dec($num) if ($i != 6);
+ &jz(&label("pw_nc_end")) if ($i != 6);
+ }
+
+ &set_label("pw_nc_end",0);
+ &mov($c,0);
+
+ &set_label("pw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
+
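bn_sub_part_words extends bn_sub_words to operands of different lengths: the cl common words are subtracted first, then the remaining |dl| words of the longer operand are run through the borrow chain (the pw_nc labels above are a plain copy-loop fast path taken once the borrow dies out). A hedged C rendering (hypothetical bn_sub_part_words_ref, 32-bit words assumed):

	unsigned int bn_sub_part_words_ref(unsigned int *r,
					   const unsigned int *a,
					   const unsigned int *b,
					   int cl, int dl)
		{
		unsigned int c = 0, t;
		int i;

		for (i = 0; i < cl; i++)	/* common part: r = a - b - c */
			{
			t = a[i] - c;
			c = (t > a[i]);		/* borrow out of a - c */
			r[i] = t - b[i];
			c += (r[i] > t);	/* borrow out of t - b */
			}
		for (i = cl; i < cl - dl; i++)	/* dl < 0: b is the longer input */
			{
			t = 0 - c;
			c = (t != 0);		/* 0 - c borrows whenever c was set */
			r[i] = t - b[i];
			c += (r[i] > t);
			}
		for (i = cl; i < cl + dl; i++)	/* dl > 0: a is the longer input */
			{
			r[i] = a[i] - c;
			c = (r[i] > a[i]);
			}
		return (c);
		}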
diff --git a/crypto/bn/asm/bn-alpha.pl b/crypto/bn/asm/bn-alpha.pl
new file mode 100644
index 0000000..302edf2
--- /dev/null
+++ b/crypto/bn/asm/bn-alpha.pl
@@ -0,0 +1,571 @@
+#!/usr/local/bin/perl
+# I have this in perl so I can use more useful register names and then convert
+# them into alpha registers.
+#
+
+$d=&data();
+$d =~ s/CC/0/g;
+$d =~ s/R1/1/g;
+$d =~ s/R2/2/g;
+$d =~ s/R3/3/g;
+$d =~ s/R4/4/g;
+$d =~ s/L1/5/g;
+$d =~ s/L2/6/g;
+$d =~ s/L3/7/g;
+$d =~ s/L4/8/g;
+$d =~ s/O1/22/g;
+$d =~ s/O2/23/g;
+$d =~ s/O3/24/g;
+$d =~ s/O4/25/g;
+$d =~ s/A1/20/g;
+$d =~ s/A2/21/g;
+$d =~ s/A3/27/g;
+$d =~ s/A4/28/g;
+if (0){
+}
+
+print $d;
+
+sub data
+ {
+ local($data)=<<'EOF';
+
+	# DEC Alpha assembler
+	# The bn_div_words is actually gcc output but the other parts are hand done.
+	# Thanks to tzeruch@ceddec.com for sending me the gcc output for
+	# bn_div_words.
+	# I've gone back and re-done most of the routines.
+	# The key thing to remember for the 164 CPU is that while a
+	# multiply operation takes 8 cycles, another one can only be issued
+	# after 4 cycles have elapsed.  I've made modifications to help
+	# improve this.  Also, normally, an ld result will not be available
+	# for about 3 cycles.
+ .file 1 "bn_asm.c"
+ .set noat
+gcc2_compiled.:
+__gnu_compiled_c:
+ .text
+ .align 3
+ .globl bn_mul_add_words
+ .ent bn_mul_add_words
+bn_mul_add_words:
+bn_mul_add_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+ .align 5
+ subq $18,4,$18
+ bis $31,$31,$CC
+ blt $18,$43 # if we are -1, -2, -3 or -4 goto tail code
+ ldq $A1,0($17) # 1 1
+ ldq $R1,0($16) # 1 1
+ .align 3
+$42:
+ mulq $A1,$19,$L1 # 1 2 1 ######
+ ldq $A2,8($17) # 2 1
+ ldq $R2,8($16) # 2 1
+ umulh $A1,$19,$A1 # 1 2 ######
+ ldq $A3,16($17) # 3 1
+ ldq $R3,16($16) # 3 1
+ mulq $A2,$19,$L2 # 2 2 1 ######
+ ldq $A4,24($17) # 4 1
+ addq $R1,$L1,$R1 # 1 2 2
+ ldq $R4,24($16) # 4 1
+ umulh $A2,$19,$A2 # 2 2 ######
+ cmpult $R1,$L1,$O1 # 1 2 3 1
+ addq $A1,$O1,$A1 # 1 3 1
+ addq $R1,$CC,$R1 # 1 2 3 1
+ mulq $A3,$19,$L3 # 3 2 1 ######
+ cmpult $R1,$CC,$CC # 1 2 3 2
+ addq $R2,$L2,$R2 # 2 2 2
+ addq $A1,$CC,$CC # 1 3 2
+ cmpult $R2,$L2,$O2 # 2 2 3 1
+ addq $A2,$O2,$A2 # 2 3 1
+ umulh $A3,$19,$A3 # 3 2 ######
+ addq $R2,$CC,$R2 # 2 2 3 1
+ cmpult $R2,$CC,$CC # 2 2 3 2
+ subq $18,4,$18
+ mulq $A4,$19,$L4 # 4 2 1 ######
+ addq $A2,$CC,$CC # 2 3 2
+ addq $R3,$L3,$R3 # 3 2 2
+ addq $16,32,$16
+ cmpult $R3,$L3,$O3 # 3 2 3 1
+ stq $R1,-32($16) # 1 2 4
+ umulh $A4,$19,$A4 # 4 2 ######
+ addq $A3,$O3,$A3 # 3 3 1
+ addq $R3,$CC,$R3 # 3 2 3 1
+ stq $R2,-24($16) # 2 2 4
+ cmpult $R3,$CC,$CC # 3 2 3 2
+ stq $R3,-16($16) # 3 2 4
+ addq $R4,$L4,$R4 # 4 2 2
+ addq $A3,$CC,$CC # 3 3 2
+ cmpult $R4,$L4,$O4 # 4 2 3 1
+ addq $17,32,$17
+ addq $A4,$O4,$A4 # 4 3 1
+ addq $R4,$CC,$R4 # 4 2 3 1
+ cmpult $R4,$CC,$CC # 4 2 3 2
+ stq $R4,-8($16) # 4 2 4
+ addq $A4,$CC,$CC # 4 3 2
+ blt $18,$43
+
+ ldq $A1,0($17) # 1 1
+ ldq $R1,0($16) # 1 1
+
+ br $42
+
+ .align 4
+$45:
+ ldq $A1,0($17) # 4 1
+ ldq $R1,0($16) # 4 1
+ mulq $A1,$19,$L1 # 4 2 1
+ subq $18,1,$18
+ addq $16,8,$16
+ addq $17,8,$17
+ umulh $A1,$19,$A1 # 4 2
+ addq $R1,$L1,$R1 # 4 2 2
+ cmpult $R1,$L1,$O1 # 4 2 3 1
+ addq $A1,$O1,$A1 # 4 3 1
+ addq $R1,$CC,$R1 # 4 2 3 1
+ cmpult $R1,$CC,$CC # 4 2 3 2
+ addq $A1,$CC,$CC # 4 3 2
+ stq $R1,-8($16) # 4 2 4
+ bgt $18,$45
+ ret $31,($26),1 # else exit
+
+ .align 4
+$43:
+ addq $18,4,$18
+ bgt $18,$45 # goto tail code
+ ret $31,($26),1 # else exit
+
+ .end bn_mul_add_words
+ .align 3
+ .globl bn_mul_words
+ .ent bn_mul_words
+bn_mul_words:
+bn_mul_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+ .align 5
+ subq $18,4,$18
+ bis $31,$31,$CC
+ blt $18,$143 # if we are -1, -2, -3 or -4 goto tail code
+ ldq $A1,0($17) # 1 1
+ .align 3
+$142:
+
+ mulq $A1,$19,$L1 # 1 2 1 #####
+ ldq $A2,8($17) # 2 1
+ ldq $A3,16($17) # 3 1
+ umulh $A1,$19,$A1 # 1 2 #####
+ ldq $A4,24($17) # 4 1
+ mulq $A2,$19,$L2 # 2 2 1 #####
+ addq $L1,$CC,$L1 # 1 2 3 1
+ subq $18,4,$18
+ cmpult $L1,$CC,$CC # 1 2 3 2
+ umulh $A2,$19,$A2 # 2 2 #####
+ addq $A1,$CC,$CC # 1 3 2
+ addq $17,32,$17
+ addq $L2,$CC,$L2 # 2 2 3 1
+ mulq $A3,$19,$L3 # 3 2 1 #####
+ cmpult $L2,$CC,$CC # 2 2 3 2
+ addq $A2,$CC,$CC # 2 3 2
+ addq $16,32,$16
+ umulh $A3,$19,$A3 # 3 2 #####
+ stq $L1,-32($16) # 1 2 4
+ mulq $A4,$19,$L4 # 4 2 1 #####
+ addq $L3,$CC,$L3 # 3 2 3 1
+ stq $L2,-24($16) # 2 2 4
+ cmpult $L3,$CC,$CC # 3 2 3 2
+ umulh $A4,$19,$A4 # 4 2 #####
+ addq $A3,$CC,$CC # 3 3 2
+ stq $L3,-16($16) # 3 2 4
+ addq $L4,$CC,$L4 # 4 2 3 1
+ cmpult $L4,$CC,$CC # 4 2 3 2
+
+ addq $A4,$CC,$CC # 4 3 2
+
+ stq $L4,-8($16) # 4 2 4
+
+ blt $18,$143
+
+ ldq $A1,0($17) # 1 1
+
+ br $142
+
+ .align 4
+$145:
+ ldq $A1,0($17) # 4 1
+ mulq $A1,$19,$L1 # 4 2 1
+ subq $18,1,$18
+ umulh $A1,$19,$A1 # 4 2
+ addq $L1,$CC,$L1 # 4 2 3 1
+ addq $16,8,$16
+ cmpult $L1,$CC,$CC # 4 2 3 2
+ addq $17,8,$17
+ addq $A1,$CC,$CC # 4 3 2
+ stq $L1,-8($16) # 4 2 4
+
+ bgt $18,$145
+ ret $31,($26),1 # else exit
+
+ .align 4
+$143:
+ addq $18,4,$18
+ bgt $18,$145 # goto tail code
+ ret $31,($26),1 # else exit
+
+ .end bn_mul_words
+ .align 3
+ .globl bn_sqr_words
+ .ent bn_sqr_words
+bn_sqr_words:
+bn_sqr_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ subq $18,4,$18
+ blt $18,$543 # if we are -1, -2, -3 or -4 goto tail code
+ ldq $A1,0($17) # 1 1
+ .align 3
+$542:
+ mulq $A1,$A1,$L1 ######
+ ldq $A2,8($17) # 1 1
+ subq $18,4
+ umulh $A1,$A1,$R1 ######
+ ldq $A3,16($17) # 1 1
+ mulq $A2,$A2,$L2 ######
+ ldq $A4,24($17) # 1 1
+ stq $L1,0($16) # r[0]
+ umulh $A2,$A2,$R2 ######
+ stq $R1,8($16) # r[1]
+ mulq $A3,$A3,$L3 ######
+	stq	$L2,16($16)	# r[2]
+	umulh	$A3,$A3,$R3	######
+	stq	$R2,24($16)	# r[3]
+	mulq	$A4,$A4,$L4	######
+	stq	$L3,32($16)	# r[4]
+	umulh	$A4,$A4,$R4	######
+	stq	$R3,40($16)	# r[5]
+
+	addq	$16,64,$16
+	addq	$17,32,$17
+	stq	$L4,-16($16)	# r[6]
+	stq	$R4,-8($16)	# r[7]
+
+ blt $18,$543
+ ldq $A1,0($17) # 1 1
+ br $542
+
+$442:
+ ldq $A1,0($17) # a[0]
+	mulq	$A1,$A1,$L1	# a[0]*a[0] low part
+ addq $16,16,$16
+ addq $17,8,$17
+ subq $18,1,$18
+	umulh	$A1,$A1,$R1	# a[0]*a[0] high part
+ stq $L1,-16($16) # r[0]
+ stq $R1,-8($16) # r[1]
+
+ bgt $18,$442
+ ret $31,($26),1 # else exit
+
+ .align 4
+$543:
+ addq $18,4,$18
+ bgt $18,$442 # goto tail code
+ ret $31,($26),1 # else exit
+ .end bn_sqr_words
+
+ .align 3
+ .globl bn_add_words
+ .ent bn_add_words
+bn_add_words:
+bn_add_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ subq $19,4,$19
+ bis $31,$31,$CC # carry = 0
+ blt $19,$900
+ ldq $L1,0($17) # a[0]
+	ldq	$R1,0($18)	# b[0]
+ .align 3
+$901:
+ addq $R1,$L1,$R1 # r=a+b;
+ ldq $L2,8($17) # a[1]
+ cmpult $R1,$L1,$O1 # did we overflow?
+ ldq $R2,8($18) # b[1]
+ addq $R1,$CC,$R1 # c+= overflow
+ ldq $L3,16($17) # a[2]
+ cmpult $R1,$CC,$CC # overflow?
+ ldq $R3,16($18) # b[2]
+ addq $CC,$O1,$CC
+ ldq $L4,24($17) # a[3]
+ addq $R2,$L2,$R2 # r=a+b;
+ ldq $R4,24($18) # b[3]
+ cmpult $R2,$L2,$O2 # did we overflow?
+ addq $R3,$L3,$R3 # r=a+b;
+ addq $R2,$CC,$R2 # c+= overflow
+ cmpult $R3,$L3,$O3 # did we overflow?
+ cmpult $R2,$CC,$CC # overflow?
+ addq $R4,$L4,$R4 # r=a+b;
+ addq $CC,$O2,$CC
+ cmpult $R4,$L4,$O4 # did we overflow?
+ addq $R3,$CC,$R3 # c+= overflow
+ stq $R1,0($16) # r[0]=c
+ cmpult $R3,$CC,$CC # overflow?
+ stq $R2,8($16) # r[1]=c
+ addq $CC,$O3,$CC
+ stq $R3,16($16) # r[2]=c
+ addq $R4,$CC,$R4 # c+= overflow
+ subq $19,4,$19 # loop--
+ cmpult $R4,$CC,$CC # overflow?
+ addq $17,32,$17 # a++
+ addq $CC,$O4,$CC
+ stq $R4,24($16) # r[3]=c
+ addq $18,32,$18 # b++
+ addq $16,32,$16 # r++
+
+ blt $19,$900
+ ldq $L1,0($17) # a[0]
+	ldq	$R1,0($18)	# b[0]
+ br $901
+ .align 4
+$945:
+ ldq $L1,0($17) # a[0]
+	ldq	$R1,0($18)	# b[0]
+ addq $R1,$L1,$R1 # r=a+b;
+ subq $19,1,$19 # loop--
+ addq $R1,$CC,$R1 # c+= overflow
+ addq $17,8,$17 # a++
+ cmpult $R1,$L1,$O1 # did we overflow?
+ cmpult $R1,$CC,$CC # overflow?
+ addq $18,8,$18 # b++
+ stq $R1,0($16) # r[0]=c
+ addq $CC,$O1,$CC
+ addq $16,8,$16 # r++
+
+ bgt $19,$945
+ ret $31,($26),1 # else exit
+
+$900:
+ addq $19,4,$19
+ bgt $19,$945 # goto tail code
+ ret $31,($26),1 # else exit
+ .end bn_add_words
+
+ .align 3
+ .globl bn_sub_words
+ .ent bn_sub_words
+bn_sub_words:
+bn_sub_words..ng:
+ .frame $30,0,$26,0
+ .prologue 0
+
+ subq $19,4,$19
+ bis $31,$31,$CC # carry = 0
+	br	$800		# unrolled body below still does adds; always take the tail loop
+ blt $19,$800
+ ldq $L1,0($17) # a[0]
+	ldq	$R1,0($18)	# b[0]
+ .align 3
+$801:
+ addq $R1,$L1,$R1 # r=a+b;
+ ldq $L2,8($17) # a[1]
+ cmpult $R1,$L1,$O1 # did we overflow?
+ ldq $R2,8($18) # b[1]
+ addq $R1,$CC,$R1 # c+= overflow
+ ldq $L3,16($17) # a[2]
+ cmpult $R1,$CC,$CC # overflow?
+ ldq $R3,16($18) # b[2]
+ addq $CC,$O1,$CC
+ ldq $L4,24($17) # a[3]
+ addq $R2,$L2,$R2 # r=a+b;
+ ldq $R4,24($18) # b[3]
+ cmpult $R2,$L2,$O2 # did we overflow?
+ addq $R3,$L3,$R3 # r=a+b;
+ addq $R2,$CC,$R2 # c+= overflow
+ cmpult $R3,$L3,$O3 # did we overflow?
+ cmpult $R2,$CC,$CC # overflow?
+ addq $R4,$L4,$R4 # r=a+b;
+ addq $CC,$O2,$CC
+ cmpult $R4,$L4,$O4 # did we overflow?
+ addq $R3,$CC,$R3 # c+= overflow
+ stq $R1,0($16) # r[0]=c
+ cmpult $R3,$CC,$CC # overflow?
+ stq $R2,8($16) # r[1]=c
+ addq $CC,$O3,$CC
+ stq $R3,16($16) # r[2]=c
+ addq $R4,$CC,$R4 # c+= overflow
+ subq $19,4,$19 # loop--
+ cmpult $R4,$CC,$CC # overflow?
+ addq $17,32,$17 # a++
+ addq $CC,$O4,$CC
+ stq $R4,24($16) # r[3]=c
+ addq $18,32,$18 # b++
+ addq $16,32,$16 # r++
+
+ blt $19,$800
+ ldq $L1,0($17) # a[0]
+	ldq	$R1,0($18)	# b[0]
+ br $801
+ .align 4
+$845:
+ ldq $L1,0($17) # a[0]
+	ldq	$R1,0($18)	# b[0]
+ cmpult $L1,$R1,$O1 # will we borrow?
+ subq $L1,$R1,$R1 # r=a-b;
+ subq $19,1,$19 # loop--
+ cmpult $R1,$CC,$O2 # will we borrow?
+	subq	$R1,$CC,$R1	# subtract the borrow
+ addq $17,8,$17 # a++
+ addq $18,8,$18 # b++
+ stq $R1,0($16) # r[0]=c
+ addq $O2,$O1,$CC
+ addq $16,8,$16 # r++
+
+ bgt $19,$845
+ ret $31,($26),1 # else exit
+
+$800:
+ addq $19,4,$19
+ bgt $19,$845 # goto tail code
+ ret $31,($26),1 # else exit
+ .end bn_sub_words
+
+ #
+	# What follows was taken directly from the C compiler output, with
+	# a few hacks to redo the labels.
+ #
+.text
+ .align 3
+ .globl bn_div_words
+ .ent bn_div_words
+bn_div_words:
+ ldgp $29,0($27)
+bn_div_words..ng:
+ lda $30,-48($30)
+ .frame $30,48,$26,0
+ stq $26,0($30)
+ stq $9,8($30)
+ stq $10,16($30)
+ stq $11,24($30)
+ stq $12,32($30)
+ stq $13,40($30)
+ .mask 0x4003e00,-48
+ .prologue 1
+ bis $16,$16,$9
+ bis $17,$17,$10
+ bis $18,$18,$11
+ bis $31,$31,$13
+ bis $31,2,$12
+ bne $11,$119
+ lda $0,-1
+ br $31,$136
+ .align 4
+$119:
+ bis $11,$11,$16
+ jsr $26,BN_num_bits_word
+ ldgp $29,0($26)
+ subq $0,64,$1
+ beq $1,$120
+ bis $31,1,$1
+ sll $1,$0,$1
+ cmpule $9,$1,$1
+ bne $1,$120
+ # lda $16,_IO_stderr_
+ # lda $17,$C32
+ # bis $0,$0,$18
+ # jsr $26,fprintf
+ # ldgp $29,0($26)
+ jsr $26,abort
+ ldgp $29,0($26)
+ .align 4
+$120:
+ bis $31,64,$3
+ cmpult $9,$11,$2
+ subq $3,$0,$1
+ addl $1,$31,$0
+ subq $9,$11,$1
+ cmoveq $2,$1,$9
+ beq $0,$122
+ zapnot $0,15,$2
+ subq $3,$0,$1
+ sll $11,$2,$11
+ sll $9,$2,$3
+ srl $10,$1,$1
+ sll $10,$2,$10
+ bis $3,$1,$9
+$122:
+ srl $11,32,$5
+ zapnot $11,15,$6
+ lda $7,-1
+ .align 5
+$123:
+ srl $9,32,$1
+ subq $1,$5,$1
+ bne $1,$126
+ zapnot $7,15,$27
+ br $31,$127
+ .align 4
+$126:
+ bis $9,$9,$24
+ bis $5,$5,$25
+ divqu $24,$25,$27
+$127:
+ srl $10,32,$4
+ .align 5
+$128:
+ mulq $27,$5,$1
+ subq $9,$1,$3
+ zapnot $3,240,$1
+ bne $1,$129
+ mulq $6,$27,$2
+ sll $3,32,$1
+ addq $1,$4,$1
+ cmpule $2,$1,$2
+ bne $2,$129
+ subq $27,1,$27
+ br $31,$128
+ .align 4
+$129:
+ mulq $27,$6,$1
+ mulq $27,$5,$4
+ srl $1,32,$3
+ sll $1,32,$1
+ addq $4,$3,$4
+ cmpult $10,$1,$2
+ subq $10,$1,$10
+ addq $2,$4,$2
+ cmpult $9,$2,$1
+ bis $2,$2,$4
+ beq $1,$134
+ addq $9,$11,$9
+ subq $27,1,$27
+$134:
+ subl $12,1,$12
+ subq $9,$4,$9
+ beq $12,$124
+ sll $27,32,$13
+ sll $9,32,$2
+ srl $10,32,$1
+ sll $10,32,$10
+ bis $2,$1,$9
+ br $31,$123
+ .align 4
+$124:
+ bis $13,$27,$0
+$136:
+ ldq $26,0($30)
+ ldq $9,8($30)
+ ldq $10,16($30)
+ ldq $11,24($30)
+ ldq $12,32($30)
+ ldq $13,40($30)
+ addq $30,48,$30
+ ret $31,($26),1
+ .end bn_div_words
+EOF
+ return($data);
+ }
+
diff --git a/crypto/bn/asm/ca.pl b/crypto/bn/asm/ca.pl
new file mode 100644
index 0000000..c1ce67a
--- /dev/null
+++ b/crypto/bn/asm/ca.pl
@@ -0,0 +1,33 @@
+#!/usr/local/bin/perl
+# I have this in perl so I can use more useful register names and then convert
+# them into alpha registers.
+#
+
+push(@INC,"perlasm","../../perlasm");
+require "alpha.pl";
+require "alpha/mul_add.pl";
+require "alpha/mul.pl";
+require "alpha/sqr.pl";
+require "alpha/add.pl";
+require "alpha/sub.pl";
+require "alpha/mul_c8.pl";
+require "alpha/mul_c4.pl";
+require "alpha/sqr_c4.pl";
+require "alpha/sqr_c8.pl";
+require "alpha/div.pl";
+
+&asm_init($ARGV[0],$0);
+
+&bn_mul_words("bn_mul_words");
+&bn_sqr_words("bn_sqr_words");
+&bn_mul_add_words("bn_mul_add_words");
+&bn_add_words("bn_add_words");
+&bn_sub_words("bn_sub_words");
+&bn_div_words("bn_div_words");
+&bn_mul_comba8("bn_mul_comba8");
+&bn_mul_comba4("bn_mul_comba4");
+&bn_sqr_comba4("bn_sqr_comba4");
+&bn_sqr_comba8("bn_sqr_comba8");
+
+&asm_finish();
+
diff --git a/crypto/bn/asm/co-586.pl b/crypto/bn/asm/co-586.pl
new file mode 100644
index 0000000..5d962cb
--- /dev/null
+++ b/crypto/bn/asm/co-586.pl
@@ -0,0 +1,286 @@
+#!/usr/local/bin/perl
+
+push(@INC,"perlasm","../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0);
+
+&bn_mul_comba("bn_mul_comba8",8);
+&bn_mul_comba("bn_mul_comba4",4);
+&bn_sqr_comba("bn_sqr_comba8",8);
+&bn_sqr_comba("bn_sqr_comba4",4);
+
+&asm_finish();
+
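+# The comba routines generated below accumulate the product column by
+# column: all partial products a[i]*b[j] with i+j==k are summed into a
+# three-register accumulator (c0 low, c1 high, c2 overflow), then c0 is
+# stored as r[k] and the accumulator rotates.  A C sketch of the idea
+# (an illustration only, not what this script emits; BN_UMULT_HIGH
+# stands for an assumed helper returning the high word of a product --
+# the add/adc/adc triple in mul_add_c below plays the same role):
+#
+#	void comba_mul(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+#		{
+#		BN_ULONG c0=0,c1=0,c2=0,lo,hi;
+#		int i,k;
+#
+#		for (k=0; k<2*n-1; k++)
+#			{
+#			for (i=(k<n)?k:n-1; i>=0 && (k-i)<n; i--)
+#				{
+#				lo=a[i]*b[k-i];
+#				hi=BN_UMULT_HIGH(a[i],b[k-i]);
+#				c0+=lo; hi+=(c0<lo);	/* hi cannot wrap */
+#				c1+=hi; c2+=(c1<hi);
+#				}
+#			r[k]=c0; c0=c1; c1=c2; c2=0;
+#			}
+#		r[2*n-1]=c0;
+#		}
+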
+sub mul_add_c
+ {
+ local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+	# pos == -1 if eax and edx are pre-loaded, 0 to load from the next
+	# words, and 1 if loading the return value
+
+ &comment("mul a[$ai]*b[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$b,"",0));
+
+ &mul("edx");
+ &add($c0,"eax");
+	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
+ &mov("eax",&wparam(0)) if $pos > 0; # load r[]
+ ###
+ &adc($c1,"edx");
+	&mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0;	# load next b
+	&mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1;	# load next b
+ ###
+ &adc($c2,0);
+	# if pos > 1, it means it is the last loop
+ &mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0; # save r[];
+	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;	# load next a
+ }
+
+sub sqr_add_c
+ {
+ local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+	# pos == -1 if eax and edx are pre-loaded, 0 to load from the next
+	# words, and 1 if loading the return value
+
+ &comment("sqr a[$ai]*a[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$b,"",0));
+
+ if ($ai == $bi)
+ { &mul("eax");}
+ else
+ { &mul("edx");}
+ &add($c0,"eax");
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a
+ ###
+ &adc($c1,"edx");
+ &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb);
+ ###
+ &adc($c2,0);
+	# if pos > 1, it means it is the last loop
+ &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[];
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b
+ }
+
+sub sqr_add_c2
+ {
+ local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+	# pos == -1 if eax and edx are pre-loaded, 0 to load from the next
+	# words, and 1 if loading the return value
+
+ &comment("sqr a[$ai]*a[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$a,"",0));
+
+ if ($ai == $bi)
+ { &mul("eax");}
+ else
+ { &mul("edx");}
+ &add("eax","eax");
+ ###
+ &adc("edx","edx");
+ ###
+ &adc($c2,0);
+ &add($c0,"eax");
+ &adc($c1,"edx");
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b
+ &adc($c2,0);
+ &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[];
+ &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb);
+ ###
+ }
+
+sub bn_mul_comba
+ {
+ local($name,$num)=@_;
+ local($a,$b,$c0,$c1,$c2);
+ local($i,$as,$ae,$bs,$be,$ai,$bi);
+ local($tot,$end);
+
+ &function_begin_B($name,"");
+
+ $c0="ebx";
+ $c1="ecx";
+ $c2="ebp";
+ $a="esi";
+ $b="edi";
+
+ $as=0;
+ $ae=0;
+ $bs=0;
+ $be=0;
+ $tot=$num+$num-1;
+
+ &push("esi");
+ &mov($a,&wparam(1));
+ &push("edi");
+ &mov($b,&wparam(2));
+ &push("ebp");
+ &push("ebx");
+
+ &xor($c0,$c0);
+ &mov("eax",&DWP(0,$a,"",0)); # load the first word
+ &xor($c1,$c1);
+	&mov("edx",&DWP(0,$b,"",0));	# load the first word of b
+
+ for ($i=0; $i<$tot; $i++)
+ {
+ $ai=$as;
+ $bi=$bs;
+ $end=$be+1;
+
+ &comment("################## Calculate word $i");
+
+ for ($j=$bs; $j<$end; $j++)
+ {
+ &xor($c2,$c2) if ($j == $bs);
+ if (($j+1) == $end)
+ {
+ $v=1;
+ $v=2 if (($i+1) == $tot);
+ }
+ else
+ { $v=0; }
+ if (($j+1) != $end)
+ {
+ $na=($ai-1);
+ $nb=($bi+1);
+ }
+ else
+ {
+ $na=$as+($i < ($num-1));
+ $nb=$bs+($i >= ($num-1));
+ }
+#printf STDERR "[$ai,$bi] -> [$na,$nb]\n";
+ &mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb);
+ if ($v)
+ {
+ &comment("saved r[$i]");
+ # &mov("eax",&wparam(0));
+ # &mov(&DWP($i*4,"eax","",0),$c0);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ }
+ $ai--;
+ $bi++;
+ }
+ $as++ if ($i < ($num-1));
+ $ae++ if ($i >= ($num-1));
+
+ $bs++ if ($i >= ($num-1));
+ $be++ if ($i < ($num-1));
+ }
+ &comment("save r[$i]");
+ # &mov("eax",&wparam(0));
+ &mov(&DWP($i*4,"eax","",0),$c0);
+
+ &pop("ebx");
+ &pop("ebp");
+ &pop("edi");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
+
+sub bn_sqr_comba
+ {
+ local($name,$num)=@_;
+	local($r,$a,$c0,$c1,$c2);
+ local($i,$as,$ae,$bs,$be,$ai,$bi);
+ local($b,$tot,$end,$half);
+
+ &function_begin_B($name,"");
+
+ $c0="ebx";
+ $c1="ecx";
+ $c2="ebp";
+ $a="esi";
+ $r="edi";
+
+ &push("esi");
+ &push("edi");
+ &push("ebp");
+ &push("ebx");
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &xor($c0,$c0);
+ &xor($c1,$c1);
+ &mov("eax",&DWP(0,$a,"",0)); # load the first word
+
+ $as=0;
+ $ae=0;
+ $bs=0;
+ $be=0;
+ $tot=$num+$num-1;
+
+ for ($i=0; $i<$tot; $i++)
+ {
+ $ai=$as;
+ $bi=$bs;
+ $end=$be+1;
+
+ &comment("############### Calculate word $i");
+ for ($j=$bs; $j<$end; $j++)
+ {
+ &xor($c2,$c2) if ($j == $bs);
+ if (($ai-1) < ($bi+1))
+ {
+ $v=1;
+ $v=2 if ($i+1) == $tot;
+ }
+ else
+ { $v=0; }
+ if (!$v)
+ {
+ $na=$ai-1;
+ $nb=$bi+1;
+ }
+ else
+ {
+ $na=$as+($i < ($num-1));
+ $nb=$bs+($i >= ($num-1));
+ }
+ if ($ai == $bi)
+ {
+ &sqr_add_c($r,$a,$ai,$bi,
+ $c0,$c1,$c2,$v,$i,$na,$nb);
+ }
+ else
+ {
+ &sqr_add_c2($r,$a,$ai,$bi,
+ $c0,$c1,$c2,$v,$i,$na,$nb);
+ }
+ if ($v)
+ {
+ &comment("saved r[$i]");
+ #&mov(&DWP($i*4,$r,"",0),$c0);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ last;
+ }
+ $ai--;
+ $bi++;
+ }
+ $as++ if ($i < ($num-1));
+ $ae++ if ($i >= ($num-1));
+
+ $bs++ if ($i >= ($num-1));
+ $be++ if ($i < ($num-1));
+ }
+ &mov(&DWP($i*4,$r,"",0),$c0);
+ &pop("ebx");
+ &pop("ebp");
+ &pop("edi");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
diff --git a/crypto/bn/asm/co-alpha.pl b/crypto/bn/asm/co-alpha.pl
new file mode 100644
index 0000000..67dad3e
--- /dev/null
+++ b/crypto/bn/asm/co-alpha.pl
@@ -0,0 +1,116 @@
+#!/usr/local/bin/perl
+# I have this in perl so I can use more useful register names and then convert
+# them into alpha registers.
+#
+
+push(@INC,"perlasm","../../perlasm");
+require "alpha.pl";
+
+&asm_init($ARGV[0],$0);
+
+print &bn_sub_words("bn_sub_words");
+
+&asm_finish();
+
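+# Each word goes through the usual borrow chain; one iteration of the
+# generated loop computes, in C terms (an illustration only):
+#
+#	t1 = (a < b);		# will a-b borrow?
+#	r  = a - b;
+#	t2 = (r < cc);		# will r-cc borrow? (cc is 0 or 1)
+#	r -= cc;
+#	cc = t1 + t2;		# at most one of t1,t2 can be set
+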
+sub bn_sub_words
+ {
+ local($name)=@_;
+ local($cc,$a,$b,$r);
+
+ $cc="r0";
+ $a0="r1"; $b0="r5"; $r0="r9"; $tmp="r13";
+ $a1="r2"; $b1="r6"; $r1="r10"; $t1="r14";
+ $a2="r3"; $b2="r7"; $r2="r11";
+ $a3="r4"; $b3="r8"; $r3="r12"; $t3="r15";
+
+ $rp=&wparam(0);
+ $ap=&wparam(1);
+ $bp=&wparam(2);
+ $count=&wparam(3);
+
+ &function_begin($name,"");
+
+ &comment("");
+ &sub($count,4,$count);
+ &mov("zero",$cc);
+ &blt($count,&label("finish"));
+
+ &ld($a0,&QWPw(0,$ap));
+ &ld($b0,&QWPw(0,$bp));
+
+##########################################################
+ &set_label("loop");
+
+ &ld($a1,&QWPw(1,$ap));
+ &cmpult($a0,$b0,$tmp); # will we borrow?
+ &ld($b1,&QWPw(1,$bp));
+ &sub($a0,$b0,$a0); # do the subtract
+ &ld($a2,&QWPw(2,$ap));
+ &cmpult($a0,$cc,$b0); # will we borrow?
+ &ld($b2,&QWPw(2,$bp));
+	&sub($a0,$cc,$a0);	# subtract the borrow
+ &ld($a3,&QWPw(3,$ap));
+ &add($b0,$tmp,$cc); # add the borrows
+
+ &cmpult($a1,$b1,$t1); # will we borrow?
+ &sub($a1,$b1,$a1); # do the subtract
+ &ld($b3,&QWPw(3,$bp));
+ &cmpult($a1,$cc,$b1); # will we borrow?
+	&sub($a1,$cc,$a1);	# subtract the borrow
+ &add($b1,$t1,$cc); # add the borrows
+
+ &cmpult($a2,$b2,$tmp); # will we borrow?
+ &sub($a2,$b2,$a2); # do the subtract
+ &st($a0,&QWPw(0,$rp)); # save
+ &cmpult($a2,$cc,$b2); # will we borrow?
+	&sub($a2,$cc,$a2);	# subtract the borrow
+ &add($b2,$tmp,$cc); # add the borrows
+
+ &cmpult($a3,$b3,$t3); # will we borrow?
+ &sub($a3,$b3,$a3); # do the subtract
+ &st($a1,&QWPw(1,$rp)); # save
+ &cmpult($a3,$cc,$b3); # will we borrow?
+	&sub($a3,$cc,$a3);	# subtract the borrow
+ &add($b3,$t3,$cc); # add the borrows
+
+ &st($a2,&QWPw(2,$rp)); # save
+ &sub($count,4,$count); # count-=4
+ &st($a3,&QWPw(3,$rp)); # save
+	&add($ap,4*$QWS,$ap);		# ap+=4
+	&add($bp,4*$QWS,$bp);		# bp+=4
+	&add($rp,4*$QWS,$rp);		# rp+=4
+
+ &blt($count,&label("finish"));
+ &ld($a0,&QWPw(0,$ap));
+ &ld($b0,&QWPw(0,$bp));
+ &br(&label("loop"));
+##################################################
+ # Do the last 0..3 words
+
+ &set_label("last_loop");
+
+ &ld($a0,&QWPw(0,$ap)); # get a
+ &ld($b0,&QWPw(0,$bp)); # get b
+ &cmpult($a0,$b0,$tmp); # will we borrow?
+ &sub($a0,$b0,$a0); # do the subtract
+ &cmpult($a0,$cc,$b0); # will we borrow?
+	&sub($a0,$cc,$a0);	# subtract the borrow
+ &st($a0,&QWPw(0,$rp)); # save
+ &add($b0,$tmp,$cc); # add the borrows
+
+ &add($ap,$QWS,$ap);
+ &add($bp,$QWS,$bp);
+ &add($rp,$QWS,$rp);
+ &sub($count,1,$count);
+ &bgt($count,&label("last_loop"));
+ &function_end_A($name);
+
+######################################################
+ &set_label("finish");
+ &add($count,4,$count);
+ &bgt($count,&label("last_loop"));
+
+ &set_label("end");
+ &function_end($name);
+ }
+
diff --git a/crypto/bn/asm/ia64.S b/crypto/bn/asm/ia64.S
new file mode 100644
index 0000000..7b82b82
--- /dev/null
+++ b/crypto/bn/asm/ia64.S
@@ -0,0 +1,1560 @@
+.explicit
+.text
+.ident "ia64.S, Version 2.1"
+.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+//
+// ====================================================================
+// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+// project.
+//
+// Rights for redistribution and usage in source and binary forms are
+// granted according to the OpenSSL license. Warranty of any kind is
+// disclaimed.
+// ====================================================================
+//
+// Version 2.x is an Itanium2 re-tune. A few words about how Itanium2
+// differs from Itanium from this module's viewpoint. Most notably, is
+// it "wider" than Itanium? Do you get the loop scalability discussed
+// in the commentary sections? Not really:-( Itanium2 has 6
+// integer ALU ports, i.e. it's 2 ports wider, but that's not enough to
+// spin twice as fast, as I would need 8 IALU ports. The number of
+// floating point ports is the same, i.e. 2, while I need 4. In other
+// words, to this module Itanium2 remains effectively as "wide" as
+// Itanium. Yet it's essentially different with respect to this module,
+// and a re-tune was required, because some instruction latencies have
+// changed. Most noticeably those intensively used:
+//
+// Itanium Itanium2
+// ldf8 9 6 L2 hit
+// ld8 2 1 L1 hit
+// getf 2 5
+// xma[->getf] 7[+1] 4[+0]
+// add[->st8] 1[+1] 1[+0]
+//
+// What does it mean? You might ratiocinate that the original code
+// should run just faster... because the sum of latencies is smaller...
+// Wrong! Note that the getf latency increased. This means that if a
+// loop is scheduled for the lower latency (as they were), then it will
+// suffer from stall conditions and the code will therefore turn
+// anti-scalable; e.g. the original bn_mul_words spun at 5*n, or 2.5
+// times slower than expected on Itanium2! What to do? Reschedule the
+// loops for Itanium2? But then Itanium would exhibit anti-scalability.
+// So I've chosen to reschedule for the worst latency of every
+// instruction, aiming for the best *all-round* performance.
+
+// Q. How much faster does it get?
+// A. Here is the output from 'openssl speed rsa dsa' for vanilla
+// 0.9.6a compiled with gcc version 2.96 20000731 (Red Hat
+// Linux 7.1 2.96-81):
+//
+// sign verify sign/s verify/s
+// rsa 512 bits 0.0036s 0.0003s 275.3 2999.2
+// rsa 1024 bits 0.0203s 0.0011s 49.3 894.1
+// rsa 2048 bits 0.1331s 0.0040s 7.5 250.9
+// rsa 4096 bits 0.9270s 0.0147s 1.1 68.1
+// sign verify sign/s verify/s
+// dsa 512 bits 0.0035s 0.0043s 288.3 234.8
+// dsa 1024 bits 0.0111s 0.0135s 90.0 74.2
+//
+// And here is similar output but for this assembler
+// implementation:-)
+//
+// sign verify sign/s verify/s
+// rsa 512 bits 0.0021s 0.0001s 549.4 9638.5
+// rsa 1024 bits 0.0055s 0.0002s 183.8 4481.1
+// rsa 2048 bits 0.0244s 0.0006s 41.4 1726.3
+// rsa 4096 bits 0.1295s 0.0018s 7.7 561.5
+// sign verify sign/s verify/s
+// dsa 512 bits 0.0012s 0.0013s 891.9 756.6
+// dsa 1024 bits 0.0023s 0.0028s 440.4 376.2
+//
+//	Yes, you may argue that it's not a fair comparison, as it's
+//	possible to craft the C implementation with the BN_UMULT_HIGH
+//	inline assembler macro. But of course! Here is the output
+// with the macro:
+//
+// sign verify sign/s verify/s
+// rsa 512 bits 0.0020s 0.0002s 495.0 6561.0
+// rsa 1024 bits 0.0086s 0.0004s 116.2 2235.7
+// rsa 2048 bits 0.0519s 0.0015s 19.3 667.3
+// rsa 4096 bits 0.3464s 0.0053s 2.9 187.7
+// sign verify sign/s verify/s
+// dsa 512 bits 0.0016s 0.0020s 613.1 510.5
+// dsa 1024 bits 0.0045s 0.0054s 221.0 183.9
+//
+// My code is still way faster, huh:-) And I believe that even
+// higher performance can be achieved. Note that as keys get
+//	longer, the performance gain is larger. Why? According to the
+//	profiler there is another player in the field, namely
+//	BN_from_montgomery, consuming a larger and larger portion of CPU
+//	time as keysize decreases. I therefore consider putting effort
+//	into an assembler implementation of the following routine:
+//
+// void bn_mul_add_mont (BN_ULONG *rp,BN_ULONG *np,int nl,BN_ULONG n0)
+// {
+// int i,j;
+//		BN_ULONG	v, *nrp=&(rp[nl]);	/* nrp was undeclared in the
+//							 * original sketch; &(rp[nl])
+//							 * is the assumed start value */
+//
+// for (i=0; i<nl; i++)
+// {
+// v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2);
+// nrp++;
+// rp++;
+// if (((nrp[-1]+=v)&BN_MASK2) < v)
+// for (j=0; ((++nrp[j])&BN_MASK2) == 0; j++) ;
+// }
+// }
+//
+//	It might as well be beneficial to implement even the combaX
+//	variants, as it appears they could literally unleash the
+//	performance (see the comment section of bn_mul_comba8 below).
+//
+// And finally for your reference the output for 0.9.6a compiled
+//	with SGIcc version 0.01.0-12 (keep in mind that at the moment
+//	of this writing it's not possible to convince SGIcc to use the
+//	BN_UMULT_HIGH inline assembler macro, yet the code is fast,
+//	i.e. for a compiler-generated one:-):
+//
+// sign verify sign/s verify/s
+// rsa 512 bits 0.0022s 0.0002s 452.7 5894.3
+// rsa 1024 bits 0.0097s 0.0005s 102.7 2002.9
+// rsa 2048 bits 0.0578s 0.0017s 17.3 600.2
+// rsa 4096 bits 0.3838s 0.0061s 2.6 164.5
+// sign verify sign/s verify/s
+// dsa 512 bits 0.0018s 0.0022s 547.3 459.6
+// dsa 1024 bits 0.0051s 0.0062s 196.6 161.3
+//
+//	Oh! Benchmarks were performed on a 733MHz Lion-class Itanium
+//	system running Redhat Linux 7.1 (very special thanks to Ray
+//	McCaffity of Williams Communications for providing an account).
+//
+// Q.	What the heck is with 'rum 1<<5' at the end of every function?
+// A.	Well, by clearing the "upper FP registers written" bit of the
+//	User Mask I want to excuse the kernel from preserving the upper
+//	(f32-f128) FP register bank over a process context switch, thus
+//	minimizing bus bandwidth consumption during the switch (i.e.
+//	after a PKI operation completes and the program is off doing
+//	something else like bulk symmetric encryption). Having said
+//	this, I also want to point out that it might be a good idea
+//	to compile the whole toolkit (as well as the majority of
+//	programs for that matter) with the -mfixed-range=f32-f127
+//	command line option. No, it doesn't prevent the compiler from
+//	writing to the upper bank, but it at least discourages it from
+//	doing so. If you don't like the idea, you have the option to
+//	compile the module with -Drum=nop.m on the command line.
+//
+
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+#define ADDP addp4
+#else
+#define ADDP add
+#endif
+
+#if 1
+//
+// bn_[add|sub]_words routines.
+//
+// Loops are spinning in 2*(n+5) ticks on Itanium (provided that the
+// data reside in L1 cache, i.e. 2 ticks away). It's possible to
+// compress the epilogue and get down to 2*n+6, but at the cost of
+// scalability (the neat feature of this implementation is that it
+// shall automagically spin in n+5 on "wider" IA-64 implementations:-)
+// I consider that the epilogue is short enough as it is to trade tiny
+// performance loss on Itanium for scalability.
+//
+// BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
+//
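+// For reference, a minimal C sketch of the word-level semantics the
+// loop below implements (an illustration, not the source this module
+// was derived from):
+//
+//	BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp, int num)
+//	{	BN_ULONG a,b,r,carry=0;
+//
+//	while (num-- > 0)
+//		{
+//		a=*(ap++); b=*(bp++);
+//		r=a+carry; carry=(r<carry);	/* carry-in, may wrap */
+//		r+=b;      carry+=(r<b);	/* word add, may wrap */
+//		*(rp++)=r;
+//		}
+//	return carry;
+//	}
+//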
+.global bn_add_words#
+.proc bn_add_words#
+.align 64
+.skip 32 // makes the loop body aligned at 64-byte boundary
+bn_add_words:
+ .prologue
+ .fframe 0
+ .save ar.pfs,r2
+{ .mii; alloc r2=ar.pfs,4,12,0,16
+ cmp4.le p6,p0=r35,r0 };;
+{ .mfb; mov r8=r0 // return value
+(p6) br.ret.spnt.many b0 };;
+
+ .save ar.lc,r3
+{ .mib; sub r10=r35,r0,1
+ mov r3=ar.lc
+ brp.loop.imp .L_bn_add_words_ctop,.L_bn_add_words_cend-16
+ }
+ .body
+{ .mib; ADDP r14=0,r32 // rp
+ mov r9=pr };;
+{ .mii; ADDP r15=0,r33 // ap
+ mov ar.lc=r10
+ mov ar.ec=6 }
+{ .mib; ADDP r16=0,r34 // bp
+ mov pr.rot=1<<16 };;
+
+.L_bn_add_words_ctop:
+{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++)
+ (p18) add r39=r37,r34
+ (p19) cmp.ltu.unc p56,p0=r40,r38 }
+{ .mfb; (p0) nop.m 0x0
+ (p0) nop.f 0x0
+ (p0) nop.b 0x0 }
+{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++)
+ (p58) cmp.eq.or p57,p0=-1,r41 // (p20)
+ (p58) add r41=1,r41 } // (p20)
+{ .mfb; (p21) st8 [r14]=r42,8 // *(rp++)=r
+ (p0) nop.f 0x0
+ br.ctop.sptk .L_bn_add_words_ctop };;
+.L_bn_add_words_cend:
+
+{ .mii;
+(p59) add r8=1,r8 // return value
+ mov pr=r9,0x1ffff
+ mov ar.lc=r3 }
+{ .mbb; nop.b 0x0
+ br.ret.sptk.many b0 };;
+.endp bn_add_words#
+
+//
+// BN_ULONG bn_sub_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
+//
+.global bn_sub_words#
+.proc bn_sub_words#
+.align 64
+.skip 32 // makes the loop body aligned at 64-byte boundary
+bn_sub_words:
+ .prologue
+ .fframe 0
+ .save ar.pfs,r2
+{ .mii; alloc r2=ar.pfs,4,12,0,16
+ cmp4.le p6,p0=r35,r0 };;
+{ .mfb; mov r8=r0 // return value
+(p6) br.ret.spnt.many b0 };;
+
+ .save ar.lc,r3
+{ .mib; sub r10=r35,r0,1
+ mov r3=ar.lc
+ brp.loop.imp .L_bn_sub_words_ctop,.L_bn_sub_words_cend-16
+ }
+ .body
+{ .mib; ADDP r14=0,r32 // rp
+ mov r9=pr };;
+{ .mii; ADDP r15=0,r33 // ap
+ mov ar.lc=r10
+ mov ar.ec=6 }
+{ .mib; ADDP r16=0,r34 // bp
+ mov pr.rot=1<<16 };;
+
+.L_bn_sub_words_ctop:
+{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++)
+ (p18) sub r39=r37,r34
+ (p19) cmp.gtu.unc p56,p0=r40,r38 }
+{ .mfb; (p0) nop.m 0x0
+ (p0) nop.f 0x0
+ (p0) nop.b 0x0 }
+{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++)
+ (p58) cmp.eq.or p57,p0=0,r41 // (p20)
+ (p58) add r41=-1,r41 } // (p20)
+{ .mbb; (p21) st8 [r14]=r42,8 // *(rp++)=r
+ (p0) nop.b 0x0
+ br.ctop.sptk .L_bn_sub_words_ctop };;
+.L_bn_sub_words_cend:
+
+{ .mii;
+(p59) add r8=1,r8 // return value
+ mov pr=r9,0x1ffff
+ mov ar.lc=r3 }
+{ .mbb; nop.b 0x0
+ br.ret.sptk.many b0 };;
+.endp bn_sub_words#
+#endif
+
+#if 0
+#define XMA_TEMPTATION
+#endif
+
+#if 1
+//
+// BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+//
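+// Word-level semantics as a C sketch (an illustration only;
+// BN_UMULT_HIGH stands for an assumed helper returning the high word
+// of a 64x64-bit product, in the spirit of the BN_UMULT_HIGH macro
+// mentioned in the benchmark notes above):
+//
+//	BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+//	{	BN_ULONG lo,hi,carry=0;
+//
+//	while (num-- > 0)
+//		{
+//		lo=ap[0]*w; hi=BN_UMULT_HIGH(ap[0],w); ap++;
+//		lo+=carry;  hi+=(lo<carry);	/* hi cannot wrap here */
+//		*(rp++)=lo; carry=hi;
+//		}
+//	return carry;
+//	}
+//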
+.global bn_mul_words#
+.proc bn_mul_words#
+.align 64
+.skip 32 // makes the loop body aligned at 64-byte boundary
+bn_mul_words:
+ .prologue
+ .fframe 0
+ .save ar.pfs,r2
+#ifdef XMA_TEMPTATION
+{ .mfi; alloc r2=ar.pfs,4,0,0,0 };;
+#else
+{ .mfi; alloc r2=ar.pfs,4,12,0,16 };;
+#endif
+{ .mib; mov r8=r0 // return value
+ cmp4.le p6,p0=r34,r0
+(p6) br.ret.spnt.many b0 };;
+
+ .save ar.lc,r3
+{ .mii; sub r10=r34,r0,1
+ mov r3=ar.lc
+ mov r9=pr };;
+
+ .body
+{ .mib; setf.sig f8=r35 // w
+ mov pr.rot=0x800001<<16
+ // ------^----- serves as (p50) at first (p27)
+ brp.loop.imp .L_bn_mul_words_ctop,.L_bn_mul_words_cend-16
+ }
+
+#ifndef XMA_TEMPTATION
+
+{ .mmi; ADDP r14=0,r32 // rp
+ ADDP r15=0,r33 // ap
+ mov ar.lc=r10 }
+{ .mmi; mov r40=0 // serves as r35 at first (p27)
+ mov ar.ec=13 };;
+
+// This loop spins in 2*(n+12) ticks. It's scheduled for data in Itanium
+// L2 cache (i.e. 9 ticks away) as floating point load/store instructions
+// bypass L1 cache and L2 latency is actually best-case scenario for
+// ldf8. The loop is not scalable and shall run in 2*(n+12) even on
+// "wider" IA-64 implementations. It's a trade-off here. An n+24 loop
+// would give us ~5% in *overall* performance improvement on "wider"
+// IA-64, but would hurt Itanium by about the same because of the
+// longer epilogue. As it's a matter of a few percent either way, I've
+// chosen to trade the scalability for development time (you can see
+// this very instruction sequence in bn_mul_add_words loop which in
+// turn is scalable).
+.L_bn_mul_words_ctop:
+{ .mfi; (p25) getf.sig r36=f52 // low
+ (p21) xmpy.lu f48=f37,f8
+ (p28) cmp.ltu p54,p50=r41,r39 }
+{ .mfi; (p16) ldf8 f32=[r15],8
+ (p21) xmpy.hu f40=f37,f8
+ (p0) nop.i 0x0 };;
+{ .mii; (p25) getf.sig r32=f44 // high
+ .pred.rel "mutex",p50,p54
+ (p50) add r40=r38,r35 // (p27)
+ (p54) add r40=r38,r35,1 } // (p27)
+{ .mfb; (p28) st8 [r14]=r41,8
+ (p0) nop.f 0x0
+ br.ctop.sptk .L_bn_mul_words_ctop };;
+.L_bn_mul_words_cend:
+
+{ .mii; nop.m 0x0
+.pred.rel "mutex",p51,p55
+(p51) add r8=r36,r0
+(p55) add r8=r36,r0,1 }
+{ .mfb; nop.m 0x0
+ nop.f 0x0
+ nop.b 0x0 }
+
+#else // XMA_TEMPTATION
+
+ setf.sig f37=r0 // serves as carry at (p18) tick
+ mov ar.lc=r10
+ mov ar.ec=5;;
+
+// Most of you examining this code will very likely wonder why in the
+// name of Intel the following loop is commented out. Indeed, it looks
+// so neat that you find it hard to believe that something is wrong
+// with it, right? The catch is that every iteration depends on the
+// result from the previous one, and the latter isn't available
+// instantly. The loop therefore spins at the latency of xma minus 1,
+// or in other words at 6*(n+4) ticks:-( Compare to the "production"
+// loop above that runs in 2*(n+11), where the low-latency problem is
+// worked around by moving the dependency to the one-tick-latency
+// integer ALU. Note that the "distance" between ldf8 and xma is not
+// the latency of ldf8, but the *difference* between the xma and ldf8
+// latencies.
+.L_bn_mul_words_ctop:
+{ .mfi; (p16) ldf8 f32=[r33],8
+ (p18) xma.hu f38=f34,f8,f39 }
+{ .mfb; (p20) stf8 [r32]=f37,8
+ (p18) xma.lu f35=f34,f8,f39
+ br.ctop.sptk .L_bn_mul_words_ctop };;
+.L_bn_mul_words_cend:
+
+ getf.sig r8=f41 // the return value
+
+#endif // XMA_TEMPTATION
+
+{ .mii; nop.m 0x0
+ mov pr=r9,0x1ffff
+ mov ar.lc=r3 }
+{ .mfb; rum 1<<5 // clear um.mfh
+ nop.f 0x0
+ br.ret.sptk.many b0 };;
+.endp bn_mul_words#
+#endif
+
+#if 1
+//
+// BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+//
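+// Same C sketch as for bn_mul_words above, plus the accumulation into
+// r[] (again an illustration only, with the same assumed BN_UMULT_HIGH
+// helper):
+//
+//	BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+//	{	BN_ULONG lo,hi,carry=0;
+//
+//	while (num-- > 0)
+//		{
+//		lo=ap[0]*w; hi=BN_UMULT_HIGH(ap[0],w); ap++;
+//		lo+=carry;  hi+=(lo<carry);	/* carry-in */
+//		lo+=rp[0];  hi+=(lo<rp[0]);	/* accumulate; hi cannot wrap */
+//		*(rp++)=lo; carry=hi;
+//		}
+//	return carry;
+//	}
+//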
+.global bn_mul_add_words#
+.proc bn_mul_add_words#
+.align 64
+.skip 48 // makes the loop body aligned at 64-byte boundary
+bn_mul_add_words:
+ .prologue
+ .fframe 0
+ .save ar.pfs,r2
+ .save ar.lc,r3
+ .save pr,r9
+{ .mmi; alloc r2=ar.pfs,4,4,0,8
+ cmp4.le p6,p0=r34,r0
+ mov r3=ar.lc };;
+{ .mib; mov r8=r0 // return value
+ sub r10=r34,r0,1
+(p6) br.ret.spnt.many b0 };;
+
+ .body
+{ .mib; setf.sig f8=r35 // w
+ mov r9=pr
+ brp.loop.imp .L_bn_mul_add_words_ctop,.L_bn_mul_add_words_cend-16
+ }
+{ .mmi; ADDP r14=0,r32 // rp
+ ADDP r15=0,r33 // ap
+ mov ar.lc=r10 }
+{ .mii; ADDP r16=0,r32 // rp copy
+ mov pr.rot=0x2001<<16
+ // ------^----- serves as (p40) at first (p27)
+ mov ar.ec=11 };;
+
+// This loop spins in 3*(n+10) ticks on Itanium and in 2*(n+10) on
+// Itanium 2. Yes, unlike previous versions it scales:-) The previous
+// version was performing *all* additions in the IALU and was starving
+// for those even on Itanium 2. In this version one addition is
+// moved to the FPU and is folded with the multiplication. This is at
+// the cost of propagating the result from the previous call to this
+// subroutine to the L2 cache... In other words, negligible even for
+// shorter keys. *Overall* performance improvement [over the previous
+// version] varies from 11 to 22 percent depending on key length.
+.L_bn_mul_add_words_ctop:
+.pred.rel "mutex",p40,p42
+{ .mfi; (p23) getf.sig r36=f45 // low
+ (p20) xma.lu f42=f36,f8,f50 // low
+ (p40) add r39=r39,r35 } // (p27)
+{ .mfi; (p16) ldf8 f32=[r15],8 // *(ap++)
+ (p20) xma.hu f36=f36,f8,f50 // high
+ (p42) add r39=r39,r35,1 };; // (p27)
+{ .mmi; (p24) getf.sig r32=f40 // high
+ (p16) ldf8 f46=[r16],8 // *(rp1++)
+ (p40) cmp.ltu p41,p39=r39,r35 } // (p27)
+{ .mib; (p26) st8 [r14]=r39,8 // *(rp2++)
+ (p42) cmp.leu p41,p39=r39,r35 // (p27)
+ br.ctop.sptk .L_bn_mul_add_words_ctop};;
+.L_bn_mul_add_words_cend:
+
+{ .mmi; .pred.rel "mutex",p40,p42
+(p40) add r8=r35,r0
+(p42) add r8=r35,r0,1
+ mov pr=r9,0x1ffff }
+{ .mib; rum 1<<5 // clear um.mfh
+ mov ar.lc=r3
+ br.ret.sptk.many b0 };;
+.endp bn_mul_add_words#
+#endif
+
+#if 1
+//
+// void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
+//
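+// In C terms (a sketch, with the same assumed BN_UMULT_HIGH helper as
+// above), the low and high halves of each a[i]^2 land in adjacent
+// output words:
+//
+//	for (i=0; i<num; i++)
+//		{
+//		rp[2*i]  =ap[i]*ap[i];
+//		rp[2*i+1]=BN_UMULT_HIGH(ap[i],ap[i]);
+//		}
+//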
+.global bn_sqr_words#
+.proc bn_sqr_words#
+.align 64
+.skip 32 // makes the loop body aligned at 64-byte boundary
+bn_sqr_words:
+ .prologue
+ .fframe 0
+ .save ar.pfs,r2
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+ sxt4 r34=r34 };;
+{ .mii; cmp.le p6,p0=r34,r0
+ mov r8=r0 } // return value
+{ .mfb; ADDP r32=0,r32
+ nop.f 0x0
+(p6) br.ret.spnt.many b0 };;
+
+ .save ar.lc,r3
+{ .mii; sub r10=r34,r0,1
+ mov r3=ar.lc
+ mov r9=pr };;
+
+ .body
+{ .mib; ADDP r33=0,r33
+ mov pr.rot=1<<16
+ brp.loop.imp .L_bn_sqr_words_ctop,.L_bn_sqr_words_cend-16
+ }
+{ .mii; add r34=8,r32
+ mov ar.lc=r10
+ mov ar.ec=18 };;
+
+// 2*(n+17) on Itanium, (n+17) on "wider" IA-64 implementations. It's
+// possible to compress the epilogue (I'm getting tired of writing this
+// comment over and over) and get down to 2*n+16 at the cost of
+// scalability. The decision will very likely be reconsidered after the
+// benchmark program is profiled. I.e. if the performance gain on
+// Itanium turns out to be larger than the loss on "wider" IA-64, then
+// the loop should be explicitly split and the epilogue compressed.
+.L_bn_sqr_words_ctop:
+{ .mfi; (p16) ldf8 f32=[r33],8
+ (p25) xmpy.lu f42=f41,f41
+ (p0) nop.i 0x0 }
+{ .mib; (p33) stf8 [r32]=f50,16
+ (p0) nop.i 0x0
+ (p0) nop.b 0x0 }
+{ .mfi; (p0) nop.m 0x0
+ (p25) xmpy.hu f52=f41,f41
+ (p0) nop.i 0x0 }
+{ .mib; (p33) stf8 [r34]=f60,16
+ (p0) nop.i 0x0
+ br.ctop.sptk .L_bn_sqr_words_ctop };;
+.L_bn_sqr_words_cend:
+
+{ .mii; nop.m 0x0
+ mov pr=r9,0x1ffff
+ mov ar.lc=r3 }
+{ .mfb; rum 1<<5 // clear um.mfh
+ nop.f 0x0
+ br.ret.sptk.many b0 };;
+.endp bn_sqr_words#
+#endif
+
+#if 1
+// Apparently we win nothing by implementing a special bn_sqr_comba8.
+// Yes, it is possible to reduce the number of multiplications by
+// almost a factor of two, but then the amount of additions would
+// increase by a factor of two (as we would have to perform those
+// otherwise performed by xma ourselves). Normally we would trade
+// anyway, as multiplications are way more expensive, but not this
+// time... The multiplication kernel is fully pipelined, and as we
+// drain one 128-bit multiplication result per clock cycle,
+// multiplications are effectively as inexpensive as additions. A
+// special implementation might become of interest for "wider" IA-64
+// implementations, as you'll be able to get through the multiplication
+// phase faster (there won't be any stall issues as discussed in the
+// commentary section below, and you will therefore be able to employ
+// all 4 FP units)... But in these Itanium days it's simply too hard to
+// justify the effort, so I just drop down to the bn_mul_comba8 code:-)
+//
+// void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+//
+.global bn_sqr_comba8#
+.proc bn_sqr_comba8#
+.align 64
+bn_sqr_comba8:
+ .prologue
+ .fframe 0
+ .save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+ addp4 r33=0,r33
+ addp4 r32=0,r32 };;
+{ .mii;
+#else
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+#endif
+ mov r34=r33
+ add r14=8,r33 };;
+ .body
+{ .mii; add r17=8,r34
+ add r15=16,r33
+ add r18=16,r34 }
+{ .mfb; add r16=24,r33
+ br .L_cheat_entry_point8 };;
+.endp bn_sqr_comba8#
+#endif
+
+#if 1
+// I've estimated this routine to run in ~120 ticks, but in reality
+// (i.e. according to ar.itc) it takes ~160 ticks. Are those extra
+// cycles consumed by instruction fetch? Or did I misinterpret some
+// clause in the Itanium µ-architecture manual? Comments are welcome
+// and highly appreciated.
+//
+// On Itanium 2 it takes ~190 ticks. This is because of stalls on the
+// result from getf.sig. I do nothing about it at this point for
+// reasons depicted below.
+//
+// However! It should be noted that even 160 ticks is a darn good
+// result, as it's over 10 (yes, ten, spelled as t-e-n) times faster
+// than the C version (compiled with gcc with inline assembler). I
+// really kicked the compiler's butt here, didn't I? Yeah! This brings
+// us to the following statement. It's a damn shame that this routine isn't called
+// very often nowadays! According to the profiler most CPU time is
+// consumed by bn_mul_add_words called from BN_from_montgomery. In
+// order to estimate what we're missing, I've compared the performance
+// of this routine against "traditional" implementation, i.e. against
+// following routine:
+//
+// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+// { r[ 8]=bn_mul_words( &(r[0]),a,8,b[0]);
+// r[ 9]=bn_mul_add_words(&(r[1]),a,8,b[1]);
+// r[10]=bn_mul_add_words(&(r[2]),a,8,b[2]);
+// r[11]=bn_mul_add_words(&(r[3]),a,8,b[3]);
+// r[12]=bn_mul_add_words(&(r[4]),a,8,b[4]);
+// r[13]=bn_mul_add_words(&(r[5]),a,8,b[5]);
+// r[14]=bn_mul_add_words(&(r[6]),a,8,b[6]);
+// r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]);
+// }
+//
+// The one below is over 8 times faster than the one above:-( Even
+// more reasons to "combafy" bn_mul_add_mont...
+//
+// And yes, this routine really made me wish there were an optimizing
+// assembler! It also feels like it deserves a dedication.
+//
+// To my wife for being there and to my kids...
+//
+// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+//
+#define carry1 r14
+#define carry2 r15
+#define carry3 r34
+.global bn_mul_comba8#
+.proc bn_mul_comba8#
+.align 64
+bn_mul_comba8:
+ .prologue
+ .fframe 0
+ .save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+ addp4 r33=0,r33
+ addp4 r34=0,r34 };;
+{ .mii; addp4 r32=0,r32
+#else
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+#endif
+ add r14=8,r33
+ add r17=8,r34 }
+ .body
+{ .mii; add r15=16,r33
+ add r18=16,r34
+ add r16=24,r33 }
+.L_cheat_entry_point8:
+{ .mmi; add r19=24,r34
+
+ ldf8 f32=[r33],32 };;
+
+{ .mmi; ldf8 f120=[r34],32
+ ldf8 f121=[r17],32 }
+{ .mmi; ldf8 f122=[r18],32
+ ldf8 f123=[r19],32 };;
+{ .mmi; ldf8 f124=[r34]
+ ldf8 f125=[r17] }
+{ .mmi; ldf8 f126=[r18]
+ ldf8 f127=[r19] }
+
+{ .mmi; ldf8 f33=[r14],32
+ ldf8 f34=[r15],32 }
+{ .mmi; ldf8 f35=[r16],32;;
+ ldf8 f36=[r33] }
+{ .mmi; ldf8 f37=[r14]
+ ldf8 f38=[r15] }
+{ .mfi; ldf8 f39=[r16]
+// -------\ Entering multiplier's heaven /-------
+// ------------\ /------------
+// -----------------\ /-----------------
+// ----------------------\/----------------------
+ xma.hu f41=f32,f120,f0 }
+{ .mfi; xma.lu f40=f32,f120,f0 };; // (*)
+{ .mfi; xma.hu f51=f32,f121,f0 }
+{ .mfi; xma.lu f50=f32,f121,f0 };;
+{ .mfi; xma.hu f61=f32,f122,f0 }
+{ .mfi; xma.lu f60=f32,f122,f0 };;
+{ .mfi; xma.hu f71=f32,f123,f0 }
+{ .mfi; xma.lu f70=f32,f123,f0 };;
+{ .mfi; xma.hu f81=f32,f124,f0 }
+{ .mfi; xma.lu f80=f32,f124,f0 };;
+{ .mfi; xma.hu f91=f32,f125,f0 }
+{ .mfi; xma.lu f90=f32,f125,f0 };;
+{ .mfi; xma.hu f101=f32,f126,f0 }
+{ .mfi; xma.lu f100=f32,f126,f0 };;
+{ .mfi; xma.hu f111=f32,f127,f0 }
+{ .mfi; xma.lu f110=f32,f127,f0 };;//
+// (*) You can argue that splitting at every second bundle would
+// prevent "wider" IA-64 implementations from achieving the peak
+// performance. Well, not really... The catch is that if you
+// intend to keep 4 FP units busy by splitting at every fourth
+// bundle and thus perform these 16 multiplications in 4 ticks,
+// the first bundle *below* would stall because the result from
+// the first xma bundle *above* won't be available for another 3
+// ticks (if not more, being an optimist, I assume that "wider"
+// implementation will have same latency:-). This stall will hold
+// you back and the performance would be as if every second bundle
+// were split *anyway*...
+{ .mfi; getf.sig r16=f40
+ xma.hu f42=f33,f120,f41
+ add r33=8,r32 }
+{ .mfi; xma.lu f41=f33,f120,f41 };;
+{ .mfi; getf.sig r24=f50
+ xma.hu f52=f33,f121,f51 }
+{ .mfi; xma.lu f51=f33,f121,f51 };;
+{ .mfi; st8 [r32]=r16,16
+ xma.hu f62=f33,f122,f61 }
+{ .mfi; xma.lu f61=f33,f122,f61 };;
+{ .mfi; xma.hu f72=f33,f123,f71 }
+{ .mfi; xma.lu f71=f33,f123,f71 };;
+{ .mfi; xma.hu f82=f33,f124,f81 }
+{ .mfi; xma.lu f81=f33,f124,f81 };;
+{ .mfi; xma.hu f92=f33,f125,f91 }
+{ .mfi; xma.lu f91=f33,f125,f91 };;
+{ .mfi; xma.hu f102=f33,f126,f101 }
+{ .mfi; xma.lu f101=f33,f126,f101 };;
+{ .mfi; xma.hu f112=f33,f127,f111 }
+{ .mfi; xma.lu f111=f33,f127,f111 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r25=f41
+ xma.hu f43=f34,f120,f42 }
+{ .mfi; xma.lu f42=f34,f120,f42 };;
+{ .mfi; getf.sig r16=f60
+ xma.hu f53=f34,f121,f52 }
+{ .mfi; xma.lu f52=f34,f121,f52 };;
+{ .mfi; getf.sig r17=f51
+ xma.hu f63=f34,f122,f62
+ add r25=r25,r24 }
+{ .mfi; xma.lu f62=f34,f122,f62
+ mov carry1=0 };;
+{ .mfi; cmp.ltu p6,p0=r25,r24
+ xma.hu f73=f34,f123,f72 }
+{ .mfi; xma.lu f72=f34,f123,f72 };;
+{ .mfi; st8 [r33]=r25,16
+ xma.hu f83=f34,f124,f82
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f82=f34,f124,f82 };;
+{ .mfi; xma.hu f93=f34,f125,f92 }
+{ .mfi; xma.lu f92=f34,f125,f92 };;
+{ .mfi; xma.hu f103=f34,f126,f102 }
+{ .mfi; xma.lu f102=f34,f126,f102 };;
+{ .mfi; xma.hu f113=f34,f127,f112 }
+{ .mfi; xma.lu f112=f34,f127,f112 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r18=f42
+ xma.hu f44=f35,f120,f43
+ add r17=r17,r16 }
+{ .mfi; xma.lu f43=f35,f120,f43 };;
+{ .mfi; getf.sig r24=f70
+ xma.hu f54=f35,f121,f53 }
+{ .mfi; mov carry2=0
+ xma.lu f53=f35,f121,f53 };;
+{ .mfi; getf.sig r25=f61
+ xma.hu f64=f35,f122,f63
+ cmp.ltu p7,p0=r17,r16 }
+{ .mfi; add r18=r18,r17
+ xma.lu f63=f35,f122,f63 };;
+{ .mfi; getf.sig r26=f52
+ xma.hu f74=f35,f123,f73
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r18,r17
+ xma.lu f73=f35,f123,f73
+ add r18=r18,carry1 };;
+{ .mfi;
+ xma.hu f84=f35,f124,f83
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r18,carry1
+ xma.lu f83=f35,f124,f83 };;
+{ .mfi; st8 [r32]=r18,16
+ xma.hu f94=f35,f125,f93
+(p7) add carry2=1,carry2 }
+{ .mfi; xma.lu f93=f35,f125,f93 };;
+{ .mfi; xma.hu f104=f35,f126,f103 }
+{ .mfi; xma.lu f103=f35,f126,f103 };;
+{ .mfi; xma.hu f114=f35,f127,f113 }
+{ .mfi; mov carry1=0
+ xma.lu f113=f35,f127,f113
+ add r25=r25,r24 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r27=f43
+ xma.hu f45=f36,f120,f44
+ cmp.ltu p6,p0=r25,r24 }
+{ .mfi; xma.lu f44=f36,f120,f44
+ add r26=r26,r25 };;
+{ .mfi; getf.sig r16=f80
+ xma.hu f55=f36,f121,f54
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f54=f36,f121,f54 };;
+{ .mfi; getf.sig r17=f71
+ xma.hu f65=f36,f122,f64
+ cmp.ltu p6,p0=r26,r25 }
+{ .mfi; xma.lu f64=f36,f122,f64
+ add r27=r27,r26 };;
+{ .mfi; getf.sig r18=f62
+ xma.hu f75=f36,f123,f74
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r27,r26
+ xma.lu f74=f36,f123,f74
+ add r27=r27,carry2 };;
+{ .mfi; getf.sig r19=f53
+ xma.hu f85=f36,f124,f84
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f84=f36,f124,f84
+ cmp.ltu p6,p0=r27,carry2 };;
+{ .mfi; st8 [r33]=r27,16
+ xma.hu f95=f36,f125,f94
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f94=f36,f125,f94 };;
+{ .mfi; xma.hu f105=f36,f126,f104 }
+{ .mfi; mov carry2=0
+ xma.lu f104=f36,f126,f104
+ add r17=r17,r16 };;
+{ .mfi; xma.hu f115=f36,f127,f114
+ cmp.ltu p7,p0=r17,r16 }
+{ .mfi; xma.lu f114=f36,f127,f114
+ add r18=r18,r17 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r20=f44
+ xma.hu f46=f37,f120,f45
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r18,r17
+ xma.lu f45=f37,f120,f45
+ add r19=r19,r18 };;
+{ .mfi; getf.sig r24=f90
+ xma.hu f56=f37,f121,f55 }
+{ .mfi; xma.lu f55=f37,f121,f55 };;
+{ .mfi; getf.sig r25=f81
+ xma.hu f66=f37,f122,f65
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r19,r18
+ xma.lu f65=f37,f122,f65
+ add r20=r20,r19 };;
+{ .mfi; getf.sig r26=f72
+ xma.hu f76=f37,f123,f75
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r20,r19
+ xma.lu f75=f37,f123,f75
+ add r20=r20,carry1 };;
+{ .mfi; getf.sig r27=f63
+ xma.hu f86=f37,f124,f85
+(p7) add carry2=1,carry2 }
+{ .mfi; xma.lu f85=f37,f124,f85
+ cmp.ltu p7,p0=r20,carry1 };;
+{ .mfi; getf.sig r28=f54
+ xma.hu f96=f37,f125,f95
+(p7) add carry2=1,carry2 }
+{ .mfi; st8 [r32]=r20,16
+ xma.lu f95=f37,f125,f95 };;
+{ .mfi; xma.hu f106=f37,f126,f105 }
+{ .mfi; mov carry1=0
+ xma.lu f105=f37,f126,f105
+ add r25=r25,r24 };;
+{ .mfi; xma.hu f116=f37,f127,f115
+ cmp.ltu p6,p0=r25,r24 }
+{ .mfi; xma.lu f115=f37,f127,f115
+ add r26=r26,r25 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r29=f45
+ xma.hu f47=f38,f120,f46
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r26,r25
+ xma.lu f46=f38,f120,f46
+ add r27=r27,r26 };;
+{ .mfi; getf.sig r16=f100
+ xma.hu f57=f38,f121,f56
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r27,r26
+ xma.lu f56=f38,f121,f56
+ add r28=r28,r27 };;
+{ .mfi; getf.sig r17=f91
+ xma.hu f67=f38,f122,f66
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r28,r27
+ xma.lu f66=f38,f122,f66
+ add r29=r29,r28 };;
+{ .mfi; getf.sig r18=f82
+ xma.hu f77=f38,f123,f76
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r29,r28
+ xma.lu f76=f38,f123,f76
+ add r29=r29,carry2 };;
+{ .mfi; getf.sig r19=f73
+ xma.hu f87=f38,f124,f86
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f86=f38,f124,f86
+ cmp.ltu p6,p0=r29,carry2 };;
+{ .mfi; getf.sig r20=f64
+ xma.hu f97=f38,f125,f96
+(p6) add carry1=1,carry1 }
+{ .mfi; st8 [r33]=r29,16
+ xma.lu f96=f38,f125,f96 };;
+{ .mfi; getf.sig r21=f55
+ xma.hu f107=f38,f126,f106 }
+{ .mfi; mov carry2=0
+ xma.lu f106=f38,f126,f106
+ add r17=r17,r16 };;
+{ .mfi; xma.hu f117=f38,f127,f116
+ cmp.ltu p7,p0=r17,r16 }
+{ .mfi; xma.lu f116=f38,f127,f116
+ add r18=r18,r17 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r22=f46
+ xma.hu f48=f39,f120,f47
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r18,r17
+ xma.lu f47=f39,f120,f47
+ add r19=r19,r18 };;
+{ .mfi; getf.sig r24=f110
+ xma.hu f58=f39,f121,f57
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r19,r18
+ xma.lu f57=f39,f121,f57
+ add r20=r20,r19 };;
+{ .mfi; getf.sig r25=f101
+ xma.hu f68=f39,f122,f67
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r20,r19
+ xma.lu f67=f39,f122,f67
+ add r21=r21,r20 };;
+{ .mfi; getf.sig r26=f92
+ xma.hu f78=f39,f123,f77
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r21,r20
+ xma.lu f77=f39,f123,f77
+ add r22=r22,r21 };;
+{ .mfi; getf.sig r27=f83
+ xma.hu f88=f39,f124,f87
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r22,r21
+ xma.lu f87=f39,f124,f87
+ add r22=r22,carry1 };;
+{ .mfi; getf.sig r28=f74
+ xma.hu f98=f39,f125,f97
+(p7) add carry2=1,carry2 }
+{ .mfi; xma.lu f97=f39,f125,f97
+ cmp.ltu p7,p0=r22,carry1 };;
+{ .mfi; getf.sig r29=f65
+ xma.hu f108=f39,f126,f107
+(p7) add carry2=1,carry2 }
+{ .mfi; st8 [r32]=r22,16
+ xma.lu f107=f39,f126,f107 };;
+{ .mfi; getf.sig r30=f56
+ xma.hu f118=f39,f127,f117 }
+{ .mfi; xma.lu f117=f39,f127,f117 };;//
+//-------------------------------------------------//
+// Leaving multiplier's heaven... Quite a ride, huh?
+
+{ .mii; getf.sig r31=f47
+ add r25=r25,r24
+ mov carry1=0 };;
+{ .mii; getf.sig r16=f111
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mfb; getf.sig r17=f102 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r27=r27,r26 };;
+{ .mfb; nop.m 0x0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,r26
+ add r28=r28,r27 };;
+{ .mii; getf.sig r18=f93
+ add r17=r17,r16
+ mov carry3=0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r28,r27
+ add r29=r29,r28 };;
+{ .mii; getf.sig r19=f84
+ cmp.ltu p7,p0=r17,r16 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r29,r28
+ add r30=r30,r29 };;
+{ .mii; getf.sig r20=f75
+ add r18=r18,r17 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r30,r29
+ add r31=r31,r30 };;
+{ .mfb; getf.sig r21=f66 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r18,r17
+ add r19=r19,r18 }
+{ .mfb; nop.m 0x0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r31,r30
+ add r31=r31,carry2 };;
+{ .mfb; getf.sig r22=f57 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r19,r18
+ add r20=r20,r19 }
+{ .mfb; nop.m 0x0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r31,carry2 };;
+{ .mfb; getf.sig r23=f48 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r20,r19
+ add r21=r21,r20 }
+{ .mii;
+(p6) add carry1=1,carry1 }
+{ .mfb; st8 [r33]=r31,16 };;
+
+{ .mfb; getf.sig r24=f112 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r21,r20
+ add r22=r22,r21 };;
+{ .mfb; getf.sig r25=f103 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r22,r21
+ add r23=r23,r22 };;
+{ .mfb; getf.sig r26=f94 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r23,r22
+ add r23=r23,carry1 };;
+{ .mfb; getf.sig r27=f85 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p8=r23,carry1};;
+{ .mii; getf.sig r28=f76
+ add r25=r25,r24
+ mov carry1=0 }
+{ .mii; st8 [r32]=r23,16
+ (p7) add carry2=1,carry3
+ (p8) add carry2=0,carry3 };;
+
+{ .mfb; nop.m 0x0 }
+{ .mii; getf.sig r29=f67
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mfb; getf.sig r30=f58 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r27=r27,r26 };;
+{ .mfb; getf.sig r16=f113 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,r26
+ add r28=r28,r27 };;
+{ .mfb; getf.sig r17=f104 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r28,r27
+ add r29=r29,r28 };;
+{ .mfb; getf.sig r18=f95 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r29,r28
+ add r30=r30,r29 };;
+{ .mii; getf.sig r19=f86
+ add r17=r17,r16
+ mov carry3=0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r30,r29
+ add r30=r30,carry2 };;
+{ .mii; getf.sig r20=f77
+ cmp.ltu p7,p0=r17,r16
+ add r18=r18,r17 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r30,carry2 };;
+{ .mfb; getf.sig r21=f68 }
+{ .mii; st8 [r33]=r30,16
+(p6) add carry1=1,carry1 };;
+
+{ .mfb; getf.sig r24=f114 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r18,r17
+ add r19=r19,r18 };;
+{ .mfb; getf.sig r25=f105 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r19,r18
+ add r20=r20,r19 };;
+{ .mfb; getf.sig r26=f96 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r20,r19
+ add r21=r21,r20 };;
+{ .mfb; getf.sig r27=f87 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r21,r20
+ add r21=r21,carry1 };;
+{ .mib; getf.sig r28=f78
+ add r25=r25,r24 }
+{ .mib; (p7) add carry3=1,carry3
+ cmp.ltu p7,p8=r21,carry1};;
+{ .mii; st8 [r32]=r21,16
+ (p7) add carry2=1,carry3
+ (p8) add carry2=0,carry3 }
+
+{ .mii; mov carry1=0
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mfb; getf.sig r16=f115 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r27=r27,r26 };;
+{ .mfb; getf.sig r17=f106 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,r26
+ add r28=r28,r27 };;
+{ .mfb; getf.sig r18=f97 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r28,r27
+ add r28=r28,carry2 };;
+{ .mib; getf.sig r19=f88
+ add r17=r17,r16 }
+{ .mib;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r28,carry2 };;
+{ .mii; st8 [r33]=r28,16
+(p6) add carry1=1,carry1 }
+
+{ .mii; mov carry2=0
+ cmp.ltu p7,p0=r17,r16
+ add r18=r18,r17 };;
+{ .mfb; getf.sig r24=f116 }
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r18,r17
+ add r19=r19,r18 };;
+{ .mfb; getf.sig r25=f107 }
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r19,r18
+ add r19=r19,carry1 };;
+{ .mfb; getf.sig r26=f98 }
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r19,carry1};;
+{ .mii; st8 [r32]=r19,16
+ (p7) add carry2=1,carry2 }
+
+{ .mfb; add r25=r25,r24 };;
+
+{ .mfb; getf.sig r16=f117 }
+{ .mii; mov carry1=0
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mfb; getf.sig r17=f108 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r26=r26,carry2 };;
+{ .mfb; nop.m 0x0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,carry2 };;
+{ .mii; st8 [r33]=r26,16
+(p6) add carry1=1,carry1 }
+
+{ .mfb; add r17=r17,r16 };;
+{ .mfb; getf.sig r24=f118 }
+{ .mii; mov carry2=0
+ cmp.ltu p7,p0=r17,r16
+ add r17=r17,carry1 };;
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r17,carry1};;
+{ .mii; st8 [r32]=r17
+ (p7) add carry2=1,carry2 };;
+{ .mfb; add r24=r24,carry2 };;
+{ .mib; st8 [r33]=r24 }
+
+{ .mib; rum 1<<5 // clear um.mfh
+ br.ret.sptk.many b0 };;
+.endp bn_mul_comba8#
+#undef carry3
+#undef carry2
+#undef carry1
+#endif
+
+#if 1
+// It's possible to make it faster (see the comment to bn_sqr_comba8),
+// but I reckon it isn't worth the effort, basically because the
+// routine (actually both of them) is practically never called... So I
+// just play the same trick as with bn_sqr_comba8.
+//
+// void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+//
+.global bn_sqr_comba4#
+.proc bn_sqr_comba4#
+.align 64
+bn_sqr_comba4:
+ .prologue
+ .fframe 0
+ .save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+ addp4 r32=0,r32
+ addp4 r33=0,r33 };;
+{ .mii;
+#else
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+#endif
+ mov r34=r33
+ add r14=8,r33 };;
+ .body
+{ .mii; add r17=8,r34
+ add r15=16,r33
+ add r18=16,r34 }
+{ .mfb; add r16=24,r33
+ br .L_cheat_entry_point4 };;
+.endp bn_sqr_comba4#
+#endif
+
+#if 1
+// Runs in ~115 cycles, ~4.5 times faster than the C version. Well, whatever...
+//
+// void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+//
+#define carry1 r14
+#define carry2 r15
+.global bn_mul_comba4#
+.proc bn_mul_comba4#
+.align 64
+bn_mul_comba4:
+ .prologue
+ .fframe 0
+ .save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+ addp4 r33=0,r33
+ addp4 r34=0,r34 };;
+{ .mii; addp4 r32=0,r32
+#else
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+#endif
+ add r14=8,r33
+ add r17=8,r34 }
+ .body
+{ .mii; add r15=16,r33
+ add r18=16,r34
+ add r16=24,r33 };;
+.L_cheat_entry_point4:
+{ .mmi; add r19=24,r34
+
+ ldf8 f32=[r33] }
+
+{ .mmi; ldf8 f120=[r34]
+ ldf8 f121=[r17] };;
+{ .mmi; ldf8 f122=[r18]
+ ldf8 f123=[r19] }
+
+{ .mmi; ldf8 f33=[r14]
+ ldf8 f34=[r15] }
+{ .mfi; ldf8 f35=[r16]
+
+ xma.hu f41=f32,f120,f0 }
+{ .mfi; xma.lu f40=f32,f120,f0 };;
+{ .mfi; xma.hu f51=f32,f121,f0 }
+{ .mfi; xma.lu f50=f32,f121,f0 };;
+{ .mfi; xma.hu f61=f32,f122,f0 }
+{ .mfi; xma.lu f60=f32,f122,f0 };;
+{ .mfi; xma.hu f71=f32,f123,f0 }
+{ .mfi; xma.lu f70=f32,f123,f0 };;//
+// A major stall takes place here, and at 3 more places below. The
+// result from the first xma is not available for another 3 ticks.
+{ .mfi; getf.sig r16=f40
+ xma.hu f42=f33,f120,f41
+ add r33=8,r32 }
+{ .mfi; xma.lu f41=f33,f120,f41 };;
+{ .mfi; getf.sig r24=f50
+ xma.hu f52=f33,f121,f51 }
+{ .mfi; xma.lu f51=f33,f121,f51 };;
+{ .mfi; st8 [r32]=r16,16
+ xma.hu f62=f33,f122,f61 }
+{ .mfi; xma.lu f61=f33,f122,f61 };;
+{ .mfi; xma.hu f72=f33,f123,f71 }
+{ .mfi; xma.lu f71=f33,f123,f71 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r25=f41
+ xma.hu f43=f34,f120,f42 }
+{ .mfi; xma.lu f42=f34,f120,f42 };;
+{ .mfi; getf.sig r16=f60
+ xma.hu f53=f34,f121,f52 }
+{ .mfi; xma.lu f52=f34,f121,f52 };;
+{ .mfi; getf.sig r17=f51
+ xma.hu f63=f34,f122,f62
+ add r25=r25,r24 }
+{ .mfi; mov carry1=0
+ xma.lu f62=f34,f122,f62 };;
+{ .mfi; st8 [r33]=r25,16
+ xma.hu f73=f34,f123,f72
+ cmp.ltu p6,p0=r25,r24 }
+{ .mfi; xma.lu f72=f34,f123,f72 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r18=f42
+ xma.hu f44=f35,f120,f43
+(p6) add carry1=1,carry1 }
+{ .mfi; add r17=r17,r16
+ xma.lu f43=f35,f120,f43
+ mov carry2=0 };;
+{ .mfi; getf.sig r24=f70
+ xma.hu f54=f35,f121,f53
+ cmp.ltu p7,p0=r17,r16 }
+{ .mfi; xma.lu f53=f35,f121,f53 };;
+{ .mfi; getf.sig r25=f61
+ xma.hu f64=f35,f122,f63
+ add r18=r18,r17 }
+{ .mfi; xma.lu f63=f35,f122,f63
+(p7) add carry2=1,carry2 };;
+{ .mfi; getf.sig r26=f52
+ xma.hu f74=f35,f123,f73
+ cmp.ltu p7,p0=r18,r17 }
+{ .mfi; xma.lu f73=f35,f123,f73
+ add r18=r18,carry1 };;
+//-------------------------------------------------//
+{ .mii; st8 [r32]=r18,16
+(p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r18,carry1 };;
+
+{ .mfi; getf.sig r27=f43 // last major stall
+(p7) add carry2=1,carry2 };;
+{ .mii; getf.sig r16=f71
+ add r25=r25,r24
+ mov carry1=0 };;
+{ .mii; getf.sig r17=f62
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r27=r27,r26 };;
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,r26
+ add r27=r27,carry2 };;
+{ .mii; getf.sig r18=f53
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,carry2 };;
+{ .mfi; st8 [r33]=r27,16
+(p6) add carry1=1,carry1 }
+
+{ .mii; getf.sig r19=f44
+ add r17=r17,r16
+ mov carry2=0 };;
+{ .mii; getf.sig r24=f72
+ cmp.ltu p7,p0=r17,r16
+ add r18=r18,r17 };;
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r18,r17
+ add r19=r19,r18 };;
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r19,r18
+ add r19=r19,carry1 };;
+{ .mii; getf.sig r25=f63
+ (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r19,carry1};;
+{ .mii; st8 [r32]=r19,16
+ (p7) add carry2=1,carry2 }
+
+{ .mii; getf.sig r26=f54
+ add r25=r25,r24
+ mov carry1=0 };;
+{ .mii; getf.sig r16=f73
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r26=r26,carry2 };;
+{ .mii; getf.sig r17=f64
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,carry2 };;
+{ .mii; st8 [r33]=r26,16
+(p6) add carry1=1,carry1 }
+
+{ .mii; getf.sig r24=f74
+ add r17=r17,r16
+ mov carry2=0 };;
+{ .mii; cmp.ltu p7,p0=r17,r16
+ add r17=r17,carry1 };;
+
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r17,carry1};;
+{ .mii; st8 [r32]=r17,16
+ (p7) add carry2=1,carry2 };;
+
+{ .mii; add r24=r24,carry2 };;
+{ .mii; st8 [r33]=r24 }
+
+{ .mib; rum 1<<5 // clear um.mfh
+ br.ret.sptk.many b0 };;
+.endp bn_mul_comba4#
+#undef carry2
+#undef carry1
+#endif
+
+#if 1
+//
+// BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+//
+// In a nutshell, it's a port of my MIPS III/IV implementation.
+//
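+// It returns the 64-bit quotient of the 128-bit value h:l divided by
+// d; the caller is expected to keep h < d so that the quotient fits in
+// 64 bits (note the overflow check ending in abort below). The
+// quotient is produced 32 bits at a time by two passes through the
+// reciprocal-based division kernel at .L_udiv64_32_b6.
+//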
+#define AT r14
+#define H r16
+#define HH r20
+#define L r17
+#define D r18
+#define DH r22
+#define I r21
+
+#if 0
+// Some preprocessors (most notably HP-UX) appear to be allergic to
+// macros enclosed in parentheses [as these three were].
+#define cont p16
+#define break p0 // p20
+#define equ p24
+#else
+cont=p16
+break=p0
+equ=p24
+#endif
+
+.global abort#
+.global bn_div_words#
+.proc bn_div_words#
+.align 64
+bn_div_words:
+ .prologue
+ .fframe 0
+ .save ar.pfs,r2
+ .save b0,r3
+{ .mii; alloc r2=ar.pfs,3,5,0,8
+ mov r3=b0
+ mov r10=pr };;
+{ .mmb; cmp.eq p6,p0=r34,r0
+ mov r8=-1
+(p6) br.ret.spnt.many b0 };;
+
+ .body
+{ .mii; mov H=r32 // save h
+ mov ar.ec=0 // don't rotate at exit
+ mov pr.rot=0 }
+{ .mii; mov L=r33 // save l
+ mov r36=r0 };;
+
+.L_divw_shift: // -vv- note signed comparison
+{ .mfi; (p0) cmp.lt p16,p0=r0,r34 // d
+ (p0) shladd r33=r34,1,r0 }
+{ .mfb; (p0) add r35=1,r36
+ (p0) nop.f 0x0
+(p16) br.wtop.dpnt .L_divw_shift };;
+
+{ .mii; mov D=r34
+ shr.u DH=r34,32
+ sub r35=64,r36 };;
+{ .mii; setf.sig f7=DH
+ shr.u AT=H,r35
+ mov I=r36 };;
+{ .mib; cmp.ne p6,p0=r0,AT
+ shl H=H,r36
+(p6) br.call.spnt.clr b0=abort };; // overflow, die...
+
+{ .mfi; fcvt.xuf.s1 f7=f7
+ shr.u AT=L,r35 };;
+{ .mii; shl L=L,r36
+ or H=H,AT };;
+
+{ .mii; nop.m 0x0
+ cmp.leu p6,p0=D,H;;
+(p6) sub H=H,D }
+
+{ .mlx; setf.sig f14=D
+ movl AT=0xffffffff };;
+///////////////////////////////////////////////////////////
+{ .mii; setf.sig f6=H
+ shr.u HH=H,32;;
+ cmp.eq p6,p7=HH,DH };;
+{ .mfb;
+(p6) setf.sig f8=AT
+(p7) fcvt.xuf.s1 f6=f6
+(p7) br.call.sptk b6=.L_udiv64_32_b6 };;
+
+{ .mfi; getf.sig r33=f8 // q
+ xmpy.lu f9=f8,f14 }
+{ .mfi; xmpy.hu f10=f8,f14
+ shrp H=H,L,32 };;
+
+{ .mmi; getf.sig r35=f9 // tl
+ getf.sig r31=f10 };; // th
+
+.L_divw_1st_iter:
+{ .mii; (p0) add r32=-1,r33
+ (p0) cmp.eq equ,cont=HH,r31 };;
+{ .mii; (p0) cmp.ltu p8,p0=r35,D
+ (p0) sub r34=r35,D
+ (equ) cmp.leu break,cont=r35,H };;
+{ .mib; (cont) cmp.leu cont,break=HH,r31
+ (p8) add r31=-1,r31
+(cont) br.wtop.spnt .L_divw_1st_iter };;
+///////////////////////////////////////////////////////////
+{ .mii; sub H=H,r35
+ shl r8=r33,32
+ shl L=L,32 };;
+///////////////////////////////////////////////////////////
+{ .mii; setf.sig f6=H
+ shr.u HH=H,32;;
+ cmp.eq p6,p7=HH,DH };;
+{ .mfb;
+(p6) setf.sig f8=AT
+(p7) fcvt.xuf.s1 f6=f6
+(p7) br.call.sptk b6=.L_udiv64_32_b6 };;
+
+{ .mfi; getf.sig r33=f8 // q
+ xmpy.lu f9=f8,f14 }
+{ .mfi; xmpy.hu f10=f8,f14
+ shrp H=H,L,32 };;
+
+{ .mmi; getf.sig r35=f9 // tl
+ getf.sig r31=f10 };; // th
+
+.L_divw_2nd_iter:
+{ .mii; (p0) add r32=-1,r33
+ (p0) cmp.eq equ,cont=HH,r31 };;
+{ .mii; (p0) cmp.ltu p8,p0=r35,D
+ (p0) sub r34=r35,D
+ (equ) cmp.leu break,cont=r35,H };;
+{ .mib; (cont) cmp.leu cont,break=HH,r31
+ (p8) add r31=-1,r31
+(cont) br.wtop.spnt .L_divw_2nd_iter };;
+///////////////////////////////////////////////////////////
+{ .mii; sub H=H,r35
+ or r8=r8,r33
+ mov ar.pfs=r2 };;
+{ .mii; shr.u r9=H,I // remainder if anybody wants it
+ mov pr=r10,0x1ffff }
+{ .mfb; br.ret.sptk.many b0 };;
+
+// Unsigned 64 by 32 (well, by 64 for the moment) bit integer division
+// procedure.
+//
+// inputs: f6 = (double)a, f7 = (double)b
+// output: f8 = (int)(a/b)
+// clobbered: f8,f9,f10,f11,pred
+pred=p15
+// One can argue that this snippet is copyrighted to Intel
+// Corporation, as it's essentially identical to one of those
+// found in "Divide, Square Root and Remainder" section at
+// http://www.intel.com/software/products/opensource/libraries/num.htm.
+// Yes, I admit that the referred code was used as template,
+// but after I realized that there hardly is any other instruction
+// sequence which would perform this operation. I mean I figure that
+// any independent attempt to implement high-performance division
+// will result in code virtually identical to the Intel code. It
+// should be noted though that below division kernel is 1 cycle
+// faster than Intel one (note commented splits:-), not to mention
+// original prologue (rather lack of one) and epilogue.
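+//
+// In C-like sketch form the kernel below computes (s1 runs in the
+// 82-bit register format, so the names here are illustrative only):
+//
+//	y = frcpa(b);			// ~8-bit approximation of 1/b
+//	e = 1 - b*y;	q = a*y;
+//	e1 = e*e;	q = q + e*q;	y = y + e*y;
+//	q = q + e1*q;	y = y + e1*y;
+//	r = a - b*q;	q = q + r*y;	// quotient now accurate enough
+//	return trunc(q);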
+.align 32
+.skip 16
+.L_udiv64_32_b6:
+ frcpa.s1 f8,pred=f6,f7;; // [0] y0 = 1 / b
+
+(pred) fnma.s1 f9=f7,f8,f1 // [5] e0 = 1 - b * y0
+(pred) fmpy.s1 f10=f6,f8;; // [5] q0 = a * y0
+(pred) fmpy.s1 f11=f9,f9 // [10] e1 = e0 * e0
+(pred) fma.s1 f10=f9,f10,f10;; // [10] q1 = q0 + e0 * q0
+(pred) fma.s1 f8=f9,f8,f8 //;; // [15] y1 = y0 + e0 * y0
+(pred) fma.s1 f9=f11,f10,f10;; // [15] q2 = q1 + e1 * q1
+(pred) fma.s1 f8=f11,f8,f8 //;; // [20] y2 = y1 + e1 * y1
+(pred) fnma.s1 f10=f7,f9,f6;; // [20] r2 = a - b * q2
+(pred) fma.s1 f8=f10,f8,f9;; // [25] q3 = q2 + r2 * y2
+
+ fcvt.fxu.trunc.s1 f8=f8 // [30] q = trunc(q3)
+ br.ret.sptk.many b6;;
+.endp bn_div_words#
+#endif
diff --git a/crypto/bn/asm/mips1.s b/crypto/bn/asm/mips1.s
new file mode 100644
index 0000000..44fa125
--- /dev/null
+++ b/crypto/bn/asm/mips1.s
@@ -0,0 +1,539 @@
+/* This assembler code is for R2000/R3000 machines, or higher ones that
+ * do not want to do any 64-bit arithmetic.
+ * Make sure that the SSLeay bignum library is compiled with
+ * THIRTY_TWO_BIT set.
+ * This must either be compiled with the system CC, or, if you use GNU gas,
+ * cc -E mips1.s|gas -o mips1.o
+ */
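+
+/* For reference, a C sketch of what bn_mul_add_words computes (along
+ * the lines of the THIRTY_TWO_BIT code in crypto/bn/bn_asm.c, taking
+ * BN_ULLONG as a 64-bit unsigned type; the other word routines follow
+ * the same pattern):
+ *
+ *	BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap,
+ *				  int num, BN_ULONG w)
+ *	{
+ *		BN_ULLONG t;
+ *		BN_ULONG c = 0;
+ *		while (num--) {
+ *			t = (BN_ULLONG)w * *ap++ + *rp + c;
+ *			*rp++ = (BN_ULONG)t;
+ *			c = (BN_ULONG)(t >> 32);
+ *		}
+ *		return c;
+ *	}
+ */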
+ .set reorder
+ .set noat
+
+#define R1 $1
+#define CC $2
+#define R2 $3
+#define R3 $8
+#define R4 $9
+#define L1 $10
+#define L2 $11
+#define L3 $12
+#define L4 $13
+#define H1 $14
+#define H2 $15
+#define H3 $24
+#define H4 $25
+
+#define P1 $4
+#define P2 $5
+#define P3 $6
+#define P4 $7
+
+ .align 2
+ .ent bn_mul_add_words
+ .globl bn_mul_add_words
+.text
+bn_mul_add_words:
+ .frame $sp,0,$31
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+
+ #blt P3,4,$lab34
+
+ subu R1,P3,4
+ move CC,$0
+ bltz R1,$lab34
+$lab2:
+ lw R1,0(P1)
+ lw L1,0(P2)
+ lw R2,4(P1)
+ lw L2,4(P2)
+ lw R3,8(P1)
+ lw L3,8(P2)
+ lw R4,12(P1)
+ lw L4,12(P2)
+ multu L1,P4
+ addu R1,R1,CC
+ mflo L1
+ sltu CC,R1,CC
+ addu R1,R1,L1
+ mfhi H1
+ sltu L1,R1,L1
+ sw R1,0(P1)
+ addu CC,CC,L1
+ multu L2,P4
+ addu CC,H1,CC
+ mflo L2
+ addu R2,R2,CC
+ sltu CC,R2,CC
+ mfhi H2
+ addu R2,R2,L2
+ addu P2,P2,16
+ sltu L2,R2,L2
+ sw R2,4(P1)
+ addu CC,CC,L2
+ multu L3,P4
+ addu CC,H2,CC
+ mflo L3
+ addu R3,R3,CC
+ sltu CC,R3,CC
+ mfhi H3
+ addu R3,R3,L3
+ addu P1,P1,16
+ sltu L3,R3,L3
+ sw R3,-8(P1)
+ addu CC,CC,L3
+ multu L4,P4
+ addu CC,H3,CC
+ mflo L4
+ addu R4,R4,CC
+ sltu CC,R4,CC
+ mfhi H4
+ addu R4,R4,L4
+ subu P3,P3,4
+ sltu L4,R4,L4
+ addu CC,CC,L4
+ addu CC,H4,CC
+
+ subu R1,P3,4
+ sw R4,-4(P1) # delay slot
+ bgez R1,$lab2
+
+ bleu P3,0,$lab3
+ .align 2
+$lab33:
+ lw L1,0(P2)
+ lw R1,0(P1)
+ multu L1,P4
+ addu R1,R1,CC
+ sltu CC,R1,CC
+ addu P1,P1,4
+ mflo L1
+ mfhi H1
+ addu R1,R1,L1
+ addu P2,P2,4
+ sltu L1,R1,L1
+ subu P3,P3,1
+ addu CC,CC,L1
+ sw R1,-4(P1)
+ addu CC,H1,CC
+ bgtz P3,$lab33
+ j $31
+ .align 2
+$lab3:
+ j $31
+ .align 2
+$lab34:
+ bgt P3,0,$lab33
+ j $31
+ .end bn_mul_add_words
+
+ .align 2
+ # Program Unit: bn_mul_words
+ .ent bn_mul_words
+ .globl bn_mul_words
+.text
+bn_mul_words:
+ .frame $sp,0,$31
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+
+ subu P3,P3,4
+ move CC,$0
+ bltz P3,$lab45
+$lab44:
+ lw L1,0(P2)
+ lw L2,4(P2)
+ lw L3,8(P2)
+ lw L4,12(P2)
+ multu L1,P4
+ subu P3,P3,4
+ mflo L1
+ mfhi H1
+ addu L1,L1,CC
+ multu L2,P4
+ sltu CC,L1,CC
+ sw L1,0(P1)
+ addu CC,H1,CC
+ mflo L2
+ mfhi H2
+ addu L2,L2,CC
+ multu L3,P4
+ sltu CC,L2,CC
+ sw L2,4(P1)
+ addu CC,H2,CC
+ mflo L3
+ mfhi H3
+ addu L3,L3,CC
+ multu L4,P4
+ sltu CC,L3,CC
+ sw L3,8(P1)
+ addu CC,H3,CC
+ mflo L4
+ mfhi H4
+ addu L4,L4,CC
+ addu P1,P1,16
+ sltu CC,L4,CC
+ addu P2,P2,16
+ addu CC,H4,CC
+ sw L4,-4(P1)
+
+ bgez P3,$lab44
+ b $lab45
+$lab46:
+ lw L1,0(P2)
+ addu P1,P1,4
+ multu L1,P4
+ addu P2,P2,4
+ mflo L1
+ mfhi H1
+ addu L1,L1,CC
+ subu P3,P3,1
+ sltu CC,L1,CC
+ sw L1,-4(P1)
+ addu CC,H1,CC
+ bgtz P3,$lab46
+ j $31
+$lab45:
+ addu P3,P3,4
+ bgtz P3,$lab46
+ j $31
+ .align 2
+ .end bn_mul_words
+
+ # Program Unit: bn_sqr_words
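+	# (for each input word a[i], bn_sqr_words stores the 64-bit
+	# square a[i]*a[i] as the pair r[2*i] (low half) and r[2*i+1]
+	# (high half); no carry propagates between pairs)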
+ .ent bn_sqr_words
+ .globl bn_sqr_words
+.text
+bn_sqr_words:
+ .frame $sp,0,$31
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+
+ subu P3,P3,4
+ bltz P3,$lab55
+$lab54:
+ lw L1,0(P2)
+ lw L2,4(P2)
+ lw L3,8(P2)
+ lw L4,12(P2)
+
+ multu L1,L1
+ subu P3,P3,4
+ mflo L1
+ mfhi H1
+ sw L1,0(P1)
+ sw H1,4(P1)
+
+ multu L2,L2
+ addu P1,P1,32
+ mflo L2
+ mfhi H2
+ sw L2,-24(P1)
+ sw H2,-20(P1)
+
+ multu L3,L3
+ addu P2,P2,16
+ mflo L3
+ mfhi H3
+ sw L3,-16(P1)
+ sw H3,-12(P1)
+
+ multu L4,L4
+
+ mflo L4
+ mfhi H4
+ sw L4,-8(P1)
+ sw H4,-4(P1)
+
+ bgtz P3,$lab54
+ b $lab55
+$lab56:
+ lw L1,0(P2)
+ addu P1,P1,8
+ multu L1,L1
+ addu P2,P2,4
+ subu P3,P3,1
+ mflo L1
+ mfhi H1
+ sw L1,-8(P1)
+ sw H1,-4(P1)
+
+ bgtz P3,$lab56
+ j $31
+$lab55:
+ addu P3,P3,4
+ bgtz P3,$lab56
+ j $31
+ .align 2
+ .end bn_sqr_words
+
+ # Program Unit: bn_add_words
+ .ent bn_add_words
+ .globl bn_add_words
+.text
+bn_add_words: # 0x590
+ .frame $sp,0,$31
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+
+ subu P4,P4,4
+ move CC,$0
+ bltz P4,$lab65
+$lab64:
+ lw L1,0(P2)
+ lw R1,0(P3)
+ lw L2,4(P2)
+ lw R2,4(P3)
+
+ addu L1,L1,CC
+ lw L3,8(P2)
+ sltu CC,L1,CC
+ addu L1,L1,R1
+ sltu R1,L1,R1
+ lw R3,8(P3)
+ addu CC,CC,R1
+ lw L4,12(P2)
+
+ addu L2,L2,CC
+ lw R4,12(P3)
+ sltu CC,L2,CC
+ addu L2,L2,R2
+ sltu R2,L2,R2
+ sw L1,0(P1)
+ addu CC,CC,R2
+ addu P1,P1,16
+ addu L3,L3,CC
+ sw L2,-12(P1)
+
+ sltu CC,L3,CC
+ addu L3,L3,R3
+ sltu R3,L3,R3
+ addu P2,P2,16
+ addu CC,CC,R3
+
+ addu L4,L4,CC
+ addu P3,P3,16
+ sltu CC,L4,CC
+ addu L4,L4,R4
+ subu P4,P4,4
+ sltu R4,L4,R4
+ sw L3,-8(P1)
+ addu CC,CC,R4
+ sw L4,-4(P1)
+
+ bgtz P4,$lab64
+ b $lab65
+$lab66:
+ lw L1,0(P2)
+ lw R1,0(P3)
+ addu L1,L1,CC
+ addu P1,P1,4
+ sltu CC,L1,CC
+ addu P2,P2,4
+ addu P3,P3,4
+ addu L1,L1,R1
+ subu P4,P4,1
+ sltu R1,L1,R1
+ sw L1,-4(P1)
+ addu CC,CC,R1
+
+ bgtz P4,$lab66
+ j $31
+$lab65:
+ addu P4,P4,4
+ bgtz P4,$lab66
+ j $31
+ .end bn_add_words
+
+ # Program Unit: bn_div64
+ .set at
+ .set reorder
+ .text
+ .align 2
+ .globl bn_div64
+ # 321 {
+ .ent bn_div64 2
+bn_div64:
+ subu $sp, 64
+ sw $31, 56($sp)
+ sw $16, 48($sp)
+ .mask 0x80010000, -56
+ .frame $sp, 64, $31
+ move $9, $4
+ move $12, $5
+ move $16, $6
+ # 322 BN_ULONG dh,dl,q,ret=0,th,tl,t;
+ move $31, $0
+ # 323 int i,count=2;
+ li $13, 2
+ # 324
+ # 325 if (d == 0) return(BN_MASK2);
+ bne $16, 0, $80
+ li $2, -1
+ b $93
+$80:
+ # 326
+ # 327 i=BN_num_bits_word(d);
+ move $4, $16
+ sw $31, 16($sp)
+ sw $9, 24($sp)
+ sw $12, 32($sp)
+ sw $13, 40($sp)
+ .livereg 0x800ff0e,0xfff
+ jal BN_num_bits_word
+ li $4, 32
+ lw $31, 16($sp)
+ lw $9, 24($sp)
+ lw $12, 32($sp)
+ lw $13, 40($sp)
+ move $3, $2
+ # 328 if ((i != BN_BITS2) && (h > (BN_ULONG)1<<i))
+ beq $2, $4, $81
+ li $14, 1
+ sll $15, $14, $2
+ bleu $9, $15, $81
+ # 329 {
+ # 330 #if !defined(NO_STDIO) && !defined(WIN16)
+ # 331 fprintf(stderr,"Division would overflow (%d)\n",i);
+ # 332 #endif
+ # 333 abort();
+ sw $3, 8($sp)
+ sw $9, 24($sp)
+ sw $12, 32($sp)
+ sw $13, 40($sp)
+ sw $31, 26($sp)
+ .livereg 0xff0e,0xfff
+ jal abort
+ lw $3, 8($sp)
+ li $4, 32
+ lw $9, 24($sp)
+ lw $12, 32($sp)
+ lw $13, 40($sp)
+ lw $31, 26($sp)
+ # 334 }
+$81:
+ # 335 i=BN_BITS2-i;
+ subu $3, $4, $3
+ # 336 if (h >= d) h-=d;
+ bltu $9, $16, $82
+ subu $9, $9, $16
+$82:
+ # 337
+ # 338 if (i)
+ beq $3, 0, $83
+ # 339 {
+ # 340 d<<=i;
+ sll $16, $16, $3
+ # 341 h=(h<<i)|(l>>(BN_BITS2-i));
+ sll $24, $9, $3
+ subu $25, $4, $3
+ srl $14, $12, $25
+ or $9, $24, $14
+ # 342 l<<=i;
+ sll $12, $12, $3
+ # 343 }
+$83:
+ # 344 dh=(d&BN_MASK2h)>>BN_BITS4;
+ # 345 dl=(d&BN_MASK2l);
+ and $8, $16, -65536
+ srl $8, $8, 16
+ and $10, $16, 65535
+ li $6, -65536
+$84:
+ # 346 for (;;)
+ # 347 {
+ # 348 if ((h>>BN_BITS4) == dh)
+ srl $15, $9, 16
+ bne $8, $15, $85
+ # 349 q=BN_MASK2l;
+ li $5, 65535
+ b $86
+$85:
+ # 350 else
+ # 351 q=h/dh;
+ divu $5, $9, $8
+$86:
+ # 352
+ # 353 for (;;)
+ # 354 {
+ # 355 t=(h-q*dh);
+ mul $4, $5, $8
+ subu $2, $9, $4
+ move $3, $2
+ # 356 if ((t&BN_MASK2h) ||
+ # 357 ((dl*q) <= (
+ # 358 (t<<BN_BITS4)+
+ # 359 ((l&BN_MASK2h)>>BN_BITS4))))
+ and $25, $2, $6
+ bne $25, $0, $87
+ mul $24, $10, $5
+ sll $14, $3, 16
+ and $15, $12, $6
+ srl $25, $15, 16
+ addu $15, $14, $25
+ bgtu $24, $15, $88
+$87:
+ # 360 break;
+ mul $3, $10, $5
+ b $89
+$88:
+ # 361 q--;
+ addu $5, $5, -1
+ # 362 }
+ b $86
+$89:
+ # 363 th=q*dh;
+ # 364 tl=q*dl;
+ # 365 t=(tl>>BN_BITS4);
+ # 366 tl=(tl<<BN_BITS4)&BN_MASK2h;
+ sll $14, $3, 16
+ and $2, $14, $6
+ move $11, $2
+ # 367 th+=t;
+ srl $25, $3, 16
+ addu $7, $4, $25
+ # 368
+ # 369 if (l < tl) th++;
+ bgeu $12, $2, $90
+ addu $7, $7, 1
+$90:
+ # 370 l-=tl;
+ subu $12, $12, $11
+ # 371 if (h < th)
+ bgeu $9, $7, $91
+ # 372 {
+ # 373 h+=d;
+ addu $9, $9, $16
+ # 374 q--;
+ addu $5, $5, -1
+ # 375 }
+$91:
+ # 376 h-=th;
+ subu $9, $9, $7
+ # 377
+ # 378 if (--count == 0) break;
+ addu $13, $13, -1
+ beq $13, 0, $92
+ # 379
+ # 380 ret=q<<BN_BITS4;
+ sll $31, $5, 16
+ # 381 h=((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2;
+ sll $24, $9, 16
+ srl $15, $12, 16
+ or $9, $24, $15
+ # 382 l=(l&BN_MASK2l)<<BN_BITS4;
+ and $12, $12, 65535
+ sll $12, $12, 16
+ # 383 }
+ b $84
+$92:
+ # 384 ret|=q;
+ or $31, $31, $5
+ # 385 return(ret);
+ move $2, $31
+$93:
+ lw $16, 48($sp)
+ lw $31, 56($sp)
+ addu $sp, 64
+ j $31
+ .end bn_div64
+
diff --git a/crypto/bn/asm/mips3.s b/crypto/bn/asm/mips3.s
new file mode 100644
index 0000000..dca4105
--- /dev/null
+++ b/crypto/bn/asm/mips3.s
@@ -0,0 +1,2201 @@
+.rdata
+.asciiz "mips3.s, Version 1.1"
+.asciiz "MIPS III/IV ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+/*
+ * ====================================================================
+ * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+ * project.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
+ * ====================================================================
+ */
+
+/*
+ * This is my modest contribution to the OpenSSL project (see
+ * http://www.openssl.org/ for more information about it) and is
+ * a drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c
+ * module. For updates see http://fy.chalmers.se/~appro/hpe/.
+ *
+ * The module is designed to work with either of the "new" MIPS ABI(5),
+ * namely N32 or N64, offered by IRIX 6.x. It's not meant to work under
+ * IRIX 5.x, not only because 5.x doesn't support the new ABIs, but also
+ * because 5.x kernels put the R4x00 CPU into 32-bit mode, so all those
+ * 64-bit instructions (daddu, dmultu, etc.) found below would only
+ * cause an illegal instruction exception:-(
+ *
+ * In addition, the code depends on preprocessor flags set up by the
+ * MIPSpro compiler driver (either as or cc) and therefore (probably?)
+ * can't be compiled by the GNU assembler. The GNU C driver manages
+ * fine though... I mean, as long as -mmips-as is specified or is the
+ * default option, because then it simply invokes /usr/bin/as, which in
+ * turn takes perfect care of the preprocessor definitions. Another neat
+ * feature offered by the MIPSpro assembler is an optimization pass.
+ * This gave me the opportunity to have the code looking more regular,
+ * as all those architecture-dependent instruction rescheduling details
+ * were left to the assembler. Cool, huh?
+ *
+ * Performance improvement is astonishing! 'apps/openssl speed rsa dsa'
+ * goes way over 3 times faster!
+ *
+ * <appro@fy.chalmers.se>
+ */
+#include <asm.h>
+#include <regdef.h>
+
+#if _MIPS_ISA>=4
+#define MOVNZ(cond,dst,src) \
+ movn dst,src,cond
+#else
+#define MOVNZ(cond,dst,src) \
+ .set noreorder; \
+ bnezl cond,.+8; \
+ move dst,src; \
+ .set reorder
+#endif
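+
+/* i.e. MOVNZ(cond,dst,src) behaves like "if (cond != 0) dst = src;":
+ * a real conditional move on MIPS IV, emulated on MIPS III with a
+ * branch-likely whose delay slot is annulled when the branch is not
+ * taken. */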
+
+.text
+
+.set noat
+.set reorder
+
+#define MINUS4 v1
+
+.align 5
+LEAF(bn_mul_add_words)
+ .set noreorder
+ bgtzl a2,.L_bn_mul_add_words_proceed
+ ld t0,0(a1)
+ jr ra
+ move v0,zero
+ .set reorder
+
+.L_bn_mul_add_words_proceed:
+ li MINUS4,-4
+ and ta0,a2,MINUS4
+ move v0,zero
+ beqz ta0,.L_bn_mul_add_words_tail
+
+.L_bn_mul_add_words_loop:
+ dmultu t0,a3
+ ld t1,0(a0)
+ ld t2,8(a1)
+ ld t3,8(a0)
+ ld ta0,16(a1)
+ ld ta1,16(a0)
+ daddu t1,v0
+ sltu v0,t1,v0 /* All manuals say it "compares 32-bit
+ * values", but it seems to work fine
+ * even on 64-bit registers. */
+ mflo AT
+ mfhi t0
+ daddu t1,AT
+ daddu v0,t0
+ sltu AT,t1,AT
+ sd t1,0(a0)
+ daddu v0,AT
+
+ dmultu t2,a3
+ ld ta2,24(a1)
+ ld ta3,24(a0)
+ daddu t3,v0
+ sltu v0,t3,v0
+ mflo AT
+ mfhi t2
+ daddu t3,AT
+ daddu v0,t2
+ sltu AT,t3,AT
+ sd t3,8(a0)
+ daddu v0,AT
+
+ dmultu ta0,a3
+ subu a2,4
+ PTR_ADD a0,32
+ PTR_ADD a1,32
+ daddu ta1,v0
+ sltu v0,ta1,v0
+ mflo AT
+ mfhi ta0
+ daddu ta1,AT
+ daddu v0,ta0
+ sltu AT,ta1,AT
+ sd ta1,-16(a0)
+ daddu v0,AT
+
+
+ dmultu ta2,a3
+ and ta0,a2,MINUS4
+ daddu ta3,v0
+ sltu v0,ta3,v0
+ mflo AT
+ mfhi ta2
+ daddu ta3,AT
+ daddu v0,ta2
+ sltu AT,ta3,AT
+ sd ta3,-8(a0)
+ daddu v0,AT
+ .set noreorder
+ bgtzl ta0,.L_bn_mul_add_words_loop
+ ld t0,0(a1)
+
+ bnezl a2,.L_bn_mul_add_words_tail
+ ld t0,0(a1)
+ .set reorder
+
+.L_bn_mul_add_words_return:
+ jr ra
+
+.L_bn_mul_add_words_tail:
+ dmultu t0,a3
+ ld t1,0(a0)
+ subu a2,1
+ daddu t1,v0
+ sltu v0,t1,v0
+ mflo AT
+ mfhi t0
+ daddu t1,AT
+ daddu v0,t0
+ sltu AT,t1,AT
+ sd t1,0(a0)
+ daddu v0,AT
+ beqz a2,.L_bn_mul_add_words_return
+
+ ld t0,8(a1)
+ dmultu t0,a3
+ ld t1,8(a0)
+ subu a2,1
+ daddu t1,v0
+ sltu v0,t1,v0
+ mflo AT
+ mfhi t0
+ daddu t1,AT
+ daddu v0,t0
+ sltu AT,t1,AT
+ sd t1,8(a0)
+ daddu v0,AT
+ beqz a2,.L_bn_mul_add_words_return
+
+ ld t0,16(a1)
+ dmultu t0,a3
+ ld t1,16(a0)
+ daddu t1,v0
+ sltu v0,t1,v0
+ mflo AT
+ mfhi t0
+ daddu t1,AT
+ daddu v0,t0
+ sltu AT,t1,AT
+ sd t1,16(a0)
+ daddu v0,AT
+ jr ra
+END(bn_mul_add_words)
+
+.align 5
+LEAF(bn_mul_words)
+ .set noreorder
+ bgtzl a2,.L_bn_mul_words_proceed
+ ld t0,0(a1)
+ jr ra
+ move v0,zero
+ .set reorder
+
+.L_bn_mul_words_proceed:
+ li MINUS4,-4
+ and ta0,a2,MINUS4
+ move v0,zero
+ beqz ta0,.L_bn_mul_words_tail
+
+.L_bn_mul_words_loop:
+ dmultu t0,a3
+ ld t2,8(a1)
+ ld ta0,16(a1)
+ ld ta2,24(a1)
+ mflo AT
+ mfhi t0
+ daddu v0,AT
+ sltu t1,v0,AT
+ sd v0,0(a0)
+ daddu v0,t1,t0
+
+ dmultu t2,a3
+ subu a2,4
+ PTR_ADD a0,32
+ PTR_ADD a1,32
+ mflo AT
+ mfhi t2
+ daddu v0,AT
+ sltu t3,v0,AT
+ sd v0,-24(a0)
+ daddu v0,t3,t2
+
+ dmultu ta0,a3
+ mflo AT
+ mfhi ta0
+ daddu v0,AT
+ sltu ta1,v0,AT
+ sd v0,-16(a0)
+ daddu v0,ta1,ta0
+
+
+ dmultu ta2,a3
+ and ta0,a2,MINUS4
+ mflo AT
+ mfhi ta2
+ daddu v0,AT
+ sltu ta3,v0,AT
+ sd v0,-8(a0)
+ daddu v0,ta3,ta2
+ .set noreorder
+ bgtzl ta0,.L_bn_mul_words_loop
+ ld t0,0(a1)
+
+ bnezl a2,.L_bn_mul_words_tail
+ ld t0,0(a1)
+ .set reorder
+
+.L_bn_mul_words_return:
+ jr ra
+
+.L_bn_mul_words_tail:
+ dmultu t0,a3
+ subu a2,1
+ mflo AT
+ mfhi t0
+ daddu v0,AT
+ sltu t1,v0,AT
+ sd v0,0(a0)
+ daddu v0,t1,t0
+ beqz a2,.L_bn_mul_words_return
+
+ ld t0,8(a1)
+ dmultu t0,a3
+ subu a2,1
+ mflo AT
+ mfhi t0
+ daddu v0,AT
+ sltu t1,v0,AT
+ sd v0,8(a0)
+ daddu v0,t1,t0
+ beqz a2,.L_bn_mul_words_return
+
+ ld t0,16(a1)
+ dmultu t0,a3
+ mflo AT
+ mfhi t0
+ daddu v0,AT
+ sltu t1,v0,AT
+ sd v0,16(a0)
+ daddu v0,t1,t0
+ jr ra
+END(bn_mul_words)
+
+.align 5
+LEAF(bn_sqr_words)
+ .set noreorder
+ bgtzl a2,.L_bn_sqr_words_proceed
+ ld t0,0(a1)
+ jr ra
+ move v0,zero
+ .set reorder
+
+.L_bn_sqr_words_proceed:
+ li MINUS4,-4
+ and ta0,a2,MINUS4
+ move v0,zero
+ beqz ta0,.L_bn_sqr_words_tail
+
+.L_bn_sqr_words_loop:
+ dmultu t0,t0
+ ld t2,8(a1)
+ ld ta0,16(a1)
+ ld ta2,24(a1)
+ mflo t1
+ mfhi t0
+ sd t1,0(a0)
+ sd t0,8(a0)
+
+ dmultu t2,t2
+ subu a2,4
+ PTR_ADD a0,64
+ PTR_ADD a1,32
+ mflo t3
+ mfhi t2
+ sd t3,-48(a0)
+ sd t2,-40(a0)
+
+ dmultu ta0,ta0
+ mflo ta1
+ mfhi ta0
+ sd ta1,-32(a0)
+ sd ta0,-24(a0)
+
+
+ dmultu ta2,ta2
+ and ta0,a2,MINUS4
+ mflo ta3
+ mfhi ta2
+ sd ta3,-16(a0)
+ sd ta2,-8(a0)
+
+ .set noreorder
+ bgtzl ta0,.L_bn_sqr_words_loop
+ ld t0,0(a1)
+
+ bnezl a2,.L_bn_sqr_words_tail
+ ld t0,0(a1)
+ .set reorder
+
+.L_bn_sqr_words_return:
+ move v0,zero
+ jr ra
+
+.L_bn_sqr_words_tail:
+ dmultu t0,t0
+ subu a2,1
+ mflo t1
+ mfhi t0
+ sd t1,0(a0)
+ sd t0,8(a0)
+ beqz a2,.L_bn_sqr_words_return
+
+ ld t0,8(a1)
+ dmultu t0,t0
+ subu a2,1
+ mflo t1
+ mfhi t0
+ sd t1,16(a0)
+ sd t0,24(a0)
+ beqz a2,.L_bn_sqr_words_return
+
+ ld t0,16(a1)
+ dmultu t0,t0
+ mflo t1
+ mfhi t0
+ sd t1,32(a0)
+ sd t0,40(a0)
+ jr ra
+END(bn_sqr_words)
+
+.align 5
+LEAF(bn_add_words)
+ .set noreorder
+ bgtzl a3,.L_bn_add_words_proceed
+ ld t0,0(a1)
+ jr ra
+ move v0,zero
+ .set reorder
+
+.L_bn_add_words_proceed:
+ li MINUS4,-4
+ and AT,a3,MINUS4
+ move v0,zero
+ beqz AT,.L_bn_add_words_tail
+
+.L_bn_add_words_loop:
+ ld ta0,0(a2)
+ subu a3,4
+ ld t1,8(a1)
+ and AT,a3,MINUS4
+ ld t2,16(a1)
+ PTR_ADD a2,32
+ ld t3,24(a1)
+ PTR_ADD a0,32
+ ld ta1,-24(a2)
+ PTR_ADD a1,32
+ ld ta2,-16(a2)
+ ld ta3,-8(a2)
+ daddu ta0,t0
+ sltu t8,ta0,t0
+ daddu t0,ta0,v0
+ sltu v0,t0,ta0
+ sd t0,-32(a0)
+ daddu v0,t8
+
+ daddu ta1,t1
+ sltu t9,ta1,t1
+ daddu t1,ta1,v0
+ sltu v0,t1,ta1
+ sd t1,-24(a0)
+ daddu v0,t9
+
+ daddu ta2,t2
+ sltu t8,ta2,t2
+ daddu t2,ta2,v0
+ sltu v0,t2,ta2
+ sd t2,-16(a0)
+ daddu v0,t8
+
+ daddu ta3,t3
+ sltu t9,ta3,t3
+ daddu t3,ta3,v0
+ sltu v0,t3,ta3
+ sd t3,-8(a0)
+ daddu v0,t9
+
+ .set noreorder
+ bgtzl AT,.L_bn_add_words_loop
+ ld t0,0(a1)
+
+ bnezl a3,.L_bn_add_words_tail
+ ld t0,0(a1)
+ .set reorder
+
+.L_bn_add_words_return:
+ jr ra
+
+.L_bn_add_words_tail:
+ ld ta0,0(a2)
+ daddu ta0,t0
+ subu a3,1
+ sltu t8,ta0,t0
+ daddu t0,ta0,v0
+ sltu v0,t0,ta0
+ sd t0,0(a0)
+ daddu v0,t8
+ beqz a3,.L_bn_add_words_return
+
+ ld t1,8(a1)
+ ld ta1,8(a2)
+ daddu ta1,t1
+ subu a3,1
+ sltu t9,ta1,t1
+ daddu t1,ta1,v0
+ sltu v0,t1,ta1
+ sd t1,8(a0)
+ daddu v0,t9
+ beqz a3,.L_bn_add_words_return
+
+ ld t2,16(a1)
+ ld ta2,16(a2)
+ daddu ta2,t2
+ sltu t8,ta2,t2
+ daddu t2,ta2,v0
+ sltu v0,t2,ta2
+ sd t2,16(a0)
+ daddu v0,t8
+ jr ra
+END(bn_add_words)
+
+.align 5
+LEAF(bn_sub_words)
+ .set noreorder
+ bgtzl a3,.L_bn_sub_words_proceed
+ ld t0,0(a1)
+ jr ra
+ move v0,zero
+ .set reorder
+
+.L_bn_sub_words_proceed:
+ li MINUS4,-4
+ and AT,a3,MINUS4
+ move v0,zero
+ beqz AT,.L_bn_sub_words_tail
+
+.L_bn_sub_words_loop:
+ ld ta0,0(a2)
+ subu a3,4
+ ld t1,8(a1)
+ and AT,a3,MINUS4
+ ld t2,16(a1)
+ PTR_ADD a2,32
+ ld t3,24(a1)
+ PTR_ADD a0,32
+ ld ta1,-24(a2)
+ PTR_ADD a1,32
+ ld ta2,-16(a2)
+ ld ta3,-8(a2)
+ sltu t8,t0,ta0
+ dsubu t0,ta0
+ dsubu ta0,t0,v0
+ sd ta0,-32(a0)
+ MOVNZ (t0,v0,t8)
+
+ sltu t9,t1,ta1
+ dsubu t1,ta1
+ dsubu ta1,t1,v0
+ sd ta1,-24(a0)
+ MOVNZ (t1,v0,t9)
+
+
+ sltu t8,t2,ta2
+ dsubu t2,ta2
+ dsubu ta2,t2,v0
+ sd ta2,-16(a0)
+ MOVNZ (t2,v0,t8)
+
+ sltu t9,t3,ta3
+ dsubu t3,ta3
+ dsubu ta3,t3,v0
+ sd ta3,-8(a0)
+ MOVNZ (t3,v0,t9)
+
+ .set noreorder
+ bgtzl AT,.L_bn_sub_words_loop
+ ld t0,0(a1)
+
+ bnezl a3,.L_bn_sub_words_tail
+ ld t0,0(a1)
+ .set reorder
+
+.L_bn_sub_words_return:
+ jr ra
+
+.L_bn_sub_words_tail:
+ ld ta0,0(a2)
+ subu a3,1
+ sltu t8,t0,ta0
+ dsubu t0,ta0
+ dsubu ta0,t0,v0
+ MOVNZ (t0,v0,t8)
+ sd ta0,0(a0)
+ beqz a3,.L_bn_sub_words_return
+
+ ld t1,8(a1)
+ subu a3,1
+ ld ta1,8(a2)
+ sltu t9,t1,ta1
+ dsubu t1,ta1
+ dsubu ta1,t1,v0
+ MOVNZ (t1,v0,t9)
+ sd ta1,8(a0)
+ beqz a3,.L_bn_sub_words_return
+
+ ld t2,16(a1)
+ ld ta2,16(a2)
+ sltu t8,t2,ta2
+ dsubu t2,ta2
+ dsubu ta2,t2,v0
+ MOVNZ (t2,v0,t8)
+ sd ta2,16(a0)
+ jr ra
+END(bn_sub_words)
+
+#undef MINUS4
+
+.align 5
+LEAF(bn_div_3_words)
+ .set reorder
+ move a3,a0 /* we know that bn_div_words doesn't
+ * touch a3, ta2, ta3 and preserves a2
+ * so that we can save two arguments
+ * and return address in registers
+ * instead of stack:-)
+ */
+ ld a0,(a3)
+ move ta2,a1
+ ld a1,-8(a3)
+ bne a0,a2,.L_bn_div_3_words_proceed
+ li v0,-1
+ jr ra
+.L_bn_div_3_words_proceed:
+ move ta3,ra
+ bal bn_div_words
+ move ra,ta3
+ dmultu ta2,v0
+ ld t2,-16(a3)
+ move ta0,zero
+ mfhi t1
+ mflo t0
+ sltu t8,t1,v1
+.L_bn_div_3_words_inner_loop:
+ bnez t8,.L_bn_div_3_words_inner_loop_done
+ sgeu AT,t2,t0
+ seq t9,t1,v1
+ and AT,t9
+ sltu t3,t0,ta2
+ daddu v1,a2
+ dsubu t1,t3
+ dsubu t0,ta2
+ sltu t8,t1,v1
+ sltu ta0,v1,a2
+ or t8,ta0
+ .set noreorder
+ beqzl AT,.L_bn_div_3_words_inner_loop
+ dsubu v0,1
+ .set reorder
+.L_bn_div_3_words_inner_loop_done:
+ jr ra
+END(bn_div_3_words)
+
+.align 5
+LEAF(bn_div_words)
+ .set noreorder
+ bnezl a2,.L_bn_div_words_proceed
+ move v1,zero
+ jr ra
+ li v0,-1 /* I'd rather signal div-by-zero
+ * which can be done with 'break 7' */
+
+.L_bn_div_words_proceed:
+ bltz a2,.L_bn_div_words_body
+ move t9,v1
+ dsll a2,1
+ bgtz a2,.-4
+ addu t9,1
+
+ .set reorder
+ negu t1,t9
+ li t2,-1
+ dsll t2,t1
+ and t2,a0
+ dsrl AT,a1,t1
+ .set noreorder
+ bnezl t2,.+8
+ break 6 /* signal overflow */
+ .set reorder
+ dsll a0,t9
+ dsll a1,t9
+ or a0,AT
+
+#define QT ta0
+#define HH ta1
+#define DH v1
+.L_bn_div_words_body:
+ dsrl DH,a2,32
+ sgeu AT,a0,a2
+ .set noreorder
+ bnezl AT,.+8
+ dsubu a0,a2
+ .set reorder
+
+ li QT,-1
+ dsrl HH,a0,32
+ dsrl QT,32 /* q=0xffffffff */
+ beq DH,HH,.L_bn_div_words_skip_div1
+ ddivu zero,a0,DH
+ mflo QT
+.L_bn_div_words_skip_div1:
+ dmultu a2,QT
+ dsll t3,a0,32
+ dsrl AT,a1,32
+ or t3,AT
+ mflo t0
+ mfhi t1
+.L_bn_div_words_inner_loop1:
+ sltu t2,t3,t0
+ seq t8,HH,t1
+ sltu AT,HH,t1
+ and t2,t8
+ sltu v0,t0,a2
+ or AT,t2
+ .set noreorder
+ beqz AT,.L_bn_div_words_inner_loop1_done
+ dsubu t1,v0
+ dsubu t0,a2
+ b .L_bn_div_words_inner_loop1
+ dsubu QT,1
+ .set reorder
+.L_bn_div_words_inner_loop1_done:
+
+ dsll a1,32
+ dsubu a0,t3,t0
+ dsll v0,QT,32
+
+ li QT,-1
+ dsrl HH,a0,32
+ dsrl QT,32 /* q=0xffffffff */
+ beq DH,HH,.L_bn_div_words_skip_div2
+ ddivu zero,a0,DH
+ mflo QT
+.L_bn_div_words_skip_div2:
+#undef DH
+ dmultu a2,QT
+ dsll t3,a0,32
+ dsrl AT,a1,32
+ or t3,AT
+ mflo t0
+ mfhi t1
+.L_bn_div_words_inner_loop2:
+ sltu t2,t3,t0
+ seq t8,HH,t1
+ sltu AT,HH,t1
+ and t2,t8
+ sltu v1,t0,a2
+ or AT,t2
+ .set noreorder
+ beqz AT,.L_bn_div_words_inner_loop2_done
+ dsubu t1,v1
+ dsubu t0,a2
+ b .L_bn_div_words_inner_loop2
+ dsubu QT,1
+ .set reorder
+.L_bn_div_words_inner_loop2_done:
+#undef HH
+
+ dsubu a0,t3,t0
+ or v0,QT
+ dsrl v1,a0,t9 /* v1 contains remainder if anybody wants it */
+ dsrl a2,t9 /* restore a2 */
+ jr ra
+#undef QT
+END(bn_div_words)
+
+#define a_0 t0
+#define a_1 t1
+#define a_2 t2
+#define a_3 t3
+#define b_0 ta0
+#define b_1 ta1
+#define b_2 ta2
+#define b_3 ta3
+
+#define a_4 s0
+#define a_5 s2
+#define a_6 s4
+#define a_7 a1 /* once we load a[7] we don't need a anymore */
+#define b_4 s1
+#define b_5 s3
+#define b_6 s5
+#define b_7 a2 /* once we load b[7] we don't need b anymore */
+
+#define t_1 t8
+#define t_2 t9
+
+#define c_1 v0
+#define c_2 v1
+#define c_3 a3
+
+#define FRAME_SIZE 48
+
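+/* Each dmultu/mflo/mfhi/daddu/sltu group below tracks the mul_add_c()
+ * macro of the C reference code -- in sketch form (no 128-bit type is
+ * available, hence the explicit carry bookkeeping):
+ *
+ *	mul_add_c(a,b,c0,c1,c2):
+ *		t1 = low64(a*b); t2 = high64(a*b);
+ *		c0 += t1; if (c0 < t1) t2++;
+ *		c1 += t2; if (c1 < t2) c2++;
+ *
+ * so (c2:c1:c0) accumulates one column of the comba multiplication. */
+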
+.align 5
+LEAF(bn_mul_comba8)
+ .set noreorder
+ PTR_SUB sp,FRAME_SIZE
+ .frame sp,64,ra
+ .set reorder
+	ld	a_0,0(a1)	/* If compiled with the -mips3 option
+				 * on an R5000 box, the assembler barks
+				 * on this line with a "shouldn't have
+				 * mult/div as last instruction in bb
+				 * (R10K bug)" warning. If anybody out
+				 * there has a clue about how to
+				 * circumvent this, do send me a note.
+				 * <appro@fy.chalmers.se>
+				 */
+ ld b_0,0(a2)
+ ld a_1,8(a1)
+ ld a_2,16(a1)
+ ld a_3,24(a1)
+ ld b_1,8(a2)
+ ld b_2,16(a2)
+ ld b_3,24(a2)
+ dmultu a_0,b_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
+ sd s0,0(sp)
+ sd s1,8(sp)
+ sd s2,16(sp)
+ sd s3,24(sp)
+ sd s4,32(sp)
+ sd s5,40(sp)
+ mflo c_1
+ mfhi c_2
+
+ dmultu a_0,b_1 /* mul_add_c(a[0],b[1],c2,c3,c1); */
+ ld a_4,32(a1)
+ ld a_5,40(a1)
+ ld a_6,48(a1)
+ ld a_7,56(a1)
+ ld b_4,32(a2)
+ ld b_5,40(a2)
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu c_3,t_2,AT
+ dmultu a_1,b_0 /* mul_add_c(a[1],b[0],c2,c3,c1); */
+ ld b_6,48(a2)
+ ld b_7,56(a2)
+ sd c_1,0(a0) /* r[0]=c1; */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ sd c_2,8(a0) /* r[1]=c2; */
+
+ dmultu a_2,b_0 /* mul_add_c(a[2],b[0],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ dmultu a_1,b_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu c_2,c_1,t_2
+ dmultu a_0,b_2 /* mul_add_c(a[0],b[2],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,16(a0) /* r[2]=c3; */
+
+ dmultu a_0,b_3 /* mul_add_c(a[0],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu c_3,c_2,t_2
+ dmultu a_1,b_2 /* mul_add_c(a[1],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_2,b_1 /* mul_add_c(a[2],b[1],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_3,b_0 /* mul_add_c(a[3],b[0],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,24(a0) /* r[3]=c1; */
+
+ dmultu a_4,b_0 /* mul_add_c(a[4],b[0],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ dmultu a_3,b_1 /* mul_add_c(a[3],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_2,b_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_1,b_3 /* mul_add_c(a[1],b[3],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_0,b_4 /* mul_add_c(a[0],b[4],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,32(a0) /* r[4]=c2; */
+
+ dmultu a_0,b_5 /* mul_add_c(a[0],b[5],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu c_2,c_1,t_2
+ dmultu a_1,b_4 /* mul_add_c(a[1],b[4],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_2,b_3 /* mul_add_c(a[2],b[3],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_3,b_2 /* mul_add_c(a[3],b[2],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_4,b_1 /* mul_add_c(a[4],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_5,b_0 /* mul_add_c(a[5],b[0],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,40(a0) /* r[5]=c3; */
+
+ dmultu a_6,b_0 /* mul_add_c(a[6],b[0],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu c_3,c_2,t_2
+ dmultu a_5,b_1 /* mul_add_c(a[5],b[1],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_4,b_2 /* mul_add_c(a[4],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_3,b_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_2,b_4 /* mul_add_c(a[2],b[4],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_1,b_5 /* mul_add_c(a[1],b[5],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_0,b_6 /* mul_add_c(a[0],b[6],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,48(a0) /* r[6]=c1; */
+
+ dmultu a_0,b_7 /* mul_add_c(a[0],b[7],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ dmultu a_1,b_6 /* mul_add_c(a[1],b[6],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_2,b_5 /* mul_add_c(a[2],b[5],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_3,b_4 /* mul_add_c(a[3],b[4],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_4,b_3 /* mul_add_c(a[4],b[3],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_5,b_2 /* mul_add_c(a[5],b[2],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_6,b_1 /* mul_add_c(a[6],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_7,b_0 /* mul_add_c(a[7],b[0],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,56(a0) /* r[7]=c2; */
+
+ dmultu a_7,b_1 /* mul_add_c(a[7],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu c_2,c_1,t_2
+ dmultu a_6,b_2 /* mul_add_c(a[6],b[2],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_5,b_3 /* mul_add_c(a[5],b[3],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_4,b_4 /* mul_add_c(a[4],b[4],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_3,b_5 /* mul_add_c(a[3],b[5],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_2,b_6 /* mul_add_c(a[2],b[6],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_1,b_7 /* mul_add_c(a[1],b[7],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,64(a0) /* r[8]=c3; */
+
+ dmultu a_2,b_7 /* mul_add_c(a[2],b[7],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu c_3,c_2,t_2
+ dmultu a_3,b_6 /* mul_add_c(a[3],b[6],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_4,b_5 /* mul_add_c(a[4],b[5],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_5,b_4 /* mul_add_c(a[5],b[4],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_6,b_3 /* mul_add_c(a[6],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_7,b_2 /* mul_add_c(a[7],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,72(a0) /* r[9]=c1; */
+
+ dmultu a_7,b_3 /* mul_add_c(a[7],b[3],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ dmultu a_6,b_4 /* mul_add_c(a[6],b[4],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_5,b_5 /* mul_add_c(a[5],b[5],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_4,b_6 /* mul_add_c(a[4],b[6],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_3,b_7 /* mul_add_c(a[3],b[7],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,80(a0) /* r[10]=c2; */
+
+ dmultu a_4,b_7 /* mul_add_c(a[4],b[7],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu c_2,c_1,t_2
+ dmultu a_5,b_6 /* mul_add_c(a[5],b[6],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_6,b_5 /* mul_add_c(a[6],b[5],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_7,b_4 /* mul_add_c(a[7],b[4],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,88(a0) /* r[11]=c3; */
+
+ dmultu a_7,b_5 /* mul_add_c(a[7],b[5],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu c_3,c_2,t_2
+ dmultu a_6,b_6 /* mul_add_c(a[6],b[6],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_5,b_7 /* mul_add_c(a[5],b[7],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,96(a0) /* r[12]=c1; */
+
+ dmultu a_6,b_7 /* mul_add_c(a[6],b[7],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ dmultu a_7,b_6 /* mul_add_c(a[7],b[6],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,104(a0) /* r[13]=c2; */
+
+ dmultu a_7,b_7 /* mul_add_c(a[7],b[7],c3,c1,c2); */
+ ld s0,0(sp)
+ ld s1,8(sp)
+ ld s2,16(sp)
+ ld s3,24(sp)
+ ld s4,32(sp)
+ ld s5,40(sp)
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sd c_3,112(a0) /* r[14]=c3; */
+ sd c_1,120(a0) /* r[15]=c1; */
+
+ PTR_ADD sp,FRAME_SIZE
+
+ jr ra
+END(bn_mul_comba8)
+
+.align 5
+LEAF(bn_mul_comba4)
+ .set reorder
+ ld a_0,0(a1)
+ ld b_0,0(a2)
+ ld a_1,8(a1)
+ ld a_2,16(a1)
+ dmultu a_0,b_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
+ ld a_3,24(a1)
+ ld b_1,8(a2)
+ ld b_2,16(a2)
+ ld b_3,24(a2)
+ mflo c_1
+ mfhi c_2
+ sd c_1,0(a0)
+
+ dmultu a_0,b_1 /* mul_add_c(a[0],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu c_3,t_2,AT
+ dmultu a_1,b_0 /* mul_add_c(a[1],b[0],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ sd c_2,8(a0)
+
+ dmultu a_2,b_0 /* mul_add_c(a[2],b[0],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ dmultu a_1,b_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu c_2,c_1,t_2
+ dmultu a_0,b_2 /* mul_add_c(a[0],b[2],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,16(a0)
+
+ dmultu a_0,b_3 /* mul_add_c(a[0],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu c_3,c_2,t_2
+ dmultu a_1,b_2 /* mul_add_c(a[1],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_2,b_1 /* mul_add_c(a[2],b[1],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_3,b_0 /* mul_add_c(a[3],b[0],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,24(a0)
+
+ dmultu a_3,b_1 /* mul_add_c(a[3],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ dmultu a_2,b_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_1,b_3 /* mul_add_c(a[1],b[3],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,32(a0)
+
+ dmultu a_2,b_3 /* mul_add_c(a[2],b[3],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu c_2,c_1,t_2
+ dmultu a_3,b_2 /* mul_add_c(a[3],b[2],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,40(a0)
+
+ dmultu a_3,b_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sd c_1,48(a0)
+ sd c_2,56(a0)
+
+ jr ra
+END(bn_mul_comba4)
+
+#undef a_4
+#undef a_5
+#undef a_6
+#undef a_7
+#define a_4 b_0
+#define a_5 b_1
+#define a_6 b_2
+#define a_7 b_3
+
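+/* The squaring routines below use the mul_add_c2() flavour for the
+ * off-diagonal terms: as mul_add_c(), but the product is added twice,
+ * since a[i]*a[j] and a[j]*a[i] coincide. In sketch form:
+ *
+ *	mul_add_c2(a,b,c0,c1,c2):
+ *		t1 = low64(a*b); t2 = high64(a*b);
+ *		c2 += top bit of t2;
+ *		t2 = (t2 << 1) | top bit of t1; t1 <<= 1;
+ *		c0 += t1; if (c0 < t1) t2++;
+ *		c1 += t2; if (c1 < t2) c2++;
+ */
+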
+.align 5
+LEAF(bn_sqr_comba8)
+ .set reorder
+ ld a_0,0(a1)
+ ld a_1,8(a1)
+ ld a_2,16(a1)
+ ld a_3,24(a1)
+
+ dmultu a_0,a_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
+ ld a_4,32(a1)
+ ld a_5,40(a1)
+ ld a_6,48(a1)
+ ld a_7,56(a1)
+ mflo c_1
+ mfhi c_2
+ sd c_1,0(a0)
+
+ dmultu a_0,a_1 /* mul_add_c2(a[0],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu c_3,t_2,AT
+ sd c_2,8(a0)
+
+ dmultu a_2,a_0 /* mul_add_c2(a[2],b[0],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt c_2,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_1,a_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,16(a0)
+
+ dmultu a_0,a_3 /* mul_add_c2(a[0],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt c_3,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_1,a_2 /* mul_add_c2(a[1],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_3,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,24(a0)
+
+ dmultu a_4,a_0 /* mul_add_c2(a[4],b[0],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_3,a_1 /* mul_add_c2(a[3],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_1,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_2,a_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,32(a0)
+
+ dmultu a_0,a_5 /* mul_add_c2(a[0],b[5],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt c_2,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_1,a_4 /* mul_add_c2(a[1],b[4],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_2,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_2,a_3 /* mul_add_c2(a[2],b[3],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_2,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,40(a0)
+
+ dmultu a_6,a_0 /* mul_add_c2(a[6],b[0],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt c_3,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_5,a_1 /* mul_add_c2(a[5],b[1],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_3,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_4,a_2 /* mul_add_c2(a[4],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_3,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_3,a_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,48(a0)
+
+ dmultu a_0,a_7 /* mul_add_c2(a[0],b[7],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_1,a_6 /* mul_add_c2(a[1],b[6],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_1,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_2,a_5 /* mul_add_c2(a[2],b[5],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_1,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_3,a_4 /* mul_add_c2(a[3],b[4],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_1,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,56(a0)
+
+ dmultu a_7,a_1 /* mul_add_c2(a[7],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt c_2,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_6,a_2 /* mul_add_c2(a[6],b[2],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_2,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_5,a_3 /* mul_add_c2(a[5],b[3],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_2,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_4,a_4 /* mul_add_c(a[4],b[4],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,64(a0)
+
+ dmultu a_2,a_7 /* mul_add_c2(a[2],b[7],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt c_3,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_3,a_6 /* mul_add_c2(a[3],b[6],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_3,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_4,a_5 /* mul_add_c2(a[4],b[5],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_3,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,72(a0)
+
+ dmultu a_7,a_3 /* mul_add_c2(a[7],b[3],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_6,a_4 /* mul_add_c2(a[6],b[4],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_1,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_5,a_5 /* mul_add_c(a[5],b[5],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,80(a0)
+
+ dmultu a_4,a_7 /* mul_add_c2(a[4],b[7],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt c_2,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_5,a_6 /* mul_add_c2(a[5],b[6],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_2,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,88(a0)
+
+ dmultu a_7,a_5 /* mul_add_c2(a[7],b[5],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt c_3,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_6,a_6 /* mul_add_c(a[6],b[6],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,96(a0)
+
+ dmultu a_6,a_7 /* mul_add_c2(a[6],b[7],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,104(a0)
+
+ dmultu a_7,a_7 /* mul_add_c(a[7],b[7],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sd c_3,112(a0)
+ sd c_1,120(a0)
+
+ jr ra
+END(bn_sqr_comba8)
+
+.align 5
+LEAF(bn_sqr_comba4)
+ .set reorder
+ ld a_0,0(a1)
+ ld a_1,8(a1)
+ ld a_2,16(a1)
+ ld a_3,24(a1)
+ dmultu a_0,a_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
+ mflo c_1
+ mfhi c_2
+ sd c_1,0(a0)
+
+ dmultu a_0,a_1 /* mul_add_c2(a[0],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu c_3,t_2,AT
+ sd c_2,8(a0)
+
+ dmultu a_2,a_0 /* mul_add_c2(a[2],b[0],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt c_2,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_1,a_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,16(a0)
+
+ dmultu a_0,a_3 /* mul_add_c2(a[0],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt c_3,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+	dmultu	a_1,a_2		/* mul_add_c2(a[1],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_3,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,24(a0)
+
+ dmultu a_3,a_1 /* mul_add_c2(a[3],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_2,a_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,32(a0)
+
+ dmultu a_2,a_3 /* mul_add_c2(a[2],b[3],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt c_2,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,40(a0)
+
+ dmultu a_3,a_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sd c_1,48(a0)
+ sd c_2,56(a0)
+
+ jr ra
+END(bn_sqr_comba4)
diff --git a/crypto/bn/asm/pa-risc.s b/crypto/bn/asm/pa-risc.s
new file mode 100644
index 0000000..775130a
--- /dev/null
+++ b/crypto/bn/asm/pa-risc.s
@@ -0,0 +1,710 @@
+ .SPACE $PRIVATE$
+ .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
+ .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
+ .SPACE $TEXT$
+ .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
+ .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
+ .IMPORT $global$,DATA
+ .IMPORT $$dyncall,MILLICODE
+; gcc_compiled.:
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+
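+; The 32x32->64 multiplies below are built from four xmpyu FPU
+; multiplies on 16-bit halves of the operands (that is how gcc emitted
+; them; xmpyu itself multiplies two 32-bit FP-register operands).  In
+; C sketch form, with aH:aL and bH:bL the 16-bit halves of a and b:
+;
+;	a*b = ((aH*bH) << 32) + ((aH*bL + aL*bH) << 16) + aL*bL
+;
+; and the extru/zdep/addl sequences that follow reassemble and
+; carry-propagate those partial products.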
+ .align 4
+ .EXPORT bn_mul_add_words,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR,RTNVAL=GR
+bn_mul_add_words
+ .PROC
+ .CALLINFO FRAME=0,CALLS,SAVE_RP
+ .ENTRY
+ stw %r2,-20(0,%r30)
+ ldi 0,%r28
+ extru %r23,31,16,%r2
+ stw %r2,-16(0,%r30)
+ extru %r23,15,16,%r23
+ ldil L'65536,%r31
+ fldws -16(0,%r30),%fr11R
+ stw %r23,-16(0,%r30)
+ ldo 12(%r25),%r29
+ ldo 12(%r26),%r23
+ fldws -16(0,%r30),%fr11L
+L$0002
+ ldw 0(0,%r25),%r19
+ extru %r19,31,16,%r20
+ stw %r20,-16(0,%r30)
+ extru %r19,15,16,%r19
+ fldws -16(0,%r30),%fr22L
+ stw %r19,-16(0,%r30)
+ xmpyu %fr22L,%fr11R,%fr8
+ fldws -16(0,%r30),%fr22L
+ fstws %fr8R,-16(0,%r30)
+ xmpyu %fr11R,%fr22L,%fr10
+ ldw -16(0,%r30),%r2
+ stw %r20,-16(0,%r30)
+ xmpyu %fr22L,%fr11L,%fr9
+ fldws -16(0,%r30),%fr22L
+ fstws %fr10R,-16(0,%r30)
+ copy %r2,%r22
+ ldw -16(0,%r30),%r2
+ fstws %fr9R,-16(0,%r30)
+ xmpyu %fr11L,%fr22L,%fr8
+ copy %r2,%r19
+ ldw -16(0,%r30),%r2
+ fstws %fr8R,-16(0,%r30)
+ copy %r2,%r20
+ ldw -16(0,%r30),%r2
+ addl %r2,%r19,%r21
+ comclr,<<= %r19,%r21,0
+ addl %r20,%r31,%r20
+L$0005
+ extru %r21,15,16,%r19
+ addl %r20,%r19,%r20
+ zdep %r21,15,16,%r19
+ addl %r22,%r19,%r22
+ comclr,<<= %r19,%r22,0
+ addi,tr 1,%r20,%r19
+ copy %r20,%r19
+ addl %r22,%r28,%r20
+ comclr,<<= %r28,%r20,0
+ addi 1,%r19,%r19
+ ldw 0(0,%r26),%r28
+ addl %r20,%r28,%r20
+ comclr,<<= %r28,%r20,0
+ addi,tr 1,%r19,%r28
+ copy %r19,%r28
+ addib,= -1,%r24,L$0003
+ stw %r20,0(0,%r26)
+ ldw -8(0,%r29),%r19
+ extru %r19,31,16,%r20
+ stw %r20,-16(0,%r30)
+ extru %r19,15,16,%r19
+ fldws -16(0,%r30),%fr22L
+ stw %r19,-16(0,%r30)
+ xmpyu %fr22L,%fr11R,%fr8
+ fldws -16(0,%r30),%fr22L
+ fstws %fr8R,-16(0,%r30)
+ xmpyu %fr11R,%fr22L,%fr10
+ ldw -16(0,%r30),%r2
+ stw %r20,-16(0,%r30)
+ xmpyu %fr22L,%fr11L,%fr9
+ fldws -16(0,%r30),%fr22L
+ fstws %fr10R,-16(0,%r30)
+ copy %r2,%r22
+ ldw -16(0,%r30),%r2
+ fstws %fr9R,-16(0,%r30)
+ xmpyu %fr11L,%fr22L,%fr8
+ copy %r2,%r19
+ ldw -16(0,%r30),%r2
+ fstws %fr8R,-16(0,%r30)
+ copy %r2,%r20
+ ldw -16(0,%r30),%r2
+ addl %r2,%r19,%r21
+ comclr,<<= %r19,%r21,0
+ addl %r20,%r31,%r20
+L$0010
+ extru %r21,15,16,%r19
+ addl %r20,%r19,%r20
+ zdep %r21,15,16,%r19
+ addl %r22,%r19,%r22
+ comclr,<<= %r19,%r22,0
+ addi,tr 1,%r20,%r19
+ copy %r20,%r19
+ addl %r22,%r28,%r20
+ comclr,<<= %r28,%r20,0
+ addi 1,%r19,%r19
+ ldw -8(0,%r23),%r28
+ addl %r20,%r28,%r20
+ comclr,<<= %r28,%r20,0
+ addi,tr 1,%r19,%r28
+ copy %r19,%r28
+ addib,= -1,%r24,L$0003
+ stw %r20,-8(0,%r23)
+ ldw -4(0,%r29),%r19
+ extru %r19,31,16,%r20
+ stw %r20,-16(0,%r30)
+ extru %r19,15,16,%r19
+ fldws -16(0,%r30),%fr22L
+ stw %r19,-16(0,%r30)
+ xmpyu %fr22L,%fr11R,%fr8
+ fldws -16(0,%r30),%fr22L
+ fstws %fr8R,-16(0,%r30)
+ xmpyu %fr11R,%fr22L,%fr10
+ ldw -16(0,%r30),%r2
+ stw %r20,-16(0,%r30)
+ xmpyu %fr22L,%fr11L,%fr9
+ fldws -16(0,%r30),%fr22L
+ fstws %fr10R,-16(0,%r30)
+ copy %r2,%r22
+ ldw -16(0,%r30),%r2
+ fstws %fr9R,-16(0,%r30)
+ xmpyu %fr11L,%fr22L,%fr8
+ copy %r2,%r19
+ ldw -16(0,%r30),%r2
+ fstws %fr8R,-16(0,%r30)
+ copy %r2,%r20
+ ldw -16(0,%r30),%r2
+ addl %r2,%r19,%r21
+ comclr,<<= %r19,%r21,0
+ addl %r20,%r31,%r20
+L$0015
+ extru %r21,15,16,%r19
+ addl %r20,%r19,%r20
+ zdep %r21,15,16,%r19
+ addl %r22,%r19,%r22
+ comclr,<<= %r19,%r22,0
+ addi,tr 1,%r20,%r19
+ copy %r20,%r19
+ addl %r22,%r28,%r20
+ comclr,<<= %r28,%r20,0
+ addi 1,%r19,%r19
+ ldw -4(0,%r23),%r28
+ addl %r20,%r28,%r20
+ comclr,<<= %r28,%r20,0
+ addi,tr 1,%r19,%r28
+ copy %r19,%r28
+ addib,= -1,%r24,L$0003
+ stw %r20,-4(0,%r23)
+ ldw 0(0,%r29),%r19
+ extru %r19,31,16,%r20
+ stw %r20,-16(0,%r30)
+ extru %r19,15,16,%r19
+ fldws -16(0,%r30),%fr22L
+ stw %r19,-16(0,%r30)
+ xmpyu %fr22L,%fr11R,%fr8
+ fldws -16(0,%r30),%fr22L
+ fstws %fr8R,-16(0,%r30)
+ xmpyu %fr11R,%fr22L,%fr10
+ ldw -16(0,%r30),%r2
+ stw %r20,-16(0,%r30)
+ xmpyu %fr22L,%fr11L,%fr9
+ fldws -16(0,%r30),%fr22L
+ fstws %fr10R,-16(0,%r30)
+ copy %r2,%r22
+ ldw -16(0,%r30),%r2
+ fstws %fr9R,-16(0,%r30)
+ xmpyu %fr11L,%fr22L,%fr8
+ copy %r2,%r19
+ ldw -16(0,%r30),%r2
+ fstws %fr8R,-16(0,%r30)
+ copy %r2,%r20
+ ldw -16(0,%r30),%r2
+ addl %r2,%r19,%r21
+ comclr,<<= %r19,%r21,0
+ addl %r20,%r31,%r20
+L$0020
+ extru %r21,15,16,%r19
+ addl %r20,%r19,%r20
+ zdep %r21,15,16,%r19
+ addl %r22,%r19,%r22
+ comclr,<<= %r19,%r22,0
+ addi,tr 1,%r20,%r19
+ copy %r20,%r19
+ addl %r22,%r28,%r20
+ comclr,<<= %r28,%r20,0
+ addi 1,%r19,%r19
+ ldw 0(0,%r23),%r28
+ addl %r20,%r28,%r20
+ comclr,<<= %r28,%r20,0
+ addi,tr 1,%r19,%r28
+ copy %r19,%r28
+ addib,= -1,%r24,L$0003
+ stw %r20,0(0,%r23)
+ ldo 16(%r29),%r29
+ ldo 16(%r25),%r25
+ ldo 16(%r23),%r23
+ bl L$0002,0
+ ldo 16(%r26),%r26
+L$0003
+ ldw -20(0,%r30),%r2
+ bv,n 0(%r2)
+ .EXIT
+ .PROCEND
+ .align 4
+ .EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR,RTNVAL=GR
+bn_mul_words
+ .PROC
+ .CALLINFO FRAME=0,CALLS,SAVE_RP
+ .ENTRY
+ stw %r2,-20(0,%r30)
+ ldi 0,%r28
+ extru %r23,31,16,%r2
+ stw %r2,-16(0,%r30)
+ extru %r23,15,16,%r23
+ ldil L'65536,%r31
+ fldws -16(0,%r30),%fr11R
+ stw %r23,-16(0,%r30)
+ ldo 12(%r26),%r29
+ ldo 12(%r25),%r23
+ fldws -16(0,%r30),%fr11L
+L$0026
+ ldw 0(0,%r25),%r19
+ extru %r19,31,16,%r20
+ stw %r20,-16(0,%r30)
+ extru %r19,15,16,%r19
+ fldws -16(0,%r30),%fr22L
+ stw %r19,-16(0,%r30)
+ xmpyu %fr22L,%fr11R,%fr8
+ fldws -16(0,%r30),%fr22L
+ fstws %fr8R,-16(0,%r30)
+ xmpyu %fr11R,%fr22L,%fr10
+ ldw -16(0,%r30),%r2
+ stw %r20,-16(0,%r30)
+ xmpyu %fr22L,%fr11L,%fr9
+ fldws -16(0,%r30),%fr22L
+ fstws %fr10R,-16(0,%r30)
+ copy %r2,%r22
+ ldw -16(0,%r30),%r2
+ fstws %fr9R,-16(0,%r30)
+ xmpyu %fr11L,%fr22L,%fr8
+ copy %r2,%r19
+ ldw -16(0,%r30),%r2
+ fstws %fr8R,-16(0,%r30)
+ copy %r2,%r20
+ ldw -16(0,%r30),%r2
+ addl %r2,%r19,%r21
+ comclr,<<= %r19,%r21,0
+ addl %r20,%r31,%r20
+L$0029
+ extru %r21,15,16,%r19
+ addl %r20,%r19,%r20
+ zdep %r21,15,16,%r19
+ addl %r22,%r19,%r22
+ comclr,<<= %r19,%r22,0
+ addi,tr 1,%r20,%r19
+ copy %r20,%r19
+ addl %r22,%r28,%r20
+ comclr,<<= %r28,%r20,0
+ addi,tr 1,%r19,%r28
+ copy %r19,%r28
+ addib,= -1,%r24,L$0027
+ stw %r20,0(0,%r26)
+ ldw -8(0,%r23),%r19
+ extru %r19,31,16,%r20
+ stw %r20,-16(0,%r30)
+ extru %r19,15,16,%r19
+ fldws -16(0,%r30),%fr22L
+ stw %r19,-16(0,%r30)
+ xmpyu %fr22L,%fr11R,%fr8
+ fldws -16(0,%r30),%fr22L
+ fstws %fr8R,-16(0,%r30)
+ xmpyu %fr11R,%fr22L,%fr10
+ ldw -16(0,%r30),%r2
+ stw %r20,-16(0,%r30)
+ xmpyu %fr22L,%fr11L,%fr9
+ fldws -16(0,%r30),%fr22L
+ fstws %fr10R,-16(0,%r30)
+ copy %r2,%r22
+ ldw -16(0,%r30),%r2
+ fstws %fr9R,-16(0,%r30)
+ xmpyu %fr11L,%fr22L,%fr8
+ copy %r2,%r19
+ ldw -16(0,%r30),%r2
+ fstws %fr8R,-16(0,%r30)
+ copy %r2,%r20
+ ldw -16(0,%r30),%r2
+ addl %r2,%r19,%r21
+ comclr,<<= %r19,%r21,0
+ addl %r20,%r31,%r20
+L$0033
+ extru %r21,15,16,%r19
+ addl %r20,%r19,%r20
+ zdep %r21,15,16,%r19
+ addl %r22,%r19,%r22
+ comclr,<<= %r19,%r22,0
+ addi,tr 1,%r20,%r19
+ copy %r20,%r19
+ addl %r22,%r28,%r20
+ comclr,<<= %r28,%r20,0
+ addi,tr 1,%r19,%r28
+ copy %r19,%r28
+ addib,= -1,%r24,L$0027
+ stw %r20,-8(0,%r29)
+ ldw -4(0,%r23),%r19
+ extru %r19,31,16,%r20
+ stw %r20,-16(0,%r30)
+ extru %r19,15,16,%r19
+ fldws -16(0,%r30),%fr22L
+ stw %r19,-16(0,%r30)
+ xmpyu %fr22L,%fr11R,%fr8
+ fldws -16(0,%r30),%fr22L
+ fstws %fr8R,-16(0,%r30)
+ xmpyu %fr11R,%fr22L,%fr10
+ ldw -16(0,%r30),%r2
+ stw %r20,-16(0,%r30)
+ xmpyu %fr22L,%fr11L,%fr9
+ fldws -16(0,%r30),%fr22L
+ fstws %fr10R,-16(0,%r30)
+ copy %r2,%r22
+ ldw -16(0,%r30),%r2
+ fstws %fr9R,-16(0,%r30)
+ xmpyu %fr11L,%fr22L,%fr8
+ copy %r2,%r19
+ ldw -16(0,%r30),%r2
+ fstws %fr8R,-16(0,%r30)
+ copy %r2,%r20
+ ldw -16(0,%r30),%r2
+ addl %r2,%r19,%r21
+ comclr,<<= %r19,%r21,0
+ addl %r20,%r31,%r20
+L$0037
+ extru %r21,15,16,%r19
+ addl %r20,%r19,%r20
+ zdep %r21,15,16,%r19
+ addl %r22,%r19,%r22
+ comclr,<<= %r19,%r22,0
+ addi,tr 1,%r20,%r19
+ copy %r20,%r19
+ addl %r22,%r28,%r20
+ comclr,<<= %r28,%r20,0
+ addi,tr 1,%r19,%r28
+ copy %r19,%r28
+ addib,= -1,%r24,L$0027
+ stw %r20,-4(0,%r29)
+ ldw 0(0,%r23),%r19
+ extru %r19,31,16,%r20
+ stw %r20,-16(0,%r30)
+ extru %r19,15,16,%r19
+ fldws -16(0,%r30),%fr22L
+ stw %r19,-16(0,%r30)
+ xmpyu %fr22L,%fr11R,%fr8
+ fldws -16(0,%r30),%fr22L
+ fstws %fr8R,-16(0,%r30)
+ xmpyu %fr11R,%fr22L,%fr10
+ ldw -16(0,%r30),%r2
+ stw %r20,-16(0,%r30)
+ xmpyu %fr22L,%fr11L,%fr9
+ fldws -16(0,%r30),%fr22L
+ fstws %fr10R,-16(0,%r30)
+ copy %r2,%r22
+ ldw -16(0,%r30),%r2
+ fstws %fr9R,-16(0,%r30)
+ xmpyu %fr11L,%fr22L,%fr8
+ copy %r2,%r19
+ ldw -16(0,%r30),%r2
+ fstws %fr8R,-16(0,%r30)
+ copy %r2,%r20
+ ldw -16(0,%r30),%r2
+ addl %r2,%r19,%r21
+ comclr,<<= %r19,%r21,0
+ addl %r20,%r31,%r20
+L$0041
+ extru %r21,15,16,%r19
+ addl %r20,%r19,%r20
+ zdep %r21,15,16,%r19
+ addl %r22,%r19,%r22
+ comclr,<<= %r19,%r22,0
+ addi,tr 1,%r20,%r19
+ copy %r20,%r19
+ addl %r22,%r28,%r20
+ comclr,<<= %r28,%r20,0
+ addi,tr 1,%r19,%r28
+ copy %r19,%r28
+ addib,= -1,%r24,L$0027
+ stw %r20,0(0,%r29)
+ ldo 16(%r23),%r23
+ ldo 16(%r25),%r25
+ ldo 16(%r29),%r29
+ bl L$0026,0
+ ldo 16(%r26),%r26
+L$0027
+ ldw -20(0,%r30),%r2
+ bv,n 0(%r2)
+ .EXIT
+ .PROCEND
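+; bn_sqr_words (sketch): each 32-bit ap[i] is squared into a 64-bit
+; result stored as rp[2*i] (low) and rp[2*i+1] (high), again built
+; from 16x16-bit xmpyu partial products.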
+ .align 4
+ .EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR
+bn_sqr_words
+ .PROC
+ .CALLINFO FRAME=0,NO_CALLS
+ .ENTRY
+ ldo 28(%r26),%r23
+ ldo 12(%r25),%r28
+L$0046
+ ldw 0(0,%r25),%r21
+ extru %r21,31,16,%r22
+ stw %r22,-16(0,%r30)
+ extru %r21,15,16,%r21
+ fldws -16(0,%r30),%fr10L
+ stw %r21,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ xmpyu %fr10L,%fr10R,%fr8
+ fstws %fr8R,-16(0,%r30)
+ ldw -16(0,%r30),%r29
+ stw %r22,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ stw %r21,-16(0,%r30)
+ copy %r29,%r19
+ xmpyu %fr10L,%fr10R,%fr8
+ fldws -16(0,%r30),%fr10L
+ stw %r21,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ fstws %fr8R,-16(0,%r30)
+ extru %r19,16,17,%r20
+ zdep %r19,14,15,%r19
+ ldw -16(0,%r30),%r29
+ xmpyu %fr10L,%fr10R,%fr9
+ addl %r29,%r19,%r22
+ stw %r22,0(0,%r26)
+ fstws %fr9R,-16(0,%r30)
+ ldw -16(0,%r30),%r29
+ addl %r29,%r20,%r21
+ comclr,<<= %r19,%r22,0
+ addi 1,%r21,%r21
+ addib,= -1,%r24,L$0057
+ stw %r21,-24(0,%r23)
+ ldw -8(0,%r28),%r21
+ extru %r21,31,16,%r22
+ stw %r22,-16(0,%r30)
+ extru %r21,15,16,%r21
+ fldws -16(0,%r30),%fr10L
+ stw %r21,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ xmpyu %fr10L,%fr10R,%fr8
+ fstws %fr8R,-16(0,%r30)
+ ldw -16(0,%r30),%r29
+ stw %r22,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ stw %r21,-16(0,%r30)
+ copy %r29,%r19
+ xmpyu %fr10L,%fr10R,%fr8
+ fldws -16(0,%r30),%fr10L
+ stw %r21,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ fstws %fr8R,-16(0,%r30)
+ extru %r19,16,17,%r20
+ zdep %r19,14,15,%r19
+ ldw -16(0,%r30),%r29
+ xmpyu %fr10L,%fr10R,%fr9
+ addl %r29,%r19,%r22
+ stw %r22,-20(0,%r23)
+ fstws %fr9R,-16(0,%r30)
+ ldw -16(0,%r30),%r29
+ addl %r29,%r20,%r21
+ comclr,<<= %r19,%r22,0
+ addi 1,%r21,%r21
+ addib,= -1,%r24,L$0057
+ stw %r21,-16(0,%r23)
+ ldw -4(0,%r28),%r21
+ extru %r21,31,16,%r22
+ stw %r22,-16(0,%r30)
+ extru %r21,15,16,%r21
+ fldws -16(0,%r30),%fr10L
+ stw %r21,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ xmpyu %fr10L,%fr10R,%fr8
+ fstws %fr8R,-16(0,%r30)
+ ldw -16(0,%r30),%r29
+ stw %r22,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ stw %r21,-16(0,%r30)
+ copy %r29,%r19
+ xmpyu %fr10L,%fr10R,%fr8
+ fldws -16(0,%r30),%fr10L
+ stw %r21,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ fstws %fr8R,-16(0,%r30)
+ extru %r19,16,17,%r20
+ zdep %r19,14,15,%r19
+ ldw -16(0,%r30),%r29
+ xmpyu %fr10L,%fr10R,%fr9
+ addl %r29,%r19,%r22
+ stw %r22,-12(0,%r23)
+ fstws %fr9R,-16(0,%r30)
+ ldw -16(0,%r30),%r29
+ addl %r29,%r20,%r21
+ comclr,<<= %r19,%r22,0
+ addi 1,%r21,%r21
+ addib,= -1,%r24,L$0057
+ stw %r21,-8(0,%r23)
+ ldw 0(0,%r28),%r21
+ extru %r21,31,16,%r22
+ stw %r22,-16(0,%r30)
+ extru %r21,15,16,%r21
+ fldws -16(0,%r30),%fr10L
+ stw %r21,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ xmpyu %fr10L,%fr10R,%fr8
+ fstws %fr8R,-16(0,%r30)
+ ldw -16(0,%r30),%r29
+ stw %r22,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ stw %r21,-16(0,%r30)
+ copy %r29,%r19
+ xmpyu %fr10L,%fr10R,%fr8
+ fldws -16(0,%r30),%fr10L
+ stw %r21,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ fstws %fr8R,-16(0,%r30)
+ extru %r19,16,17,%r20
+ zdep %r19,14,15,%r19
+ ldw -16(0,%r30),%r29
+ xmpyu %fr10L,%fr10R,%fr9
+ addl %r29,%r19,%r22
+ stw %r22,-4(0,%r23)
+ fstws %fr9R,-16(0,%r30)
+ ldw -16(0,%r30),%r29
+ addl %r29,%r20,%r21
+ comclr,<<= %r19,%r22,0
+ addi 1,%r21,%r21
+ addib,= -1,%r24,L$0057
+ stw %r21,0(0,%r23)
+ ldo 16(%r28),%r28
+ ldo 16(%r25),%r25
+ ldo 32(%r23),%r23
+ bl L$0046,0
+ ldo 32(%r26),%r26
+L$0057
+ bv,n 0(%r2)
+ .EXIT
+ .PROCEND
+ .IMPORT BN_num_bits_word,CODE
+ .IMPORT fprintf,CODE
+ .IMPORT __iob,DATA
+ .SPACE $TEXT$
+ .SUBSPA $LIT$
+
+ .align 4
+L$C0000
+ .STRING "Division would overflow\x0a\x00"
+ .IMPORT abort,CODE
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+
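+; bn_div64 (sketch): returns the 32-bit quotient of the 64-bit value
+; (h:l) divided by d, developing 16 quotient bits per pass of the
+; loop below; it calls BN_num_bits_word and aborts if the division
+; would overflow.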
+ .align 4
+ .EXPORT bn_div64,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR,RTNVAL=GR
+bn_div64
+ .PROC
+ .CALLINFO FRAME=128,CALLS,SAVE_RP,ENTRY_GR=8
+ .ENTRY
+ stw %r2,-20(0,%r30)
+ stwm %r8,128(0,%r30)
+ stw %r7,-124(0,%r30)
+ stw %r4,-112(0,%r30)
+ stw %r3,-108(0,%r30)
+ copy %r26,%r3
+ copy %r25,%r4
+ stw %r6,-120(0,%r30)
+ ldi 0,%r7
+ stw %r5,-116(0,%r30)
+ movb,<> %r24,%r5,L$0059
+ ldi 2,%r6
+ bl L$0076,0
+ ldi -1,%r28
+L$0059
+ .CALL ARGW0=GR
+ bl BN_num_bits_word,%r2
+ copy %r5,%r26
+ ldi 32,%r19
+ comb,= %r19,%r28,L$0060
+ subi 31,%r28,%r19
+ mtsar %r19
+ zvdepi 1,32,%r19
+ comb,>>= %r19,%r3,L$0060
+ addil LR'__iob-$global$+32,%r27
+ ldo RR'__iob-$global$+32(%r1),%r26
+ ldil LR'L$C0000,%r25
+ .CALL ARGW0=GR,ARGW1=GR
+ bl fprintf,%r2
+ ldo RR'L$C0000(%r25),%r25
+ .CALL
+ bl abort,%r2
+ nop
+L$0060
+ comb,>> %r5,%r3,L$0061
+ subi 32,%r28,%r28
+ sub %r3,%r5,%r3
+L$0061
+ comib,= 0,%r28,L$0062
+ subi 31,%r28,%r19
+ mtsar %r19
+ zvdep %r5,32,%r5
+ zvdep %r3,32,%r21
+ subi 32,%r28,%r20
+ mtsar %r20
+ vshd 0,%r4,%r20
+ or %r21,%r20,%r3
+ mtsar %r19
+ zvdep %r4,32,%r4
+L$0062
+ extru %r5,15,16,%r23
+ extru %r5,31,16,%r28
+L$0063
+ extru %r3,15,16,%r19
+ comb,<> %r23,%r19,L$0066
+ copy %r3,%r26
+ bl L$0067,0
+ zdepi -1,31,16,%r29
+L$0066
+ .IMPORT $$divU,MILLICODE
+ bl $$divU,%r31
+ copy %r23,%r25
+L$0067
+ stw %r29,-16(0,%r30)
+ fldws -16(0,%r30),%fr10L
+ stw %r28,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ stw %r23,-16(0,%r30)
+ xmpyu %fr10L,%fr10R,%fr8
+ fldws -16(0,%r30),%fr10R
+ fstws %fr8R,-16(0,%r30)
+ xmpyu %fr10L,%fr10R,%fr9
+ ldw -16(0,%r30),%r8
+ fstws %fr9R,-16(0,%r30)
+ copy %r8,%r22
+ ldw -16(0,%r30),%r8
+ extru %r4,15,16,%r24
+ copy %r8,%r21
+L$0068
+ sub %r3,%r21,%r20
+ copy %r20,%r19
+ depi 0,31,16,%r19
+ comib,<> 0,%r19,L$0069
+ zdep %r20,15,16,%r19
+ addl %r19,%r24,%r19
+ comb,>>= %r19,%r22,L$0069
+ sub %r22,%r28,%r22
+ sub %r21,%r23,%r21
+ bl L$0068,0
+ ldo -1(%r29),%r29
+L$0069
+ stw %r29,-16(0,%r30)
+ fldws -16(0,%r30),%fr10L
+ stw %r28,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ xmpyu %fr10L,%fr10R,%fr8
+ fstws %fr8R,-16(0,%r30)
+ ldw -16(0,%r30),%r8
+ stw %r23,-16(0,%r30)
+ fldws -16(0,%r30),%fr10R
+ copy %r8,%r19
+ xmpyu %fr10L,%fr10R,%fr8
+ fstws %fr8R,-16(0,%r30)
+ extru %r19,15,16,%r20
+ ldw -16(0,%r30),%r8
+ zdep %r19,15,16,%r19
+ addl %r8,%r20,%r20
+ comclr,<<= %r19,%r4,0
+ addi 1,%r20,%r20
+ comb,<<= %r20,%r3,L$0074
+ sub %r4,%r19,%r4
+ addl %r3,%r5,%r3
+ ldo -1(%r29),%r29
+L$0074
+ addib,= -1,%r6,L$0064
+ sub %r3,%r20,%r3
+ zdep %r29,15,16,%r7
+ shd %r3,%r4,16,%r3
+ bl L$0063,0
+ zdep %r4,15,16,%r4
+L$0064
+ or %r7,%r29,%r28
+L$0076
+ ldw -148(0,%r30),%r2
+ ldw -124(0,%r30),%r7
+ ldw -120(0,%r30),%r6
+ ldw -116(0,%r30),%r5
+ ldw -112(0,%r30),%r4
+ ldw -108(0,%r30),%r3
+ bv 0(%r2)
+ ldwm -128(0,%r30),%r8
+ .EXIT
+ .PROCEND
diff --git a/crypto/bn/asm/pa-risc2.s b/crypto/bn/asm/pa-risc2.s
new file mode 100644
index 0000000..f3b1629
--- /dev/null
+++ b/crypto/bn/asm/pa-risc2.s
@@ -0,0 +1,1618 @@
+;
+; PA-RISC 2.0 implementation of bn_asm code, based on the
+; 64-bit version of the code. This code is effectively the
+; same as the 64-bit version, except that the register model
+; is slightly different: all values must be 32-bit between
+; function calls. Thus 64-bit return values come back in
+; %ret0 and %ret1, rather than just %ret0 as in the 64-bit
+; version.
+;
+;
+; This code is approximately 2x faster than the C version
+; for RSA/DSA.
+;
+; See http://devresource.hp.com/ for more details on the PA-RISC
+; architecture. Also see the book "PA-RISC 2.0 Architecture"
+; by Gerry Kane for information on the instruction set architecture.
+;
+; Code written by Chris Ruemmler (with some help from the HP C
+; compiler).
+;
+; The code compiles with HP's assembler
+;
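+; (In C terms, a sketch of that convention: a 64-bit result t comes
+; back with %ret0 = t >> 32 and %ret1 = the low 32 bits of t; the
+; EXTRD,U of %ret1 into %ret0 at each routine's exit does the split.)
+;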
+
+ .level 2.0N
+ .space $TEXT$
+ .subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
+
+;
+; Global Register definitions used for the routines.
+;
+; Some information about HP's runtime architecture for 32-bits.
+;
+; "Caller save" means the calling function must save the register
+; if it wants the register to be preserved.
+; "Callee save" means if a function uses the register, it must save
+; the value before using it.
+;
+; For the floating point registers
+;
+; "caller save" registers: fr4-fr11, fr22-fr31
+; "callee save" registers: fr12-fr21
+; "special" registers: fr0-fr3 (status and exception registers)
+;
+; For the integer registers
+; value zero : r0
+; "caller save" registers: r1,r19-r26
+; "callee save" registers: r3-r18
+; return register : r2 (rp)
+; return values         : r28,r29 (ret0,ret1)
+; Stack pointer         : r30 (sp)
+; millicode return ptr  : r31 (also a caller save register)
+
+
+;
+; Arguments to the routines
+;
+r_ptr .reg %r26
+a_ptr .reg %r25
+b_ptr .reg %r24
+num .reg %r24
+n .reg %r23
+
+;
+; Note that the "w" argument for bn_mul_add_words and bn_mul_words
+; is passed on the stack at a delta of -56 from the top of stack
+; as the routine is entered.
+;
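+; (After the prologue bumps %sp by 128, that same slot is addressed
+; as -56-128 = -184(%sp), which is where the FLDDs below load fw.)
+;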
+
+;
+; Globals used in some routines
+;
+
+top_overflow .reg %r23
+high_mask .reg %r22 ; value 0xffffffff80000000L
+
+
+;------------------------------------------------------------------------------
+;
+; bn_mul_add_words
+;
+;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
+; int num, BN_ULONG w)
+;
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg3 = num
+; -56(sp) = w
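+;
+; In C terms this computes (a sketch; BN_ULONG is 64 bits here,
+; mirroring the generic bn_asm.c routine):
+;
+;   BN_ULONG c = 0;
+;   for (i = 0; i < num; i++) {
+;       t = (128-bit)a[i]*w + r[i] + c;
+;       r[i] = low 64 bits of t;   c = high 64 bits of t;
+;   }
+;   return c;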
+;
+; Local register definitions
+;
+
+fm1 .reg %fr22
+fm .reg %fr23
+ht_temp .reg %fr24
+ht_temp_1 .reg %fr25
+lt_temp .reg %fr26
+lt_temp_1 .reg %fr27
+fm1_1 .reg %fr28
+fm_1 .reg %fr29
+
+fw_h .reg %fr7L
+fw_l .reg %fr7R
+fw .reg %fr7
+
+fht_0 .reg %fr8L
+flt_0 .reg %fr8R
+t_float_0 .reg %fr8
+
+fht_1 .reg %fr9L
+flt_1 .reg %fr9R
+t_float_1 .reg %fr9
+
+tmp_0 .reg %r31
+tmp_1 .reg %r21
+m_0 .reg %r20
+m_1 .reg %r19
+ht_0 .reg %r1
+ht_1 .reg %r3
+lt_0 .reg %r4
+lt_1 .reg %r5
+m1_0 .reg %r6
+m1_1 .reg %r7
+rp_val .reg %r8
+rp_val_1 .reg %r9
+
+bn_mul_add_words
+ .export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
+ .proc
+ .callinfo frame=128
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP ; Needed to make the loop 16-byte aligned
+	NOP                         ; Needed to make the loop 16-byte aligned
+
+ STD %r5,16(%sp) ; save r5
+ NOP
+ STD %r6,24(%sp) ; save r6
+ STD %r7,32(%sp) ; save r7
+
+ STD %r8,40(%sp) ; save r8
+ STD %r9,48(%sp) ; save r9
+ COPY %r0,%ret1 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+
+ CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+ ; The loop is unrolled twice, so if there is only 1 number
+ ; then go straight to the cleanup code.
+ ;
+ CMPIB,= 1,num,bn_mul_add_words_single_top
+ FLDD -184(%sp),fw ; (-56-128) load up w into fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+	; two 32-bit multiplies can be issued per cycle.
+ ;
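+	; Each 64-bit product a[i]*w is assembled from four 32-bit
+	; XMPYU results; roughly, with ht/lt the halves of a[i] and
+	; fw_h/fw_l the halves of w (a sketch):
+	;
+	;   m1 = ht*fw_l;  m = lt*fw_h;     cross products
+	;   tmp = m + m1;                   overflow adds 1<<32 to ht
+	;   hi  = ht*fw_h + (tmp >> 32);
+	;   lo  = lt*fw_l + (tmp << 32);    carry out of lo bumps hi
+	;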
+bn_mul_add_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ LDD 0(r_ptr),rp_val ; rp[0]
+ LDD 8(r_ptr),rp_val_1 ; rp[1]
+
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
+
+ XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m[0]
+ FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
+
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+ XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
+
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+	XMPYU	flt_1,fw_l,lt_temp_1	; lt_temp_1 = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
+
+ LDD -8(%sp),m_0 ; m[0]
+ LDD -40(%sp),m_1 ; m[1]
+ LDD -16(%sp),m1_0 ; m1[0]
+ LDD -48(%sp),m1_1 ; m1[1]
+
+ LDD -24(%sp),ht_0 ; ht[0]
+ LDD -56(%sp),ht_1 ; ht[1]
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
+
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+ CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0])
+ ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
+
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
+ ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
+ EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
+
+ EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
+ ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32)
+
+ ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+
+ ADD %ret1,lt_0,lt_0 ; lt[0] = lt[0] + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0]
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+
+ LDO -2(num),num ; num = num - 2;
+ ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c);
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+
+ ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1]
+ ADD,DC ht_1,%r0,%ret1 ; ht[1]++
+ LDO 16(a_ptr),a_ptr ; a_ptr += 2
+
+ STD lt_1,8(r_ptr) ; rp[1] = lt[1]
+ CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
+ LDO 16(r_ptr),r_ptr ; r_ptr += 2
+
+ CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_add_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ LDD 0(r_ptr),rp_val ; rp[0]
+ LDO 8(a_ptr),a_ptr ; a_ptr++
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0 ; m1 = temp1
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD %ret1,tmp_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
+ ADD,DC ht_0,%r0,%ret1 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_add_words_exit
+ .EXIT
+
+ EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
+ LDD -80(%sp),%r9 ; restore r9
+ LDD -88(%sp),%r8 ; restore r8
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+;
+; arg0 = rp
+; arg1 = ap
+; arg3 = num
+; w on stack at -56(sp)
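+;
+; In C terms (sketch): c = 0; for each i, t = (128-bit)ap[i]*w + c,
+; rp[i] = low 64 bits of t, c = high 64 bits; the final c is returned.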
+
+bn_mul_words
+ .proc
+ .callinfo frame=128
+ .entry
+ .EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP
+ STD %r5,16(%sp) ; save r5
+
+ STD %r6,24(%sp) ; save r6
+ STD %r7,32(%sp) ; save r7
+ COPY %r0,%ret1 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+
+ CMPIB,>= 0,num,bn_mul_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+	; See if there is only 1 word to do; if so, just do the cleanup
+ ;
+ CMPIB,= 1,num,bn_mul_words_single_top
+ FLDD -184(%sp),fw ; (-56-128) load up w into fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+	; two 32-bit multiplies can be issued per cycle.
+ ;
+bn_mul_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = ht*fw_l
+
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m = lt*fw_h
+
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ FSTD fm_1,-40(%sp) ; -40(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+	XMPYU	fht_1,fw_h,ht_temp_1	; ht_temp_1 = ht*fw_h
+
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
+
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt
+ LDD -8(%sp),m_0
+ LDD -40(%sp),m_1
+
+ LDD -16(%sp),m1_0
+ LDD -48(%sp),m1_1
+ LDD -24(%sp),ht_0
+ LDD -56(%sp),ht_1
+
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m + m1;
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m + m1;
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+
+ CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m < m1)
+ ADD,L ht_1,top_overflow,ht_1 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+ EXTRD,U tmp_1,31,32,m_1 ; m>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht+= (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD lt_1,m1_1,lt_1 ; lt = lt+m1;
+ ADD,DC ht_1,%r0,ht_1 ; ht++
+ ADD %ret1,lt_0,lt_0 ; lt = lt + c (ret1);
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD ht_0,lt_1,lt_1 ; lt = lt + c (ht_0)
+ ADD,DC ht_1,%r0,ht_1 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+ STD lt_1,8(r_ptr) ; rp[1] = lt
+
+ COPY ht_1,%ret1 ; carry = ht
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_mul_words_unroll2
+	LDO	16(r_ptr),r_ptr		; rp += 2
+
+ CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt= lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD %ret1,lt_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ COPY ht_0,%ret1 ; copy carry
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_words_exit
+ .EXIT
+ EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND
+
+;----------------------------------------------------------------------------
+;
+;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+;
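+; In C terms (sketch): each 128-bit square ap[i]*ap[i] is stored as
+; rp[2*i] = low 64 bits and rp[2*i+1] = high 64 bits.
+;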
+
+bn_sqr_words
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP
+ STD %r5,16(%sp) ; save r5
+
+ CMPIB,>= 0,num,bn_sqr_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+	; If only 1, then go straight to the cleanup
+ ;
+ CMPIB,= 1,num,bn_sqr_words_single_top
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
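+	; The cross term of (ht<<32 + lt)^2 is 2*ht*lt<<32, so m = ht*lt
+	; is shifted left by 33 (the "m << 32+1" below), and the 33 bits
+	; shifted off the top re-enter the high word via high_mask as m>>31.
+	;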
+
+bn_sqr_words_unroll2
+ FLDD 0(a_ptr),t_float_0 ; a[0]
+ FLDD 8(a_ptr),t_float_1 ; a[1]
+ XMPYU fht_0,flt_0,fm ; m[0]
+ XMPYU fht_1,flt_1,fm_1 ; m[1]
+
+ FSTD fm,-24(%sp) ; store m[0]
+ FSTD fm_1,-56(%sp) ; store m[1]
+ XMPYU flt_0,flt_0,lt_temp ; lt[0]
+ XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
+
+ FSTD lt_temp,-16(%sp) ; store lt[0]
+ FSTD lt_temp_1,-48(%sp) ; store lt[1]
+ XMPYU fht_0,fht_0,ht_temp ; ht[0]
+ XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
+
+ FSTD ht_temp,-8(%sp) ; store ht[0]
+ FSTD ht_temp_1,-40(%sp) ; store ht[1]
+ LDD -24(%sp),m_0
+ LDD -56(%sp),m_1
+
+ AND m_0,high_mask,tmp_0 ; m[0] & Mask
+ AND m_1,high_mask,tmp_1 ; m[1] & Mask
+ DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
+ DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
+
+ LDD -16(%sp),lt_0
+ LDD -48(%sp),lt_1
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
+ EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
+
+ LDD -8(%sp),ht_0
+ LDD -40(%sp),ht_1
+ ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
+ ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
+
+ ADD lt_0,m_0,lt_0 ; lt = lt+m
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+	STD	ht_0,8(r_ptr)		; rp[1] = ht[0]
+
+ ADD lt_1,m_1,lt_1 ; lt = lt+m
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_1,16(r_ptr) ; rp[2] = lt[1]
+ STD ht_1,24(r_ptr) ; rp[3] = ht[1]
+
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_sqr_words_unroll2
+ LDO 32(r_ptr),r_ptr ; rp += 4
+
+ CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_sqr_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,flt_0,fm ; m
+ FSTD fm,-24(%sp) ; store m
+
+ XMPYU flt_0,flt_0,lt_temp ; lt
+ FSTD lt_temp,-16(%sp) ; store lt
+
+ XMPYU fht_0,fht_0,ht_temp ; ht
+ FSTD ht_temp,-8(%sp) ; store ht
+
+ LDD -24(%sp),m_0 ; load m
+ AND m_0,high_mask,tmp_0 ; m & Mask
+ DEPD,Z m_0,30,31,m_0 ; m << 32+1
+ LDD -16(%sp),lt_0 ; lt
+
+ LDD -8(%sp),ht_0 ; ht
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1
+ ADD m_0,lt_0,lt_0 ; lt = lt+m
+ ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+ STD ht_0,8(r_ptr) ; rp[1] = ht
+
+bn_sqr_words_exit
+ .EXIT
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = bp
+; arg3 = n
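+;
+; In C terms (sketch): a running carry c starts at 0; for each i,
+; r[i] = a[i] + b[i] + c, and the final carry (0 or 1) is returned.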
+
+t .reg %r22
+b .reg %r21
+l .reg %r20
+
+bn_add_words
+ .proc
+ .entry
+ .callinfo
+ .EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ CMPIB,>= 0,n,bn_add_words_exit
+ COPY %r0,%ret1 ; return 0 by default
+
+ ;
+	; If there are 2 or more numbers, do the loop
+ ;
+ CMPIB,= 1,n,bn_add_words_single_top
+ NOP
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+bn_add_words_unroll2
+ LDD 0(a_ptr),t
+ LDD 0(b_ptr),b
+ ADD t,%ret1,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret1 ; set c to carry
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret1,%r0,%ret1 ; c+= carry
+ STD l,0(r_ptr)
+
+ LDD 8(a_ptr),t
+ LDD 8(b_ptr),b
+ ADD t,%ret1,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret1 ; set c to carry
+	ADD	t,b,l			; l = t + b[1]
+ ADD,DC %ret1,%r0,%ret1 ; c+= carry
+ STD l,8(r_ptr)
+
+ LDO -2(n),n
+ LDO 16(a_ptr),a_ptr
+ LDO 16(b_ptr),b_ptr
+
+ CMPIB,<= 2,n,bn_add_words_unroll2
+ LDO 16(r_ptr),r_ptr
+
+ CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
+
+bn_add_words_single_top
+ LDD 0(a_ptr),t
+ LDD 0(b_ptr),b
+
+ ADD t,%ret1,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret1 ; set c to carry (could use CMPCLR??)
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret1,%r0,%ret1 ; c+= carry
+ STD l,0(r_ptr)
+
+bn_add_words_exit
+ .EXIT
+ BVE (%rp)
+ EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = bp
+; arg3 = n
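+;
+; In C terms (sketch): a running borrow c starts at 0; for each i,
+; r[i] = a[i] - b[i] - c, and the final borrow (0 or 1) is returned.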
+
+t1 .reg %r22
+t2 .reg %r21
+sub_tmp1 .reg %r20
+sub_tmp2 .reg %r19
+
+
+bn_sub_words
+ .proc
+ .callinfo
+ .EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ CMPIB,>= 0,n,bn_sub_words_exit
+ COPY %r0,%ret1 ; return 0 by default
+
+ ;
+	; If there are 2 or more numbers, do the loop
+ ;
+ CMPIB,= 1,n,bn_sub_words_single_top
+ NOP
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+bn_sub_words_unroll2
+ LDD 0(a_ptr),t1
+ LDD 0(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
+
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret1
+ STD sub_tmp1,0(r_ptr)
+
+ LDD 8(a_ptr),t1
+ LDD 8(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret1
+ STD sub_tmp1,8(r_ptr)
+
+ LDO -2(n),n
+ LDO 16(a_ptr),a_ptr
+ LDO 16(b_ptr),b_ptr
+
+ CMPIB,<= 2,n,bn_sub_words_unroll2
+ LDO 16(r_ptr),r_ptr
+
+ CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
+
+bn_sub_words_single_top
+ LDD 0(a_ptr),t1
+ LDD 0(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret1
+
+ STD sub_tmp1,0(r_ptr)
+
+bn_sub_words_exit
+ .EXIT
+ BVE (%rp)
+ EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;------------------------------------------------------------------------------
+;
+; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
+;
+; arg0 = h
+; arg1 = l
+; arg2 = d
+;
+; This is mainly just output from the HP C compiler.
+;
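+; In C terms (sketch): it returns the 64-bit quotient of the 128-bit
+; value (h:l) divided by d, as used by the generic BN division code.
+;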
+;------------------------------------------------------------------------------
+bn_div_words
+ .PROC
+ .EXPORT bn_div_words,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR,RTNVAL=GR,LONG_RETURN
+ .IMPORT BN_num_bits_word,CODE
+ ;--- not PIC .IMPORT __iob,DATA
+ ;--- not PIC .IMPORT fprintf,CODE
+ .IMPORT abort,CODE
+ .IMPORT $$div2U,MILLICODE
+ .CALLINFO CALLER,FRAME=144,ENTRY_GR=%r9,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
+ .ENTRY
+ STW %r2,-20(%r30) ;offset 0x8ec
+ STW,MA %r3,192(%r30) ;offset 0x8f0
+ STW %r4,-188(%r30) ;offset 0x8f4
+ DEPD %r5,31,32,%r6 ;offset 0x8f8
+ STD %r6,-184(%r30) ;offset 0x8fc
+ DEPD %r7,31,32,%r8 ;offset 0x900
+ STD %r8,-176(%r30) ;offset 0x904
+ STW %r9,-168(%r30) ;offset 0x908
+ LDD -248(%r30),%r3 ;offset 0x90c
+ COPY %r26,%r4 ;offset 0x910
+ COPY %r24,%r5 ;offset 0x914
+ DEPD %r25,31,32,%r4 ;offset 0x918
+ CMPB,*<> %r3,%r0,$0006000C ;offset 0x91c
+ DEPD %r23,31,32,%r5 ;offset 0x920
+ MOVIB,TR -1,%r29,$00060002 ;offset 0x924
+ EXTRD,U %r29,31,32,%r28 ;offset 0x928
+$0006002A
+ LDO -1(%r29),%r29 ;offset 0x92c
+ SUB %r23,%r7,%r23 ;offset 0x930
+$00060024
+ SUB %r4,%r31,%r25 ;offset 0x934
+ AND %r25,%r19,%r26 ;offset 0x938
+ CMPB,*<>,N %r0,%r26,$00060046 ;offset 0x93c
+ DEPD,Z %r25,31,32,%r20 ;offset 0x940
+ OR %r20,%r24,%r21 ;offset 0x944
+ CMPB,*<<,N %r21,%r23,$0006002A ;offset 0x948
+ SUB %r31,%r2,%r31 ;offset 0x94c
+$00060046
+$0006002E
+ DEPD,Z %r23,31,32,%r25 ;offset 0x950
+ EXTRD,U %r23,31,32,%r26 ;offset 0x954
+ AND %r25,%r19,%r24 ;offset 0x958
+ ADD,L %r31,%r26,%r31 ;offset 0x95c
+ CMPCLR,*>>= %r5,%r24,%r0 ;offset 0x960
+ LDO 1(%r31),%r31 ;offset 0x964
+$00060032
+ CMPB,*<<=,N %r31,%r4,$00060036 ;offset 0x968
+ LDO -1(%r29),%r29 ;offset 0x96c
+ ADD,L %r4,%r3,%r4 ;offset 0x970
+$00060036
+ ADDIB,=,N -1,%r8,$D0 ;offset 0x974
+ SUB %r5,%r24,%r28 ;offset 0x978
+$0006003A
+ SUB %r4,%r31,%r24 ;offset 0x97c
+ SHRPD %r24,%r28,32,%r4 ;offset 0x980
+ DEPD,Z %r29,31,32,%r9 ;offset 0x984
+ DEPD,Z %r28,31,32,%r5 ;offset 0x988
+$0006001C
+ EXTRD,U %r4,31,32,%r31 ;offset 0x98c
+ CMPB,*<>,N %r31,%r2,$00060020 ;offset 0x990
+ MOVB,TR %r6,%r29,$D1 ;offset 0x994
+ STD %r29,-152(%r30) ;offset 0x998
+$0006000C
+ EXTRD,U %r3,31,32,%r25 ;offset 0x99c
+ COPY %r3,%r26 ;offset 0x9a0
+ EXTRD,U %r3,31,32,%r9 ;offset 0x9a4
+ EXTRD,U %r4,31,32,%r8 ;offset 0x9a8
+ .CALL ARGW0=GR,ARGW1=GR,RTNVAL=GR ;in=25,26;out=28;
+ B,L BN_num_bits_word,%r2 ;offset 0x9ac
+ EXTRD,U %r5,31,32,%r7 ;offset 0x9b0
+ LDI 64,%r20 ;offset 0x9b4
+ DEPD %r7,31,32,%r5 ;offset 0x9b8
+ DEPD %r8,31,32,%r4 ;offset 0x9bc
+ DEPD %r9,31,32,%r3 ;offset 0x9c0
+ CMPB,= %r28,%r20,$00060012 ;offset 0x9c4
+ COPY %r28,%r24 ;offset 0x9c8
+ MTSARCM %r24 ;offset 0x9cc
+ DEPDI,Z -1,%sar,1,%r19 ;offset 0x9d0
+ CMPB,*>>,N %r4,%r19,$D2 ;offset 0x9d4
+$00060012
+ SUBI 64,%r24,%r31 ;offset 0x9d8
+ CMPCLR,*<< %r4,%r3,%r0 ;offset 0x9dc
+ SUB %r4,%r3,%r4 ;offset 0x9e0
+$00060016
+ CMPB,= %r31,%r0,$0006001A ;offset 0x9e4
+ COPY %r0,%r9 ;offset 0x9e8
+ MTSARCM %r31 ;offset 0x9ec
+ DEPD,Z %r3,%sar,64,%r3 ;offset 0x9f0
+ SUBI 64,%r31,%r26 ;offset 0x9f4
+ MTSAR %r26 ;offset 0x9f8
+ SHRPD %r4,%r5,%sar,%r4 ;offset 0x9fc
+ MTSARCM %r31 ;offset 0xa00
+ DEPD,Z %r5,%sar,64,%r5 ;offset 0xa04
+$0006001A
+ DEPDI,Z -1,31,32,%r19 ;offset 0xa08
+ AND %r3,%r19,%r29 ;offset 0xa0c
+ EXTRD,U %r29,31,32,%r2 ;offset 0xa10
+ DEPDI,Z -1,63,32,%r6 ;offset 0xa14
+ MOVIB,TR 2,%r8,$0006001C ;offset 0xa18
+ EXTRD,U %r3,63,32,%r7 ;offset 0xa1c
+$D2
+ ;--- not PIC ADDIL LR'__iob-$global$,%r27,%r1 ;offset 0xa20
+ ;--- not PIC LDIL LR'C$7,%r21 ;offset 0xa24
+ ;--- not PIC LDO RR'__iob-$global$+32(%r1),%r26 ;offset 0xa28
+ ;--- not PIC .CALL ARGW0=GR,ARGW1=GR,ARGW2=GR,RTNVAL=GR ;in=24,25,26;out=28;
+ ;--- not PIC B,L fprintf,%r2 ;offset 0xa2c
+ ;--- not PIC LDO RR'C$7(%r21),%r25 ;offset 0xa30
+ .CALL ;
+ B,L abort,%r2 ;offset 0xa34
+ NOP ;offset 0xa38
+ B $D3 ;offset 0xa3c
+ LDW -212(%r30),%r2 ;offset 0xa40
+$00060020
+ COPY %r4,%r26 ;offset 0xa44
+ EXTRD,U %r4,31,32,%r25 ;offset 0xa48
+ COPY %r2,%r24 ;offset 0xa4c
+ .CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
+ B,L $$div2U,%r31 ;offset 0xa50
+ EXTRD,U %r2,31,32,%r23 ;offset 0xa54
+ DEPD %r28,31,32,%r29 ;offset 0xa58
+$00060022
+ STD %r29,-152(%r30) ;offset 0xa5c
+$D1
+ AND %r5,%r19,%r24 ;offset 0xa60
+ EXTRD,U %r24,31,32,%r24 ;offset 0xa64
+ STW %r2,-160(%r30) ;offset 0xa68
+ STW %r7,-128(%r30) ;offset 0xa6c
+ FLDD -152(%r30),%fr4 ;offset 0xa70
+ FLDD -152(%r30),%fr7 ;offset 0xa74
+ FLDW -160(%r30),%fr8L ;offset 0xa78
+ FLDW -128(%r30),%fr5L ;offset 0xa7c
+ XMPYU %fr8L,%fr7L,%fr10 ;offset 0xa80
+ FSTD %fr10,-136(%r30) ;offset 0xa84
+ XMPYU %fr8L,%fr7R,%fr22 ;offset 0xa88
+ FSTD %fr22,-144(%r30) ;offset 0xa8c
+ XMPYU %fr5L,%fr4L,%fr11 ;offset 0xa90
+ XMPYU %fr5L,%fr4R,%fr23 ;offset 0xa94
+ FSTD %fr11,-112(%r30) ;offset 0xa98
+ FSTD %fr23,-120(%r30) ;offset 0xa9c
+ LDD -136(%r30),%r28 ;offset 0xaa0
+ DEPD,Z %r28,31,32,%r31 ;offset 0xaa4
+ LDD -144(%r30),%r20 ;offset 0xaa8
+ ADD,L %r20,%r31,%r31 ;offset 0xaac
+ LDD -112(%r30),%r22 ;offset 0xab0
+ DEPD,Z %r22,31,32,%r22 ;offset 0xab4
+ LDD -120(%r30),%r21 ;offset 0xab8
+ B $00060024 ;offset 0xabc
+ ADD,L %r21,%r22,%r23 ;offset 0xac0
+$D0
+ OR %r9,%r29,%r29 ;offset 0xac4
+$00060040
+ EXTRD,U %r29,31,32,%r28 ;offset 0xac8
+$00060002
+$L2
+ LDW -212(%r30),%r2 ;offset 0xacc
+$D3
+ LDW -168(%r30),%r9 ;offset 0xad0
+ LDD -176(%r30),%r8 ;offset 0xad4
+ EXTRD,U %r8,31,32,%r7 ;offset 0xad8
+ LDD -184(%r30),%r6 ;offset 0xadc
+ EXTRD,U %r6,31,32,%r5 ;offset 0xae0
+ LDW -188(%r30),%r4 ;offset 0xae4
+ BVE (%r2) ;offset 0xae8
+ .EXIT
+ LDW,MB -192(%r30),%r3 ;offset 0xaec
+ .PROCEND ;in=23,25;out=28,29;fpin=105,107;
+
+
+
+
+;----------------------------------------------------------------------------
+;
+; Registers to hold 64-bit values to manipulate. The "L" part
+; of the register corresponds to the upper 32-bits, while the "R"
+; part corresponds to the lower 32-bits
+;
+; Note that when using b6 and b7, the code must save them before
+; use because they are callee save registers.
+;
+;
+; Floating point registers used to save the values being
+; manipulated. These don't collide with ftemp1-4 and
+; are all caller save registers.
+;
+a0 .reg %fr22
+a0L .reg %fr22L
+a0R .reg %fr22R
+
+a1 .reg %fr23
+a1L .reg %fr23L
+a1R .reg %fr23R
+
+a2 .reg %fr24
+a2L .reg %fr24L
+a2R .reg %fr24R
+
+a3 .reg %fr25
+a3L .reg %fr25L
+a3R .reg %fr25R
+
+a4 .reg %fr26
+a4L .reg %fr26L
+a4R .reg %fr26R
+
+a5 .reg %fr27
+a5L .reg %fr27L
+a5R .reg %fr27R
+
+a6 .reg %fr28
+a6L .reg %fr28L
+a6R .reg %fr28R
+
+a7 .reg %fr29
+a7L .reg %fr29L
+a7R .reg %fr29R
+
+b0 .reg %fr30
+b0L .reg %fr30L
+b0R .reg %fr30R
+
+b1 .reg %fr31
+b1L .reg %fr31L
+b1R .reg %fr31R
+
+;
+; Temporary floating point variables, these are all caller save
+; registers
+;
+ftemp1 .reg %fr4
+ftemp2 .reg %fr5
+ftemp3 .reg %fr6
+ftemp4 .reg %fr7
+
+;
+; The B set of registers when used.
+;
+
+b2 .reg %fr8
+b2L .reg %fr8L
+b2R .reg %fr8R
+
+b3 .reg %fr9
+b3L .reg %fr9L
+b3R .reg %fr9R
+
+b4 .reg %fr10
+b4L .reg %fr10L
+b4R .reg %fr10R
+
+b5 .reg %fr11
+b5L .reg %fr11L
+b5R .reg %fr11R
+
+b6 .reg %fr12
+b6L .reg %fr12L
+b6R .reg %fr12R
+
+b7 .reg %fr13
+b7L .reg %fr13L
+b7R .reg %fr13R
+
+c1 .reg %r21 ; only reg
+temp1 .reg %r20 ; only reg
+temp2 .reg %r19 ; only reg
+temp3 .reg %r31 ; only reg
+
+m1 .reg %r28
+c2 .reg %r23
+high_one .reg %r1
+ht .reg %r6
+lt .reg %r5
+m .reg %r4
+c3 .reg %r3
+
+SQR_ADD_C .macro A0L,A0R,C1,C2,C3
+ XMPYU A0L,A0R,ftemp1 ; m
+ FSTD ftemp1,-24(%sp) ; store m
+
+ XMPYU A0R,A0R,ftemp2 ; lt
+ FSTD ftemp2,-16(%sp) ; store lt
+
+ XMPYU A0L,A0L,ftemp3 ; ht
+ FSTD ftemp3,-8(%sp) ; store ht
+
+ LDD -24(%sp),m ; load m
+ AND m,high_mask,temp2 ; m & Mask
+ DEPD,Z m,30,31,temp3 ; m << 32+1
+ LDD -16(%sp),lt ; lt
+
+ LDD -8(%sp),ht ; ht
+ EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1
+ ADD temp3,lt,lt ; lt = lt+m
+ ADD,L ht,temp1,ht ; ht += temp1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C1,lt,C1 ; c1=c1+lt
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C2,ht,C2 ; c2=c2+ht
+ ADD,DC C3,%r0,C3 ; c3++
+.endm
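+
+; SQR_ADD_C in C terms (a sketch): add the 128-bit square of one
+; 64-bit word into the running three-word accumulator:
+;
+;   t = (128-bit)A*A;
+;   C1 += low 64 bits of t;    carry ripples into C2
+;   C2 += high 64 bits of t;   carry ripples into C3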
+
+SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
+ XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht
+ FSTD ftemp1,-16(%sp) ;
+ XMPYU A0R,A1L,ftemp2 ; m = bh*lt
+ FSTD ftemp2,-8(%sp) ;
+ XMPYU A0R,A1R,ftemp3 ; lt = bl*lt
+ FSTD ftemp3,-32(%sp)
+ XMPYU A0L,A1L,ftemp4 ; ht = bh*ht
+ FSTD ftemp4,-24(%sp) ;
+
+ LDD -8(%sp),m ; r21 = m
+ LDD -16(%sp),m1 ; r19 = m1
+ ADD,L m,m1,m ; m+m1
+
+ DEPD,Z m,31,32,temp3 ; (m+m1<<32)
+ LDD -24(%sp),ht ; r24 = ht
+
+ CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
+ ADD,L ht,high_one,ht ; ht+=high_one
+
+ EXTRD,U m,31,32,temp1 ; m >> 32
+ LDD -32(%sp),lt ; lt
+ ADD,L ht,temp1,ht ; ht+= m>>32
+ ADD lt,temp3,lt ; lt = lt+m1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD ht,ht,ht ; ht=ht+ht;
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+
+ ADD lt,lt,lt ; lt=lt+lt;
+ ADD,DC ht,%r0,ht ; add in carry (ht++)
+
+ ADD C1,lt,C1 ; c1=c1+lt
+ ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++)
+ LDO 1(C3),C3 ; bump c3 if overflow,nullify otherwise
+
+ ADD C2,ht,C2 ; c2 = c2 + ht
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+.endm
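+
+; SQR_ADD_C2 in C terms (a sketch): add the doubled cross product
+; 2*A*B into the same accumulator; doubling both halves can carry
+; all the way into C3, which the *NUV test above accounts for:
+;
+;   t = 2 * (128-bit)A*B;
+;   C1 += low 64 bits of t;  C2 += high 64 bits of t;  C3 += carries;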
+
+;
+;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+; arg0 = r_ptr
+; arg1 = a_ptr
+;
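+; The schedule below walks the result words r[0]..r[15] in order:
+; for each k, every product a[i]*a[j] with i+j == k is accumulated
+; into the rotating carry trio (c1,c2,c3) before r[k] is stored.
+;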
+
+bn_sqr_comba8
+ .PROC
+ .CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .ENTRY
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ SQR_ADD_C a0L,a0R,c1,c2,c3
+ STD c1,0(r_ptr) ; r[0] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
+ STD c2,8(r_ptr) ; r[1] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
+ STD c3,16(r_ptr) ; r[2] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
+ SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
+ STD c1,24(r_ptr) ; r[3] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
+ SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
+ STD c2,32(r_ptr) ; r[4] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
+ SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
+ STD c3,40(r_ptr) ; r[5] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a3L,a3R,c1,c2,c3
+ SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
+ SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
+ SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
+ STD c1,48(r_ptr) ; r[6] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
+ SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
+ SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
+ STD c2,56(r_ptr) ; r[7] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a4L,a4R,c3,c1,c2
+ SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
+ SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
+ SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
+ STD c3,64(r_ptr) ; r[8] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
+ SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
+ SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
+ STD c1,72(r_ptr) ; r[9] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a5L,a5R,c2,c3,c1
+ SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
+ SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
+ STD c2,80(r_ptr) ; r[10] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
+ SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
+ STD c3,88(r_ptr) ; r[11] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a6L,a6R,c1,c2,c3
+ SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
+ STD c1,96(r_ptr) ; r[12] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
+ STD c2,104(r_ptr) ; r[13] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a7L,a7R,c3,c1,c2
+ STD c3, 112(r_ptr) ; r[14] = c3
+ STD c1, 120(r_ptr) ; r[15] = c1
+
+ .EXIT
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+;-----------------------------------------------------------------------------
+;
+;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+; arg0 = r_ptr
+; arg1 = a_ptr
+;
+
+bn_sqr_comba4
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ SQR_ADD_C a0L,a0R,c1,c2,c3
+
+ STD c1,0(r_ptr) ; r[0] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
+
+ STD c2,8(r_ptr) ; r[1] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
+
+ STD c3,16(r_ptr) ; r[2] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
+ SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
+
+ STD c1,24(r_ptr) ; r[3] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
+
+ STD c2,32(r_ptr) ; r[4] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
+ STD c3,40(r_ptr) ; r[5] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a3L,a3R,c1,c2,c3
+ STD c1,48(r_ptr) ; r[6] = c1;
+ STD c2,56(r_ptr) ; r[7] = c2;
+
+ .EXIT
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+
+;---------------------------------------------------------------------------
+
+MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
+ XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht
+ FSTD ftemp1,-16(%sp) ;
+ XMPYU A0R,B0L,ftemp2 ; m = bh*lt
+ FSTD ftemp2,-8(%sp) ;
+ XMPYU A0R,B0R,ftemp3 ; lt = bl*lt
+ FSTD ftemp3,-32(%sp)
+ XMPYU A0L,B0L,ftemp4 ; ht = bh*ht
+ FSTD ftemp4,-24(%sp) ;
+
+ LDD -8(%sp),m ; r21 = m
+ LDD -16(%sp),m1 ; r19 = m1
+ ADD,L m,m1,m ; m+m1
+
+ DEPD,Z m,31,32,temp3 ; (m+m1<<32)
+ LDD -24(%sp),ht ; r24 = ht
+
+ CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
+ ADD,L ht,high_one,ht ; ht+=high_one
+
+ EXTRD,U m,31,32,temp1 ; m >> 32
+ LDD -32(%sp),lt ; lt
+ ADD,L ht,temp1,ht ; ht+= m>>32
+ ADD lt,temp3,lt ; lt = lt+m1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C1,lt,C1 ; c1=c1+lt
+	ADD,DC	ht,%r0,ht		; ht++ (add in carry)
+
+ ADD C2,ht,C2 ; c2 = c2 + ht
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+.endm
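+
+; MUL_ADD_C in C terms (a sketch): accumulate one 128-bit partial
+; product into the rotating carry trio:
+;
+;   t = (128-bit)A*B;
+;   C1 += low 64 bits of t;    carry ripples into C2
+;   C2 += high 64 bits of t;   carry ripples into C3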
+
+
+;
+;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
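+; Each column r[k] collects the sum over i+j == k of a[i]*b[j]; the
+; carry trio (c1,c2,c3) rotates after every store so the freed
+; register becomes the new top-of-column accumulator.
+;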
+
+bn_mul_comba8
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+	FSTD	%fr12,32(%sp)	; save fr12
+	FSTD	%fr13,40(%sp)	; save fr13
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ FLDD 0(b_ptr),b0
+ FLDD 8(b_ptr),b1
+ FLDD 16(b_ptr),b2
+ FLDD 24(b_ptr),b3
+ FLDD 32(b_ptr),b4
+ FLDD 40(b_ptr),b5
+ FLDD 48(b_ptr),b6
+ FLDD 56(b_ptr),b7
+
+ MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
+ STD c1,0(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
+ STD c2,8(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
+ STD c3,16(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
+ STD c1,24(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
+ STD c2,32(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
+ MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
+ MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
+ STD c3,40(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
+ STD c1,48(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
+ MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
+ STD c2,56(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
+ STD c3,64(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
+ MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
+ MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
+ STD c1,72(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
+ MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
+ MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
+ STD c2,80(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
+ MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
+ STD c3,88(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
+ STD c1,96(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
+ MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
+ STD c2,104(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
+ STD c3,112(r_ptr)
+ STD c1,120(r_ptr)
+
+ .EXIT
+ FLDD -88(%sp),%fr13
+ FLDD -96(%sp),%fr12
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+;-----------------------------------------------------------------------------
+;
+;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
+
+bn_mul_comba4
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+	FSTD	%fr12,32(%sp)	; save fr12
+	FSTD	%fr13,40(%sp)	; save fr13
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+
+ FLDD 0(b_ptr),b0
+ FLDD 8(b_ptr),b1
+ FLDD 16(b_ptr),b2
+ FLDD 24(b_ptr),b3
+
+ MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
+ STD c1,0(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
+ STD c2,8(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
+ STD c3,16(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
+ STD c1,24(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
+ STD c2,32(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
+ STD c3,40(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
+ STD c1,48(r_ptr)
+ STD c2,56(r_ptr)
+
+ .EXIT
+ FLDD -88(%sp),%fr13
+ FLDD -96(%sp),%fr12
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+
+;--- not PIC .SPACE $TEXT$
+;--- not PIC .SUBSPA $CODE$
+;--- not PIC .SPACE $PRIVATE$,SORT=16
+;--- not PIC .IMPORT $global$,DATA
+;--- not PIC .SPACE $TEXT$
+;--- not PIC .SUBSPA $CODE$
+;--- not PIC .SUBSPA $LIT$,ACCESS=0x2c
+;--- not PIC C$7
+;--- not PIC .ALIGN 8
+;--- not PIC .STRINGZ "Division would overflow (%d)\n"
+ .END
diff --git a/crypto/bn/asm/pa-risc2W.s b/crypto/bn/asm/pa-risc2W.s
new file mode 100644
index 0000000..a995457
--- /dev/null
+++ b/crypto/bn/asm/pa-risc2W.s
@@ -0,0 +1,1605 @@
+;
+; PA-RISC 64-bit implementation of bn_asm code
+;
+; This code is approximately 2x faster than the C version
+; for RSA/DSA.
+;
+; See http://devresource.hp.com/ for more details on the PA-RISC
+; architecture. Also see the book "PA-RISC 2.0 Architecture"
+; by Gerry Kane for information on the instruction set architecture.
+;
+; Code written by Chris Ruemmler (with some help from the HP C
+; compiler).
+;
+; The code compiles with HP's assembler
+;
+
+ .level 2.0W
+ .space $TEXT$
+ .subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
+
+;
+; Global Register definitions used for the routines.
+;
+; Some information about HP's runtime architecture for 64-bits.
+;
+; "Caller save" means the calling function must save the register
+; if it wants the register to be preserved.
+; "Callee save" means if a function uses the register, it must save
+; the value before using it.
+;
+; For the floating point registers
+;
+; "caller save" registers: fr4-fr11, fr22-fr31
+; "callee save" registers: fr12-fr21
+; "special" registers: fr0-fr3 (status and exception registers)
+;
+; For the integer registers
+; value zero : r0
+; "caller save" registers: r1,r19-r26
+; "callee save" registers: r3-r18
+; return register : r2 (rp)
+; return value          : r28 (ret0)
+; Stack pointer         : r30 (sp)
+; global data pointer   : r27 (dp)
+; argument pointer      : r29 (ap)
+; millicode return ptr  : r31 (also a caller save register)
+
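+; Unlike the 32-bit build in pa-risc2.s, BN_ULONG here occupies a
+; full 64-bit register, so each routine returns its result in %ret0
+; alone (no EXTRD,U split at the exits).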
+
+;
+; Arguments to the routines
+;
+r_ptr .reg %r26
+a_ptr .reg %r25
+b_ptr .reg %r24
+num .reg %r24
+w .reg %r23
+n .reg %r23
+
+
+;
+; Globals used in some routines
+;
+
+top_overflow .reg %r29
+high_mask .reg %r22 ; value 0xffffffff80000000L
+
+
+;------------------------------------------------------------------------------
+;
+; bn_mul_add_words
+;
+;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
+; int num, BN_ULONG w)
+;
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = num
+; arg3 = w
+;
+; Local register definitions
+;
+
+fm1 .reg %fr22
+fm .reg %fr23
+ht_temp .reg %fr24
+ht_temp_1 .reg %fr25
+lt_temp .reg %fr26
+lt_temp_1 .reg %fr27
+fm1_1 .reg %fr28
+fm_1 .reg %fr29
+
+fw_h .reg %fr7L
+fw_l .reg %fr7R
+fw .reg %fr7
+
+fht_0 .reg %fr8L
+flt_0 .reg %fr8R
+t_float_0 .reg %fr8
+
+fht_1 .reg %fr9L
+flt_1 .reg %fr9R
+t_float_1 .reg %fr9
+
+tmp_0 .reg %r31
+tmp_1 .reg %r21
+m_0 .reg %r20
+m_1 .reg %r19
+ht_0 .reg %r1
+ht_1 .reg %r3
+lt_0 .reg %r4
+lt_1 .reg %r5
+m1_0 .reg %r6
+m1_1 .reg %r7
+rp_val .reg %r8
+rp_val_1 .reg %r9
+
+bn_mul_add_words
+ .export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
+ .proc
+ .callinfo frame=128
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP ; Needed to make the loop 16-byte aligned
+ NOP ; Needed to make the loop 16-byte aligned
+
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+ STD %r7,32(%sp) ; save r7
+ STD %r8,40(%sp) ; save r8
+
+ STD %r9,48(%sp) ; save r9
+ COPY %r0,%ret0 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+ STD w,56(%sp) ; store w on stack
+
+ CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+ ; The loop is unrolled twice, so if there is only 1 number
+ ; then go straight to the cleanup code.
+ ;
+ CMPIB,= 1,num,bn_mul_add_words_single_top
+ FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+	; two 32-bit multiplies can be issued per cycle.
+ ;
+bn_mul_add_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ LDD 0(r_ptr),rp_val ; rp[0]
+ LDD 8(r_ptr),rp_val_1 ; rp[1]
+
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
+
+ XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m[0]
+ FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
+
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+ XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
+
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+	XMPYU	flt_1,fw_l,lt_temp_1	; lt_temp_1 = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
+
+ LDD -8(%sp),m_0 ; m[0]
+ LDD -40(%sp),m_1 ; m[1]
+ LDD -16(%sp),m1_0 ; m1[0]
+ LDD -48(%sp),m1_1 ; m1[1]
+
+ LDD -24(%sp),ht_0 ; ht[0]
+ LDD -56(%sp),ht_1 ; ht[1]
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
+
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+ CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0])
+ ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
+
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
+ ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
+ EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
+
+ EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
+ ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32)
+
+ ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+
+ ADD %ret0,lt_0,lt_0 ; lt[0] = lt[0] + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0]
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+
+ LDO -2(num),num ; num = num - 2;
+ ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c);
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+
+ ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1]
+ ADD,DC ht_1,%r0,%ret0 ; ht[1]++
+ LDO 16(a_ptr),a_ptr ; a_ptr += 2
+
+ STD lt_1,8(r_ptr) ; rp[1] = lt[1]
+ CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
+ LDO 16(r_ptr),r_ptr ; r_ptr += 2
+
+ CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_add_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ LDD 0(r_ptr),rp_val ; rp[0]
+ LDO 8(a_ptr),a_ptr ; a_ptr++
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0 ; m1 = temp1
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD %ret0,tmp_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
+ ADD,DC ht_0,%r0,%ret0 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_add_words_exit
+ .EXIT
+ LDD -80(%sp),%r9 ; restore r9
+ LDD -88(%sp),%r8 ; restore r8
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+; arg3 = w
+
+bn_mul_words
+ .proc
+ .callinfo frame=128
+ .entry
+ .EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ STD %r7,32(%sp) ; save r7
+ COPY %r0,%ret0 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+ STD w,56(%sp) ; w on stack
+
+ CMPIB,>= 0,num,bn_mul_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+ ; See if only 1 word to do, thus just do cleanup
+ ;
+ CMPIB,= 1,num,bn_mul_words_single_top
+ FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+	; two 32-bit multiplies can be issued per cycle.
+ ;
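+	; Sketch of the arithmetic below (illustrative C; the variable
+	; names are ours, not from the source). Each 64-bit word is
+	; split into 32-bit halves so that the four partial products
+	; fit the 32x32->64 XMPYU multiplier:
+	;
+	;   ah = a >> 32;  al = a & 0xffffffff;	/* ht/lt halves */
+	;   wh = w >> 32;  wl = w & 0xffffffff;
+	;   m  = al*wh;  m1 = ah*wl;		/* middle products */
+	;   ht = ah*wh;  lt = al*wl;		/* outer products  */
+	;   m += m1;  if (m < m1) ht += 1UL<<32;
+	;   ht += m >> 32;
+	;   lt += m << 32;  if (lt < (m<<32)) ht++;
+	;   /* (ht,lt) is now the 128-bit product a*w */
+	;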
+bn_mul_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = ht*fw_l
+
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m = lt*fw_h
+
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ FSTD fm_1,-40(%sp) ; -40(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+ XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp = ht*fw_h
+
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
+
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt
+ LDD -8(%sp),m_0
+ LDD -40(%sp),m_1
+
+ LDD -16(%sp),m1_0
+ LDD -48(%sp),m1_1
+ LDD -24(%sp),ht_0
+ LDD -56(%sp),ht_1
+
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m + m1;
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m + m1;
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+
+ CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m < m1)
+ ADD,L ht_1,top_overflow,ht_1 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+ EXTRD,U tmp_1,31,32,m_1 ; m>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht+= (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD lt_1,m1_1,lt_1 ; lt = lt+m1;
+ ADD,DC ht_1,%r0,ht_1 ; ht++
+ ADD %ret0,lt_0,lt_0 ; lt = lt + c (ret0);
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD ht_0,lt_1,lt_1 ; lt = lt + c (ht_0)
+ ADD,DC ht_1,%r0,ht_1 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+ STD lt_1,8(r_ptr) ; rp[1] = lt
+
+ COPY ht_1,%ret0 ; carry = ht
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_mul_words_unroll2
+	LDO	16(r_ptr),r_ptr		; rp += 2
+
+ CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt= lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD %ret0,lt_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ COPY ht_0,%ret0 ; copy carry
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_words_exit
+ .EXIT
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+;
+
+bn_sqr_words
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP
+ STD %r5,16(%sp) ; save r5
+
+ CMPIB,>= 0,num,bn_sqr_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+	; If only 1 word, then go straight to cleanup
+ ;
+ CMPIB,= 1,num,bn_sqr_words_single_top
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+
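+	;
+	; Sketch of the squaring identity used below: with
+	; a = ht*2^32 + lt,
+	;
+	;   a^2 = ht*ht*2^64 + 2*ht*lt*2^32 + lt*lt
+	;
+	; so the middle product m = ht*lt must be doubled and shifted.
+	; The DEPD,Z/EXTRD,U pair below computes exactly that:
+	; m << (32+1) is the low-word contribution of 2*m*2^32, and
+	; (m & high_mask) >> (32-1) is the high-word contribution.
+	;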
+bn_sqr_words_unroll2
+ FLDD 0(a_ptr),t_float_0 ; a[0]
+ FLDD 8(a_ptr),t_float_1 ; a[1]
+ XMPYU fht_0,flt_0,fm ; m[0]
+ XMPYU fht_1,flt_1,fm_1 ; m[1]
+
+ FSTD fm,-24(%sp) ; store m[0]
+ FSTD fm_1,-56(%sp) ; store m[1]
+ XMPYU flt_0,flt_0,lt_temp ; lt[0]
+ XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
+
+ FSTD lt_temp,-16(%sp) ; store lt[0]
+ FSTD lt_temp_1,-48(%sp) ; store lt[1]
+ XMPYU fht_0,fht_0,ht_temp ; ht[0]
+ XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
+
+ FSTD ht_temp,-8(%sp) ; store ht[0]
+ FSTD ht_temp_1,-40(%sp) ; store ht[1]
+ LDD -24(%sp),m_0
+ LDD -56(%sp),m_1
+
+ AND m_0,high_mask,tmp_0 ; m[0] & Mask
+ AND m_1,high_mask,tmp_1 ; m[1] & Mask
+ DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
+ DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
+
+ LDD -16(%sp),lt_0
+ LDD -48(%sp),lt_1
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
+ EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
+
+ LDD -8(%sp),ht_0
+ LDD -40(%sp),ht_1
+ ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
+ ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
+
+ ADD lt_0,m_0,lt_0 ; lt = lt+m
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+	STD	ht_0,8(r_ptr)		; rp[1] = ht[0]
+
+ ADD lt_1,m_1,lt_1 ; lt = lt+m
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_1,16(r_ptr) ; rp[2] = lt[1]
+ STD ht_1,24(r_ptr) ; rp[3] = ht[1]
+
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_sqr_words_unroll2
+ LDO 32(r_ptr),r_ptr ; rp += 4
+
+ CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_sqr_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,flt_0,fm ; m
+ FSTD fm,-24(%sp) ; store m
+
+ XMPYU flt_0,flt_0,lt_temp ; lt
+ FSTD lt_temp,-16(%sp) ; store lt
+
+ XMPYU fht_0,fht_0,ht_temp ; ht
+ FSTD ht_temp,-8(%sp) ; store ht
+
+ LDD -24(%sp),m_0 ; load m
+ AND m_0,high_mask,tmp_0 ; m & Mask
+ DEPD,Z m_0,30,31,m_0 ; m << 32+1
+ LDD -16(%sp),lt_0 ; lt
+
+ LDD -8(%sp),ht_0 ; ht
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1
+ ADD m_0,lt_0,lt_0 ; lt = lt+m
+ ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+ STD ht_0,8(r_ptr) ; rp[1] = ht
+
+bn_sqr_words_exit
+ .EXIT
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = bp
+; arg3 = n
+
+t .reg %r22
+b .reg %r21
+l .reg %r20
+
+bn_add_words
+ .proc
+ .entry
+ .callinfo
+ .EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ CMPIB,>= 0,n,bn_add_words_exit
+ COPY %r0,%ret0 ; return 0 by default
+
+ ;
+ ; If 2 or more numbers do the loop
+ ;
+ CMPIB,= 1,n,bn_add_words_single_top
+ NOP
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
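+	; Per-word step in C (a sketch of what the unrolled loop below
+	; computes; c is the running carry returned in %ret0):
+	;
+	;   t = a[i] + c;   c  = (t < a[i]);
+	;   l = t + b[i];   c += (l < t);
+	;   r[i] = l;
+	;
+	; ADD,DC adds in the PSW carry bit, which is how both possible
+	; carries are folded into %ret0 without branches.
+	;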
+bn_add_words_unroll2
+ LDD 0(a_ptr),t
+ LDD 0(b_ptr),b
+ ADD t,%ret0,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret0 ; set c to carry
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret0,%r0,%ret0 ; c+= carry
+ STD l,0(r_ptr)
+
+ LDD 8(a_ptr),t
+ LDD 8(b_ptr),b
+ ADD t,%ret0,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret0 ; set c to carry
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret0,%r0,%ret0 ; c+= carry
+ STD l,8(r_ptr)
+
+ LDO -2(n),n
+ LDO 16(a_ptr),a_ptr
+ LDO 16(b_ptr),b_ptr
+
+ CMPIB,<= 2,n,bn_add_words_unroll2
+ LDO 16(r_ptr),r_ptr
+
+ CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
+
+bn_add_words_single_top
+ LDD 0(a_ptr),t
+ LDD 0(b_ptr),b
+
+ ADD t,%ret0,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret0 ; set c to carry (could use CMPCLR??)
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret0,%r0,%ret0 ; c+= carry
+ STD l,0(r_ptr)
+
+bn_add_words_exit
+ .EXIT
+ BVE (%rp)
+ NOP
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = bp
+; arg3 = n
+
+t1 .reg %r22
+t2 .reg %r21
+sub_tmp1 .reg %r20
+sub_tmp2 .reg %r19
+
+
+bn_sub_words
+ .proc
+ .callinfo
+ .EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ CMPIB,>= 0,n,bn_sub_words_exit
+ COPY %r0,%ret0 ; return 0 by default
+
+ ;
+ ; If 2 or more numbers do the loop
+ ;
+ CMPIB,= 1,n,bn_sub_words_single_top
+ NOP
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
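+	; Per-word step in C (a sketch; c is the running borrow):
+	;
+	;   r[i] = a[i] - b[i] - c;
+	;   c = (a[i] < b[i]) ? 1 : ((a[i] == b[i]) ? c : 0);
+	;
+	; The CMPCLR pair below selects the new borrow without a
+	; branch: sub_tmp2 ends up 0 if a[i] > b[i], 1 otherwise, and
+	; is only copied to %ret0 when a[i] != b[i].
+	;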
+bn_sub_words_unroll2
+ LDD 0(a_ptr),t1
+ LDD 0(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
+
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret0
+ STD sub_tmp1,0(r_ptr)
+
+ LDD 8(a_ptr),t1
+ LDD 8(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret0
+ STD sub_tmp1,8(r_ptr)
+
+ LDO -2(n),n
+ LDO 16(a_ptr),a_ptr
+ LDO 16(b_ptr),b_ptr
+
+ CMPIB,<= 2,n,bn_sub_words_unroll2
+ LDO 16(r_ptr),r_ptr
+
+ CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
+
+bn_sub_words_single_top
+ LDD 0(a_ptr),t1
+ LDD 0(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret0
+
+ STD sub_tmp1,0(r_ptr)
+
+bn_sub_words_exit
+ .EXIT
+ BVE (%rp)
+ NOP
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;------------------------------------------------------------------------------
+;
+; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
+;
+; arg0 = h
+; arg1 = l
+; arg2 = d
+;
+; This is mainly just modified assembly from the compiler, thus the
+; lack of variable names.
+;
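+; In outline (a hedged C sketch of the compiler-derived code below;
+; names are illustrative):
+;
+;   i = BN_num_bits_word(d);
+;   d <<= 64 - i;  (h,l) <<= 64 - i;	/* normalize the divisor */
+;   for (count = 2; count--;) {	/* two 32-bit quotient digits */
+;	estimate a digit q from the top halves of h and d;
+;	correct q downward until q*d fits under (h,l);
+;	(h,l) -= q*d;  ret = (ret << 32) | q;
+;   }
+;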
+;------------------------------------------------------------------------------
+bn_div_words
+ .proc
+ .callinfo CALLER,FRAME=272,ENTRY_GR=%r10,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_div_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .IMPORT BN_num_bits_word,CODE,NO_RELOCATION
+ .IMPORT __iob,DATA
+ .IMPORT fprintf,CODE,NO_RELOCATION
+ .IMPORT abort,CODE,NO_RELOCATION
+ .IMPORT $$div2U,MILLICODE
+ .entry
+ STD %r2,-16(%r30)
+ STD,MA %r3,352(%r30)
+ STD %r4,-344(%r30)
+ STD %r5,-336(%r30)
+ STD %r6,-328(%r30)
+ STD %r7,-320(%r30)
+ STD %r8,-312(%r30)
+ STD %r9,-304(%r30)
+ STD %r10,-296(%r30)
+
+ STD %r27,-288(%r30) ; save gp
+
+ COPY %r24,%r3 ; save d
+ COPY %r26,%r4 ; save h (high 64-bits)
+ LDO -1(%r0),%ret0 ; return -1 by default
+
+ CMPB,*= %r0,%arg2,$D3 ; if (d == 0)
+ COPY %r25,%r5 ; save l (low 64-bits)
+
+ LDO -48(%r30),%r29 ; create ap
+ .CALL ;in=26,29;out=28;
+ B,L BN_num_bits_word,%r2
+ COPY %r3,%r26
+ LDD -288(%r30),%r27 ; restore gp
+ LDI 64,%r21
+
+ CMPB,= %r21,%ret0,$00000012 ;if (i == 64) (forward)
+ COPY %ret0,%r24 ; i
+ MTSARCM %r24
+ DEPDI,Z -1,%sar,1,%r29
+ CMPB,*<<,N %r29,%r4,bn_div_err_case ; if (h > 1<<i) (forward)
+
+$00000012
+ SUBI 64,%r24,%r31 ; i = 64 - i;
+ CMPCLR,*<< %r4,%r3,%r0 ; if (h >= d)
+ SUB %r4,%r3,%r4 ; h -= d
+ CMPB,= %r31,%r0,$0000001A ; if (i)
+ COPY %r0,%r10 ; ret = 0
+ MTSARCM %r31 ; i to shift
+ DEPD,Z %r3,%sar,64,%r3 ; d <<= i;
+	SUBI	64,%r31,%r19		; 64 - i (redundant)
+ MTSAR %r19 ; (64 -i) to shift
+ SHRPD %r4,%r5,%sar,%r4 ; l>> (64-i)
+ MTSARCM %r31 ; i to shift
+ DEPD,Z %r5,%sar,64,%r5 ; l <<= i;
+
+$0000001A
+ DEPDI,Z -1,31,32,%r19
+	EXTRD,U	%r3,31,32,%r6		; dh = d >> 32
+	EXTRD,U	%r3,63,32,%r8		; dl = d & 0xffffffff
+ LDO 2(%r0),%r9
+ STD %r3,-280(%r30) ; "d" to stack
+
+$0000001C
+ DEPDI,Z -1,63,32,%r29 ;
+ EXTRD,U %r4,31,32,%r31 ; h >> 32
+ CMPB,*=,N %r31,%r6,$D2 ; if ((h>>32) != dh)(forward) div
+ COPY %r4,%r26
+ EXTRD,U %r4,31,32,%r25
+ COPY %r6,%r24
+ .CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
+ B,L $$div2U,%r2
+ EXTRD,U %r6,31,32,%r23
+ DEPD %r28,31,32,%r29
+$D2
+ STD %r29,-272(%r30) ; q
+ AND %r5,%r19,%r24 ; t & 0xffffffff00000000;
+ EXTRD,U %r24,31,32,%r24 ; ???
+ FLDD -272(%r30),%fr7 ; q
+ FLDD -280(%r30),%fr8 ; d
+ XMPYU %fr8L,%fr7L,%fr10
+ FSTD %fr10,-256(%r30)
+ XMPYU %fr8L,%fr7R,%fr22
+ FSTD %fr22,-264(%r30)
+ XMPYU %fr8R,%fr7L,%fr11
+ XMPYU %fr8R,%fr7R,%fr23
+ FSTD %fr11,-232(%r30)
+ FSTD %fr23,-240(%r30)
+ LDD -256(%r30),%r28
+ DEPD,Z %r28,31,32,%r2
+ LDD -264(%r30),%r20
+ ADD,L %r20,%r2,%r31
+ LDD -232(%r30),%r22
+ DEPD,Z %r22,31,32,%r22
+ LDD -240(%r30),%r21
+ B $00000024 ; enter loop
+ ADD,L %r21,%r22,%r23
+
+$0000002A
+ LDO -1(%r29),%r29
+ SUB %r23,%r8,%r23
+$00000024
+ SUB %r4,%r31,%r25
+ AND %r25,%r19,%r26
+ CMPB,*<>,N %r0,%r26,$00000046 ; (forward)
+ DEPD,Z %r25,31,32,%r20
+ OR %r20,%r24,%r21
+ CMPB,*<<,N %r21,%r23,$0000002A ;(backward)
+ SUB %r31,%r6,%r31
+;-------------Break path---------------------
+
+$00000046
+ DEPD,Z %r23,31,32,%r25 ;tl
+ EXTRD,U %r23,31,32,%r26 ;t
+	AND	%r25,%r19,%r24		;tl = (tl<<32) & 0xffffffff00000000L
+ ADD,L %r31,%r26,%r31 ;th += t;
+ CMPCLR,*>>= %r5,%r24,%r0 ;if (l<tl)
+ LDO 1(%r31),%r31 ; th++;
+	CMPB,*<<=,N	%r31,%r4,$00000036	;if (h < th) (forward)
+ LDO -1(%r29),%r29 ;q--;
+ ADD,L %r4,%r3,%r4 ;h += d;
+$00000036
+ ADDIB,=,N -1,%r9,$D1 ;if (--count == 0) break (forward)
+ SUB %r5,%r24,%r28 ; l -= tl;
+ SUB %r4,%r31,%r24 ; h -= th;
+ SHRPD %r24,%r28,32,%r4 ; h = ((h<<32)|(l>>32));
+ DEPD,Z %r29,31,32,%r10 ; ret = q<<32
+ b $0000001C
+ DEPD,Z %r28,31,32,%r5 ; l = l << 32
+
+$D1
+ OR %r10,%r29,%r28 ; ret |= q
+$D3
+ LDD -368(%r30),%r2
+$D0
+ LDD -296(%r30),%r10
+ LDD -304(%r30),%r9
+ LDD -312(%r30),%r8
+ LDD -320(%r30),%r7
+ LDD -328(%r30),%r6
+ LDD -336(%r30),%r5
+ LDD -344(%r30),%r4
+ BVE (%r2)
+ .EXIT
+ LDD,MB -352(%r30),%r3
+
+bn_div_err_case
+ MFIA %r6
+ ADDIL L'bn_div_words-bn_div_err_case,%r6,%r1
+ LDO R'bn_div_words-bn_div_err_case(%r1),%r6
+ ADDIL LT'__iob,%r27,%r1
+ LDD RT'__iob(%r1),%r26
+ ADDIL L'C$4-bn_div_words,%r6,%r1
+ LDO R'C$4-bn_div_words(%r1),%r25
+ LDO 64(%r26),%r26
+ .CALL ;in=24,25,26,29;out=28;
+ B,L fprintf,%r2
+ LDO -48(%r30),%r29
+ LDD -288(%r30),%r27
+ .CALL ;in=29;
+ B,L abort,%r2
+ LDO -48(%r30),%r29
+ LDD -288(%r30),%r27
+ B $D0
+ LDD -368(%r30),%r2
+ .PROCEND ;in=24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+; Registers to hold 64-bit values to manipulate. The "L" part
+; of the register corresponds to the upper 32-bits, while the "R"
+; part corresponds to the lower 32-bits
+;
+; Note that when using b6 and b7, the code must save them before
+; use because they are callee-save registers
+;
+;
+; Floating point registers used to hold values that
+; are manipulated. These don't collide with ftemp1-4 and
+; are all caller-save registers
+;
+a0 .reg %fr22
+a0L .reg %fr22L
+a0R .reg %fr22R
+
+a1 .reg %fr23
+a1L .reg %fr23L
+a1R .reg %fr23R
+
+a2 .reg %fr24
+a2L .reg %fr24L
+a2R .reg %fr24R
+
+a3 .reg %fr25
+a3L .reg %fr25L
+a3R .reg %fr25R
+
+a4 .reg %fr26
+a4L .reg %fr26L
+a4R .reg %fr26R
+
+a5 .reg %fr27
+a5L .reg %fr27L
+a5R .reg %fr27R
+
+a6 .reg %fr28
+a6L .reg %fr28L
+a6R .reg %fr28R
+
+a7 .reg %fr29
+a7L .reg %fr29L
+a7R .reg %fr29R
+
+b0 .reg %fr30
+b0L .reg %fr30L
+b0R .reg %fr30R
+
+b1 .reg %fr31
+b1L .reg %fr31L
+b1R .reg %fr31R
+
+;
+; Temporary floating point variables, these are all caller save
+; registers
+;
+ftemp1 .reg %fr4
+ftemp2 .reg %fr5
+ftemp3 .reg %fr6
+ftemp4 .reg %fr7
+
+;
+; The B set of registers when used.
+;
+
+b2 .reg %fr8
+b2L .reg %fr8L
+b2R .reg %fr8R
+
+b3 .reg %fr9
+b3L .reg %fr9L
+b3R .reg %fr9R
+
+b4 .reg %fr10
+b4L .reg %fr10L
+b4R .reg %fr10R
+
+b5 .reg %fr11
+b5L .reg %fr11L
+b5R .reg %fr11R
+
+b6 .reg %fr12
+b6L .reg %fr12L
+b6R .reg %fr12R
+
+b7 .reg %fr13
+b7L .reg %fr13L
+b7R .reg %fr13R
+
+c1 .reg %r21 ; only reg
+temp1 .reg %r20 ; only reg
+temp2 .reg %r19 ; only reg
+temp3 .reg %r31 ; only reg
+
+m1 .reg %r28
+c2 .reg %r23
+high_one .reg %r1
+ht .reg %r6
+lt .reg %r5
+m .reg %r4
+c3 .reg %r3
+
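+;
+; SQR_ADD_C mirrors the sqr_add_c step of the C reference code in
+; crypto/bn/bn_asm.c: square one 64-bit word and accumulate the
+; 128-bit result into the three running column words (C1,C2,C3).
+; Roughly, in C (a sketch; lo64/hi64 are illustrative helpers for
+; the halves of a conceptual 128-bit product):
+;
+;	t = (u128)a * a;
+;	C1 += lo64(t);	carry = (C1 < lo64(t));
+;	C2 += hi64(t) + carry;	C3 += (C2 overflowed);
+;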
+SQR_ADD_C .macro A0L,A0R,C1,C2,C3
+ XMPYU A0L,A0R,ftemp1 ; m
+ FSTD ftemp1,-24(%sp) ; store m
+
+ XMPYU A0R,A0R,ftemp2 ; lt
+ FSTD ftemp2,-16(%sp) ; store lt
+
+ XMPYU A0L,A0L,ftemp3 ; ht
+ FSTD ftemp3,-8(%sp) ; store ht
+
+ LDD -24(%sp),m ; load m
+ AND m,high_mask,temp2 ; m & Mask
+ DEPD,Z m,30,31,temp3 ; m << 32+1
+ LDD -16(%sp),lt ; lt
+
+ LDD -8(%sp),ht ; ht
+ EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1
+ ADD temp3,lt,lt ; lt = lt+m
+ ADD,L ht,temp1,ht ; ht += temp1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C1,lt,C1 ; c1=c1+lt
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C2,ht,C2 ; c2=c2+ht
+ ADD,DC C3,%r0,C3 ; c3++
+.endm
+
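+;
+; SQR_ADD_C2 handles the off-diagonal products a[i]*a[j] (i != j),
+; each of which appears twice in a square. In outline (a sketch,
+; same illustrative helpers as above):
+;
+;	t = (u128)a[i] * a[j];
+;	t *= 2;	C3 += (t overflowed 128 bits);
+;	C1 += lo64(t);	C2 += hi64(t) + carry;	C3 += carry;
+;
+; The doubling is the ADD ht,ht,ht / ADD lt,lt,lt pair below, with
+; their carries folded into C3 and ht respectively.
+;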
+SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
+ XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht
+ FSTD ftemp1,-16(%sp) ;
+ XMPYU A0R,A1L,ftemp2 ; m = bh*lt
+ FSTD ftemp2,-8(%sp) ;
+ XMPYU A0R,A1R,ftemp3 ; lt = bl*lt
+ FSTD ftemp3,-32(%sp)
+ XMPYU A0L,A1L,ftemp4 ; ht = bh*ht
+ FSTD ftemp4,-24(%sp) ;
+
+ LDD -8(%sp),m ; r21 = m
+ LDD -16(%sp),m1 ; r19 = m1
+ ADD,L m,m1,m ; m+m1
+
+ DEPD,Z m,31,32,temp3 ; (m+m1<<32)
+ LDD -24(%sp),ht ; r24 = ht
+
+ CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
+ ADD,L ht,high_one,ht ; ht+=high_one
+
+ EXTRD,U m,31,32,temp1 ; m >> 32
+ LDD -32(%sp),lt ; lt
+ ADD,L ht,temp1,ht ; ht+= m>>32
+ ADD lt,temp3,lt ; lt = lt+m1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD ht,ht,ht ; ht=ht+ht;
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+
+ ADD lt,lt,lt ; lt=lt+lt;
+ ADD,DC ht,%r0,ht ; add in carry (ht++)
+
+ ADD C1,lt,C1 ; c1=c1+lt
+ ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++)
+	LDO	1(C3),C3		; bump c3 if overflow, nullify otherwise
+
+ ADD C2,ht,C2 ; c2 = c2 + ht
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+.endm
+
+;
+;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+; arg0 = r_ptr
+; arg1 = a_ptr
+;
+
+bn_sqr_comba8
+ .PROC
+ .CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .ENTRY
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ SQR_ADD_C a0L,a0R,c1,c2,c3
+ STD c1,0(r_ptr) ; r[0] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
+ STD c2,8(r_ptr) ; r[1] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
+ STD c3,16(r_ptr) ; r[2] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
+ SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
+ STD c1,24(r_ptr) ; r[3] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
+ SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
+ STD c2,32(r_ptr) ; r[4] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
+ SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
+ STD c3,40(r_ptr) ; r[5] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a3L,a3R,c1,c2,c3
+ SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
+ SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
+ SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
+ STD c1,48(r_ptr) ; r[6] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
+ SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
+ SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
+ STD c2,56(r_ptr) ; r[7] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a4L,a4R,c3,c1,c2
+ SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
+ SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
+ SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
+ STD c3,64(r_ptr) ; r[8] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
+ SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
+ SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
+ STD c1,72(r_ptr) ; r[9] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a5L,a5R,c2,c3,c1
+ SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
+ SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
+ STD c2,80(r_ptr) ; r[10] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
+ SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
+ STD c3,88(r_ptr) ; r[11] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a6L,a6R,c1,c2,c3
+ SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
+ STD c1,96(r_ptr) ; r[12] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
+ STD c2,104(r_ptr) ; r[13] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a7L,a7R,c3,c1,c2
+ STD c3, 112(r_ptr) ; r[14] = c3
+ STD c1, 120(r_ptr) ; r[15] = c1
+
+ .EXIT
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+;-----------------------------------------------------------------------------
+;
+;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+; arg0 = r_ptr
+; arg1 = a_ptr
+;
+
+bn_sqr_comba4
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+
+ SQR_ADD_C a0L,a0R,c1,c2,c3
+
+ STD c1,0(r_ptr) ; r[0] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
+
+ STD c2,8(r_ptr) ; r[1] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
+
+ STD c3,16(r_ptr) ; r[2] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
+ SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
+
+ STD c1,24(r_ptr) ; r[3] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
+
+ STD c2,32(r_ptr) ; r[4] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
+ STD c3,40(r_ptr) ; r[5] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a3L,a3R,c1,c2,c3
+ STD c1,48(r_ptr) ; r[6] = c1;
+ STD c2,56(r_ptr) ; r[7] = c2;
+
+ .EXIT
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+
+;---------------------------------------------------------------------------
+
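+; MUL_ADD_C is the mul_add_c step of the C reference code: multiply
+; one word of a by one word of b and accumulate the 128-bit product
+; into the column accumulators. Roughly (same sketch notation as the
+; squaring macros above):
+;
+;	t = (u128)a * b;
+;	C1 += lo64(t);	C2 += hi64(t) + carry;	C3 += carry;
+;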
+MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
+ XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht
+ FSTD ftemp1,-16(%sp) ;
+ XMPYU A0R,B0L,ftemp2 ; m = bh*lt
+ FSTD ftemp2,-8(%sp) ;
+ XMPYU A0R,B0R,ftemp3 ; lt = bl*lt
+ FSTD ftemp3,-32(%sp)
+ XMPYU A0L,B0L,ftemp4 ; ht = bh*ht
+ FSTD ftemp4,-24(%sp) ;
+
+ LDD -8(%sp),m ; r21 = m
+ LDD -16(%sp),m1 ; r19 = m1
+ ADD,L m,m1,m ; m+m1
+
+ DEPD,Z m,31,32,temp3 ; (m+m1<<32)
+ LDD -24(%sp),ht ; r24 = ht
+
+ CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
+ ADD,L ht,high_one,ht ; ht+=high_one
+
+ EXTRD,U m,31,32,temp1 ; m >> 32
+ LDD -32(%sp),lt ; lt
+ ADD,L ht,temp1,ht ; ht+= m>>32
+ ADD lt,temp3,lt ; lt = lt+m1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C1,lt,C1 ; c1=c1+lt
+	ADD,DC	ht,%r0,ht	; add in carry (ht++)
+
+ ADD C2,ht,C2 ; c2 = c2 + ht
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+.endm
+
+
+;
+;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
+
+bn_mul_comba8
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+	FSTD	%fr12,32(%sp)	; save fr12
+	FSTD	%fr13,40(%sp)	; save fr13
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ FLDD 0(b_ptr),b0
+ FLDD 8(b_ptr),b1
+ FLDD 16(b_ptr),b2
+ FLDD 24(b_ptr),b3
+ FLDD 32(b_ptr),b4
+ FLDD 40(b_ptr),b5
+ FLDD 48(b_ptr),b6
+ FLDD 56(b_ptr),b7
+
+ MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
+ STD c1,0(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
+ STD c2,8(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
+ STD c3,16(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
+ STD c1,24(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
+ STD c2,32(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
+ MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
+ MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
+ STD c3,40(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
+ STD c1,48(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
+ MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
+ STD c2,56(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
+ STD c3,64(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
+ MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
+ MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
+ STD c1,72(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
+ MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
+ MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
+ STD c2,80(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
+ MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
+ STD c3,88(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
+ STD c1,96(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
+ MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
+ STD c2,104(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
+ STD c3,112(r_ptr)
+ STD c1,120(r_ptr)
+
+ .EXIT
+ FLDD -88(%sp),%fr13
+ FLDD -96(%sp),%fr12
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+;-----------------------------------------------------------------------------
+;
+;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
+
+bn_mul_comba4
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+	FSTD	%fr12,32(%sp)	; save fr12
+	FSTD	%fr13,40(%sp)	; save fr13
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+
+ FLDD 0(b_ptr),b0
+ FLDD 8(b_ptr),b1
+ FLDD 16(b_ptr),b2
+ FLDD 24(b_ptr),b3
+
+ MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
+ STD c1,0(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
+ STD c2,8(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
+ STD c3,16(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
+ STD c1,24(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
+ STD c2,32(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
+ STD c3,40(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
+ STD c1,48(r_ptr)
+ STD c2,56(r_ptr)
+
+ .EXIT
+ FLDD -88(%sp),%fr13
+ FLDD -96(%sp),%fr12
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+ .SPACE $PRIVATE$,SORT=16
+ .IMPORT $global$,DATA
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+ .SUBSPA $LIT$,ACCESS=0x2c
+C$4
+ .ALIGN 8
+ .STRINGZ "Division would overflow (%d)\n"
+ .END
diff --git a/crypto/bn/asm/ppc.pl b/crypto/bn/asm/ppc.pl
new file mode 100644
index 0000000..08e0053
--- /dev/null
+++ b/crypto/bn/asm/ppc.pl
@@ -0,0 +1,2078 @@
+#!/usr/bin/env perl
+#
+# Implemented as a Perl wrapper as we want to support several different
+# architectures with a single file. We pick up the target based on the
+# file name we are asked to generate.
+#
+# It should be noted though that this perl code is nothing like
+# <openssl>/crypto/perlasm/x86*. In this case perl is used pretty much
+# as pre-processor to cover for platform differences in name decoration,
+# linker tables, 32-/64-bit instruction sets...
+#
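+# For example (hypothetical invocations; the code below only
+# requires that the output name match /32\.s/ or /64\.s/ and,
+# for ABI handling, one of linux/aix/osx):
+#
+#	perl ppc.pl linux_ppc32.s	# 32-bit Linux ABI
+#	perl ppc.pl aix_ppc64.s		# 64-bit AIX ABI
+#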
+# As you might know, there are several PowerPC ABIs in use. Most notably,
+# Linux and AIX use different 32-bit ABIs. The good news is that these
+# ABIs are similar enough to implement leaf(!) functions that are ABI
+# neutral. And that's what you find here: ABI-neutral leaf functions.
+# In case you wonder what that is...
+#
+# AIX performance
+#
+# MEASUREMENTS WITH cc ON a 200 MHz PowerPC 604e.
+#
+# The following is the performance of 32-bit
+# compiler-generated code:
+#
+# OpenSSL 0.9.6c 21 dec 2001
+# built on: Tue Jun 11 11:06:51 EDT 2002
+# options:bn(64,32) ...
+#compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3
+# sign verify sign/s verify/s
+#rsa 512 bits 0.0098s 0.0009s 102.0 1170.6
+#rsa 1024 bits 0.0507s 0.0026s 19.7 387.5
+#rsa 2048 bits 0.3036s 0.0085s 3.3 117.1
+#rsa 4096 bits 2.0040s 0.0299s 0.5 33.4
+#dsa 512 bits 0.0087s 0.0106s 114.3 94.5
+#dsa 1024 bits 0.0256s 0.0313s 39.0 32.0
+#
+# Same benchmark with this assembler code:
+#
+#rsa 512 bits 0.0056s 0.0005s 178.6 2049.2
+#rsa 1024 bits 0.0283s 0.0015s 35.3 674.1
+#rsa 2048 bits 0.1744s 0.0050s 5.7 201.2
+#rsa 4096 bits 1.1644s 0.0179s 0.9 55.7
+#dsa 512 bits 0.0052s 0.0062s 191.6 162.0
+#dsa 1024 bits 0.0149s 0.0180s 67.0 55.5
+#
+# The number of operations increases by almost 75%
+#
+# Here are the performance numbers for 64-bit
+# compiler-generated code:
+#
+# OpenSSL 0.9.6g [engine] 9 Aug 2002
+# built on: Fri Apr 18 16:59:20 EDT 2003
+# options:bn(64,64) ...
+# compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
+# sign verify sign/s verify/s
+#rsa 512 bits 0.0028s 0.0003s 357.1 3844.4
+#rsa 1024 bits 0.0148s 0.0008s 67.5 1239.7
+#rsa 2048 bits 0.0963s 0.0028s 10.4 353.0
+#rsa 4096 bits 0.6538s 0.0102s 1.5 98.1
+#dsa 512 bits 0.0026s 0.0032s 382.5 313.7
+#dsa 1024 bits 0.0081s 0.0099s 122.8 100.6
+#
+# Same benchmark with this assembler code:
+#
+#rsa 512 bits 0.0020s 0.0002s 510.4 6273.7
+#rsa 1024 bits 0.0088s 0.0005s 114.1 2128.3
+#rsa 2048 bits 0.0540s 0.0016s 18.5 622.5
+#rsa 4096 bits 0.3700s 0.0058s 2.7 171.0
+#dsa 512 bits 0.0016s 0.0020s 610.7 507.1
+#dsa 1024 bits 0.0047s 0.0058s 212.5 173.2
+#
+# Again, performance increases by about 75%
+#
+# Mac OS X, Apple G5 1.8GHz (Note this is 32 bit code)
+# OpenSSL 0.9.7c 30 Sep 2003
+#
+# Original code.
+#
+#rsa 512 bits 0.0011s 0.0001s 906.1 11012.5
+#rsa 1024 bits 0.0060s 0.0003s 166.6 3363.1
+#rsa 2048 bits 0.0370s 0.0010s 27.1 982.4
+#rsa 4096 bits 0.2426s 0.0036s 4.1 280.4
+#dsa 512 bits 0.0010s 0.0012s 1038.1 841.5
+#dsa 1024 bits 0.0030s 0.0037s 329.6 269.7
+#dsa 2048 bits 0.0101s 0.0127s 98.9 78.6
+#
+# Same benchmark with this assembler code:
+#
+#rsa 512 bits 0.0007s 0.0001s 1416.2 16645.9
+#rsa 1024 bits 0.0036s 0.0002s 274.4 5380.6
+#rsa 2048 bits 0.0222s 0.0006s 45.1 1589.5
+#rsa 4096 bits 0.1469s 0.0022s 6.8 449.6
+#dsa 512 bits 0.0006s 0.0007s 1664.2 1376.2
+#dsa 1024 bits 0.0018s 0.0023s 545.0 442.2
+#dsa 2048 bits 0.0061s 0.0075s 163.5 132.8
+#
+# Performance increase of ~60%
+#
+# If you have comments or suggestions to improve code send
+# me a note at schari@us.ibm.com
+#
+
+$opf = shift;
+
+if ($opf =~ /32\.s/) {
+ $BITS= 32;
+ $BNSZ= $BITS/8;
+ $ISA= "\"ppc\"";
+
+ $LD= "lwz"; # load
+ $LDU= "lwzu"; # load and update
+ $ST= "stw"; # store
+ $STU= "stwu"; # store and update
+ $UMULL= "mullw"; # unsigned multiply low
+ $UMULH= "mulhwu"; # unsigned multiply high
+ $UDIV= "divwu"; # unsigned divide
+ $UCMPI= "cmplwi"; # unsigned compare with immediate
+ $UCMP= "cmplw"; # unsigned compare
+ $CNTLZ= "cntlzw"; # count leading zeros
+ $SHL= "slw"; # shift left
+ $SHR= "srw"; # unsigned shift right
+ $SHRI= "srwi"; # unsigned shift right by immediate
+ $SHLI= "slwi"; # shift left by immediate
+ $CLRU= "clrlwi"; # clear upper bits
+ $INSR= "insrwi"; # insert right
+ $ROTL= "rotlwi"; # rotate left by immediate
+ $TR= "tw"; # conditional trap
+} elsif ($opf =~ /64\.s/) {
+ $BITS= 64;
+ $BNSZ= $BITS/8;
+ $ISA= "\"ppc64\"";
+
+ # same as above, but 64-bit mnemonics...
+ $LD= "ld"; # load
+ $LDU= "ldu"; # load and update
+ $ST= "std"; # store
+ $STU= "stdu"; # store and update
+ $UMULL= "mulld"; # unsigned multiply low
+ $UMULH= "mulhdu"; # unsigned multiply high
+ $UDIV= "divdu"; # unsigned divide
+ $UCMPI= "cmpldi"; # unsigned compare with immediate
+ $UCMP= "cmpld"; # unsigned compare
+ $CNTLZ= "cntlzd"; # count leading zeros
+ $SHL= "sld"; # shift left
+ $SHR= "srd"; # unsigned shift right
+ $SHRI= "srdi"; # unsigned shift right by immediate
+ $SHLI= "sldi"; # shift left by immediate
+ $CLRU= "clrldi"; # clear upper bits
+ $INSR= "insrdi"; # insert right
+ $ROTL= "rotldi"; # rotate left by immediate
+ $TR= "td"; # conditional trap
+} else { die "nonsense $opf"; }
+
+( defined shift || open STDOUT,">$opf" ) || die "can't open $opf: $!";
+
+# function entry points from the AIX code
+#
+# There are other, more elegant, ways to handle this. We (IBM) chose
+# this approach as it plays well with scripts we run to 'namespace'
+# OpenSSL, i.e. we add a prefix to all the public symbols so we can
+# co-exist in the same process with other implementations of OpenSSL.
+# 'Cleverer' ways of doing these substitutions tend to hide information
+# we need to keep obvious.
+#
+my @items = ("bn_sqr_comba4",
+ "bn_sqr_comba8",
+ "bn_mul_comba4",
+ "bn_mul_comba8",
+ "bn_sub_words",
+ "bn_add_words",
+ "bn_div_words",
+ "bn_sqr_words",
+ "bn_mul_words",
+ "bn_mul_add_words");
+
+if ($opf =~ /linux/) { do_linux(); }
+elsif ($opf =~ /aix/) { do_aix(); }
+elsif ($opf =~ /osx/) { do_osx(); }
+else { do_bsd(); }
+
+sub do_linux {
+ $d=&data();
+
+ if ($BITS==64) {
+ foreach $t (@items) {
+ $d =~ s/\.$t:/\
+\t.section\t".opd","aw"\
+\t.align\t3\
+\t.globl\t$t\
+$t:\
+\t.quad\t.$t,.TOC.\@tocbase,0\
+\t.size\t$t,24\
+\t.previous\n\
+\t.type\t.$t,\@function\
+\t.globl\t.$t\
+.$t:/g;
+ }
+ }
+ else {
+ foreach $t (@items) {
+ $d=~s/\.$t/$t/g;
+ }
+ }
+ # hide internal labels to avoid pollution of name table...
+ $d=~s/Lppcasm_/.Lppcasm_/gm;
+ print $d;
+}
+
+sub do_aix {
+ # AIX assembler is smart enough to please the linker without
+ # making us do something special...
+ print &data();
+}
+
+# MacOSX 32 bit
+sub do_osx {
+ $d=&data();
+ # Change the bn symbol prefix from '.' to '_'
+ foreach $t (@items) {
+ $d=~s/\.$t/_$t/g;
+ }
+ # Change .machine to something OS X asm will accept
+ $d=~s/\.machine.*/.text/g;
+ $d=~s/\#/;/g; # change comment from '#' to ';'
+ print $d;
+}
+
+# BSD (Untested)
+sub do_bsd {
+ $d=&data();
+ foreach $t (@items) {
+ $d=~s/\.$t/_$t/g;
+ }
+ print $d;
+}
+
+sub data {
+ local($data)=<<EOF;
+#--------------------------------------------------------------------
+#
+#
+#
+#
+# File: ppc32.s
+#
+# Created by: Suresh Chari
+# IBM Thomas J. Watson Research Library
+# Hawthorne, NY
+#
+#
+# Description: Optimized assembly routines for OpenSSL crypto
+#		on the 32-bit PowerPC platform.
+#
+#
+# Version History
+#
+# 2. Fixed bn_add,bn_sub and bn_div_words, added comments,
+# cleaned up code. Also made a single version which can
+# be used for both the AIX and Linux compilers. See NOTE
+# below.
+# 12/05/03 Suresh Chari
+# (with lots of help from) Andy Polyakov
+##
+# 1. Initial version 10/20/02 Suresh Chari
+#
+#
+# The following file works for the xlc,cc
+# and gcc compilers.
+#
+# NOTE: To get the file to link correctly with the gcc compiler
+# you have to change the names of the routines and remove
+# the first .(dot) character. This should automatically
+# be done in the build process.
+#
+# Hand optimized assembly code for the following routines
+#
+# bn_sqr_comba4
+# bn_sqr_comba8
+# bn_mul_comba4
+# bn_mul_comba8
+# bn_sub_words
+# bn_add_words
+# bn_div_words
+# bn_sqr_words
+# bn_mul_words
+# bn_mul_add_words
+#
+# NOTE: It is possible to optimize this code more for
+# specific PowerPC or Power architectures. On the Northstar
+# architecture the optimizations in this file do
+# NOT provide much improvement.
+#
+# If you have comments or suggestions to improve code send
+# me a note at schari\@us.ibm.com
+#
+#--------------------------------------------------------------------------
+#
+# Defines to be used in the assembly code.
+#
+.set r0,0 # we use it as storage for value of 0
+.set SP,1 # preserved
+.set RTOC,2 # preserved
+.set r3,3 # 1st argument/return value
+.set r4,4 # 2nd argument/volatile register
+.set r5,5 # 3rd argument/volatile register
+.set r6,6 # ...
+.set r7,7
+.set r8,8
+.set r9,9
+.set r10,10
+.set r11,11
+.set r12,12
+.set r13,13 # not used, nor any other "below" it...
+
+.set BO_IF_NOT,4
+.set BO_IF,12
+.set BO_dCTR_NZERO,16
+.set BO_dCTR_ZERO,18
+.set BO_ALWAYS,20
+.set CR0_LT,0;
+.set CR0_GT,1;
+.set CR0_EQ,2
+.set CR1_FX,4;
+.set CR1_FEX,5;
+.set CR1_VX,6
+.set LR,8
+
+# Declare function names to be global
+# NOTE: For gcc these names MUST be changed to remove
+# the first . i.e. for example change ".bn_sqr_comba4"
+# to "bn_sqr_comba4". This should be automatically done
+# in the build.
+
+ .globl .bn_sqr_comba4
+ .globl .bn_sqr_comba8
+ .globl .bn_mul_comba4
+ .globl .bn_mul_comba8
+ .globl .bn_sub_words
+ .globl .bn_add_words
+ .globl .bn_div_words
+ .globl .bn_sqr_words
+ .globl .bn_mul_words
+ .globl .bn_mul_add_words
+
+# .text section
+
+ .machine $ISA
+
+#
+# NOTE: The following label name should be changed to
+# "bn_sqr_comba4" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_sqr_comba4:
+#
+# Optimized version of bn_sqr_comba4.
+#
+# void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+# r3 contains r
+# r4 contains a
+#
+# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
+#
+# r5,r6 are the two BN_ULONGs being multiplied.
+# r7,r8 are the results of the 32x32 giving 64 bit multiply.
+# r9,r10, r11 are the equivalents of c1,c2, c3.
+# Here's the assembly
+#
+#
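+# The carry chain used throughout these comba routines, as a sketch
+# (c1,c2,c3 stand for the column accumulators r9,r10,r11):
+#
+#	addc	c1,lo,c1	# c1 += lo, set carry
+#	adde	c2,hi,c2	# c2 += hi + carry, set carry
+#	addze	c3,c3		# c3 += carry
+#
+# r0 is kept at 0 so "addze cX,r0" can start a fresh column holding
+# just the incoming carry.
+#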
+ xor r0,r0,r0 # set r0 = 0. Used in the addze
+ # instructions below
+
+ #sqr_add_c(a,0,c1,c2,c3)
+ $LD r5,`0*$BNSZ`(r4)
+ $UMULL r9,r5,r5
+	$UMULH	 r10,r5,r5		#in the first iteration there is
+					#no need to add since c1=c2=c3=0.
+					# Note c3(r11) is NOT set to 0
+					# but will be.
+
+ $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
+ # sqr_add_c2(a,1,0,c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
+ adde r8,r8,r8
+ addze r9,r0 # catch carry if any.
+ # r9= r0(=0) and carry
+
+ addc r10,r7,r10 # now add to temp result.
+ addze r11,r8 # r8 added to r11 which is 0
+ addze r9,r9
+
+ $ST r10,`1*$BNSZ`(r3) #r[1]=c2;
+ #sqr_add_c(a,1,c3,c1,c2)
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+ #sqr_add_c2(a,2,0,c3,c1,c2)
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`2*$BNSZ`(r3) #r[2]=c3
+ #sqr_add_c2(a,3,0,c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r11,r0
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,2,1,c1,c2,c3);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ $ST r9,`3*$BNSZ`(r3) #r[3]=c1
+ #sqr_add_c(a,2,c2,c3,c1);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ #sqr_add_c2(a,3,1,c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r9,r9
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`4*$BNSZ`(r3) #r[4]=c2
+ #sqr_add_c2(a,3,2,c3,c1,c2);
+ $LD r5,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r10,r0
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`5*$BNSZ`(r3) #r[5] = c3
+ #sqr_add_c(a,3,c1,c2,c3);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+
+ $ST r9,`6*$BNSZ`(r3) #r[6]=c1
+ $ST r10,`7*$BNSZ`(r3) #r[7]=c2
+ bclr BO_ALWAYS,CR0_LT
+ .long 0x00000000
+
+#
+# NOTE: The following label name should be changed to
+# "bn_sqr_comba8" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_sqr_comba8:
+#
+# This is an optimized version of the bn_sqr_comba8 routine.
+# Tightly uses the adde instruction
+#
+#
+# void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+# r3 contains r
+# r4 contains a
+#
+# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
+#
+# r5,r6 are the two BN_ULONGs being multiplied.
+# r7,r8 are the results of the 32x32 giving 64 bit multiply.
+# r9,r10, r11 are the equivalents of c1,c2, c3.
+#
+# A possible optimization of loading all 8 longs of a into
+# registers doesn't provide any speedup
+#
+
+ xor r0,r0,r0 #set r0 = 0.Used in addze
+ #instructions below.
+
+ #sqr_add_c(a,0,c1,c2,c3);
+ $LD r5,`0*$BNSZ`(r4)
+ $UMULL r9,r5,r5 #1st iteration: no carries.
+ $UMULH r10,r5,r5
+ $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
+ #sqr_add_c2(a,1,0,c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+	addc	r10,r7,r10	#add the two-register number
+	adde	r11,r8,r0	# (r8,r7) to the three-register
+	addze	r9,r0		# number (r9,r11,r10). NOTE: r0=0
+
+	addc	r10,r7,r10	#add the two-register number
+	adde	r11,r8,r11	# (r8,r7) to the three-register
+	addze	r9,r9		# number (r9,r11,r10).
+
+ $ST r10,`1*$BNSZ`(r3) # r[1]=c2
+
+ #sqr_add_c(a,1,c3,c1,c2);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+ #sqr_add_c2(a,2,0,c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ $ST r11,`2*$BNSZ`(r3) #r[2]=c3
+ #sqr_add_c2(a,3,0,c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4) #r6 = a[3]. r5 is already a[0].
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r0
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,2,1,c1,c2,c3);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+
+ $ST r9,`3*$BNSZ`(r3) #r[3]=c1;
+ #sqr_add_c(a,2,c2,c3,c1);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ #sqr_add_c2(a,3,1,c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,4,0,c2,c3,c1);
+ $LD r5,`0*$BNSZ`(r4)
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`4*$BNSZ`(r3) #r[4]=c2;
+ #sqr_add_c2(a,5,0,c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,4,1,c3,c1,c2);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,3,2,c3,c1,c2);
+ $LD r5,`2*$BNSZ`(r4)
+ $LD r6,`3*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`5*$BNSZ`(r3) #r[5]=c3;
+ #sqr_add_c(a,3,c1,c2,c3);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r0
+ #sqr_add_c2(a,4,2,c1,c2,c3);
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,5,1,c1,c2,c3);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,6,0,c1,c2,c3);
+ $LD r5,`0*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ $ST r9,`6*$BNSZ`(r3) #r[6]=c1;
+ #sqr_add_c2(a,7,0,c2,c3,c1);
+ $LD r6,`7*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,6,1,c2,c3,c1);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,5,2,c2,c3,c1);
+ $LD r5,`2*$BNSZ`(r4)
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,4,3,c2,c3,c1);
+ $LD r5,`3*$BNSZ`(r4)
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`7*$BNSZ`(r3) #r[7]=c2;
+ #sqr_add_c(a,4,c3,c1,c2);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+ #sqr_add_c2(a,5,3,c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,6,2,c3,c1,c2);
+ $LD r5,`2*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,7,1,c3,c1,c2);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`7*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`8*$BNSZ`(r3) #r[8]=c3;
+ #sqr_add_c2(a,7,2,c1,c2,c3);
+ $LD r5,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r0
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,6,3,c1,c2,c3);
+ $LD r5,`3*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,5,4,c1,c2,c3);
+ $LD r5,`4*$BNSZ`(r4)
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ $ST r9,`9*$BNSZ`(r3) #r[9]=c1;
+ #sqr_add_c(a,5,c2,c3,c1);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ #sqr_add_c2(a,6,4,c2,c3,c1);
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,7,3,c2,c3,c1);
+ $LD r5,`3*$BNSZ`(r4)
+ $LD r6,`7*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`10*$BNSZ`(r3) #r[10]=c2;
+ #sqr_add_c2(a,7,4,c3,c1,c2);
+ $LD r5,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,6,5,c3,c1,c2);
+ $LD r5,`5*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`11*$BNSZ`(r3) #r[11]=c3;
+ #sqr_add_c(a,6,c1,c2,c3);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r0
+ #sqr_add_c2(a,7,5,c1,c2,c3)
+ $LD r6,`7*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ $ST r9,`12*$BNSZ`(r3) #r[12]=c1;
+
+ #sqr_add_c2(a,7,6,c2,c3,c1)
+ $LD r5,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`13*$BNSZ`(r3) #r[13]=c2;
+ #sqr_add_c(a,7,c3,c1,c2);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ $ST r11,`14*$BNSZ`(r3) #r[14]=c3;
+ $ST r9, `15*$BNSZ`(r3) #r[15]=c1;
+
+
+ bclr BO_ALWAYS,CR0_LT
+
+ .long 0x00000000
+
+#
+# NOTE: The following label name should be changed to
+# "bn_mul_comba4" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_mul_comba4:
+#
+# This is an optimized version of the bn_mul_comba4 routine.
+#
+# void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+# r3 contains r
+# r4 contains a
+# r5 contains b
+# r6, r7 are the 2 BN_ULONGs being multiplied.
+# r8, r9 are the results of the 32x32 giving 64 multiply.
+# r10, r11, r12 are the equivalents of c1, c2, and c3.
+#
+ xor r0,r0,r0 #r0=0. Used in addze below.
+ #mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r10,r6,r7
+ $UMULH r11,r6,r7
+ $ST r10,`0*$BNSZ`(r3) #r[0]=c1
+ #mul_add_c(a[0],b[1],c2,c3,c1);
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r0
+ addze r10,r0
+ #mul_add_c(a[1],b[0],c2,c3,c1);
+ $LD r6, `1*$BNSZ`(r4)
+ $LD r7, `0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r12
+ addze r10,r10
+ $ST r11,`1*$BNSZ`(r3) #r[1]=c2
+ #mul_add_c(a[2],b[0],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r0
+ #mul_add_c(a[1],b[1],c3,c1,c2);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r11
+ #mul_add_c(a[0],b[2],c3,c1,c2);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r11
+ $ST r12,`2*$BNSZ`(r3) #r[2]=c3
+ #mul_add_c(a[0],b[3],c1,c2,c3);
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+ addze r12,r0
+ #mul_add_c(a[1],b[2],c1,c2,c3);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+ addze r12,r12
+ #mul_add_c(a[2],b[1],c1,c2,c3);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+ addze r12,r12
+ #mul_add_c(a[3],b[0],c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+ addze r12,r12
+ $ST r10,`3*$BNSZ`(r3) #r[3]=c1
+ #mul_add_c(a[3],b[1],c2,c3,c1);
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r12
+ addze r10,r0
+ #mul_add_c(a[2],b[2],c2,c3,c1);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r12
+ addze r10,r10
+ #mul_add_c(a[1],b[3],c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r12
+ addze r10,r10
+ $ST r11,`4*$BNSZ`(r3) #r[4]=c2
+ #mul_add_c(a[2],b[3],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r0
+ #mul_add_c(a[3],b[2],c3,c1,c2);
+ $LD r6,`3*$BNSZ`(r4)
+	$LD	r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r11
+ $ST r12,`5*$BNSZ`(r3) #r[5]=c3
+ #mul_add_c(a[3],b[3],c1,c2,c3);
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+
+ $ST r10,`6*$BNSZ`(r3) #r[6]=c1
+ $ST r11,`7*$BNSZ`(r3) #r[7]=c2
+ bclr BO_ALWAYS,CR0_LT
+ .long 0x00000000
+
+#
+# NOTE: The following label name should be changed to
+# "bn_mul_comba8" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_mul_comba8:
+#
+# Optimized version of the bn_mul_comba8 routine.
+#
+# void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+# r3 contains r
+# r4 contains a
+# r5 contains b
+# r6, r7 are the 2 BN_ULONGs being multiplied.
+# r8, r9 are the results of the 32x32 giving 64 multiply.
+# r10, r11, r12 are the equivalents of c1, c2, and c3.
+#
+ xor r0,r0,r0 #r0=0. Used in addze below.
+
+ #mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD r6,`0*$BNSZ`(r4) #a[0]
+ $LD r7,`0*$BNSZ`(r5) #b[0]
+ $UMULL r10,r6,r7
+ $UMULH r11,r6,r7
+ $ST r10,`0*$BNSZ`(r3) #r[0]=c1;
+ #mul_add_c(a[0],b[1],c2,c3,c1);
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+	addze	r12,r9		# since we didn't set r12 to zero before.
+ addze r10,r0
+ #mul_add_c(a[1],b[0],c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`1*$BNSZ`(r3) #r[1]=c2;
+ #mul_add_c(a[2],b[0],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r0
+ #mul_add_c(a[1],b[1],c3,c1,c2);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[0],b[2],c3,c1,c2);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ $ST r12,`2*$BNSZ`(r3) #r[2]=c3;
+ #mul_add_c(a[0],b[3],c1,c2,c3);
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r0
+ #mul_add_c(a[1],b[2],c1,c2,c3);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+
+ #mul_add_c(a[2],b[1],c1,c2,c3);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[3],b[0],c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ $ST r10,`3*$BNSZ`(r3) #r[3]=c1;
+ #mul_add_c(a[4],b[0],c2,c3,c1);
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r0
+ #mul_add_c(a[3],b[1],c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[2],b[2],c2,c3,c1);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[1],b[3],c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[0],b[4],c2,c3,c1);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`4*$BNSZ`(r3) #r[4]=c2;
+ #mul_add_c(a[0],b[5],c3,c1,c2);
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r0
+ #mul_add_c(a[1],b[4],c3,c1,c2);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[2],b[3],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[3],b[2],c3,c1,c2);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[4],b[1],c3,c1,c2);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[5],b[0],c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ $ST r12,`5*$BNSZ`(r3) #r[5]=c3;
+ #mul_add_c(a[6],b[0],c1,c2,c3);
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r0
+ #mul_add_c(a[5],b[1],c1,c2,c3);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[4],b[2],c1,c2,c3);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[3],b[3],c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[2],b[4],c1,c2,c3);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[1],b[5],c1,c2,c3);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[0],b[6],c1,c2,c3);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ $ST r10,`6*$BNSZ`(r3) #r[6]=c1;
+ #mul_add_c(a[0],b[7],c2,c3,c1);
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r0
+ #mul_add_c(a[1],b[6],c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[2],b[5],c2,c3,c1);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[3],b[4],c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[4],b[3],c2,c3,c1);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[5],b[2],c2,c3,c1);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[6],b[1],c2,c3,c1);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[7],b[0],c2,c3,c1);
+ $LD r6,`7*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`7*$BNSZ`(r3) #r[7]=c2;
+ #mul_add_c(a[7],b[1],c3,c1,c2);
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r0
+ #mul_add_c(a[6],b[2],c3,c1,c2);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[5],b[3],c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[4],b[4],c3,c1,c2);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[3],b[5],c3,c1,c2);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[2],b[6],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[1],b[7],c3,c1,c2);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ $ST r12,`8*$BNSZ`(r3) #r[8]=c3;
+ #mul_add_c(a[2],b[7],c1,c2,c3);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r0
+ #mul_add_c(a[3],b[6],c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[4],b[5],c1,c2,c3);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[5],b[4],c1,c2,c3);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[6],b[3],c1,c2,c3);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[7],b[2],c1,c2,c3);
+ $LD r6,`7*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ $ST r10,`9*$BNSZ`(r3) #r[9]=c1;
+ #mul_add_c(a[7],b[3],c2,c3,c1);
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r0
+ #mul_add_c(a[6],b[4],c2,c3,c1);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[5],b[5],c2,c3,c1);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[4],b[6],c2,c3,c1);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[3],b[7],c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`10*$BNSZ`(r3) #r[10]=c2;
+ #mul_add_c(a[4],b[7],c3,c1,c2);
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r0
+ #mul_add_c(a[5],b[6],c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[6],b[5],c3,c1,c2);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[7],b[4],c3,c1,c2);
+ $LD r6,`7*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ $ST r12,`11*$BNSZ`(r3) #r[11]=c3;
+ #mul_add_c(a[7],b[5],c1,c2,c3);
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r0
+ #mul_add_c(a[6],b[6],c1,c2,c3);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[5],b[7],c1,c2,c3);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ $ST r10,`12*$BNSZ`(r3) #r[12]=c1;
+ #mul_add_c(a[6],b[7],c2,c3,c1);
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r0
+ #mul_add_c(a[7],b[6],c2,c3,c1);
+ $LD r6,`7*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`13*$BNSZ`(r3) #r[13]=c2;
+ #mul_add_c(a[7],b[7],c3,c1,c2);
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ $ST r12,`14*$BNSZ`(r3) #r[14]=c3;
+ $ST r10,`15*$BNSZ`(r3) #r[15]=c1;
+ bclr BO_ALWAYS,CR0_LT
+ .long 0x00000000
+
+#
+# NOTE: The following label name should be changed to
+# "bn_sub_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+#
+.align 4
+.bn_sub_words:
+#
+# Handcoded version of bn_sub_words
+#
+#BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+#
+# r3 = r
+# r4 = a
+# r5 = b
+# r6 = n
+#
+# Note: No loop unrolling done since this is not a performance
+# critical loop.
+
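+# In C terms this is roughly (a hedged sketch after bn_asm.c; the
+# carry flag replaces the explicit borrow bookkeeping):
+#
+#	BN_ULONG c = 0;
+#	for (i = 0; i < n; i++) {
+#		BN_ULONG t = a[i];
+#		r[i] = t - b[i] - c;
+#		c = (t < b[i]) || (c && t == b[i]);
+#	}
+#	return c;
+#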
+ xor r0,r0,r0 #set r0 = 0
+#
+# check for r6 = 0 AND set carry bit.
+#
+ subfc. r7,r0,r6 # If r6 is 0 then result is 0.
+ # if r6 > 0 then result !=0
+ # In either case carry bit is set.
+ bc BO_IF,CR0_EQ,Lppcasm_sub_adios
+ addi r4,r4,-$BNSZ
+ addi r3,r3,-$BNSZ
+ addi r5,r5,-$BNSZ
+ mtctr r6
+Lppcasm_sub_mainloop:
+ $LDU r7,$BNSZ(r4)
+ $LDU r8,$BNSZ(r5)
+ subfe r6,r8,r7 # r6 = r7+carry bit + onescomplement(r8)
+ # if carry = 1 this is r7-r8. Else it
+ # is r7-r8 -1 as we need.
+ $STU r6,$BNSZ(r3)
+ bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_sub_mainloop
+Lppcasm_sub_adios:
+ subfze r3,r0 # if carry bit is set then r3 = 0 else -1
+ andi. r3,r3,1 # keep only last bit.
+ bclr BO_ALWAYS,CR0_LT
+ .long 0x00000000
+
+
+#
+# NOTE: The following label name should be changed to
+# "bn_add_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_add_words:
+#
+# Handcoded version of bn_add_words
+#
+#BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+#
+# r3 = r
+# r4 = a
+# r5 = b
+# r6 = n
+#
+# Note: No loop unrolling done since this is not a performance
+# critical loop.
+
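+# In C terms this is roughly (a hedged sketch after bn_asm.c):
+#
+#	BN_ULONG c = 0;
+#	for (i = 0; i < n; i++) {
+#		r[i] = a[i] + b[i] + c;
+#		c = (r[i] < a[i]) || (c && r[i] == a[i]);
+#	}
+#	return c;
+#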
+ xor r0,r0,r0
+#
+# check for r6 = 0. Is this needed?
+#
+ addic. r6,r6,0 #test r6 and clear carry bit.
+ bc BO_IF,CR0_EQ,Lppcasm_add_adios
+ addi r4,r4,-$BNSZ
+ addi r3,r3,-$BNSZ
+ addi r5,r5,-$BNSZ
+ mtctr r6
+Lppcasm_add_mainloop:
+ $LDU r7,$BNSZ(r4)
+ $LDU r8,$BNSZ(r5)
+ adde r8,r7,r8
+ $STU r8,$BNSZ(r3)
+ bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_add_mainloop
+Lppcasm_add_adios:
+ addze r3,r0 #return carry bit.
+ bclr BO_ALWAYS,CR0_LT
+ .long 0x00000000
+
+#
+# NOTE: The following label name should be changed to
+# "bn_div_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_div_words:
+#
+# This is a cleaned-up version of code generated by
+# the AIX compiler. The only optimization is to use
+# the PPC count-leading-zeros instruction instead of
+# a call to num_bits_word. Since this was compiled
+# at only -O2, it could probably be squeezed further.
+#
+# r3 = h
+# r4 = l
+# r5 = d
+
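+# In C terms it returns the one-word quotient of the two-word value
+# (h,l) divided by d (a hedged sketch, assuming h < d; the code itself
+# works in BN_BITS4 half-words):
+#
+#	return (BN_ULONG)((((BN_ULLONG)h << BN_BITS2) | l) / d);
+#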
+ $UCMPI 0,r5,0 # compare r5 and 0
+ bc BO_IF_NOT,CR0_EQ,Lppcasm_div1 # proceed if d!=0
+ li r3,-1 # d=0 return -1
+ bclr BO_ALWAYS,CR0_LT
+Lppcasm_div1:
+ xor r0,r0,r0 #r0=0
+ li r8,$BITS
+ $CNTLZ. r7,r5 #r7 = num leading 0s in d.
+ bc BO_IF,CR0_EQ,Lppcasm_div2 #proceed if no leading zeros
+ subf r8,r7,r8 #r8 = BN_num_bits_word(d)
+ $SHR. r9,r3,r8 #are there any bits above r8'th?
+	$TR	16,r9,r0	#if there are, signal to dump core...
+Lppcasm_div2:
+ $UCMP 0,r3,r5 #h>=d?
+ bc BO_IF,CR0_LT,Lppcasm_div3 #goto Lppcasm_div3 if not
+ subf r3,r5,r3 #h-=d ;
+Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i
+ cmpi 0,0,r7,0 # is (i == 0)?
+ bc BO_IF,CR0_EQ,Lppcasm_div4
+ $SHL r3,r3,r7 # h = (h<< i)
+ $SHR r8,r4,r8 # r8 = (l >> BN_BITS2 -i)
+ $SHL r5,r5,r7 # d<<=i
+ or r3,r3,r8 # h = (h<<i)|(l>>(BN_BITS2-i))
+ $SHL r4,r4,r7 # l <<=i
+Lppcasm_div4:
+ $SHRI r9,r5,`$BITS/2` # r9 = dh
+ # dl will be computed when needed
+ # as it saves registers.
+ li r6,2 #r6=2
+ mtctr r6 #counter will be in count.
+Lppcasm_divouterloop:
+ $SHRI r8,r3,`$BITS/2` #r8 = (h>>BN_BITS4)
+ $SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4
+ # compute here for innerloop.
+ $UCMP 0,r8,r9 # is (h>>BN_BITS4)==dh
+ bc BO_IF_NOT,CR0_EQ,Lppcasm_div5 # goto Lppcasm_div5 if not
+
+ li r8,-1
+ $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l
+ b Lppcasm_div6
+Lppcasm_div5:
+ $UDIV r8,r3,r9 #q = h/dh
+Lppcasm_div6:
+ $UMULL r12,r9,r8 #th = q*dh
+ $CLRU r10,r5,`$BITS/2` #r10=dl
+ $UMULL r6,r8,r10 #tl = q*dl
+
+Lppcasm_divinnerloop:
+ subf r10,r12,r3 #t = h -th
+ $SHRI r7,r10,`$BITS/2` #r7= (t &BN_MASK2H), sort of...
+ addic. r7,r7,0 #test if r7 == 0. used below.
+ # now want to compute
+ # r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4)
+ # the following 2 instructions do that
+ $SHLI r7,r10,`$BITS/2` # r7 = (t<<BN_BITS4)
+ or r7,r7,r11 # r7|=((l&BN_MASK2h)>>BN_BITS4)
+ $UCMP 1,r6,r7 # compare (tl <= r7)
+ bc BO_IF_NOT,CR0_EQ,Lppcasm_divinnerexit
+ bc BO_IF_NOT,CR1_FEX,Lppcasm_divinnerexit
+ addi r8,r8,-1 #q--
+ subf r12,r9,r12 #th -=dh
+ $CLRU r10,r5,`$BITS/2` #r10=dl. t is no longer needed in loop.
+ subf r6,r10,r6 #tl -=dl
+ b Lppcasm_divinnerloop
+Lppcasm_divinnerexit:
+ $SHRI r10,r6,`$BITS/2` #t=(tl>>BN_BITS4)
+ $SHLI r11,r6,`$BITS/2` #tl=(tl<<BN_BITS4)&BN_MASK2h;
+ $UCMP 1,r4,r11 # compare l and tl
+ add r12,r12,r10 # th+=t
+ bc BO_IF_NOT,CR1_FX,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7
+ addi r12,r12,1 # th++
+Lppcasm_div7:
+ subf r11,r11,r4 #r11=l-tl
+ $UCMP 1,r3,r12 #compare h and th
+ bc BO_IF_NOT,CR1_FX,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8
+ addi r8,r8,-1 # q--
+ add r3,r5,r3 # h+=d
+Lppcasm_div8:
+ subf r12,r12,r3 #r12 = h-th
+ $SHLI r4,r11,`$BITS/2` #l=(l&BN_MASK2l)<<BN_BITS4
+ # want to compute
+ # h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
+ # the following 2 instructions will do this.
+ $INSR r11,r12,`$BITS/2`,`$BITS/2` # r11 is the value we want rotated $BITS/2.
+ $ROTL r3,r11,`$BITS/2` # rotate by $BITS/2 and store in r3
+ bc BO_dCTR_ZERO,CR0_EQ,Lppcasm_div9#if (count==0) break ;
+ $SHLI r0,r8,`$BITS/2` #ret =q<<BN_BITS4
+ b Lppcasm_divouterloop
+Lppcasm_div9:
+ or r3,r8,r0
+ bclr BO_ALWAYS,CR0_LT
+ .long 0x00000000
+
+#
+# NOTE: The following label name should be changed to
+# "bn_sqr_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+.align 4
+.bn_sqr_words:
+#
+# Optimized version of bn_sqr_words
+#
+# void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
+#
+# r3 = r
+# r4 = a
+# r5 = n
+#
+# r6 = a[i].
+# r7,r8 = product.
+#
+# No unrolling done here. Not performance critical.
+
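+# In C terms (a hedged sketch after bn_asm.c; each input word yields
+# two result words):
+#
+#	for (i = 0; i < n; i++) {
+#		BN_ULLONG t = (BN_ULLONG)a[i] * a[i];
+#		r[2*i]   = (BN_ULONG)t;
+#		r[2*i+1] = (BN_ULONG)(t >> BN_BITS2);
+#	}
+#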
+ addic. r5,r5,0 #test r5.
+ bc BO_IF,CR0_EQ,Lppcasm_sqr_adios
+ addi r4,r4,-$BNSZ
+ addi r3,r3,-$BNSZ
+ mtctr r5
+Lppcasm_sqr_mainloop:
+ #sqr(r[0],r[1],a[0]);
+ $LDU r6,$BNSZ(r4)
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ $STU r7,$BNSZ(r3)
+ $STU r8,$BNSZ(r3)
+ bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_sqr_mainloop
+Lppcasm_sqr_adios:
+ bclr BO_ALWAYS,CR0_LT
+ .long 0x00000000
+
+
+#
+# NOTE: The following label name should be changed to
+# "bn_mul_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_mul_words:
+#
+# BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+#
+# r3 = rp
+# r4 = ap
+# r5 = num
+# r6 = w
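+#
+# In C terms this is roughly (a hedged sketch after bn_asm.c):
+#
+#	BN_ULONG c = 0;
+#	for (i = 0; i < num; i++) {
+#		BN_ULLONG t = (BN_ULLONG)w * ap[i] + c;
+#		rp[i] = (BN_ULONG)t;
+#		c = (BN_ULONG)(t >> BN_BITS2);
+#	}
+#	return c;
+#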
+ xor r0,r0,r0
+ xor r12,r12,r12 # used for carry
+ rlwinm. r7,r5,30,2,31 # num >> 2
+ bc BO_IF,CR0_EQ,Lppcasm_mw_REM
+ mtctr r7
+Lppcasm_mw_LOOP:
+ #mul(rp[0],ap[0],w,c1);
+ $LD r8,`0*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12
+ #addze r10,r10 #carry is NOT ignored.
+ #will be taken care of
+ #in second spin below
+ #using adde.
+ $ST r9,`0*$BNSZ`(r3)
+ #mul(rp[1],ap[1],w,c1);
+ $LD r8,`1*$BNSZ`(r4)
+ $UMULL r11,r6,r8
+ $UMULH r12,r6,r8
+ adde r11,r11,r10
+ #addze r12,r12
+ $ST r11,`1*$BNSZ`(r3)
+ #mul(rp[2],ap[2],w,c1);
+ $LD r8,`2*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ adde r9,r9,r12
+ #addze r10,r10
+ $ST r9,`2*$BNSZ`(r3)
+ #mul_add(rp[3],ap[3],w,c1);
+ $LD r8,`3*$BNSZ`(r4)
+ $UMULL r11,r6,r8
+ $UMULH r12,r6,r8
+ adde r11,r11,r10
+ addze r12,r12 #this spin we collect carry into
+ #r12
+ $ST r11,`3*$BNSZ`(r3)
+
+ addi r3,r3,`4*$BNSZ`
+ addi r4,r4,`4*$BNSZ`
+ bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_mw_LOOP
+
+Lppcasm_mw_REM:
+ andi. r5,r5,0x3
+ bc BO_IF,CR0_EQ,Lppcasm_mw_OVER
+ #mul(rp[0],ap[0],w,c1);
+ $LD r8,`0*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12
+ addze r10,r10
+ $ST r9,`0*$BNSZ`(r3)
+ addi r12,r10,0
+
+ addi r5,r5,-1
+ cmpli 0,0,r5,0
+ bc BO_IF,CR0_EQ,Lppcasm_mw_OVER
+
+
+ #mul(rp[1],ap[1],w,c1);
+ $LD r8,`1*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12
+ addze r10,r10
+ $ST r9,`1*$BNSZ`(r3)
+ addi r12,r10,0
+
+ addi r5,r5,-1
+ cmpli 0,0,r5,0
+ bc BO_IF,CR0_EQ,Lppcasm_mw_OVER
+
+ #mul_add(rp[2],ap[2],w,c1);
+ $LD r8,`2*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12
+ addze r10,r10
+ $ST r9,`2*$BNSZ`(r3)
+ addi r12,r10,0
+
+Lppcasm_mw_OVER:
+ addi r3,r12,0
+ bclr BO_ALWAYS,CR0_LT
+ .long 0x00000000
+
+#
+# NOTE: The following label name should be changed to
+# "bn_mul_add_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_mul_add_words:
+#
+# BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+#
+# r3 = rp
+# r4 = ap
+# r5 = num
+# r6 = w
+#
+# empirical evidence suggests that the unrolled version performs best!!
+#
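+# In C terms this is roughly (a hedged sketch after bn_asm.c):
+#
+#	BN_ULONG c = 0;
+#	for (i = 0; i < num; i++) {
+#		BN_ULLONG t = (BN_ULLONG)w * ap[i] + rp[i] + c;
+#		rp[i] = (BN_ULONG)t;
+#		c = (BN_ULONG)(t >> BN_BITS2);
+#	}
+#	return c;
+#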
+ xor r0,r0,r0 #r0 = 0
+ xor r12,r12,r12 #r12 = 0 . used for carry
+ rlwinm. r7,r5,30,2,31 # num >> 2
+	bc	BO_IF,CR0_EQ,Lppcasm_maw_leftover # if (num < 4) goto Lppcasm_maw_leftover
+ mtctr r7
+Lppcasm_maw_mainloop:
+ #mul_add(rp[0],ap[0],w,c1);
+ $LD r8,`0*$BNSZ`(r4)
+ $LD r11,`0*$BNSZ`(r3)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12 #r12 is carry.
+ addze r10,r10
+ addc r9,r9,r11
+ #addze r10,r10
+ #the above instruction addze
+ #is NOT needed. Carry will NOT
+ #be ignored. It's not affected
+ #by multiply and will be collected
+ #in the next spin
+ $ST r9,`0*$BNSZ`(r3)
+
+ #mul_add(rp[1],ap[1],w,c1);
+ $LD r8,`1*$BNSZ`(r4)
+ $LD r9,`1*$BNSZ`(r3)
+ $UMULL r11,r6,r8
+ $UMULH r12,r6,r8
+ adde r11,r11,r10 #r10 is carry.
+ addze r12,r12
+ addc r11,r11,r9
+ #addze r12,r12
+ $ST r11,`1*$BNSZ`(r3)
+
+ #mul_add(rp[2],ap[2],w,c1);
+ $LD r8,`2*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $LD r11,`2*$BNSZ`(r3)
+ $UMULH r10,r6,r8
+ adde r9,r9,r12
+ addze r10,r10
+ addc r9,r9,r11
+ #addze r10,r10
+ $ST r9,`2*$BNSZ`(r3)
+
+ #mul_add(rp[3],ap[3],w,c1);
+ $LD r8,`3*$BNSZ`(r4)
+ $UMULL r11,r6,r8
+ $LD r9,`3*$BNSZ`(r3)
+ $UMULH r12,r6,r8
+ adde r11,r11,r10
+ addze r12,r12
+ addc r11,r11,r9
+ addze r12,r12
+ $ST r11,`3*$BNSZ`(r3)
+ addi r3,r3,`4*$BNSZ`
+ addi r4,r4,`4*$BNSZ`
+ bc BO_dCTR_NZERO,CR0_EQ,Lppcasm_maw_mainloop
+
+Lppcasm_maw_leftover:
+ andi. r5,r5,0x3
+ bc BO_IF,CR0_EQ,Lppcasm_maw_adios
+ addi r3,r3,-$BNSZ
+ addi r4,r4,-$BNSZ
+ #mul_add(rp[0],ap[0],w,c1);
+ mtctr r5
+ $LDU r8,$BNSZ(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ $LDU r11,$BNSZ(r3)
+ addc r9,r9,r11
+ addze r10,r10
+ addc r9,r9,r12
+ addze r12,r10
+ $ST r9,0(r3)
+
+ bc BO_dCTR_ZERO,CR0_EQ,Lppcasm_maw_adios
+ #mul_add(rp[1],ap[1],w,c1);
+ $LDU r8,$BNSZ(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ $LDU r11,$BNSZ(r3)
+ addc r9,r9,r11
+ addze r10,r10
+ addc r9,r9,r12
+ addze r12,r10
+ $ST r9,0(r3)
+
+ bc BO_dCTR_ZERO,CR0_EQ,Lppcasm_maw_adios
+ #mul_add(rp[2],ap[2],w,c1);
+ $LDU r8,$BNSZ(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ $LDU r11,$BNSZ(r3)
+ addc r9,r9,r11
+ addze r10,r10
+ addc r9,r9,r12
+ addze r12,r10
+ $ST r9,0(r3)
+
+Lppcasm_maw_adios:
+ addi r3,r12,0
+ bclr BO_ALWAYS,CR0_LT
+ .long 0x00000000
+ .align 4
+EOF
+ $data =~ s/\`([^\`]*)\`/eval $1/gem;
+
+ # if some assembler chokes on some simplified mnemonic,
+ # this is the spot to fix it up, e.g.:
+ # GNU as doesn't seem to accept cmplw, 32-bit unsigned compare
+ $data =~ s/^(\s*)cmplw(\s+)([^,]+),(.*)/$1cmpl$2$3,0,$4/gm;
+ # assembler X doesn't accept li, load immediate value
+ #$data =~ s/^(\s*)li(\s+)([^,]+),(.*)/$1addi$2$3,0,$4/gm;
+ return($data);
+}
diff --git a/crypto/bn/asm/r3000.s b/crypto/bn/asm/r3000.s
new file mode 100644
index 0000000..e95269a
--- /dev/null
+++ b/crypto/bn/asm/r3000.s
@@ -0,0 +1,646 @@
+ .file 1 "../bn_mulw.c"
+ .set nobopt
+ .option pic2
+
+ # GNU C 2.6.3 [AL 1.1, MM 40] SGI running IRIX 5.0 compiled by GNU C
+
+ # Cc1 defaults:
+ # -mabicalls
+
+ # Cc1 arguments (-G value = 0, Cpu = 3000, ISA = 1):
+ # -quiet -dumpbase -O2 -o
+
+gcc2_compiled.:
+__gnu_compiled_c:
+ .rdata
+
+ .byte 0x24,0x52,0x65,0x76,0x69,0x73,0x69,0x6f
+ .byte 0x6e,0x3a,0x20,0x31,0x2e,0x34,0x39,0x20
+ .byte 0x24,0x0
+
+ .byte 0x24,0x52,0x65,0x76,0x69,0x73,0x69,0x6f
+ .byte 0x6e,0x3a,0x20,0x31,0x2e,0x33,0x34,0x20
+ .byte 0x24,0x0
+
+ .byte 0x24,0x52,0x65,0x76,0x69,0x73,0x69,0x6f
+ .byte 0x6e,0x3a,0x20,0x31,0x2e,0x35,0x20,0x24
+ .byte 0x0
+
+ .byte 0x24,0x52,0x65,0x76,0x69,0x73,0x69,0x6f
+ .byte 0x6e,0x3a,0x20,0x31,0x2e,0x38,0x20,0x24
+ .byte 0x0
+
+ .byte 0x24,0x52,0x65,0x76,0x69,0x73,0x69,0x6f
+ .byte 0x6e,0x3a,0x20,0x31,0x2e,0x32,0x33,0x20
+ .byte 0x24,0x0
+
+ .byte 0x24,0x52,0x65,0x76,0x69,0x73,0x69,0x6f
+ .byte 0x6e,0x3a,0x20,0x31,0x2e,0x37,0x38,0x20
+ .byte 0x24,0x0
+
+ .byte 0x24,0x52,0x65,0x76,0x69,0x73,0x69,0x6f
+ .byte 0x6e,0x3a,0x20,0x33,0x2e,0x37,0x30,0x20
+ .byte 0x24,0x0
+
+ .byte 0x24,0x52,0x65,0x76,0x69,0x73,0x69,0x6f
+ .byte 0x6e,0x3a,0x20,0x31,0x2e,0x32,0x20,0x24
+ .byte 0x0
+
+ .byte 0x24,0x52,0x65,0x76,0x69,0x73,0x69,0x6f
+ .byte 0x6e,0x3a,0x20,0x31,0x2e,0x34,0x20,0x24
+ .byte 0x0
+
+ .byte 0x24,0x52,0x65,0x76,0x69,0x73,0x69,0x6f
+ .byte 0x6e,0x3a,0x20,0x31,0x2e,0x38,0x20,0x24
+ .byte 0x0
+ .text
+ .align 2
+ .globl bn_mul_add_words
+ .ent bn_mul_add_words
+bn_mul_add_words:
+ .frame $sp,0,$31 # vars= 0, regs= 0/0, args= 0, extra= 0
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+ .set noreorder
+ .cpload $25
+ .set reorder
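+	# Note: within the ".set noreorder"/".set nomacro" regions below,
+	# the instruction immediately following a branch or jump executes
+	# in the branch delay slot; gcc has scheduled the store of each
+	# unrolled step into that slot.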
+ move $12,$4
+ move $14,$5
+ move $9,$6
+ move $13,$7
+ move $8,$0
+ addu $10,$12,12
+ addu $11,$14,12
+$L2:
+ lw $6,0($14)
+ #nop
+ multu $13,$6
+ mfhi $6
+ mflo $7
+ #nop
+ move $5,$8
+ move $4,$0
+ lw $3,0($12)
+ addu $9,$9,-1
+ move $2,$0
+ addu $7,$7,$3
+ sltu $8,$7,$3
+ addu $6,$6,$2
+ addu $6,$6,$8
+ addu $7,$7,$5
+ sltu $2,$7,$5
+ addu $6,$6,$4
+ addu $6,$6,$2
+ srl $3,$6,0
+ move $2,$0
+ move $8,$3
+ .set noreorder
+ .set nomacro
+ beq $9,$0,$L3
+ sw $7,0($12)
+ .set macro
+ .set reorder
+
+ lw $6,-8($11)
+ #nop
+ multu $13,$6
+ mfhi $6
+ mflo $7
+ #nop
+ move $5,$8
+ move $4,$0
+ lw $3,-8($10)
+ addu $9,$9,-1
+ move $2,$0
+ addu $7,$7,$3
+ sltu $8,$7,$3
+ addu $6,$6,$2
+ addu $6,$6,$8
+ addu $7,$7,$5
+ sltu $2,$7,$5
+ addu $6,$6,$4
+ addu $6,$6,$2
+ srl $3,$6,0
+ move $2,$0
+ move $8,$3
+ .set noreorder
+ .set nomacro
+ beq $9,$0,$L3
+ sw $7,-8($10)
+ .set macro
+ .set reorder
+
+ lw $6,-4($11)
+ #nop
+ multu $13,$6
+ mfhi $6
+ mflo $7
+ #nop
+ move $5,$8
+ move $4,$0
+ lw $3,-4($10)
+ addu $9,$9,-1
+ move $2,$0
+ addu $7,$7,$3
+ sltu $8,$7,$3
+ addu $6,$6,$2
+ addu $6,$6,$8
+ addu $7,$7,$5
+ sltu $2,$7,$5
+ addu $6,$6,$4
+ addu $6,$6,$2
+ srl $3,$6,0
+ move $2,$0
+ move $8,$3
+ .set noreorder
+ .set nomacro
+ beq $9,$0,$L3
+ sw $7,-4($10)
+ .set macro
+ .set reorder
+
+ lw $6,0($11)
+ #nop
+ multu $13,$6
+ mfhi $6
+ mflo $7
+ #nop
+ move $5,$8
+ move $4,$0
+ lw $3,0($10)
+ addu $9,$9,-1
+ move $2,$0
+ addu $7,$7,$3
+ sltu $8,$7,$3
+ addu $6,$6,$2
+ addu $6,$6,$8
+ addu $7,$7,$5
+ sltu $2,$7,$5
+ addu $6,$6,$4
+ addu $6,$6,$2
+ srl $3,$6,0
+ move $2,$0
+ move $8,$3
+ .set noreorder
+ .set nomacro
+ beq $9,$0,$L3
+ sw $7,0($10)
+ .set macro
+ .set reorder
+
+ addu $11,$11,16
+ addu $14,$14,16
+ addu $10,$10,16
+ .set noreorder
+ .set nomacro
+ j $L2
+ addu $12,$12,16
+ .set macro
+ .set reorder
+
+$L3:
+ .set noreorder
+ .set nomacro
+ j $31
+ move $2,$8
+ .set macro
+ .set reorder
+
+ .end bn_mul_add_words
+ .align 2
+ .globl bn_mul_words
+ .ent bn_mul_words
+bn_mul_words:
+ .frame $sp,0,$31 # vars= 0, regs= 0/0, args= 0, extra= 0
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+ .set noreorder
+ .cpload $25
+ .set reorder
+ move $11,$4
+ move $12,$5
+ move $8,$6
+ move $6,$0
+ addu $10,$11,12
+ addu $9,$12,12
+$L10:
+ lw $4,0($12)
+ #nop
+ multu $7,$4
+ mfhi $4
+ mflo $5
+ #nop
+ move $3,$6
+ move $2,$0
+ addu $8,$8,-1
+ addu $5,$5,$3
+ sltu $6,$5,$3
+ addu $4,$4,$2
+ addu $4,$4,$6
+ srl $3,$4,0
+ move $2,$0
+ move $6,$3
+ .set noreorder
+ .set nomacro
+ beq $8,$0,$L11
+ sw $5,0($11)
+ .set macro
+ .set reorder
+
+ lw $4,-8($9)
+ #nop
+ multu $7,$4
+ mfhi $4
+ mflo $5
+ #nop
+ move $3,$6
+ move $2,$0
+ addu $8,$8,-1
+ addu $5,$5,$3
+ sltu $6,$5,$3
+ addu $4,$4,$2
+ addu $4,$4,$6
+ srl $3,$4,0
+ move $2,$0
+ move $6,$3
+ .set noreorder
+ .set nomacro
+ beq $8,$0,$L11
+ sw $5,-8($10)
+ .set macro
+ .set reorder
+
+ lw $4,-4($9)
+ #nop
+ multu $7,$4
+ mfhi $4
+ mflo $5
+ #nop
+ move $3,$6
+ move $2,$0
+ addu $8,$8,-1
+ addu $5,$5,$3
+ sltu $6,$5,$3
+ addu $4,$4,$2
+ addu $4,$4,$6
+ srl $3,$4,0
+ move $2,$0
+ move $6,$3
+ .set noreorder
+ .set nomacro
+ beq $8,$0,$L11
+ sw $5,-4($10)
+ .set macro
+ .set reorder
+
+ lw $4,0($9)
+ #nop
+ multu $7,$4
+ mfhi $4
+ mflo $5
+ #nop
+ move $3,$6
+ move $2,$0
+ addu $8,$8,-1
+ addu $5,$5,$3
+ sltu $6,$5,$3
+ addu $4,$4,$2
+ addu $4,$4,$6
+ srl $3,$4,0
+ move $2,$0
+ move $6,$3
+ .set noreorder
+ .set nomacro
+ beq $8,$0,$L11
+ sw $5,0($10)
+ .set macro
+ .set reorder
+
+ addu $9,$9,16
+ addu $12,$12,16
+ addu $10,$10,16
+ .set noreorder
+ .set nomacro
+ j $L10
+ addu $11,$11,16
+ .set macro
+ .set reorder
+
+$L11:
+ .set noreorder
+ .set nomacro
+ j $31
+ move $2,$6
+ .set macro
+ .set reorder
+
+ .end bn_mul_words
+ .align 2
+ .globl bn_sqr_words
+ .ent bn_sqr_words
+bn_sqr_words:
+ .frame $sp,0,$31 # vars= 0, regs= 0/0, args= 0, extra= 0
+ .mask 0x00000000,0
+ .fmask 0x00000000,0
+ .set noreorder
+ .cpload $25
+ .set reorder
+ move $9,$4
+ addu $7,$9,28
+ addu $8,$5,12
+$L18:
+ lw $2,0($5)
+ #nop
+ multu $2,$2
+ mfhi $2
+ mflo $3
+ #nop
+ addu $6,$6,-1
+ sw $3,0($9)
+ srl $3,$2,0
+ move $2,$0
+ .set noreorder
+ .set nomacro
+ beq $6,$0,$L19
+ sw $3,-24($7)
+ .set macro
+ .set reorder
+
+ lw $2,-8($8)
+ #nop
+ multu $2,$2
+ mfhi $2
+ mflo $3
+ #nop
+ addu $6,$6,-1
+ sw $3,-20($7)
+ srl $3,$2,0
+ move $2,$0
+ .set noreorder
+ .set nomacro
+ beq $6,$0,$L19
+ sw $3,-16($7)
+ .set macro
+ .set reorder
+
+ lw $2,-4($8)
+ #nop
+ multu $2,$2
+ mfhi $2
+ mflo $3
+ #nop
+ addu $6,$6,-1
+ sw $3,-12($7)
+ srl $3,$2,0
+ move $2,$0
+ .set noreorder
+ .set nomacro
+ beq $6,$0,$L19
+ sw $3,-8($7)
+ .set macro
+ .set reorder
+
+ lw $2,0($8)
+ #nop
+ multu $2,$2
+ mfhi $2
+ mflo $3
+ #nop
+ addu $6,$6,-1
+ sw $3,-4($7)
+ srl $3,$2,0
+ move $2,$0
+ .set noreorder
+ .set nomacro
+ beq $6,$0,$L19
+ sw $3,0($7)
+ .set macro
+ .set reorder
+
+ addu $8,$8,16
+ addu $5,$5,16
+ addu $7,$7,32
+ .set noreorder
+ .set nomacro
+ j $L18
+ addu $9,$9,32
+ .set macro
+ .set reorder
+
+$L19:
+ j $31
+ .end bn_sqr_words
+ .rdata
+ .align 2
+$LC0:
+
+ .byte 0x44,0x69,0x76,0x69,0x73,0x69,0x6f,0x6e
+ .byte 0x20,0x77,0x6f,0x75,0x6c,0x64,0x20,0x6f
+ .byte 0x76,0x65,0x72,0x66,0x6c,0x6f,0x77,0xa
+ .byte 0x0
+ .text
+ .align 2
+ .globl bn_div64
+ .ent bn_div64
+bn_div64:
+ .frame $sp,56,$31 # vars= 0, regs= 7/0, args= 16, extra= 8
+ .mask 0x901f0000,-8
+ .fmask 0x00000000,0
+ .set noreorder
+ .cpload $25
+ .set reorder
+ subu $sp,$sp,56
+ .cprestore 16
+ sw $16,24($sp)
+ move $16,$4
+ sw $17,28($sp)
+ move $17,$5
+ sw $18,32($sp)
+ move $18,$6
+ sw $20,40($sp)
+ move $20,$0
+ sw $19,36($sp)
+ li $19,0x00000002 # 2
+ sw $31,48($sp)
+ .set noreorder
+ .set nomacro
+ bne $18,$0,$L26
+ sw $28,44($sp)
+ .set macro
+ .set reorder
+
+ .set noreorder
+ .set nomacro
+ j $L43
+ li $2,-1 # 0xffffffff
+ .set macro
+ .set reorder
+
+$L26:
+ move $4,$18
+ jal BN_num_bits_word
+ move $4,$2
+ li $2,0x00000020 # 32
+ .set noreorder
+ .set nomacro
+ beq $4,$2,$L27
+ li $2,0x00000001 # 1
+ .set macro
+ .set reorder
+
+ sll $2,$2,$4
+ sltu $2,$2,$16
+ .set noreorder
+ .set nomacro
+ beq $2,$0,$L44
+ li $5,0x00000020 # 32
+ .set macro
+ .set reorder
+
+ la $4,__iob+32
+ la $5,$LC0
+ jal fprintf
+ jal abort
+$L27:
+ li $5,0x00000020 # 32
+$L44:
+ sltu $2,$16,$18
+ .set noreorder
+ .set nomacro
+ bne $2,$0,$L28
+ subu $4,$5,$4
+ .set macro
+ .set reorder
+
+ subu $16,$16,$18
+$L28:
+ .set noreorder
+ .set nomacro
+ beq $4,$0,$L29
+ li $10,-65536 # 0xffff0000
+ .set macro
+ .set reorder
+
+ sll $18,$18,$4
+ sll $3,$16,$4
+ subu $2,$5,$4
+ srl $2,$17,$2
+ or $16,$3,$2
+ sll $17,$17,$4
+$L29:
+ srl $7,$18,16
+ andi $9,$18,0xffff
+$L30:
+ srl $2,$16,16
+ .set noreorder
+ .set nomacro
+ beq $2,$7,$L34
+ li $6,0x0000ffff # 65535
+ .set macro
+ .set reorder
+
+ divu $6,$16,$7
+$L34:
+ mult $6,$9
+ mflo $5
+ #nop
+ #nop
+ mult $6,$7
+ and $2,$17,$10
+ srl $8,$2,16
+ mflo $4
+$L35:
+ subu $3,$16,$4
+ and $2,$3,$10
+ .set noreorder
+ .set nomacro
+ bne $2,$0,$L36
+ sll $2,$3,16
+ .set macro
+ .set reorder
+
+ addu $2,$2,$8
+ sltu $2,$2,$5
+ .set noreorder
+ .set nomacro
+ beq $2,$0,$L36
+ subu $5,$5,$9
+ .set macro
+ .set reorder
+
+ subu $4,$4,$7
+ .set noreorder
+ .set nomacro
+ j $L35
+ addu $6,$6,-1
+ .set macro
+ .set reorder
+
+$L36:
+ mult $6,$7
+ mflo $5
+ #nop
+ #nop
+ mult $6,$9
+ mflo $4
+ #nop
+ #nop
+ srl $3,$4,16
+ sll $2,$4,16
+ and $4,$2,$10
+ sltu $2,$17,$4
+ .set noreorder
+ .set nomacro
+ beq $2,$0,$L40
+ addu $5,$5,$3
+ .set macro
+ .set reorder
+
+ addu $5,$5,1
+$L40:
+ sltu $2,$16,$5
+ .set noreorder
+ .set nomacro
+ beq $2,$0,$L41
+ subu $17,$17,$4
+ .set macro
+ .set reorder
+
+ addu $16,$16,$18
+ addu $6,$6,-1
+$L41:
+ addu $19,$19,-1
+ .set noreorder
+ .set nomacro
+ beq $19,$0,$L31
+ subu $16,$16,$5
+ .set macro
+ .set reorder
+
+ sll $20,$6,16
+ sll $3,$16,16
+ srl $2,$17,16
+ or $16,$3,$2
+ .set noreorder
+ .set nomacro
+ j $L30
+ sll $17,$17,16
+ .set macro
+ .set reorder
+
+$L31:
+ or $2,$20,$6
+$L43:
+ lw $31,48($sp)
+ lw $20,40($sp)
+ lw $19,36($sp)
+ lw $18,32($sp)
+ lw $17,28($sp)
+ lw $16,24($sp)
+ addu $sp,$sp,56
+ j $31
+ .end bn_div64
+
+ .globl abort .text
+ .globl fprintf .text
+ .globl BN_num_bits_word .text
diff --git a/crypto/bn/asm/sparcv8.S b/crypto/bn/asm/sparcv8.S
new file mode 100644
index 0000000..88c5dc4
--- /dev/null
+++ b/crypto/bn/asm/sparcv8.S
@@ -0,0 +1,1458 @@
+.ident "sparcv8.s, Version 1.4"
+.ident "SPARC v8 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+/*
+ * ====================================================================
+ * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+ * project.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
+ * ====================================================================
+ */
+
+/*
+ * This is my modest contribution to the OpenSSL project (see
+ * http://www.openssl.org/ for more information about it) and is
+ * a drop-in SuperSPARC ISA replacement for crypto/bn/bn_asm.c
+ * module. For updates see http://fy.chalmers.se/~appro/hpe/.
+ *
+ * See bn_asm.sparc.v8plus.S for more details.
+ */
+
+/*
+ * Revision history.
+ *
+ * 1.1 - new loop unrolling model(*);
+ * 1.2 - made gas friendly;
+ * 1.3 - fixed problem with /usr/ccs/lib/cpp;
+ * 1.4 - some retunes;
+ *
+ * (*) see bn_asm.sparc.v8plus.S for details
+ */
+
+.section ".text",#alloc,#execinstr
+.file "bn_asm.sparc.v8.S"
+
+.align 32
+
+.global bn_mul_add_words
+/*
+ * BN_ULONG bn_mul_add_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_add_words:
+ cmp %o2,0
+ bg,a .L_bn_mul_add_words_proceed
+ ld [%o1],%g2
+ retl
+ clr %o0
+
+.L_bn_mul_add_words_proceed:
+ andcc %o2,-4,%g0
+ bz .L_bn_mul_add_words_tail
+ clr %o5
+
+.L_bn_mul_add_words_loop:
+ ld [%o0],%o4
+ ld [%o1+4],%g3
+ umul %o3,%g2,%g2
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ st %o4,[%o0]
+ addx %g1,0,%o5
+
+ ld [%o0+4],%o4
+ ld [%o1+8],%g2
+ umul %o3,%g3,%g3
+ dec 4,%o2
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g3,%o4
+ st %o4,[%o0+4]
+ addx %g1,0,%o5
+
+ ld [%o0+8],%o4
+ ld [%o1+12],%g3
+ umul %o3,%g2,%g2
+ inc 16,%o1
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ st %o4,[%o0+8]
+ addx %g1,0,%o5
+
+ ld [%o0+12],%o4
+ umul %o3,%g3,%g3
+ inc 16,%o0
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g3,%o4
+ st %o4,[%o0-4]
+ addx %g1,0,%o5
+ andcc %o2,-4,%g0
+ bnz,a .L_bn_mul_add_words_loop
+ ld [%o1],%g2
+
+ tst %o2
+ bnz,a .L_bn_mul_add_words_tail
+ ld [%o1],%g2
+.L_bn_mul_add_words_return:
+ retl
+ mov %o5,%o0
+ nop
+
+.L_bn_mul_add_words_tail:
+ ld [%o0],%o4
+ umul %o3,%g2,%g2
+ addcc %o4,%o5,%o4
+ rd %y,%g1
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ addx %g1,0,%o5
+ deccc %o2
+ bz .L_bn_mul_add_words_return
+ st %o4,[%o0]
+
+ ld [%o1+4],%g2
+ ld [%o0+4],%o4
+ umul %o3,%g2,%g2
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ addx %g1,0,%o5
+ deccc %o2
+ bz .L_bn_mul_add_words_return
+ st %o4,[%o0+4]
+
+ ld [%o1+8],%g2
+ ld [%o0+8],%o4
+ umul %o3,%g2,%g2
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ st %o4,[%o0+8]
+ retl
+ addx %g1,0,%o0
+
+.type bn_mul_add_words,#function
+.size bn_mul_add_words,(.-bn_mul_add_words)
+
+.align 32
+
+.global bn_mul_words
+/*
+ * BN_ULONG bn_mul_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_words:
+ cmp %o2,0
+	bg,a	.L_bn_mul_words_proceed
+ ld [%o1],%g2
+ retl
+ clr %o0
+
+.L_bn_mul_words_proceed:
+ andcc %o2,-4,%g0
+ bz .L_bn_mul_words_tail
+ clr %o5
+
+.L_bn_mul_words_loop:
+ ld [%o1+4],%g3
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ addx %g1,0,%o5
+ st %g2,[%o0]
+
+ ld [%o1+8],%g2
+ umul %o3,%g3,%g3
+ addcc %g3,%o5,%g3
+ rd %y,%g1
+ dec 4,%o2
+ addx %g1,0,%o5
+ st %g3,[%o0+4]
+
+ ld [%o1+12],%g3
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ inc 16,%o1
+ st %g2,[%o0+8]
+ addx %g1,0,%o5
+
+ umul %o3,%g3,%g3
+ addcc %g3,%o5,%g3
+ rd %y,%g1
+ inc 16,%o0
+ addx %g1,0,%o5
+ st %g3,[%o0-4]
+ andcc %o2,-4,%g0
+ nop
+ bnz,a .L_bn_mul_words_loop
+ ld [%o1],%g2
+
+ tst %o2
+ bnz,a .L_bn_mul_words_tail
+ ld [%o1],%g2
+.L_bn_mul_words_return:
+ retl
+ mov %o5,%o0
+ nop
+
+.L_bn_mul_words_tail:
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ addx %g1,0,%o5
+ deccc %o2
+ bz .L_bn_mul_words_return
+ st %g2,[%o0]
+ nop
+
+ ld [%o1+4],%g2
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ addx %g1,0,%o5
+ deccc %o2
+ bz .L_bn_mul_words_return
+ st %g2,[%o0+4]
+
+ ld [%o1+8],%g2
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ st %g2,[%o0+8]
+ retl
+ addx %g1,0,%o0
+
+.type bn_mul_words,#function
+.size bn_mul_words,(.-bn_mul_words)
+
+.align 32
+.global bn_sqr_words
+/*
+ * void bn_sqr_words(r,a,n)
+ * BN_ULONG *r,*a;
+ * int n;
+ */
+bn_sqr_words:
+ cmp %o2,0
+	bg,a	.L_bn_sqr_words_proceed
+ ld [%o1],%g2
+ retl
+ clr %o0
+
+.L_bn_sqr_words_proceed:
+ andcc %o2,-4,%g0
+ bz .L_bn_sqr_words_tail
+ clr %o5
+
+.L_bn_sqr_words_loop:
+ ld [%o1+4],%g3
+ umul %g2,%g2,%o4
+ st %o4,[%o0]
+ rd %y,%o5
+ st %o5,[%o0+4]
+
+ ld [%o1+8],%g2
+ umul %g3,%g3,%o4
+ dec 4,%o2
+ st %o4,[%o0+8]
+ rd %y,%o5
+ st %o5,[%o0+12]
+ nop
+
+ ld [%o1+12],%g3
+ umul %g2,%g2,%o4
+ st %o4,[%o0+16]
+ rd %y,%o5
+ inc 16,%o1
+ st %o5,[%o0+20]
+
+ umul %g3,%g3,%o4
+ inc 32,%o0
+ st %o4,[%o0-8]
+ rd %y,%o5
+ st %o5,[%o0-4]
+ andcc %o2,-4,%g2
+ bnz,a .L_bn_sqr_words_loop
+ ld [%o1],%g2
+
+ tst %o2
+ nop
+ bnz,a .L_bn_sqr_words_tail
+ ld [%o1],%g2
+.L_bn_sqr_words_return:
+ retl
+ clr %o0
+
+.L_bn_sqr_words_tail:
+ umul %g2,%g2,%o4
+ st %o4,[%o0]
+ deccc %o2
+ rd %y,%o5
+ bz .L_bn_sqr_words_return
+ st %o5,[%o0+4]
+
+ ld [%o1+4],%g2
+ umul %g2,%g2,%o4
+ st %o4,[%o0+8]
+ deccc %o2
+ rd %y,%o5
+ nop
+ bz .L_bn_sqr_words_return
+ st %o5,[%o0+12]
+
+ ld [%o1+8],%g2
+ umul %g2,%g2,%o4
+ st %o4,[%o0+16]
+ rd %y,%o5
+ st %o5,[%o0+20]
+ retl
+ clr %o0
+
+.type bn_sqr_words,#function
+.size bn_sqr_words,(.-bn_sqr_words)
+
+.align 32
+
+.global bn_div_words
+/*
+ * BN_ULONG bn_div_words(h,l,d)
+ * BN_ULONG h,l,d;
+ */
+bn_div_words:
+ wr %o0,%y
+ udiv %o1,%o2,%o0
+ retl
+ nop
+
+.type bn_div_words,#function
+.size bn_div_words,(.-bn_div_words)
+
+.align 32
+
+.global bn_add_words
+/*
+ * BN_ULONG bn_add_words(rp,ap,bp,n)
+ * BN_ULONG *rp,*ap,*bp;
+ * int n;
+ */
+bn_add_words:
+ cmp %o3,0
+ bg,a .L_bn_add_words_proceed
+ ld [%o1],%o4
+ retl
+ clr %o0
+
+.L_bn_add_words_proceed:
+ andcc %o3,-4,%g0
+ bz .L_bn_add_words_tail
+ clr %g1
+	ba	.L_bn_add_words_warm_loop
+ addcc %g0,0,%g0 ! clear carry flag
+
+.L_bn_add_words_loop:
+ ld [%o1],%o4
+.L_bn_add_words_warm_loop:
+ ld [%o2],%o5
+ ld [%o1+4],%g3
+ ld [%o2+4],%g4
+ dec 4,%o3
+ addxcc %o5,%o4,%o5
+ st %o5,[%o0]
+
+ ld [%o1+8],%o4
+ ld [%o2+8],%o5
+ inc 16,%o1
+ addxcc %g3,%g4,%g3
+ st %g3,[%o0+4]
+
+ ld [%o1-4],%g3
+ ld [%o2+12],%g4
+ inc 16,%o2
+ addxcc %o5,%o4,%o5
+ st %o5,[%o0+8]
+
+ inc 16,%o0
+ addxcc %g3,%g4,%g3
+ st %g3,[%o0-4]
+ addx %g0,0,%g1
+ andcc %o3,-4,%g0
+ bnz,a .L_bn_add_words_loop
+ addcc %g1,-1,%g0
+
+ tst %o3
+ bnz,a .L_bn_add_words_tail
+ ld [%o1],%o4
+.L_bn_add_words_return:
+ retl
+ mov %g1,%o0
+
+.L_bn_add_words_tail:
+ addcc %g1,-1,%g0
+ ld [%o2],%o5
+ addxcc %o5,%o4,%o5
+ addx %g0,0,%g1
+ deccc %o3
+ bz .L_bn_add_words_return
+ st %o5,[%o0]
+
+ ld [%o1+4],%o4
+ addcc %g1,-1,%g0
+ ld [%o2+4],%o5
+ addxcc %o5,%o4,%o5
+ addx %g0,0,%g1
+ deccc %o3
+ bz .L_bn_add_words_return
+ st %o5,[%o0+4]
+
+ ld [%o1+8],%o4
+ addcc %g1,-1,%g0
+ ld [%o2+8],%o5
+ addxcc %o5,%o4,%o5
+ st %o5,[%o0+8]
+ retl
+ addx %g0,0,%o0
+
+.type bn_add_words,#function
+.size bn_add_words,(.-bn_add_words)
+
+.align 32
+
+.global bn_sub_words
+/*
+ * BN_ULONG bn_sub_words(rp,ap,bp,n)
+ * BN_ULONG *rp,*ap,*bp;
+ * int n;
+ */
+bn_sub_words:
+ cmp %o3,0
+ bg,a .L_bn_sub_words_proceed
+ ld [%o1],%o4
+ retl
+ clr %o0
+
+.L_bn_sub_words_proceed:
+ andcc %o3,-4,%g0
+ bz .L_bn_sub_words_tail
+ clr %g1
+ ba .L_bn_sub_words_warm_loop
+ addcc %g0,0,%g0 ! clear carry flag
+
+.L_bn_sub_words_loop:
+ ld [%o1],%o4
+.L_bn_sub_words_warm_loop:
+ ld [%o2],%o5
+ ld [%o1+4],%g3
+ ld [%o2+4],%g4
+ dec 4,%o3
+ subxcc %o4,%o5,%o5
+ st %o5,[%o0]
+
+ ld [%o1+8],%o4
+ ld [%o2+8],%o5
+ inc 16,%o1
+ subxcc %g3,%g4,%g4
+ st %g4,[%o0+4]
+
+ ld [%o1-4],%g3
+ ld [%o2+12],%g4
+ inc 16,%o2
+ subxcc %o4,%o5,%o5
+ st %o5,[%o0+8]
+
+ inc 16,%o0
+ subxcc %g3,%g4,%g4
+ st %g4,[%o0-4]
+ addx %g0,0,%g1
+ andcc %o3,-4,%g0
+ bnz,a .L_bn_sub_words_loop
+ addcc %g1,-1,%g0
+
+ tst %o3
+ nop
+ bnz,a .L_bn_sub_words_tail
+ ld [%o1],%o4
+.L_bn_sub_words_return:
+ retl
+ mov %g1,%o0
+
+.L_bn_sub_words_tail:
+ addcc %g1,-1,%g0
+ ld [%o2],%o5
+ subxcc %o4,%o5,%o5
+ addx %g0,0,%g1
+ deccc %o3
+ bz .L_bn_sub_words_return
+ st %o5,[%o0]
+ nop
+
+ ld [%o1+4],%o4
+ addcc %g1,-1,%g0
+ ld [%o2+4],%o5
+ subxcc %o4,%o5,%o5
+ addx %g0,0,%g1
+ deccc %o3
+ bz .L_bn_sub_words_return
+ st %o5,[%o0+4]
+
+ ld [%o1+8],%o4
+ addcc %g1,-1,%g0
+ ld [%o2+8],%o5
+ subxcc %o4,%o5,%o5
+ st %o5,[%o0+8]
+ retl
+ addx %g0,0,%o0
+
+.type bn_sub_words,#function
+.size bn_sub_words,(.-bn_sub_words)
+
+#define FRAME_SIZE -96
+
+/*
+ * Here is register usage map for *all* routines below.
+ */
+#define t_1 %o0
+#define t_2 %o1
+#define c_1 %o2
+#define c_2 %o3
+#define c_3 %o4
+
+#define ap(I) [%i1+4*I]
+#define bp(I) [%i2+4*I]
+#define rp(I) [%i0+4*I]
+
+#define a_0 %l0
+#define a_1 %l1
+#define a_2 %l2
+#define a_3 %l3
+#define a_4 %l4
+#define a_5 %l5
+#define a_6 %l6
+#define a_7 %l7
+
+#define b_0 %i3
+#define b_1 %i4
+#define b_2 %i5
+#define b_3 %o5
+#define b_4 %g1
+#define b_5 %g2
+#define b_6 %g3
+#define b_7 %g4
+
+.align 32
+.global bn_mul_comba8
+/*
+ * void bn_mul_comba8(r,a,b)
+ * BN_ULONG *r,*a,*b;
+ */
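+/*
+ * The "!=" markers in the comments below appear to be instruction-
+ * grouping annotations (expected issue-group boundaries on the
+ * SuperSPARC pipeline); they are comments only and do not affect
+ * the assembled code.
+ */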
+bn_mul_comba8:
+ save %sp,FRAME_SIZE,%sp
+ ld ap(0),a_0
+ ld bp(0),b_0
+ umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3);
+ ld bp(1),b_1
+ rd %y,c_2
+ st c_1,rp(0) !r[0]=c1;
+
+ umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1);
+ ld ap(1),a_1
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc %g0,t_2,c_3 !=
+ addx %g0,%g0,c_1
+ ld ap(2),a_2
+ umul a_1,b_0,t_1 !mul_add_c(a[1],b[0],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ st c_2,rp(1) !r[1]=c2;
+ addx c_1,%g0,c_1 !=
+
+ umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx %g0,%g0,c_2
+ ld bp(2),b_2
+ umul a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ ld bp(3),b_3
+ addx c_2,%g0,c_2 !=
+ umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ st c_3,rp(2) !r[2]=c3;
+
+ umul a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3
+ umul a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ ld ap(3),a_3
+ umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ ld ap(4),a_4
+ umul a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!=
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(3) !r[3]=c1;
+
+ umul a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ ld bp(4),b_4
+ umul a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ ld bp(5),b_5
+ umul a_0,b_4,t_1 !=!mul_add_c(a[0],b[4],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ st c_2,rp(4) !r[4]=c2;
+
+ umul a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ umul a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_2,b_3,t_1 !=!mul_add_c(a[2],b[3],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ ld ap(5),a_5
+ umul a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ ld ap(6),a_6
+ addx c_2,%g0,c_2 !=
+ umul a_5,b_0,t_1 !mul_add_c(a[5],b[0],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ st c_3,rp(5) !r[5]=c3;
+
+ umul a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3
+ umul a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_4,b_2,t_1 !mul_add_c(a[4],b[2],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_2,b_4,t_1 !mul_add_c(a[2],b[4],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ ld bp(6),b_6
+ addx c_3,%g0,c_3 !=
+ umul a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ ld bp(7),b_7
+ umul a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ st c_1,rp(6) !r[6]=c1;
+ addx c_3,%g0,c_3 !=
+
+ umul a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx %g0,%g0,c_1
+ umul a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_3,b_4,t_1 !=!mul_add_c(a[3],b[4],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ umul a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ ld ap(7),a_7
+ umul a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ umul a_7,b_0,t_1 !mul_add_c(a[7],b[0],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ st c_2,rp(7) !r[7]=c2;
+
+ umul a_7,b_1,t_1 !mul_add_c(a[7],b[1],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ umul a_6,b_2,t_1 !=!mul_add_c(a[6],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ umul a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ umul a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_2,b_6,t_1 !=!mul_add_c(a[2],b[6],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ umul a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !
+ addx c_2,%g0,c_2
+ st c_3,rp(8) !r[8]=c3;
+
+ umul a_2,b_7,t_1 !mul_add_c(a[2],b[7],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3
+ umul a_3,b_6,t_1 !=!mul_add_c(a[3],b[6],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ umul a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_7,b_2,t_1 !=!mul_add_c(a[7],b[2],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(9) !r[9]=c1;
+
+ umul a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ umul a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_5,b_5,t_1 !=!mul_add_c(a[5],b[5],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ umul a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ st c_2,rp(10) !r[10]=c2;
+
+ umul a_4,b_7,t_1 !=!mul_add_c(a[4],b[7],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2 !=
+ umul a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ umul a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ st c_3,rp(11) !r[11]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx %g0,%g0,c_3
+ umul a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ st c_1,rp(12) !r[12]=c1;
+ addx c_3,%g0,c_3 !=
+
+ umul a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx %g0,%g0,c_1
+ umul a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ st c_2,rp(13) !r[13]=c2;
+
+ umul a_7,b_7,t_1 !=!mul_add_c(a[7],b[7],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ nop !=
+ st c_3,rp(14) !r[14]=c3;
+ st c_1,rp(15) !r[15]=c1;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_mul_comba8,#function
+.size bn_mul_comba8,(.-bn_mul_comba8)
+
+.align 32
+
+.global bn_mul_comba4
+/*
+ * void bn_mul_comba4(r,a,b)
+ * BN_ULONG *r,*a,*b;
+ */
+bn_mul_comba4:
+ save %sp,FRAME_SIZE,%sp
+ ld ap(0),a_0
+ ld bp(0),b_0
+ umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3);
+ ld bp(1),b_1
+ rd %y,c_2
+ st c_1,rp(0) !r[0]=c1;
+
+ umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1);
+ ld ap(1),a_1
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc %g0,t_2,c_3
+ addx %g0,%g0,c_1
+ ld ap(2),a_2
+ umul a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ st c_2,rp(1) !r[1]=c2;
+
+ umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ ld bp(2),b_2
+ umul a_1,b_1,t_1 !=!mul_add_c(a[1],b[1],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ ld bp(3),b_3
+ umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ st c_3,rp(2) !r[2]=c3;
+
+ umul a_0,b_3,t_1 !=!mul_add_c(a[0],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3 !=
+ umul a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ ld ap(3),a_3
+ umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(3) !r[3]=c1;
+
+ umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ umul a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_1,b_3,t_1 !=!mul_add_c(a[1],b[3],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ st c_2,rp(4) !r[4]=c2;
+
+ umul a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ st c_3,rp(5) !r[5]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ st c_1,rp(6) !r[6]=c1;
+ st c_2,rp(7) !r[7]=c2;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_mul_comba4,#function
+.size bn_mul_comba4,(.-bn_mul_comba4)
+
+.align 32
+
+.global bn_sqr_comba8
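+/*
+ * void bn_sqr_comba8(r,a)
+ * BN_ULONG *r,*a;
+ *
+ * Hedged note on the sqr_add_c2(a,i,j,...) steps below (after
+ * bn_asm.c): the double-word product t = (BN_ULLONG)a[i]*a[j], i != j,
+ * is accumulated into the (c0,c1,c2) chain twice, which is why each
+ * off-diagonal umul is followed by two addcc/addxcc carry sequences.
+ */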
+bn_sqr_comba8:
+ save %sp,FRAME_SIZE,%sp
+ ld ap(0),a_0
+ ld ap(1),a_1
+ umul a_0,a_0,c_1 !=!sqr_add_c(a,0,c1,c2,c3);
+ rd %y,c_2
+ st c_1,rp(0) !r[0]=c1;
+
+ ld ap(2),a_2
+ umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc %g0,t_2,c_3
+ addx %g0,%g0,c_1 !=
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3
+ st c_2,rp(1) !r[1]=c2;
+ addx c_1,%g0,c_1 !=
+
+ umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx %g0,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ ld ap(3),a_3
+ umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ st c_3,rp(2) !r[2]=c3;
+
+ umul a_0,a_3,t_1 !=!sqr_add_c2(a,3,0,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3 !=
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ ld ap(4),a_4
+ addx c_3,%g0,c_3 !=
+ umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(3) !r[3]=c1;
+
+ umul a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ ld ap(5),a_5
+ umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ st c_2,rp(4) !r[4]=c2;
+ addx c_1,%g0,c_1 !=
+
+ umul a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx %g0,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ umul a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ ld ap(6),a_6
+ umul a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ st c_3,rp(5) !r[5]=c3;
+
+ umul a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3
+ addcc c_1,t_1,c_1 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ ld ap(7),a_7
+ umul a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(6) !r[6]=c1;
+
+ umul a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ st c_2,rp(7) !r[7]=c2;
+
+ umul a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ addcc c_3,t_1,c_3 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ addcc c_3,t_1,c_3 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ addcc c_3,t_1,c_3 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ st c_3,rp(8) !r[8]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx %g0,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(9) !r[9]=c1;
+
+ umul a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ st c_2,rp(10) !r[10]=c2;
+
+ umul a_4,a_7,t_1 !=!sqr_add_c2(a,7,4,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2 !=
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_5,a_6,t_1 !=!sqr_add_c2(a,6,5,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ st c_3,rp(11) !r[11]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx %g0,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ st c_1,rp(12) !r[12]=c1;
+
+ umul a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2 !=
+ addxcc c_3,t_2,c_3
+ st c_2,rp(13) !r[13]=c2;
+ addx c_1,%g0,c_1 !=
+
+ umul a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ st c_3,rp(14) !r[14]=c3;
+ st c_1,rp(15) !r[15]=c1;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_sqr_comba8,#function
+.size bn_sqr_comba8,(.-bn_sqr_comba8)
+
+.align 32
+
+.global bn_sqr_comba4
+/*
+ * void bn_sqr_comba4(r,a)
+ * BN_ULONG *r,*a;
+ */
+bn_sqr_comba4:
+ save %sp,FRAME_SIZE,%sp
+ ld ap(0),a_0
+ umul a_0,a_0,c_1 !sqr_add_c(a,0,c1,c2,c3);
+ ld ap(1),a_1 !=
+ rd %y,c_2
+ st c_1,rp(0) !r[0]=c1;
+
+ ld ap(2),a_2
+ umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc %g0,t_2,c_3
+ addx %g0,%g0,c_1 !=
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ st c_2,rp(1) !r[1]=c2;
+
+ umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ ld ap(3),a_3
+ umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ st c_3,rp(2) !r[2]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx %g0,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(3) !r[3]=c1;
+
+ umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ st c_2,rp(4) !r[4]=c2;
+
+ umul a_2,a_3,t_1 !=!sqr_add_c2(a,3,2,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2 !=
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ st c_3,rp(5) !r[5]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ st c_1,rp(6) !r[6]=c1;
+ st c_2,rp(7) !r[7]=c2;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_sqr_comba4,#function
+.size bn_sqr_comba4,(.-bn_sqr_comba4)
+
+.align 32
diff --git a/crypto/bn/asm/sparcv8plus.S b/crypto/bn/asm/sparcv8plus.S
new file mode 100644
index 0000000..8c56e2e
--- /dev/null
+++ b/crypto/bn/asm/sparcv8plus.S
@@ -0,0 +1,1547 @@
+.ident "sparcv8plus.s, Version 1.4"
+.ident "SPARC v9 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+/*
+ * ====================================================================
+ * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+ * project.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
+ * ====================================================================
+ */
+
+/*
+ * This is my modest contribution to the OpenSSL project (see
+ * http://www.openssl.org/ for more information about it) and is
+ * a drop-in UltraSPARC ISA replacement for the crypto/bn/bn_asm.c
+ * module. For updates see http://fy.chalmers.se/~appro/hpe/.
+ *
+ * Questions-n-answers.
+ *
+ * Q. How to compile?
+ * A. With SC4.x/SC5.x:
+ *
+ * cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
+ *
+ * and with gcc:
+ *
+ * gcc -mcpu=ultrasparc -c bn_asm.sparc.v8plus.S -o bn_asm.o
+ *
+ * or if above fails (it does if you have gas installed):
+ *
+ * gcc -E bn_asm.sparc.v8plus.S | as -xarch=v8plus /dev/fd/0 -o bn_asm.o
+ *
+ * Quick-n-dirty way to fuse the module into the library.
+ * Provided that the library is already configured and built
+ * (in the 0.9.2 case with the no-asm option):
+ *
+ * # cd crypto/bn
+ * # cp /some/place/bn_asm.sparc.v8plus.S .
+ * # cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
+ * # make
+ * # cd ../..
+ * # make; make test
+ *
+ * Quick-n-dirty way to get rid of it:
+ *
+ * # cd crypto/bn
+ * # touch bn_asm.c
+ * # make
+ * # cd ../..
+ * # make; make test
+ *
+ * Q. V8plus architecture? What kind of beast is that?
+ * A. Well, it's more a programming model than an architecture...
+ *    It's actually v9-compliant, i.e. it runs on *any* UltraSPARC
+ *    CPU, under special conditions, namely when the kernel doesn't
+ *    preserve the upper 32 bits of otherwise 64-bit registers
+ *    during a context switch.
+ *
+ * Q. Why just UltraSPARC? What about SuperSPARC?
+ * A. The original release targeted UltraSPARC only. Now a SuperSPARC
+ *    version is provided alongside. Both versions share the
+ *    bn_*comba[48] implementations (see the comment later in the
+ *    code for an explanation). But what's so special about this
+ *    UltraSPARC implementation? Why didn't I let the compiler do the
+ *    job? The trouble is that most available compilers (well, SC5.0
+ *    is the only exception) don't attempt to take advantage of
+ *    UltraSPARC's 64-bitness under 32-bit kernels even though it's
+ *    perfectly possible (see the next question).
+ *
+ * Q. 64-bit registers under 32-bit kernels? Didn't you just say it
+ * doesn't work?
+ * A. You can't address *all* registers as 64-bit wide:-( The catch is
+ *    that you may actually rely upon %o0-%o5 and %g1-%g4 being fully
+ *    preserved if you're in a leaf function, i.e. one that never calls
+ *    any other functions. All functions in this module are leaf and
+ *    10 registers is a handful. As a matter of fact the non-"comba"
+ *    routines don't even require that much, so I could even afford
+ *    not to allocate a stack frame of their own for 'em:-)
+ *
+ * Q. What about 64-bit kernels?
+ * A. What about 'em? Just kidding:-) A pure 64-bit version is
+ *    currently under evaluation and development...
+ *
+ * Q. What about shared libraries?
+ * A. What about 'em? Kidding again:-) The code does *not* contain
+ *    any position-dependent references and it's safe to include it
+ *    in a shared library as is.
+ *
+ * Q. How much faster does it go?
+ * A. Do you have a good benchmark? In any case, below is what I
+ *    experience with the crypto/bn/expspeed.c test program:
+ *
+ * v8plus module on U10/300MHz against bn_asm.c compiled with:
+ *
+ * cc-5.0 -xarch=v8plus -xO5 -xdepend +7-12%
+ * cc-4.2 -xarch=v8plus -xO5 -xdepend +25-35%
+ * egcs-1.1.2 -mcpu=ultrasparc -O3 +35-45%
+ *
+ * v8 module on SS10/60MHz against bn_asm.c compiled with:
+ *
+ * cc-5.0 -xarch=v8 -xO5 -xdepend +7-10%
+ * cc-4.2 -xarch=v8 -xO5 -xdepend +10%
+ * egcs-1.1.2 -mv8 -O3 +35-45%
+ *
+ * As you can see it's damn hard to beat the new Sun C compiler,
+ * and it's first and foremost GNU C users who will appreciate
+ * this assembler implementation:-)
+ */
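+
+/*
+ * For the curious, a minimal C sketch of what one step of, e.g.,
+ * bn_mul_add_words below computes (names are illustrative only, not
+ * part of the module): with 32-bit BN_ULONG words a single 64-bit
+ * mulx replaces the v8 umul/rd %y pair, and the carry is a shift:
+ *
+ *	unsigned int step(unsigned int *r, unsigned int a,
+ *			  unsigned int w, unsigned int c)
+ *	{
+ *	unsigned long long t = (unsigned long long)a*w + *r + c;
+ *	*r = (unsigned int)t;		// stuw
+ *	return (unsigned int)(t>>32);	// srlx t,32 -> next carry
+ *	}
+ */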
+
+/*
+ * Revision history.
+ *
+ * 1.0 - initial release;
+ * 1.1 - new loop unrolling model(*);
+ * - some more fine tuning;
+ * 1.2 - made gas friendly;
+ * - updates to documentation concerning v9;
+ * - new performance comparison matrix;
+ * 1.3 - fixed problem with /usr/ccs/lib/cpp;
+ * 1.4 - native V9 bn_*_comba[48] implementation (15% more efficient)
+ * resulting in slight overall performance kick;
+ * - some retunes;
+ * - support for GNU as added;
+ *
+ * (*)	Originally the unrolled loop looked like this:
+ * for (;;) {
+ * op(p+0); if (--n==0) break;
+ * op(p+1); if (--n==0) break;
+ * op(p+2); if (--n==0) break;
+ * op(p+3); if (--n==0) break;
+ * p+=4;
+ * }
+ *	I now unroll according to the following:
+ *	while (n&~3) {
+ *		op(p+0); op(p+1); op(p+2); op(p+3);
+ *		p+=4; n-=4;
+ *	}
+ *	if (n) {
+ *		op(p+0); if (--n==0) return;
+ *		op(p+1); if (--n==0) return;
+ *		op(p+2); return;
+ *	}
+ */
+
+/*
+ * GNU assembler can't stand stuw:-(
+ */
+#define stuw st
+
+.section ".text",#alloc,#execinstr
+.file "bn_asm.sparc.v8plus.S"
+
+.align 32
+
+.global bn_mul_add_words
+/*
+ * BN_ULONG bn_mul_add_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_add_words:
+ sra %o2,%g0,%o2 ! signx %o2
+ brgz,a %o2,.L_bn_mul_add_words_proceed
+ lduw [%o1],%g2
+ retl
+ clr %o0
+ nop
+ nop
+ nop
+
+.L_bn_mul_add_words_proceed:
+ srl %o3,%g0,%o3 ! clruw %o3
+ andcc %o2,-4,%g0
+ bz,pn %icc,.L_bn_mul_add_words_tail
+ clr %o5
+
+.L_bn_mul_add_words_loop: ! wow! 32 aligned!
+ lduw [%o0],%g1
+ lduw [%o1+4],%g3
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ nop
+ add %o4,%g2,%o4
+ stuw %o4,[%o0]
+ srlx %o4,32,%o5
+
+ lduw [%o0+4],%g1
+ lduw [%o1+8],%g2
+ mulx %o3,%g3,%g3
+ add %g1,%o5,%o4
+ dec 4,%o2
+ add %o4,%g3,%o4
+ stuw %o4,[%o0+4]
+ srlx %o4,32,%o5
+
+ lduw [%o0+8],%g1
+ lduw [%o1+12],%g3
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ inc 16,%o1
+ add %o4,%g2,%o4
+ stuw %o4,[%o0+8]
+ srlx %o4,32,%o5
+
+ lduw [%o0+12],%g1
+ mulx %o3,%g3,%g3
+ add %g1,%o5,%o4
+ inc 16,%o0
+ add %o4,%g3,%o4
+ andcc %o2,-4,%g0
+ stuw %o4,[%o0-4]
+ srlx %o4,32,%o5
+ bnz,a,pt %icc,.L_bn_mul_add_words_loop
+ lduw [%o1],%g2
+
+ brnz,a,pn %o2,.L_bn_mul_add_words_tail
+ lduw [%o1],%g2
+.L_bn_mul_add_words_return:
+ retl
+ mov %o5,%o0
+
+.L_bn_mul_add_words_tail:
+ lduw [%o0],%g1
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ dec %o2
+ add %o4,%g2,%o4
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_mul_add_words_return
+ stuw %o4,[%o0]
+
+ lduw [%o1+4],%g2
+ lduw [%o0+4],%g1
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ dec %o2
+ add %o4,%g2,%o4
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_mul_add_words_return
+ stuw %o4,[%o0+4]
+
+ lduw [%o1+8],%g2
+ lduw [%o0+8],%g1
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ add %o4,%g2,%o4
+ stuw %o4,[%o0+8]
+ retl
+ srlx %o4,32,%o0
+
+.type bn_mul_add_words,#function
+.size bn_mul_add_words,(.-bn_mul_add_words)
+
+.align 32
+
+.global bn_mul_words
+/*
+ * BN_ULONG bn_mul_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_words:
+ sra %o2,%g0,%o2 ! signx %o2
+	brgz,a	%o2,.L_bn_mul_words_proceed
+ lduw [%o1],%g2
+ retl
+ clr %o0
+ nop
+ nop
+ nop
+
+.L_bn_mul_words_proceed:
+ srl %o3,%g0,%o3 ! clruw %o3
+ andcc %o2,-4,%g0
+ bz,pn %icc,.L_bn_mul_words_tail
+ clr %o5
+
+.L_bn_mul_words_loop: ! wow! 32 aligned!
+ lduw [%o1+4],%g3
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ nop
+ stuw %o4,[%o0]
+ srlx %o4,32,%o5
+
+ lduw [%o1+8],%g2
+ mulx %o3,%g3,%g3
+ add %g3,%o5,%o4
+ dec 4,%o2
+ stuw %o4,[%o0+4]
+ srlx %o4,32,%o5
+
+ lduw [%o1+12],%g3
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ inc 16,%o1
+ stuw %o4,[%o0+8]
+ srlx %o4,32,%o5
+
+ mulx %o3,%g3,%g3
+ add %g3,%o5,%o4
+ inc 16,%o0
+ stuw %o4,[%o0-4]
+ srlx %o4,32,%o5
+ andcc %o2,-4,%g0
+ bnz,a,pt %icc,.L_bn_mul_words_loop
+ lduw [%o1],%g2
+ nop
+ nop
+
+ brnz,a,pn %o2,.L_bn_mul_words_tail
+ lduw [%o1],%g2
+.L_bn_mul_words_return:
+ retl
+ mov %o5,%o0
+
+.L_bn_mul_words_tail:
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ dec %o2
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_mul_words_return
+ stuw %o4,[%o0]
+
+ lduw [%o1+4],%g2
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ dec %o2
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_mul_words_return
+ stuw %o4,[%o0+4]
+
+ lduw [%o1+8],%g2
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ stuw %o4,[%o0+8]
+ retl
+ srlx %o4,32,%o0
+
+.type bn_mul_words,#function
+.size bn_mul_words,(.-bn_mul_words)
+
+.align 32
+.global bn_sqr_words
+/*
+ * void bn_sqr_words(r,a,n)
+ * BN_ULONG *r,*a;
+ * int n;
+ */
+bn_sqr_words:
+ sra %o2,%g0,%o2 ! signx %o2
+	brgz,a	%o2,.L_bn_sqr_words_proceed
+ lduw [%o1],%g2
+ retl
+ clr %o0
+ nop
+ nop
+ nop
+
+.L_bn_sqr_words_proceed:
+ andcc %o2,-4,%g0
+ nop
+ bz,pn %icc,.L_bn_sqr_words_tail
+ nop
+
+.L_bn_sqr_words_loop: ! wow! 32 aligned!
+ lduw [%o1+4],%g3
+ mulx %g2,%g2,%o4
+ stuw %o4,[%o0]
+ srlx %o4,32,%o5
+ stuw %o5,[%o0+4]
+ nop
+
+ lduw [%o1+8],%g2
+ mulx %g3,%g3,%o4
+ dec 4,%o2
+ stuw %o4,[%o0+8]
+ srlx %o4,32,%o5
+ stuw %o5,[%o0+12]
+
+ lduw [%o1+12],%g3
+ mulx %g2,%g2,%o4
+ srlx %o4,32,%o5
+ stuw %o4,[%o0+16]
+ inc 16,%o1
+ stuw %o5,[%o0+20]
+
+ mulx %g3,%g3,%o4
+ inc 32,%o0
+ stuw %o4,[%o0-8]
+ srlx %o4,32,%o5
+ andcc %o2,-4,%g2
+ stuw %o5,[%o0-4]
+ bnz,a,pt %icc,.L_bn_sqr_words_loop
+ lduw [%o1],%g2
+ nop
+
+ brnz,a,pn %o2,.L_bn_sqr_words_tail
+ lduw [%o1],%g2
+.L_bn_sqr_words_return:
+ retl
+ clr %o0
+
+.L_bn_sqr_words_tail:
+ mulx %g2,%g2,%o4
+ dec %o2
+ stuw %o4,[%o0]
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_sqr_words_return
+ stuw %o5,[%o0+4]
+
+ lduw [%o1+4],%g2
+ mulx %g2,%g2,%o4
+ dec %o2
+ stuw %o4,[%o0+8]
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_sqr_words_return
+ stuw %o5,[%o0+12]
+
+ lduw [%o1+8],%g2
+ mulx %g2,%g2,%o4
+ srlx %o4,32,%o5
+ stuw %o4,[%o0+16]
+ stuw %o5,[%o0+20]
+ retl
+ clr %o0
+
+.type bn_sqr_words,#function
+.size bn_sqr_words,(.-bn_sqr_words)
+
+.align 32
+.global bn_div_words
+/*
+ * BN_ULONG bn_div_words(h,l,d)
+ * BN_ULONG h,l,d;
+ */
+bn_div_words:
+ sllx %o0,32,%o0
+ or %o0,%o1,%o0
+ udivx %o0,%o2,%o0
+ retl
+ srl %o0,%g0,%o0 ! clruw %o0
+
+.type bn_div_words,#function
+.size bn_div_words,(.-bn_div_words)
+
+.align 32
+
+.global bn_add_words
+/*
+ * BN_ULONG bn_add_words(rp,ap,bp,n)
+ * BN_ULONG *rp,*ap,*bp;
+ * int n;
+ */
+bn_add_words:
+ sra %o3,%g0,%o3 ! signx %o3
+ brgz,a %o3,.L_bn_add_words_proceed
+ lduw [%o1],%o4
+ retl
+ clr %o0
+
+.L_bn_add_words_proceed:
+ andcc %o3,-4,%g0
+ bz,pn %icc,.L_bn_add_words_tail
+ addcc %g0,0,%g0 ! clear carry flag
+
+.L_bn_add_words_loop: ! wow! 32 aligned!
+ dec 4,%o3
+ lduw [%o2],%o5
+ lduw [%o1+4],%g1
+ lduw [%o2+4],%g2
+ lduw [%o1+8],%g3
+ lduw [%o2+8],%g4
+ addccc %o5,%o4,%o5
+ stuw %o5,[%o0]
+
+ lduw [%o1+12],%o4
+ lduw [%o2+12],%o5
+ inc 16,%o1
+ addccc %g1,%g2,%g1
+ stuw %g1,[%o0+4]
+
+ inc 16,%o2
+ addccc %g3,%g4,%g3
+ stuw %g3,[%o0+8]
+
+ inc 16,%o0
+ addccc %o5,%o4,%o5
+ stuw %o5,[%o0-4]
+ and %o3,-4,%g1
+ brnz,a,pt %g1,.L_bn_add_words_loop
+ lduw [%o1],%o4
+
+ brnz,a,pn %o3,.L_bn_add_words_tail
+ lduw [%o1],%o4
+.L_bn_add_words_return:
+ clr %o0
+ retl
+ movcs %icc,1,%o0
+ nop
+
+.L_bn_add_words_tail:
+ lduw [%o2],%o5
+ dec %o3
+ addccc %o5,%o4,%o5
+ brz,pt %o3,.L_bn_add_words_return
+ stuw %o5,[%o0]
+
+ lduw [%o1+4],%o4
+ lduw [%o2+4],%o5
+ dec %o3
+ addccc %o5,%o4,%o5
+ brz,pt %o3,.L_bn_add_words_return
+ stuw %o5,[%o0+4]
+
+ lduw [%o1+8],%o4
+ lduw [%o2+8],%o5
+ addccc %o5,%o4,%o5
+ stuw %o5,[%o0+8]
+ clr %o0
+ retl
+ movcs %icc,1,%o0
+
+.type bn_add_words,#function
+.size bn_add_words,(.-bn_add_words)
+
+.global bn_sub_words
+/*
+ * BN_ULONG bn_sub_words(rp,ap,bp,n)
+ * BN_ULONG *rp,*ap,*bp;
+ * int n;
+ */
+bn_sub_words:
+ sra %o3,%g0,%o3 ! signx %o3
+ brgz,a %o3,.L_bn_sub_words_proceed
+ lduw [%o1],%o4
+ retl
+ clr %o0
+
+.L_bn_sub_words_proceed:
+ andcc %o3,-4,%g0
+ bz,pn %icc,.L_bn_sub_words_tail
+ addcc %g0,0,%g0 ! clear carry flag
+
+.L_bn_sub_words_loop: ! wow! 32 aligned!
+ dec 4,%o3
+ lduw [%o2],%o5
+ lduw [%o1+4],%g1
+ lduw [%o2+4],%g2
+ lduw [%o1+8],%g3
+ lduw [%o2+8],%g4
+ subccc %o4,%o5,%o5
+ stuw %o5,[%o0]
+
+ lduw [%o1+12],%o4
+ lduw [%o2+12],%o5
+ inc 16,%o1
+ subccc %g1,%g2,%g2
+ stuw %g2,[%o0+4]
+
+ inc 16,%o2
+ subccc %g3,%g4,%g4
+ stuw %g4,[%o0+8]
+
+ inc 16,%o0
+ subccc %o4,%o5,%o5
+ stuw %o5,[%o0-4]
+ and %o3,-4,%g1
+ brnz,a,pt %g1,.L_bn_sub_words_loop
+ lduw [%o1],%o4
+
+ brnz,a,pn %o3,.L_bn_sub_words_tail
+ lduw [%o1],%o4
+.L_bn_sub_words_return:
+ clr %o0
+ retl
+ movcs %icc,1,%o0
+ nop
+
+.L_bn_sub_words_tail:
+ lduw [%o2],%o5
+ dec %o3
+ subccc %o4,%o5,%o5
+ brz,pt %o3,.L_bn_sub_words_return
+ stuw %o5,[%o0]
+
+ lduw [%o1+4],%o4
+ lduw [%o2+4],%o5
+ dec %o3
+ subccc %o4,%o5,%o5
+ brz,pt %o3,.L_bn_sub_words_return
+ stuw %o5,[%o0+4]
+
+ lduw [%o1+8],%o4
+ lduw [%o2+8],%o5
+ subccc %o4,%o5,%o5
+ stuw %o5,[%o0+8]
+ clr %o0
+ retl
+ movcs %icc,1,%o0
+
+.type bn_sub_words,#function
+.size bn_sub_words,(.-bn_sub_words)
+
+/*
+ * Code below depends on the fact that upper parts of the %l0-%l7
+ * and %i0-%i7 are zeroed by kernel after context switch. In
+ * previous versions this comment stated that "the trouble is that
+ * it's not feasible to implement the mumbo-jumbo in less V9
+ * instructions:-(" which apparently isn't true thanks to the
+ * 'bcs,a %xcc,.+8; inc %rd' pair. But the performance improvement
+ * results not from the shorter code, but from the elimination of
+ * multicycle, non-pairable 'rd %y,%rd' instructions.
+ *
+ * Andy.
+ */
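+
+/*
+ * A short illustration of the pair mentioned above (a sketch, not a
+ * quote from the code below): t_2 is pre-loaded with 1<<32, so
+ *
+ *	addcc	c_12,t_1,c_12	! may carry out of bit 63 (%xcc.c)
+ *	bcs,a	%xcc,.+8	! delay slot annulled unless taken
+ *	add	c_3,t_2,c_3	! executed on carry only: c_3 += 1<<32
+ *
+ * accumulates carries as multiples of 2^32 in c_3, which is later
+ * folded back into the running sum with 'or c_12,c_3,c_12'.
+ */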
+
+#define FRAME_SIZE -96
+
+/*
+ * Here is the register usage map for *all* routines below.
+ */
+#define t_1 %o0
+#define t_2 %o1
+#define c_12 %o2
+#define c_3 %o3
+
+#define ap(I) [%i1+4*I]
+#define bp(I) [%i2+4*I]
+#define rp(I) [%i0+4*I]
+
+#define a_0 %l0
+#define a_1 %l1
+#define a_2 %l2
+#define a_3 %l3
+#define a_4 %l4
+#define a_5 %l5
+#define a_6 %l6
+#define a_7 %l7
+
+#define b_0 %i3
+#define b_1 %i4
+#define b_2 %i5
+#define b_3 %o4
+#define b_4 %o5
+#define b_5 %o7
+#define b_6 %g1
+#define b_7 %g4
+
+.align 32
+.global bn_mul_comba8
+/*
+ * void bn_mul_comba8(r,a,b)
+ * BN_ULONG *r,*a,*b;
+ */
+bn_mul_comba8:
+ save %sp,FRAME_SIZE,%sp
+ mov 1,t_2
+ lduw ap(0),a_0
+ sllx t_2,32,t_2
+ lduw bp(0),b_0 !=
+ lduw bp(1),b_1
+ mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3);
+ srlx t_1,32,c_12
+ stuw t_1,rp(0) !=!r[0]=c1;
+
+ lduw ap(1),a_1
+ mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(2),a_2
+ mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(1) !r[1]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
+ addcc c_12,t_1,c_12 !=
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw bp(2),b_2 !=
+ mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ lduw bp(3),b_3
+ mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(2) !r[2]=c3;
+ or c_12,c_3,c_12 !=
+
+ mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ lduw ap(3),a_3
+ mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
+ addcc c_12,t_1,c_12 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(4),a_4
+ mulx a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3);!=
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(3) !r[3]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1);
+ addcc c_12,t_1,c_12 !=
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,b_1,t_1 !=!mul_add_c(a[3],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw bp(4),b_4 !=
+ mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ lduw bp(5),b_5
+ mulx a_0,b_4,t_1 !mul_add_c(a[0],b[4],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(4) !r[4]=c2;
+ or c_12,c_3,c_12 !=
+
+ mulx a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ lduw ap(5),a_5
+ mulx a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2);
+ addcc c_12,t_1,c_12 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(6),a_6
+ mulx a_5,b_0,t_1 !=!mul_add_c(a[5],b[0],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(5) !r[5]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3);
+ addcc c_12,t_1,c_12 !=
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_4,b_2,t_1 !=!mul_add_c(a[4],b[2],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,b_3,t_1 !=!mul_add_c(a[3],b[3],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,b_4,t_1 !=!mul_add_c(a[2],b[4],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw bp(6),b_6 !=
+ mulx a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ lduw bp(7),b_7
+ mulx a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(6) !r[6]=c1;
+ or c_12,c_3,c_12 !=
+
+ mulx a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_3,b_4,t_1 !mul_add_c(a[3],b[4],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ lduw ap(7),a_7
+ mulx a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_7,b_0,t_1 !=!mul_add_c(a[7],b[0],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(7) !r[7]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_7,b_1,t_1 !=!mul_add_c(a[7],b[1],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_6,b_2,t_1 !mul_add_c(a[6],b[2],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_2,b_6,t_1 !mul_add_c(a[2],b[6],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ srlx t_1,32,c_12
+ stuw t_1,rp(8) !r[8]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_2,b_7,t_1 !=!mul_add_c(a[2],b[7],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_3,b_6,t_1 !mul_add_c(a[3],b[6],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_7,b_2,t_1 !mul_add_c(a[7],b[2],c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(9) !r[9]=c1;
+ or c_12,c_3,c_12 !=
+
+ mulx a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_5,t_1 !mul_add_c(a[5],b[5],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(10) !r[10]=c2;
+ or c_12,c_3,c_12 !=
+
+ mulx a_4,b_7,t_1 !mul_add_c(a[4],b[7],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(11) !r[11]=c3;
+ or c_12,c_3,c_12 !=
+
+ mulx a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(12) !r[12]=c1;
+ or c_12,c_3,c_12 !=
+
+ mulx a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+	stuw	t_1,rp(13)	!r[13]=c2;
+ or c_12,c_3,c_12 !=
+
+ mulx a_7,b_7,t_1 !mul_add_c(a[7],b[7],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(14) !r[14]=c3;
+ stuw c_12,rp(15) !r[15]=c1;
+
+ ret
+ restore %g0,%g0,%o0 !=
+
+.type bn_mul_comba8,#function
+.size bn_mul_comba8,(.-bn_mul_comba8)
+
+.align 32
+
+.global bn_mul_comba4
+/*
+ * void bn_mul_comba4(r,a,b)
+ * BN_ULONG *r,*a,*b;
+ */
+bn_mul_comba4:
+ save %sp,FRAME_SIZE,%sp
+ lduw ap(0),a_0
+ mov 1,t_2
+ lduw bp(0),b_0
+ sllx t_2,32,t_2 !=
+ lduw bp(1),b_1
+ mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3);
+ srlx t_1,32,c_12
+ stuw t_1,rp(0) !=!r[0]=c1;
+
+ lduw ap(1),a_1
+ mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(2),a_2
+ mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(1) !r[1]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
+ addcc c_12,t_1,c_12 !=
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw bp(2),b_2 !=
+ mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ lduw bp(3),b_3
+ mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(2) !r[2]=c3;
+ or c_12,c_3,c_12 !=
+
+ mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ lduw ap(3),a_3
+ mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
+ addcc c_12,t_1,c_12 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!=
+ addcc c_12,t_1,t_1 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(3) !=!r[3]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1);
+ addcc c_12,t_1,c_12 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
+ addcc c_12,t_1,t_1 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(4) !=!r[4]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
+ addcc c_12,t_1,t_1 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(5) !=!r[5]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
+ addcc c_12,t_1,t_1
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(6) !r[6]=c1;
+ stuw c_12,rp(7) !r[7]=c2;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_mul_comba4,#function
+.size bn_mul_comba4,(.-bn_mul_comba4)
+
+.align 32
+
+.global bn_sqr_comba8
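+/*
+ * void bn_sqr_comba8(r,a)
+ * BN_ULONG *r,*a;
+ */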
+bn_sqr_comba8:
+ save %sp,FRAME_SIZE,%sp
+ mov 1,t_2
+ lduw ap(0),a_0
+ sllx t_2,32,t_2
+ lduw ap(1),a_1
+ mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3);
+ srlx t_1,32,c_12
+ stuw t_1,rp(0) !r[0]=c1;
+
+ lduw ap(2),a_2
+ mulx a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(1) !r[1]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(3),a_3
+ mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(2) !r[2]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(4),a_4
+ mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+	stuw	t_1,rp(3)	!r[3]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(5),a_5
+ mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(4) !r[4]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(6),a_6
+ mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(5) !r[5]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(7),a_7
+ mulx a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(6) !r[6]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(7) !r[7]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(8) !r[8]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(9) !r[9]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(10) !r[10]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_4,a_7,t_1 !sqr_add_c2(a,7,4,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,a_6,t_1 !sqr_add_c2(a,6,5,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(11) !r[11]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(12) !r[12]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(13) !r[13]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2);
+ addcc c_12,t_1,t_1
+ srlx t_1,32,c_12
+ stuw t_1,rp(14) !r[14]=c3;
+ stuw c_12,rp(15) !r[15]=c1;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_sqr_comba8,#function
+.size bn_sqr_comba8,(.-bn_sqr_comba8)
+
+.align 32
+
+.global bn_sqr_comba4
+/*
+ * void bn_sqr_comba4(r,a)
+ * BN_ULONG *r,*a;
+ */
+bn_sqr_comba4:
+ save %sp,FRAME_SIZE,%sp
+ mov 1,t_2
+ lduw ap(0),a_0
+ sllx t_2,32,t_2
+ lduw ap(1),a_1
+ mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3);
+ srlx t_1,32,c_12
+ stuw t_1,rp(0) !r[0]=c1;
+
+ lduw ap(2),a_2
+ mulx a_0,a_1,t_1 !sqr_add_c2(a,1,0,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(1) !r[1]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(3),a_3
+ mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(2) !r[2]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(3) !r[3]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(4) !r[4]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(5) !r[5]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3);
+ addcc c_12,t_1,t_1
+ srlx t_1,32,c_12
+ stuw t_1,rp(6) !r[6]=c1;
+ stuw c_12,rp(7) !r[7]=c2;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_sqr_comba4,#function
+.size bn_sqr_comba4,(.-bn_sqr_comba4)
+
+.align 32
diff --git a/crypto/bn/asm/x86.pl b/crypto/bn/asm/x86.pl
new file mode 100644
index 0000000..1bc4f1b
--- /dev/null
+++ b/crypto/bn/asm/x86.pl
@@ -0,0 +1,28 @@
+#!/usr/local/bin/perl
+
+push(@INC,"perlasm","../../perlasm");
+require "x86asm.pl";
+
+require("x86/mul_add.pl");
+require("x86/mul.pl");
+require("x86/sqr.pl");
+require("x86/div.pl");
+require("x86/add.pl");
+require("x86/sub.pl");
+require("x86/comba.pl");
+
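+# The first command-line argument selects the perlasm output flavour and
+# is handed straight to asm_init below; a typical invocation is
+# presumably something like "perl x86.pl elf > bn-586.s" (the exact
+# flavour names depend on the perlasm version shipped in this tree).
+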
+&asm_init($ARGV[0],$0);
+
+&bn_mul_add_words("bn_mul_add_words");
+&bn_mul_words("bn_mul_words");
+&bn_sqr_words("bn_sqr_words");
+&bn_div_words("bn_div_words");
+&bn_add_words("bn_add_words");
+&bn_sub_words("bn_sub_words");
+&bn_mul_comba("bn_mul_comba8",8);
+&bn_mul_comba("bn_mul_comba4",4);
+&bn_sqr_comba("bn_sqr_comba8",8);
+&bn_sqr_comba("bn_sqr_comba4",4);
+
+&asm_finish();
+
diff --git a/crypto/bn/asm/x86/add.pl b/crypto/bn/asm/x86/add.pl
new file mode 100644
index 0000000..0b5cf58
--- /dev/null
+++ b/crypto/bn/asm/x86/add.pl
@@ -0,0 +1,76 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub bn_add_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+	&and($num,0xfffffff8);	# round num down to a multiple of 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &add($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &add($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &add($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &add($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+	&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
+
+1;
diff --git a/crypto/bn/asm/x86/comba.pl b/crypto/bn/asm/x86/comba.pl
new file mode 100644
index 0000000..2291253
--- /dev/null
+++ b/crypto/bn/asm/x86/comba.pl
@@ -0,0 +1,277 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub mul_add_c
+ {
+ local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+	# pos == -1 if eax and edx are pre-loaded, 0 to load the next
+	# words, and >0 to store the finished word into r[] (2 on the last call)
+
+ &comment("mul a[$ai]*b[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$b,"",0));
+
+ &mul("edx");
+ &add($c0,"eax");
+	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
+ &mov("eax",&wparam(0)) if $pos > 0; # load r[]
+ ###
+ &adc($c1,"edx");
+	&mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0;	# load next b
+	&mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1;	# load next b
+ ###
+ &adc($c2,0);
+	# if pos > 1, it means this is the last call
+ &mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0; # save r[];
+	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;	# load next a
+ }
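+
+# In C terms each mul_add_c() call above adds one 32x32->64-bit partial
+# product into the three-word column accumulator (c2,c1,c0) -- a rough
+# sketch, not the generated code:
+#
+#	lo = low32(a[ai]*b[bi]); hi = high32(a[ai]*b[bi]);	# mul
+#	c0 += lo; hi += (c0 < lo);				# add/adc
+#	c1 += hi; c2 += (c1 < hi);				# adc/adc 0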
+
+sub sqr_add_c
+ {
+ local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+	# pos == -1 if eax and edx are pre-loaded, 0 to load the next
+	# words, and >0 to store the finished word into r[] (2 on the last call)
+
+ &comment("sqr a[$ai]*a[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$b,"",0));
+
+ if ($ai == $bi)
+ { &mul("eax");}
+ else
+ { &mul("edx");}
+ &add($c0,"eax");
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a
+ ###
+ &adc($c1,"edx");
+ &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb);
+ ###
+ &adc($c2,0);
+	# if pos > 1, it means this is the last call
+ &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[];
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b
+ }
+
+sub sqr_add_c2
+ {
+ local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+	# pos == -1 if eax and edx are pre-loaded, 0 to load the next
+	# words, and >0 to store the finished word into r[] (2 on the last call)
+
+ &comment("sqr a[$ai]*a[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$a,"",0));
+
+ if ($ai == $bi)
+ { &mul("eax");}
+ else
+ { &mul("edx");}
+ &add("eax","eax");
+ ###
+ &adc("edx","edx");
+ ###
+ &adc($c2,0);
+ &add($c0,"eax");
+ &adc($c1,"edx");
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b
+ &adc($c2,0);
+ &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[];
+ &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb);
+ ###
+ }
+
+sub bn_mul_comba
+ {
+ local($name,$num)=@_;
+ local($a,$b,$c0,$c1,$c2);
+ local($i,$as,$ae,$bs,$be,$ai,$bi);
+ local($tot,$end);
+
+ &function_begin_B($name,"");
+
+ $c0="ebx";
+ $c1="ecx";
+ $c2="ebp";
+ $a="esi";
+ $b="edi";
+
+ $as=0;
+ $ae=0;
+ $bs=0;
+ $be=0;
+ $tot=$num+$num-1;
+
+ &push("esi");
+ &mov($a,&wparam(1));
+ &push("edi");
+ &mov($b,&wparam(2));
+ &push("ebp");
+ &push("ebx");
+
+ &xor($c0,$c0);
+ &mov("eax",&DWP(0,$a,"",0)); # load the first word
+ &xor($c1,$c1);
+	&mov("edx",&DWP(0,$b,"",0));		# load the first word of b
+
+ for ($i=0; $i<$tot; $i++)
+ {
+ $ai=$as;
+ $bi=$bs;
+ $end=$be+1;
+
+ &comment("################## Calculate word $i");
+
+ for ($j=$bs; $j<$end; $j++)
+ {
+ &xor($c2,$c2) if ($j == $bs);
+ if (($j+1) == $end)
+ {
+ $v=1;
+ $v=2 if (($i+1) == $tot);
+ }
+ else
+ { $v=0; }
+ if (($j+1) != $end)
+ {
+ $na=($ai-1);
+ $nb=($bi+1);
+ }
+ else
+ {
+ $na=$as+($i < ($num-1));
+ $nb=$bs+($i >= ($num-1));
+ }
+#printf STDERR "[$ai,$bi] -> [$na,$nb]\n";
+ &mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb);
+ if ($v)
+ {
+ &comment("saved r[$i]");
+ # &mov("eax",&wparam(0));
+ # &mov(&DWP($i*4,"eax","",0),$c0);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ }
+ $ai--;
+ $bi++;
+ }
+ $as++ if ($i < ($num-1));
+ $ae++ if ($i >= ($num-1));
+
+ $bs++ if ($i >= ($num-1));
+ $be++ if ($i < ($num-1));
+ }
+ &comment("save r[$i]");
+ # &mov("eax",&wparam(0));
+ &mov(&DWP($i*4,"eax","",0),$c0);
+
+ &pop("ebx");
+ &pop("ebp");
+ &pop("edi");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
+
+sub bn_sqr_comba
+ {
+ local($name,$num)=@_;
+	local($r,$a,$c0,$c1,$c2);
+ local($i,$as,$ae,$bs,$be,$ai,$bi);
+ local($b,$tot,$end,$half);
+
+ &function_begin_B($name,"");
+
+ $c0="ebx";
+ $c1="ecx";
+ $c2="ebp";
+ $a="esi";
+ $r="edi";
+
+ &push("esi");
+ &push("edi");
+ &push("ebp");
+ &push("ebx");
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &xor($c0,$c0);
+ &xor($c1,$c1);
+ &mov("eax",&DWP(0,$a,"",0)); # load the first word
+
+ $as=0;
+ $ae=0;
+ $bs=0;
+ $be=0;
+ $tot=$num+$num-1;
+
+ for ($i=0; $i<$tot; $i++)
+ {
+ $ai=$as;
+ $bi=$bs;
+ $end=$be+1;
+
+ &comment("############### Calculate word $i");
+ for ($j=$bs; $j<$end; $j++)
+ {
+ &xor($c2,$c2) if ($j == $bs);
+ if (($ai-1) < ($bi+1))
+ {
+ $v=1;
+ $v=2 if ($i+1) == $tot;
+ }
+ else
+ { $v=0; }
+ if (!$v)
+ {
+ $na=$ai-1;
+ $nb=$bi+1;
+ }
+ else
+ {
+ $na=$as+($i < ($num-1));
+ $nb=$bs+($i >= ($num-1));
+ }
+ if ($ai == $bi)
+ {
+ &sqr_add_c($r,$a,$ai,$bi,
+ $c0,$c1,$c2,$v,$i,$na,$nb);
+ }
+ else
+ {
+ &sqr_add_c2($r,$a,$ai,$bi,
+ $c0,$c1,$c2,$v,$i,$na,$nb);
+ }
+ if ($v)
+ {
+ &comment("saved r[$i]");
+ #&mov(&DWP($i*4,$r,"",0),$c0);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ last;
+ }
+ $ai--;
+ $bi++;
+ }
+ $as++ if ($i < ($num-1));
+ $ae++ if ($i >= ($num-1));
+
+ $bs++ if ($i >= ($num-1));
+ $be++ if ($i < ($num-1));
+ }
+ &mov(&DWP($i*4,$r,"",0),$c0);
+ &pop("ebx");
+ &pop("ebp");
+ &pop("edi");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
+
+1;
diff --git a/crypto/bn/asm/x86/div.pl b/crypto/bn/asm/x86/div.pl
new file mode 100644
index 0000000..0e90152
--- /dev/null
+++ b/crypto/bn/asm/x86/div.pl
@@ -0,0 +1,15 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub bn_div_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+	&mov("edx",&wparam(0));	# get h
+	&mov("eax",&wparam(1));	# get l
+	&mov("ebx",&wparam(2));	# get d
+ &div("ebx");
+ &function_end($name);
+ }
+1;
diff --git a/crypto/bn/asm/x86/f b/crypto/bn/asm/x86/f
new file mode 100644
index 0000000..22e4112
--- /dev/null
+++ b/crypto/bn/asm/x86/f
@@ -0,0 +1,3 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
diff --git a/crypto/bn/asm/x86/mul.pl b/crypto/bn/asm/x86/mul.pl
new file mode 100644
index 0000000..674cb9b
--- /dev/null
+++ b/crypto/bn/asm/x86/mul.pl
@@ -0,0 +1,77 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub bn_mul_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $Low="eax";
+ $High="edx";
+ $a="ebx";
+ $w="ecx";
+ $r="edi";
+ $c="esi";
+ $num="ebp";
+
+ &xor($c,$c); # clear carry
+	&mov($r,&wparam(0));	# get r
+	&mov($a,&wparam(1));	# get a
+	&mov($num,&wparam(2));	# get num
+	&mov($w,&wparam(3));	# get w
+
+	&and($num,0xfffffff8);	# round num down to a multiple of 8
+ &jz(&label("mw_finish"));
+
+ &set_label("mw_loop",0);
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ # XXX
+
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t);
+
+ &mov($c,"edx"); # c= H(t);
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,32);
+ &sub($num,8);
+ &jz(&label("mw_finish"));
+ &jmp(&label("mw_loop"));
+
+ &set_label("mw_finish",0);
+ &mov($num,&wparam(2)); # get num
+ &and($num,7);
+ &jnz(&label("mw_finish2"));
+ &jmp(&label("mw_end"));
+
+ &set_label("mw_finish2",1);
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0));# *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ # XXX
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i*4,$r,"",0),"eax");# *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ &dec($num) if ($i != 7-1);
+ &jz(&label("mw_end")) if ($i != 7-1);
+ }
+ &set_label("mw_end",0);
+ &mov("eax",$c);
+
+ &function_end($name);
+ }
+
+1;
diff --git a/crypto/bn/asm/x86/mul_add.pl b/crypto/bn/asm/x86/mul_add.pl
new file mode 100644
index 0000000..61830d3
--- /dev/null
+++ b/crypto/bn/asm/x86/mul_add.pl
@@ -0,0 +1,87 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub bn_mul_add_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $Low="eax";
+ $High="edx";
+ $a="ebx";
+ $w="ebp";
+ $r="edi";
+ $c="esi";
+
+ &xor($c,$c); # clear carry
+	&mov($r,&wparam(0));	# get r
+
+	&mov("ecx",&wparam(2));	# get num
+	&mov($a,&wparam(1));	# get a
+
+	&and("ecx",0xfffffff8);	# round num down to a multiple of 8
+	&mov($w,&wparam(3));	# get w
+
+	&push("ecx");		# make room on the stack for a tmp variable
+
+ &jz(&label("maw_finish"));
+
+ &set_label("maw_loop",0);
+
+ &mov(&swtmp(0),"ecx"); #
+
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ &mul($w); # *a * w
+	&add("eax",$c);		# L(t)+=c
+	&mov($c,&DWP($i,$r,"",0));	# c= *r
+	&adc("edx",0);			# H(t)+=carry
+	&add("eax",$c);		# L(t)+= *r
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ }
+
+ &comment("");
+ &mov("ecx",&swtmp(0)); #
+ &add($a,32);
+ &add($r,32);
+ &sub("ecx",8);
+ &jnz(&label("maw_loop"));
+
+ &set_label("maw_finish",0);
+ &mov("ecx",&wparam(2)); # get num
+ &and("ecx",7);
+ &jnz(&label("maw_finish2")); # helps branch prediction
+ &jmp(&label("maw_end"));
+
+ &set_label("maw_finish2",1);
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0));# *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+	&mov($c,&DWP($i*4,$r,"",0));	# c= *r
+	&adc("edx",0);			# H(t)+=carry
+	&add("eax",$c);			# L(t)+= *r
+ &adc("edx",0); # H(t)+=carry
+ &dec("ecx") if ($i != 7-1);
+ &mov(&DWP($i*4,$r,"",0),"eax"); # *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ &jz(&label("maw_end")) if ($i != 7-1);
+ }
+ &set_label("maw_end",0);
+ &mov("eax",$c);
+
+	&pop("ecx");		# remove tmp variable from the stack
+
+ &function_end($name);
+ }
+
+1;
diff --git a/crypto/bn/asm/x86/sqr.pl b/crypto/bn/asm/x86/sqr.pl
new file mode 100644
index 0000000..1f90993
--- /dev/null
+++ b/crypto/bn/asm/x86/sqr.pl
@@ -0,0 +1,60 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub bn_sqr_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $r="esi";
+ $a="edi";
+ $num="ebx";
+
+	&mov($r,&wparam(0));	# get r
+	&mov($a,&wparam(1));	# get a
+	&mov($num,&wparam(2));	# get num
+
+	&and($num,0xfffffff8);	# round num down to a multiple of 8
+ &jz(&label("sw_finish"));
+
+ &set_label("sw_loop",0);
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ # XXX
+ &mul("eax"); # *a * *a
+ &mov(&DWP($i*2,$r,"",0),"eax"); #
+ &mov(&DWP($i*2+4,$r,"",0),"edx");#
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,64);
+ &sub($num,8);
+ &jnz(&label("sw_loop"));
+
+ &set_label("sw_finish",0);
+ &mov($num,&wparam(2)); # get num
+ &and($num,7);
+ &jz(&label("sw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0)); # *a
+ # XXX
+ &mul("eax"); # *a * *a
+ &mov(&DWP($i*8,$r,"",0),"eax"); #
+ &dec($num) if ($i != 7-1);
+ &mov(&DWP($i*8+4,$r,"",0),"edx");
+ &jz(&label("sw_end")) if ($i != 7-1);
+ }
+ &set_label("sw_end",0);
+
+ &function_end($name);
+ }
+
+1;
diff --git a/crypto/bn/asm/x86/sub.pl b/crypto/bn/asm/x86/sub.pl
new file mode 100644
index 0000000..837b0e1
--- /dev/null
+++ b/crypto/bn/asm/x86/sub.pl
@@ -0,0 +1,76 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub bn_sub_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+	&and($num,0xfffffff8);	# round num down to a multiple of 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+	&mov(&DWP($i*4,$r,"",0),$tmp1);	# *r
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
+
+1;
diff --git a/crypto/bn/asm/x86_64-gcc.c b/crypto/bn/asm/x86_64-gcc.c
new file mode 100644
index 0000000..f13f52d
--- /dev/null
+++ b/crypto/bn/asm/x86_64-gcc.c
@@ -0,0 +1,597 @@
+#ifdef __SUNPRO_C
+# include "../bn_asm.c" /* kind of dirty hack for Sun Studio */
+#else
+/*
+ * x86_64 BIGNUM accelerator version 0.1, December 2002.
+ *
+ * Implemented by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+ * project.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
+ *
+ * Q. Version 0.1? That doesn't sound like Andy, who used to assign
+ *    real version numbers, like 1.0...
+ * A. Well, that's because this code is basically a quick-n-dirty
+ * proof-of-concept hack. As you can see it's implemented with
+ * inline assembler, which means that you're bound to GCC and that
+ *    there is likely still room for further improvement.
+ *
+ * Q. Why inline assembler?
+ * A. x86_64 features its own ABI, which I'm not familiar with. That
+ *    is why I decided to let the compiler take care of the subroutine
+ *    prologue/epilogue as well as register allocation. For reference,
+ *    Win64 implements an ABI for AMD64 that differs from the Linux one.
+ *
+ * Q. How much faster does it get?
+ * A. 'apps/openssl speed rsa dsa' output with no-asm:
+ *
+ * sign verify sign/s verify/s
+ * rsa 512 bits 0.0006s 0.0001s 1683.8 18456.2
+ * rsa 1024 bits 0.0028s 0.0002s 356.0 6407.0
+ * rsa 2048 bits 0.0172s 0.0005s 58.0 1957.8
+ * rsa 4096 bits 0.1155s 0.0018s 8.7 555.6
+ * sign verify sign/s verify/s
+ * dsa 512 bits 0.0005s 0.0006s 2100.8 1768.3
+ * dsa 1024 bits 0.0014s 0.0018s 692.3 559.2
+ * dsa 2048 bits 0.0049s 0.0061s 204.7 165.0
+ *
+ * 'apps/openssl speed rsa dsa' output with this module:
+ *
+ * sign verify sign/s verify/s
+ * rsa 512 bits 0.0004s 0.0000s 2767.1 33297.9
+ * rsa 1024 bits 0.0012s 0.0001s 867.4 14674.7
+ * rsa 2048 bits 0.0061s 0.0002s 164.0 5270.0
+ * rsa 4096 bits 0.0384s 0.0006s 26.1 1650.8
+ * sign verify sign/s verify/s
+ * dsa 512 bits 0.0002s 0.0003s 4442.2 3786.3
+ * dsa 1024 bits 0.0005s 0.0007s 1835.1 1497.4
+ * dsa 2048 bits 0.0016s 0.0020s 620.4 504.6
+ *
+ * For reference: the IA-32 assembler implementation performs very
+ * much like the 64-bit code compiled with no-asm on the same
+ * machine.
+ */
+
+#define BN_ULONG unsigned long
+
+/*
+ * "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
+ * "g"(0) let the compiler to decide where does it
+ * want to keep the value of zero;
+ */
+#define mul_add(r,a,word,carry) do { \
+ register BN_ULONG high,low; \
+ asm ("mulq %3" \
+ : "=a"(low),"=d"(high) \
+ : "a"(word),"m"(a) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(carry),"+d"(high)\
+ : "a"(low),"g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+m"(r),"+d"(high) \
+ : "r"(carry),"g"(0) \
+ : "cc"); \
+ carry=high; \
+ } while (0)
+
+#define mul(r,a,word,carry) do { \
+ register BN_ULONG high,low; \
+ asm ("mulq %3" \
+ : "=a"(low),"=d"(high) \
+ : "a"(word),"g"(a) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(carry),"+d"(high)\
+ : "a"(low),"g"(0) \
+ : "cc"); \
+ (r)=carry, carry=high; \
+ } while (0)
+
+#define sqr(r0,r1,a) \
+ asm ("mulq %2" \
+ : "=a"(r0),"=d"(r1) \
+ : "a"(a) \
+ : "cc");
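+
+/*
+ * What the macros above amount to, as a sketch in C-like notation
+ * (t is a 128-bit temporary; this is not real code):
+ *
+ *	mul_add(r,a,word,carry):  t = a*word + carry + r;
+ *	                          r = low64(t); carry = high64(t);
+ *	mul(r,a,word,carry):      t = a*word + carry;
+ *	                          r = low64(t); carry = high64(t);
+ *	sqr(r0,r1,a):             r0 = low64(a*a); r1 = high64(a*a);
+ */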
+
+BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+ {
+ BN_ULONG c1=0;
+
+ if (num <= 0) return(c1);
+
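+	/* num&~3 is nonzero while at least four words remain, so four
+	 * words are processed per iteration (same pattern below) */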
+ while (num&~3)
+ {
+ mul_add(rp[0],ap[0],w,c1);
+ mul_add(rp[1],ap[1],w,c1);
+ mul_add(rp[2],ap[2],w,c1);
+ mul_add(rp[3],ap[3],w,c1);
+ ap+=4; rp+=4; num-=4;
+ }
+ if (num)
+ {
+ mul_add(rp[0],ap[0],w,c1); if (--num==0) return c1;
+ mul_add(rp[1],ap[1],w,c1); if (--num==0) return c1;
+ mul_add(rp[2],ap[2],w,c1); return c1;
+ }
+
+ return(c1);
+ }
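+
+/*
+ * Usage sketch for the function above (illustrative values): with
+ * BN_ULONG r[8], a[8], w filled in, c = bn_mul_add_words(r,a,8,w)
+ * leaves r = r + a*w, both taken as 8-word little-endian numbers,
+ * and returns the carry out of the top word. bn_mul_words() below
+ * is analogous but stores a*w instead of accumulating into r.
+ */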
+
+BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+ {
+ BN_ULONG c1=0;
+
+ if (num <= 0) return(c1);
+
+ while (num&~3)
+ {
+ mul(rp[0],ap[0],w,c1);
+ mul(rp[1],ap[1],w,c1);
+ mul(rp[2],ap[2],w,c1);
+ mul(rp[3],ap[3],w,c1);
+ ap+=4; rp+=4; num-=4;
+ }
+ if (num)
+ {
+ mul(rp[0],ap[0],w,c1); if (--num == 0) return c1;
+ mul(rp[1],ap[1],w,c1); if (--num == 0) return c1;
+ mul(rp[2],ap[2],w,c1);
+ }
+ return(c1);
+ }
+
+void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
+ {
+ if (n <= 0) return;
+
+ while (n&~3)
+ {
+ sqr(r[0],r[1],a[0]);
+ sqr(r[2],r[3],a[1]);
+ sqr(r[4],r[5],a[2]);
+ sqr(r[6],r[7],a[3]);
+ a+=4; r+=8; n-=4;
+ }
+ if (n)
+ {
+ sqr(r[0],r[1],a[0]); if (--n == 0) return;
+ sqr(r[2],r[3],a[1]); if (--n == 0) return;
+ sqr(r[4],r[5],a[2]);
+ }
+ }
+
+BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+{ BN_ULONG ret,waste;
+
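+	/*
+	 * divq divides the 128-bit value h:l (h in rdx, l in rax) by d,
+	 * leaving the quotient in rax ("ret") and the remainder in rdx,
+	 * which is captured as "waste" and discarded. The instruction
+	 * faults if the quotient overflows 64 bits, so callers keep h < d.
+	 */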
+ asm ("divq %4"
+ : "=a"(ret),"=d"(waste)
+ : "a"(l),"d"(h),"g"(d)
+ : "cc");
+
+ return ret;
+}
+
+BN_ULONG bn_add_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n)
+{ BN_ULONG ret=0,i=0;
+
+ if (n <= 0) return 0;
+
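+	/*
+	 * The adcq chain below works because leaq and loop leave the
+	 * carry flag untouched between iterations; the closing sbbq
+	 * turns the final carry into 0 or -1, masked to 0/1 on return.
+	 */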
+ asm (
+ " subq %2,%2 \n"
+ ".align 16 \n"
+ "1: movq (%4,%2,8),%0 \n"
+ " adcq (%5,%2,8),%0 \n"
+ " movq %0,(%3,%2,8) \n"
+ " leaq 1(%2),%2 \n"
+ " loop 1b \n"
+ " sbbq %0,%0 \n"
+ : "=&a"(ret),"+c"(n),"=&r"(i)
+ : "r"(rp),"r"(ap),"r"(bp)
+ : "cc"
+ );
+
+ return ret&1;
+}
+
+#ifndef SIMICS
+BN_ULONG bn_sub_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n)
+{ BN_ULONG ret=0,i=0;
+
+ if (n <= 0) return 0;
+
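+	/* Same technique as bn_add_words() above, with sbbq propagating
+	 * the borrow instead of adcq propagating the carry. */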
+ asm (
+ " subq %2,%2 \n"
+ ".align 16 \n"
+ "1: movq (%4,%2,8),%0 \n"
+ " sbbq (%5,%2,8),%0 \n"
+ " movq %0,(%3,%2,8) \n"
+ " leaq 1(%2),%2 \n"
+ " loop 1b \n"
+ " sbbq %0,%0 \n"
+ : "=&a"(ret),"+c"(n),"=&r"(i)
+ : "r"(rp),"r"(ap),"r"(bp)
+ : "cc"
+ );
+
+ return ret&1;
+}
+#else
+/* Simics versions before 1.4.7 have a buggy sbbq :-( */
+#define BN_MASK2 0xffffffffffffffffL
+BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+ {
+ BN_ULONG t1,t2;
+ int c=0;
+
+ if (n <= 0) return((BN_ULONG)0);
+
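+	/*
+	 * Borrow propagation below: when t1 == t2 the borrow out equals
+	 * the borrow in, so c is deliberately left unchanged; otherwise
+	 * the new borrow is simply (t1 < t2).
+	 */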
+ for (;;)
+ {
+ t1=a[0]; t2=b[0];
+ r[0]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ if (--n <= 0) break;
+
+ t1=a[1]; t2=b[1];
+ r[1]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ if (--n <= 0) break;
+
+ t1=a[2]; t2=b[2];
+ r[2]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ if (--n <= 0) break;
+
+ t1=a[3]; t2=b[3];
+ r[3]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ if (--n <= 0) break;
+
+ a+=4;
+ b+=4;
+ r+=4;
+ }
+ return(c);
+ }
+#endif
+
+/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for the three-word number c=(c2,c1,c0) */
+/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for the three-word number c=(c2,c1,c0) */
+/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for the three-word number c=(c2,c1,c0) */
+/* sqr_add_c2(a,i,j,c0,c1,c2) -- c+=2*a[i]*a[j] for the three-word number c=(c2,c1,c0) */
+
+#if 0
+/* original macros are kept for reference purposes */
+#define mul_add_c(a,b,c0,c1,c2) { \
+ BN_ULONG ta=(a),tb=(b); \
+ t1 = ta * tb; \
+ t2 = BN_UMULT_HIGH(ta,tb); \
+ c0 += t1; t2 += (c0<t1)?1:0; \
+ c1 += t2; c2 += (c1<t2)?1:0; \
+ }
+
+#define mul_add_c2(a,b,c0,c1,c2) { \
+ BN_ULONG ta=(a),tb=(b),t0; \
+ t1 = BN_UMULT_HIGH(ta,tb); \
+ t0 = ta * tb; \
+ t2 = t1+t1; c2 += (t2<t1)?1:0; \
+ t1 = t0+t0; t2 += (t1<t0)?1:0; \
+ c0 += t1; t2 += (c0<t1)?1:0; \
+ c1 += t2; c2 += (c1<t2)?1:0; \
+ }
+#else
+#define mul_add_c(a,b,c0,c1,c2) do { \
+ asm ("mulq %3" \
+ : "=a"(t1),"=d"(t2) \
+ : "a"(a),"m"(b) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c0),"+d"(t2) \
+ : "a"(t1),"g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c1),"+r"(c2) \
+ : "d"(t2),"g"(0) \
+ : "cc"); \
+ } while (0)
+
+#define sqr_add_c(a,i,c0,c1,c2) do { \
+ asm ("mulq %2" \
+ : "=a"(t1),"=d"(t2) \
+ : "a"(a[i]) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c0),"+d"(t2) \
+ : "a"(t1),"g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c1),"+r"(c2) \
+ : "d"(t2),"g"(0) \
+ : "cc"); \
+ } while (0)
+
+#define mul_add_c2(a,b,c0,c1,c2) do { \
+ asm ("mulq %3" \
+ : "=a"(t1),"=d"(t2) \
+ : "a"(a),"m"(b) \
+ : "cc"); \
+ asm ("addq %0,%0; adcq %2,%1" \
+ : "+d"(t2),"+r"(c2) \
+ : "g"(0) \
+ : "cc"); \
+ asm ("addq %0,%0; adcq %2,%1" \
+ : "+a"(t1),"+d"(t2) \
+ : "g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c0),"+d"(t2) \
+ : "a"(t1),"g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c1),"+r"(c2) \
+ : "d"(t2),"g"(0) \
+ : "cc"); \
+ } while (0)
+#endif
+
+#define sqr_add_c2(a,i,j,c0,c1,c2) \
+ mul_add_c2((a)[i],(a)[j],c0,c1,c2)
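+
+/*
+ * The comba routines below compute the product one result word at a
+ * time: for r[k] every partial product a[i]*b[j] with i+j == k is
+ * accumulated into a three-word accumulator, the low word is stored,
+ * and the roles of (c1,c2,c3) rotate for the next column.
+ */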
+
+void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+ {
+ BN_ULONG t1,t2;
+ BN_ULONG c1,c2,c3;
+
+ c1=0;
+ c2=0;
+ c3=0;
+ mul_add_c(a[0],b[0],c1,c2,c3);
+ r[0]=c1;
+ c1=0;
+ mul_add_c(a[0],b[1],c2,c3,c1);
+ mul_add_c(a[1],b[0],c2,c3,c1);
+ r[1]=c2;
+ c2=0;
+ mul_add_c(a[2],b[0],c3,c1,c2);
+ mul_add_c(a[1],b[1],c3,c1,c2);
+ mul_add_c(a[0],b[2],c3,c1,c2);
+ r[2]=c3;
+ c3=0;
+ mul_add_c(a[0],b[3],c1,c2,c3);
+ mul_add_c(a[1],b[2],c1,c2,c3);
+ mul_add_c(a[2],b[1],c1,c2,c3);
+ mul_add_c(a[3],b[0],c1,c2,c3);
+ r[3]=c1;
+ c1=0;
+ mul_add_c(a[4],b[0],c2,c3,c1);
+ mul_add_c(a[3],b[1],c2,c3,c1);
+ mul_add_c(a[2],b[2],c2,c3,c1);
+ mul_add_c(a[1],b[3],c2,c3,c1);
+ mul_add_c(a[0],b[4],c2,c3,c1);
+ r[4]=c2;
+ c2=0;
+ mul_add_c(a[0],b[5],c3,c1,c2);
+ mul_add_c(a[1],b[4],c3,c1,c2);
+ mul_add_c(a[2],b[3],c3,c1,c2);
+ mul_add_c(a[3],b[2],c3,c1,c2);
+ mul_add_c(a[4],b[1],c3,c1,c2);
+ mul_add_c(a[5],b[0],c3,c1,c2);
+ r[5]=c3;
+ c3=0;
+ mul_add_c(a[6],b[0],c1,c2,c3);
+ mul_add_c(a[5],b[1],c1,c2,c3);
+ mul_add_c(a[4],b[2],c1,c2,c3);
+ mul_add_c(a[3],b[3],c1,c2,c3);
+ mul_add_c(a[2],b[4],c1,c2,c3);
+ mul_add_c(a[1],b[5],c1,c2,c3);
+ mul_add_c(a[0],b[6],c1,c2,c3);
+ r[6]=c1;
+ c1=0;
+ mul_add_c(a[0],b[7],c2,c3,c1);
+ mul_add_c(a[1],b[6],c2,c3,c1);
+ mul_add_c(a[2],b[5],c2,c3,c1);
+ mul_add_c(a[3],b[4],c2,c3,c1);
+ mul_add_c(a[4],b[3],c2,c3,c1);
+ mul_add_c(a[5],b[2],c2,c3,c1);
+ mul_add_c(a[6],b[1],c2,c3,c1);
+ mul_add_c(a[7],b[0],c2,c3,c1);
+ r[7]=c2;
+ c2=0;
+ mul_add_c(a[7],b[1],c3,c1,c2);
+ mul_add_c(a[6],b[2],c3,c1,c2);
+ mul_add_c(a[5],b[3],c3,c1,c2);
+ mul_add_c(a[4],b[4],c3,c1,c2);
+ mul_add_c(a[3],b[5],c3,c1,c2);
+ mul_add_c(a[2],b[6],c3,c1,c2);
+ mul_add_c(a[1],b[7],c3,c1,c2);
+ r[8]=c3;
+ c3=0;
+ mul_add_c(a[2],b[7],c1,c2,c3);
+ mul_add_c(a[3],b[6],c1,c2,c3);
+ mul_add_c(a[4],b[5],c1,c2,c3);
+ mul_add_c(a[5],b[4],c1,c2,c3);
+ mul_add_c(a[6],b[3],c1,c2,c3);
+ mul_add_c(a[7],b[2],c1,c2,c3);
+ r[9]=c1;
+ c1=0;
+ mul_add_c(a[7],b[3],c2,c3,c1);
+ mul_add_c(a[6],b[4],c2,c3,c1);
+ mul_add_c(a[5],b[5],c2,c3,c1);
+ mul_add_c(a[4],b[6],c2,c3,c1);
+ mul_add_c(a[3],b[7],c2,c3,c1);
+ r[10]=c2;
+ c2=0;
+ mul_add_c(a[4],b[7],c3,c1,c2);
+ mul_add_c(a[5],b[6],c3,c1,c2);
+ mul_add_c(a[6],b[5],c3,c1,c2);
+ mul_add_c(a[7],b[4],c3,c1,c2);
+ r[11]=c3;
+ c3=0;
+ mul_add_c(a[7],b[5],c1,c2,c3);
+ mul_add_c(a[6],b[6],c1,c2,c3);
+ mul_add_c(a[5],b[7],c1,c2,c3);
+ r[12]=c1;
+ c1=0;
+ mul_add_c(a[6],b[7],c2,c3,c1);
+ mul_add_c(a[7],b[6],c2,c3,c1);
+ r[13]=c2;
+ c2=0;
+ mul_add_c(a[7],b[7],c3,c1,c2);
+ r[14]=c3;
+ r[15]=c1;
+ }
+
+void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+ {
+ BN_ULONG t1,t2;
+ BN_ULONG c1,c2,c3;
+
+ c1=0;
+ c2=0;
+ c3=0;
+ mul_add_c(a[0],b[0],c1,c2,c3);
+ r[0]=c1;
+ c1=0;
+ mul_add_c(a[0],b[1],c2,c3,c1);
+ mul_add_c(a[1],b[0],c2,c3,c1);
+ r[1]=c2;
+ c2=0;
+ mul_add_c(a[2],b[0],c3,c1,c2);
+ mul_add_c(a[1],b[1],c3,c1,c2);
+ mul_add_c(a[0],b[2],c3,c1,c2);
+ r[2]=c3;
+ c3=0;
+ mul_add_c(a[0],b[3],c1,c2,c3);
+ mul_add_c(a[1],b[2],c1,c2,c3);
+ mul_add_c(a[2],b[1],c1,c2,c3);
+ mul_add_c(a[3],b[0],c1,c2,c3);
+ r[3]=c1;
+ c1=0;
+ mul_add_c(a[3],b[1],c2,c3,c1);
+ mul_add_c(a[2],b[2],c2,c3,c1);
+ mul_add_c(a[1],b[3],c2,c3,c1);
+ r[4]=c2;
+ c2=0;
+ mul_add_c(a[2],b[3],c3,c1,c2);
+ mul_add_c(a[3],b[2],c3,c1,c2);
+ r[5]=c3;
+ c3=0;
+ mul_add_c(a[3],b[3],c1,c2,c3);
+ r[6]=c1;
+ r[7]=c2;
+ }
+
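+/*
+ * The squaring variants exploit symmetry: each off-diagonal product
+ * a[i]*a[j] (i != j) is computed once and doubled via sqr_add_c2,
+ * while diagonal terms a[i]^2 use sqr_add_c.
+ */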
+void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+ {
+ BN_ULONG t1,t2;
+ BN_ULONG c1,c2,c3;
+
+ c1=0;
+ c2=0;
+ c3=0;
+ sqr_add_c(a,0,c1,c2,c3);
+ r[0]=c1;
+ c1=0;
+ sqr_add_c2(a,1,0,c2,c3,c1);
+ r[1]=c2;
+ c2=0;
+ sqr_add_c(a,1,c3,c1,c2);
+ sqr_add_c2(a,2,0,c3,c1,c2);
+ r[2]=c3;
+ c3=0;
+ sqr_add_c2(a,3,0,c1,c2,c3);
+ sqr_add_c2(a,2,1,c1,c2,c3);
+ r[3]=c1;
+ c1=0;
+ sqr_add_c(a,2,c2,c3,c1);
+ sqr_add_c2(a,3,1,c2,c3,c1);
+ sqr_add_c2(a,4,0,c2,c3,c1);
+ r[4]=c2;
+ c2=0;
+ sqr_add_c2(a,5,0,c3,c1,c2);
+ sqr_add_c2(a,4,1,c3,c1,c2);
+ sqr_add_c2(a,3,2,c3,c1,c2);
+ r[5]=c3;
+ c3=0;
+ sqr_add_c(a,3,c1,c2,c3);
+ sqr_add_c2(a,4,2,c1,c2,c3);
+ sqr_add_c2(a,5,1,c1,c2,c3);
+ sqr_add_c2(a,6,0,c1,c2,c3);
+ r[6]=c1;
+ c1=0;
+ sqr_add_c2(a,7,0,c2,c3,c1);
+ sqr_add_c2(a,6,1,c2,c3,c1);
+ sqr_add_c2(a,5,2,c2,c3,c1);
+ sqr_add_c2(a,4,3,c2,c3,c1);
+ r[7]=c2;
+ c2=0;
+ sqr_add_c(a,4,c3,c1,c2);
+ sqr_add_c2(a,5,3,c3,c1,c2);
+ sqr_add_c2(a,6,2,c3,c1,c2);
+ sqr_add_c2(a,7,1,c3,c1,c2);
+ r[8]=c3;
+ c3=0;
+ sqr_add_c2(a,7,2,c1,c2,c3);
+ sqr_add_c2(a,6,3,c1,c2,c3);
+ sqr_add_c2(a,5,4,c1,c2,c3);
+ r[9]=c1;
+ c1=0;
+ sqr_add_c(a,5,c2,c3,c1);
+ sqr_add_c2(a,6,4,c2,c3,c1);
+ sqr_add_c2(a,7,3,c2,c3,c1);
+ r[10]=c2;
+ c2=0;
+ sqr_add_c2(a,7,4,c3,c1,c2);
+ sqr_add_c2(a,6,5,c3,c1,c2);
+ r[11]=c3;
+ c3=0;
+ sqr_add_c(a,6,c1,c2,c3);
+ sqr_add_c2(a,7,5,c1,c2,c3);
+ r[12]=c1;
+ c1=0;
+ sqr_add_c2(a,7,6,c2,c3,c1);
+ r[13]=c2;
+ c2=0;
+ sqr_add_c(a,7,c3,c1,c2);
+ r[14]=c3;
+ r[15]=c1;
+ }
+
+void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+ {
+ BN_ULONG t1,t2;
+ BN_ULONG c1,c2,c3;
+
+ c1=0;
+ c2=0;
+ c3=0;
+ sqr_add_c(a,0,c1,c2,c3);
+ r[0]=c1;
+ c1=0;
+ sqr_add_c2(a,1,0,c2,c3,c1);
+ r[1]=c2;
+ c2=0;
+ sqr_add_c(a,1,c3,c1,c2);
+ sqr_add_c2(a,2,0,c3,c1,c2);
+ r[2]=c3;
+ c3=0;
+ sqr_add_c2(a,3,0,c1,c2,c3);
+ sqr_add_c2(a,2,1,c1,c2,c3);
+ r[3]=c1;
+ c1=0;
+ sqr_add_c(a,2,c2,c3,c1);
+ sqr_add_c2(a,3,1,c2,c3,c1);
+ r[4]=c2;
+ c2=0;
+ sqr_add_c2(a,3,2,c3,c1,c2);
+ r[5]=c3;
+ c3=0;
+ sqr_add_c(a,3,c1,c2,c3);
+ r[6]=c1;
+ r[7]=c2;
+ }
+#endif