summaryrefslogtreecommitdiffstats
path: root/crypto/openssl/crypto/bn/asm
diff options
context:
space:
mode:
author: nectar <nectar@FreeBSD.org> 2003-02-19 23:17:42 +0000
committer: nectar <nectar@FreeBSD.org> 2003-02-19 23:17:42 +0000
commit: 6c9986c446b6cf77f5e83d111dbcca682d6fdd71 (patch)
tree: e5eb3878430323e978956db174c9c51c7997ba4a /crypto/openssl/crypto/bn/asm
parent: b6c07e9a21ba42613fc3906d3efb586dd5c9a846 (diff)
download: FreeBSD-src-6c9986c446b6cf77f5e83d111dbcca682d6fdd71.zip
download: FreeBSD-src-6c9986c446b6cf77f5e83d111dbcca682d6fdd71.tar.gz
Vendor import of OpenSSL 0.9.7a.
Diffstat (limited to 'crypto/openssl/crypto/bn/asm')
-rw-r--r--  crypto/openssl/crypto/bn/asm/ia64.S     235
-rw-r--r--  crypto/openssl/crypto/bn/asm/pa-risc2.s  36
2 files changed, 189 insertions, 82 deletions
diff --git a/crypto/openssl/crypto/bn/asm/ia64.S b/crypto/openssl/crypto/bn/asm/ia64.S
index ae56066..7dfda85 100644
--- a/crypto/openssl/crypto/bn/asm/ia64.S
+++ b/crypto/openssl/crypto/bn/asm/ia64.S
@@ -1,6 +1,6 @@
.explicit
.text
-.ident "ia64.S, Version 1.1"
+.ident "ia64.S, Version 2.0"
.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
//
@@ -13,6 +13,35 @@
// disclaimed.
// ====================================================================
//
+// Version 2.x is Itanium2 re-tune. Few words about how Itanium2 is
+// different from Itanium to this module viewpoint. Most notably, is it
+// "wider" than Itanium? Can you experience loop scalability as
+// discussed in commentary sections? Not really:-( Itanium2 has 6
+// integer ALU ports, i.e. it's 2 ports wider, but it's not enough to
+// spin twice as fast, as I need 8 IALU ports. Amount of floating point
+// ports is the same, i.e. 2, while I need 4. In other words, to this
+// module Itanium2 remains effectively as "wide" as Itanium. Yet it's
+// essentially different in respect to this module, and a re-tune was
+// required. Well, because some intruction latencies has changed. Most
+// noticeably those intensively used:
+//
+// Itanium Itanium2
+// ldf8 9 6 L2 hit
+// ld8 2 1 L1 hit
+// getf 2 5
+// xma[->getf] 7[+1] 4[+0]
+// add[->st8] 1[+1] 1[+0]
+//
+// What does it mean? You might ratiocinate that the original code
+// should run just faster... Because sum of latencies is smaller...
+// Wrong! Note that getf latency increased. This means that if a loop is
+// scheduled for lower latency (and they are), then it will suffer from
+// stall condition and the code will therefore turn anti-scalable, e.g.
+// original bn_mul_words spun at 5*n or 2.5 times slower than expected
+// on Itanium2! What to do? Reschedule loops for Itanium2? But then
+// Itanium would exhibit anti-scalability. So I've chosen to reschedule
+// for worst latency for every instruction aiming for best *all-round*
+// performance.
// Q. How much faster does it get?
// A. Here is the output from 'openssl speed rsa dsa' for vanilla
@@ -149,12 +178,27 @@ bn_add_words:
brp.loop.imp .L_bn_add_words_ctop,.L_bn_add_words_cend-16
}
.body
-{ .mib; mov r14=r32 // rp
+{ .mib;
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
+ addp4 r14=0,r32 // rp
+#else
+ mov r14=r32 // rp
+#endif
mov r9=pr };;
-{ .mii; mov r15=r33 // ap
+{ .mii;
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
+ addp4 r15=0,r33 // ap
+#else
+ mov r15=r33 // ap
+#endif
mov ar.lc=r10
mov ar.ec=6 }
-{ .mib; mov r16=r34 // bp
+{ .mib;
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
+ addp4 r16=0,r34 // bp
+#else
+ mov r16=r34 // bp
+#endif
mov pr.rot=1<<16 };;
.L_bn_add_words_ctop:
@@ -174,7 +218,7 @@ bn_add_words:
{ .mii;
(p59) add r8=1,r8 // return value
- mov pr=r9,-1
+ mov pr=r9,0x1ffff
mov ar.lc=r3 }
{ .mbb; nop.b 0x0
br.ret.sptk.many b0 };;
@@ -202,12 +246,27 @@ bn_sub_words:
brp.loop.imp .L_bn_sub_words_ctop,.L_bn_sub_words_cend-16
}
.body
-{ .mib; mov r14=r32 // rp
+{ .mib;
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
+ addp4 r14=0,r32 // rp
+#else
+ mov r14=r32 // rp
+#endif
mov r9=pr };;
-{ .mii; mov r15=r33 // ap
+{ .mii;
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
+ addp4 r15=0,r33 // ap
+#else
+ mov r15=r33 // ap
+#endif
mov ar.lc=r10
mov ar.ec=6 }
-{ .mib; mov r16=r34 // bp
+{ .mib;
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
+ addp4 r16=0,r34 // bp
+#else
+ mov r16=r34 // bp
+#endif
mov pr.rot=1<<16 };;
.L_bn_sub_words_ctop:
@@ -227,7 +286,7 @@ bn_sub_words:
{ .mii;
(p59) add r8=1,r8 // return value
- mov pr=r9,-1
+ mov pr=r9,0x1ffff
mov ar.lc=r3 }
{ .mbb; nop.b 0x0
br.ret.sptk.many b0 };;
@@ -253,7 +312,7 @@ bn_mul_words:
#ifdef XMA_TEMPTATION
{ .mfi; alloc r2=ar.pfs,4,0,0,0 };;
#else
-{ .mfi; alloc r2=ar.pfs,4,4,0,8 };;
+{ .mfi; alloc r2=ar.pfs,4,12,0,16 };;
#endif
{ .mib; mov r8=r0 // return value
cmp4.le p6,p0=r34,r0
@@ -266,24 +325,30 @@ bn_mul_words:
.body
{ .mib; setf.sig f8=r35 // w
- mov pr.rot=0x400001<<16
- // ------^----- serves as (p48) at first (p26)
+ mov pr.rot=0x800001<<16
+ // ------^----- serves as (p50) at first (p27)
brp.loop.imp .L_bn_mul_words_ctop,.L_bn_mul_words_cend-16
}
#ifndef XMA_TEMPTATION
-{ .mii; mov r14=r32 // rp
- mov r15=r33 // ap
+{ .mii;
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
+ addp4 r14=0,r32 // rp
+ addp4 r15=0,r33 // ap
+#else
+ mov r14=r32 // rp
+ mov r15=r33 // ap
+#endif
mov ar.lc=r10 }
-{ .mii; mov r39=0 // serves as r33 at first (p26)
- mov ar.ec=12 };;
+{ .mii; mov r40=0 // serves as r35 at first (p27)
+ mov ar.ec=13 };;
-// This loop spins in 2*(n+11) ticks. It's scheduled for data in L2
-// cache (i.e. 9 ticks away) as floating point load/store instructions
+// This loop spins in 2*(n+12) ticks. It's scheduled for data in Itanium
+// L2 cache (i.e. 9 ticks away) as floating point load/store instructions
// bypass L1 cache and L2 latency is actually best-case scenario for
-// ldf8. The loop is not scalable and shall run in 2*(n+11) even on
-// "wider" IA-64 implementations. It's a trade-off here. n+22 loop
+// ldf8. The loop is not scalable and shall run in 2*(n+12) even on
+// "wider" IA-64 implementations. It's a trade-off here. n+24 loop
// would give us ~5% in *overall* performance improvement on "wider"
// IA-64, but would hurt Itanium for about same because of longer
// epilogue. As it's a matter of few percents in either case I've
@@ -291,25 +356,25 @@ bn_mul_words:
// this very instruction sequence in bn_mul_add_words loop which in
// turn is scalable).
.L_bn_mul_words_ctop:
-{ .mfi; (p25) getf.sig r36=f49 // low
- (p21) xmpy.lu f45=f37,f8
- (p27) cmp.ltu p52,p48=r39,r38 }
+{ .mfi; (p25) getf.sig r36=f52 // low
+ (p21) xmpy.lu f48=f37,f8
+ (p28) cmp.ltu p54,p50=r41,r39 }
{ .mfi; (p16) ldf8 f32=[r15],8
- (p21) xmpy.hu f38=f37,f8
+ (p21) xmpy.hu f40=f37,f8
(p0) nop.i 0x0 };;
-{ .mii; (p26) getf.sig r32=f43 // high
- .pred.rel "mutex",p48,p52
- (p48) add r38=r37,r33 // (p26)
- (p52) add r38=r37,r33,1 } // (p26)
-{ .mfb; (p27) st8 [r14]=r39,8
+{ .mii; (p25) getf.sig r32=f44 // high
+ .pred.rel "mutex",p50,p54
+ (p50) add r40=r38,r35 // (p27)
+ (p54) add r40=r38,r35,1 } // (p27)
+{ .mfb; (p28) st8 [r14]=r41,8
(p0) nop.f 0x0
br.ctop.sptk .L_bn_mul_words_ctop };;
.L_bn_mul_words_cend:
{ .mii; nop.m 0x0
-.pred.rel "mutex",p49,p53
-(p49) add r8=r34,r0
-(p53) add r8=r34,r0,1 }
+.pred.rel "mutex",p51,p55
+(p51) add r8=r36,r0
+(p55) add r8=r36,r0,1 }
{ .mfb; nop.m 0x0
nop.f 0x0
nop.b 0x0 }
@@ -344,7 +409,7 @@ bn_mul_words:
#endif // XMA_TEMPTATION
{ .mii; nop.m 0x0
- mov pr=r9,-1
+ mov pr=r9,0x1ffff
mov ar.lc=r3 }
{ .mfb; rum 1<<5 // clear um.mfh
nop.f 0x0
@@ -376,59 +441,69 @@ bn_mul_add_words:
.body
{ .mib; setf.sig f8=r35 // w
- mov pr.rot=0x400001<<16
- // ------^----- serves as (p48) at first (p26)
+ mov pr.rot=0x800001<<16
+ // ------^----- serves as (p50) at first (p27)
brp.loop.imp .L_bn_mul_add_words_ctop,.L_bn_mul_add_words_cend-16
}
-{ .mii; mov r14=r32 // rp
- mov r15=r33 // ap
+{ .mii;
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
+ addp4 r14=0,r32 // rp
+ addp4 r15=0,r33 // ap
+#else
+ mov r14=r32 // rp
+ mov r15=r33 // ap
+#endif
mov ar.lc=r10 }
-{ .mii; mov r39=0 // serves as r33 at first (p26)
- mov r18=r32 // rp copy
- mov ar.ec=14 };;
+{ .mii; mov r40=0 // serves as r35 at first (p27)
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
+ addp4 r18=0,r32 // rp copy
+#else
+ mov r18=r32 // rp copy
+#endif
+ mov ar.ec=15 };;
-// This loop spins in 3*(n+13) ticks on Itanium and should spin in
-// 2*(n+13) on "wider" IA-64 implementations (to be verified with new
+// This loop spins in 3*(n+14) ticks on Itanium and should spin in
+// 2*(n+14) on "wider" IA-64 implementations (to be verified with new
// µ-architecture manuals as they become available). As usual it's
// possible to compress the epilogue, down to 10 in this case, at the
// cost of scalability. Compressed (and therefore non-scalable) loop
-// running at 3*(n+10) would buy you ~10% on Itanium but take ~35%
+// running at 3*(n+11) would buy you ~10% on Itanium but take ~35%
// from "wider" IA-64 so let it be scalable! Special attention was
// paid for having the loop body split at 64-byte boundary. ld8 is
// scheduled for L1 cache as the data is more than likely there.
// Indeed, bn_mul_words has put it there a moment ago:-)
.L_bn_mul_add_words_ctop:
-{ .mfi; (p25) getf.sig r36=f49 // low
- (p21) xmpy.lu f45=f37,f8
- (p27) cmp.ltu p52,p48=r39,r38 }
+{ .mfi; (p25) getf.sig r36=f52 // low
+ (p21) xmpy.lu f48=f37,f8
+ (p28) cmp.ltu p54,p50=r41,r39 }
{ .mfi; (p16) ldf8 f32=[r15],8
- (p21) xmpy.hu f38=f37,f8
- (p27) add r43=r43,r39 };;
-{ .mii; (p26) getf.sig r32=f43 // high
- .pred.rel "mutex",p48,p52
- (p48) add r38=r37,r33 // (p26)
- (p52) add r38=r37,r33,1 } // (p26)
-{ .mfb; (p27) cmp.ltu.unc p56,p0=r43,r39
+ (p21) xmpy.hu f40=f37,f8
+ (p28) add r45=r45,r41 };;
+{ .mii; (p25) getf.sig r32=f44 // high
+ .pred.rel "mutex",p50,p54
+ (p50) add r40=r38,r35 // (p27)
+ (p54) add r40=r38,r35,1 } // (p27)
+{ .mfb; (p28) cmp.ltu.unc p60,p0=r45,r41
(p0) nop.f 0x0
(p0) nop.b 0x0 }
-{ .mii; (p26) ld8 r42=[r18],8
- (p58) cmp.eq.or p57,p0=-1,r44
- (p58) add r44=1,r44 }
-{ .mfb; (p29) st8 [r14]=r45,8
+{ .mii; (p27) ld8 r44=[r18],8
+ (p62) cmp.eq.or p61,p0=-1,r46
+ (p62) add r46=1,r46 }
+{ .mfb; (p30) st8 [r14]=r47,8
(p0) nop.f 0x0
br.ctop.sptk .L_bn_mul_add_words_ctop};;
.L_bn_mul_add_words_cend:
{ .mii; nop.m 0x0
-.pred.rel "mutex",p51,p55
-(p51) add r8=r36,r0
-(p55) add r8=r36,r0,1 }
+.pred.rel "mutex",p53,p57
+(p53) add r8=r38,r0
+(p57) add r8=r38,r0,1 }
{ .mfb; nop.m 0x0
nop.f 0x0
nop.b 0x0 };;
{ .mii;
-(p59) add r8=1,r8
- mov pr=r9,-1
+(p63) add r8=1,r8
+ mov pr=r9,0x1ffff
mov ar.lc=r3 }
{ .mfb; rum 1<<5 // clear um.mfh
nop.f 0x0
@@ -461,6 +536,10 @@ bn_sqr_words:
mov r9=pr };;
.body
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
+{ .mii; addp4 r32=0,r32
+ addp4 r33=0,r33 };;
+#endif
{ .mib;
mov pr.rot=1<<16
brp.loop.imp .L_bn_sqr_words_ctop,.L_bn_sqr_words_cend-16
@@ -492,7 +571,7 @@ bn_sqr_words:
.L_bn_sqr_words_cend:
{ .mii; nop.m 0x0
- mov pr=r9,-1
+ mov pr=r9,0x1ffff
mov ar.lc=r3 }
{ .mfb; rum 1<<5 // clear um.mfh
nop.f 0x0
@@ -526,7 +605,14 @@ bn_sqr_comba8:
.prologue
.fframe 0
.save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
{ .mii; alloc r2=ar.pfs,2,1,0,0
+ addp4 r33=0,r33
+ addp4 r32=0,r32 };;
+{ .mii;
+#else
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+#endif
mov r34=r33
add r14=8,r33 };;
.body
@@ -587,7 +673,14 @@ bn_mul_comba8:
.prologue
.fframe 0
.save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
{ .mii; alloc r2=ar.pfs,3,0,0,0
+ addp4 r33=0,r33
+ addp4 r34=0,r34 };;
+{ .mii; addp4 r32=0,r32
+#else
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+#endif
add r14=8,r33
add r17=8,r34 }
.body
@@ -1138,7 +1231,14 @@ bn_sqr_comba4:
.prologue
.fframe 0
.save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+ addp4 r32=0,r32
+ addp4 r33=0,r33 };;
+{ .mii;
+#else
{ .mii; alloc r2=ar.pfs,2,1,0,0
+#endif
mov r34=r33
add r14=8,r33 };;
.body
@@ -1164,7 +1264,14 @@ bn_mul_comba4:
.prologue
.fframe 0
.save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && defined(_ILP32)
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+ addp4 r33=0,r33
+ addp4 r34=0,r34 };;
+{ .mii; addp4 r32=0,r32
+#else
{ .mii; alloc r2=ar.pfs,3,0,0,0
+#endif
add r14=8,r33
add r17=8,r34 }
.body
@@ -1464,7 +1571,7 @@ bn_div_words:
or r8=r8,r33
mov ar.pfs=r2 };;
{ .mii; shr.u r9=H,I // remainder if anybody wants it
- mov pr=r10,-1 }
+ mov pr=r10,0x1ffff }
{ .mfb; br.ret.sptk.many b0 };;
// Unsigned 64 by 32 (well, by 64 for the moment) bit integer division
diff --git a/crypto/openssl/crypto/bn/asm/pa-risc2.s b/crypto/openssl/crypto/bn/asm/pa-risc2.s
index af9730d..f3b1629 100644
--- a/crypto/openssl/crypto/bn/asm/pa-risc2.s
+++ b/crypto/openssl/crypto/bn/asm/pa-risc2.s
@@ -747,8 +747,8 @@ bn_div_words
.PROC
.EXPORT bn_div_words,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR,RTNVAL=GR,LONG_RETURN
.IMPORT BN_num_bits_word,CODE
- .IMPORT __iob,DATA
- .IMPORT fprintf,CODE
+ ;--- not PIC .IMPORT __iob,DATA
+ ;--- not PIC .IMPORT fprintf,CODE
.IMPORT abort,CODE
.IMPORT $$div2U,MILLICODE
.CALLINFO CALLER,FRAME=144,ENTRY_GR=%r9,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
@@ -844,12 +844,12 @@ $0006001A
MOVIB,TR 2,%r8,$0006001C ;offset 0xa18
EXTRD,U %r3,63,32,%r7 ;offset 0xa1c
$D2
- ADDIL LR'__iob-$global$,%r27,%r1 ;offset 0xa20
- LDIL LR'C$7,%r21 ;offset 0xa24
- LDO RR'__iob-$global$+32(%r1),%r26 ;offset 0xa28
- .CALL ARGW0=GR,ARGW1=GR,ARGW2=GR,RTNVAL=GR ;in=24,25,26;out=28;
- B,L fprintf,%r2 ;offset 0xa2c
- LDO RR'C$7(%r21),%r25 ;offset 0xa30
+ ;--- not PIC ADDIL LR'__iob-$global$,%r27,%r1 ;offset 0xa20
+ ;--- not PIC LDIL LR'C$7,%r21 ;offset 0xa24
+ ;--- not PIC LDO RR'__iob-$global$+32(%r1),%r26 ;offset 0xa28
+ ;--- not PIC .CALL ARGW0=GR,ARGW1=GR,ARGW2=GR,RTNVAL=GR ;in=24,25,26;out=28;
+ ;--- not PIC B,L fprintf,%r2 ;offset 0xa2c
+ ;--- not PIC LDO RR'C$7(%r21),%r25 ;offset 0xa30
.CALL ;
B,L abort,%r2 ;offset 0xa34
NOP ;offset 0xa38
@@ -1605,14 +1605,14 @@ bn_mul_comba4
.PROCEND
- .SPACE $TEXT$
- .SUBSPA $CODE$
- .SPACE $PRIVATE$,SORT=16
- .IMPORT $global$,DATA
- .SPACE $TEXT$
- .SUBSPA $CODE$
- .SUBSPA $LIT$,ACCESS=0x2c
-C$7
- .ALIGN 8
- .STRINGZ "Division would overflow (%d)\n"
+;--- not PIC .SPACE $TEXT$
+;--- not PIC .SUBSPA $CODE$
+;--- not PIC .SPACE $PRIVATE$,SORT=16
+;--- not PIC .IMPORT $global$,DATA
+;--- not PIC .SPACE $TEXT$
+;--- not PIC .SUBSPA $CODE$
+;--- not PIC .SUBSPA $LIT$,ACCESS=0x2c
+;--- not PIC C$7
+;--- not PIC .ALIGN 8
+;--- not PIC .STRINGZ "Division would overflow (%d)\n"
.END
OpenPOWER on IntegriCloud