author      obrien <obrien@FreeBSD.org>  2002-05-09 20:02:13 +0000
committer   obrien <obrien@FreeBSD.org>  2002-05-09 20:02:13 +0000
commit      98051db4df2d731c020b9560e9937beb0e4656b1 (patch)
tree        36f481a01150941dad35fedfafeac692064186ca /contrib/gcc/config
parent      e28e4d7e4707573d64baa270f9578937fe574a87 (diff)
parent      c8f5fc7032940ad6633f932ac40cade82ec4d0cc (diff)
download    FreeBSD-src-98051db4df2d731c020b9560e9937beb0e4656b1.zip
            FreeBSD-src-98051db4df2d731c020b9560e9937beb0e4656b1.tar.gz
This commit was generated by cvs2svn to compensate for changes in r96263,
which included commits to RCS files with non-trunk default branches.
Diffstat (limited to 'contrib/gcc/config')
-rw-r--r--  contrib/gcc/config/alpha/alpha-protos.h  6
-rw-r--r--  contrib/gcc/config/alpha/alpha.h  14
-rw-r--r--  contrib/gcc/config/alpha/alpha.md  179
-rw-r--r--  contrib/gcc/config/alpha/linux.h  4
-rw-r--r--  contrib/gcc/config/alpha/netbsd.h  14
-rw-r--r--  contrib/gcc/config/alpha/osf.h  25
-rw-r--r--  contrib/gcc/config/alpha/t-vms  4
-rw-r--r--  contrib/gcc/config/alpha/vms.h  51
-rw-r--r--  contrib/gcc/config/alpha/x-vms  2
-rw-r--r--  contrib/gcc/config/alpha/xm-vms.h  3
-rw-r--r--  contrib/gcc/config/arm/aof.h  13
-rw-r--r--  contrib/gcc/config/arm/aout.h  12
-rw-r--r--  contrib/gcc/config/arm/arm.c  323
-rw-r--r--  contrib/gcc/config/arm/arm.h  23
-rw-r--r--  contrib/gcc/config/arm/arm.md  71
-rw-r--r--  contrib/gcc/config/arm/coff.h  4
-rw-r--r--  contrib/gcc/config/arm/elf.h  4
-rw-r--r--  contrib/gcc/config/arm/linux-elf.h  8
-rw-r--r--  contrib/gcc/config/arm/netbsd.h  11
-rw-r--r--  contrib/gcc/config/arm/rtems-elf.h  13
-rw-r--r--  contrib/gcc/config/float-sparc.h  2
-rw-r--r--  contrib/gcc/config/i386/cygwin.h  5
-rw-r--r--  contrib/gcc/config/i386/djgpp-rtems.h  9
-rw-r--r--  contrib/gcc/config/i386/gnu.h  2
-rw-r--r--  contrib/gcc/config/i386/i386-interix.h  12
-rw-r--r--  contrib/gcc/config/i386/i386-protos.h  1
-rw-r--r--  contrib/gcc/config/i386/libgcc-x86_64-glibc.ver  25
-rw-r--r--  contrib/gcc/config/i386/linux-aout.h  4
-rw-r--r--  contrib/gcc/config/i386/linux-oldld.h  4
-rw-r--r--  contrib/gcc/config/i386/linux.h  2
-rw-r--r--  contrib/gcc/config/i386/linux64.h  96
-rw-r--r--  contrib/gcc/config/i386/netbsd-elf.h  12
-rw-r--r--  contrib/gcc/config/i386/netbsd.h  9
-rw-r--r--  contrib/gcc/config/i386/rtems.h  16
-rw-r--r--  contrib/gcc/config/i386/rtemself.h  72
-rw-r--r--  contrib/gcc/config/i386/sco5.h  3
-rw-r--r--  contrib/gcc/config/i386/sol2.h  10
-rw-r--r--  contrib/gcc/config/i386/t-interix  2
-rw-r--r--  contrib/gcc/config/i386/t-linux64  15
-rw-r--r--  contrib/gcc/config/i386/t-rtems-i386  40
-rw-r--r--  contrib/gcc/config/i386/x86-64.h  2
-rw-r--r--  contrib/gcc/config/ia64/aix.h  16
-rw-r--r--  contrib/gcc/config/ia64/elf.h  6
-rw-r--r--  contrib/gcc/config/ia64/freebsd.h  10
-rw-r--r--  contrib/gcc/config/ia64/hpux.h  14
-rw-r--r--  contrib/gcc/config/ia64/ia64-protos.h  4
-rw-r--r--  contrib/gcc/config/ia64/ia64.c  322
-rw-r--r--  contrib/gcc/config/ia64/ia64.h  59
-rw-r--r--  contrib/gcc/config/ia64/ia64.md  33
-rw-r--r--  contrib/gcc/config/ia64/linux.h  22
-rw-r--r--  contrib/gcc/config/ia64/sysv4.h  5
-rw-r--r--  contrib/gcc/config/ia64/unwind-ia64.c  298
-rw-r--r--  contrib/gcc/config/libgcc-glibc.ver  23
-rw-r--r--  contrib/gcc/config/netbsd.h  13
-rw-r--r--  contrib/gcc/config/rs6000/aix.h  5
-rw-r--r--  contrib/gcc/config/rs6000/aix43.h  4
-rw-r--r--  contrib/gcc/config/rs6000/aix51.h  11
-rw-r--r--  contrib/gcc/config/rs6000/altivec.h  8733
-rw-r--r--  contrib/gcc/config/rs6000/crtsavres.asm  407
-rw-r--r--  contrib/gcc/config/rs6000/darwin.h  14
-rw-r--r--  contrib/gcc/config/rs6000/eabi.asm  362
-rw-r--r--  contrib/gcc/config/rs6000/gnu.h  38
-rw-r--r--  contrib/gcc/config/rs6000/linux64.h  78
-rw-r--r--  contrib/gcc/config/rs6000/netbsd.h  5
-rw-r--r--  contrib/gcc/config/rs6000/ppc-asm.h  1
-rw-r--r--  contrib/gcc/config/rs6000/rs6000-protos.h  2
-rw-r--r--  contrib/gcc/config/rs6000/rs6000.c  710
-rw-r--r--  contrib/gcc/config/rs6000/rs6000.h  219
-rw-r--r--  contrib/gcc/config/rs6000/rs6000.md  591
-rw-r--r--  contrib/gcc/config/rs6000/rtems.h  12
-rw-r--r--  contrib/gcc/config/rs6000/sysv4.h  104
-rw-r--r--  contrib/gcc/config/rs6000/t-aix43  2
-rw-r--r--  contrib/gcc/config/rs6000/t-linux64  16
-rw-r--r--  contrib/gcc/config/rs6000/t-ppccomm  11
-rw-r--r--  contrib/gcc/config/rs6000/xcoff.h  28
-rw-r--r--  contrib/gcc/config/rtems.h  17
-rw-r--r--  contrib/gcc/config/sparc/aout.h  2
-rw-r--r--  contrib/gcc/config/sparc/crtfastmath.c  54
-rw-r--r--  contrib/gcc/config/sparc/elf.h  4
-rw-r--r--  contrib/gcc/config/sparc/freebsd.h  28
-rw-r--r--  contrib/gcc/config/sparc/gmon-sol2.c  4
-rw-r--r--  contrib/gcc/config/sparc/libgcc-sparc-glibc.ver  28
-rw-r--r--  contrib/gcc/config/sparc/linux-aout.h  6
-rw-r--r--  contrib/gcc/config/sparc/linux.h  84
-rw-r--r--  contrib/gcc/config/sparc/linux64.h  153
-rw-r--r--  contrib/gcc/config/sparc/lite.h  2
-rw-r--r--  contrib/gcc/config/sparc/litecoff.h  2
-rw-r--r--  contrib/gcc/config/sparc/liteelf.h  2
-rw-r--r--  contrib/gcc/config/sparc/netbsd-elf.h  344
-rw-r--r--  contrib/gcc/config/sparc/netbsd.h  9
-rw-r--r--  contrib/gcc/config/sparc/pbd.h  3
-rw-r--r--  contrib/gcc/config/sparc/rtems.h  9
-rw-r--r--  contrib/gcc/config/sparc/rtemself.h  11
-rw-r--r--  contrib/gcc/config/sparc/sol2-64.h  25
-rw-r--r--  contrib/gcc/config/sparc/sol2-bi.h  279
-rw-r--r--  contrib/gcc/config/sparc/sol2-gas-bi.h  5
-rw-r--r--  contrib/gcc/config/sparc/sol2-gld-bi.h  9
-rw-r--r--  contrib/gcc/config/sparc/sol2-gld.h  6
-rw-r--r--  contrib/gcc/config/sparc/sol2.h  36
-rw-r--r--  contrib/gcc/config/sparc/sol27-sld.h  8
-rw-r--r--  contrib/gcc/config/sparc/sp64-aout.h  2
-rw-r--r--  contrib/gcc/config/sparc/sp64-elf.h  6
-rw-r--r--  contrib/gcc/config/sparc/sp86x-aout.h  2
-rw-r--r--  contrib/gcc/config/sparc/sp86x-elf.h  2
-rw-r--r--  contrib/gcc/config/sparc/sparc-protos.h  20
-rw-r--r--  contrib/gcc/config/sparc/sparc.c  1436
-rw-r--r--  contrib/gcc/config/sparc/sparc.h  380
-rw-r--r--  contrib/gcc/config/sparc/sparc.md  912
-rw-r--r--  contrib/gcc/config/sparc/splet.h  2
-rw-r--r--  contrib/gcc/config/sparc/t-crtfm  4
-rw-r--r--  contrib/gcc/config/sparc/t-elf  12
-rw-r--r--  contrib/gcc/config/sparc/t-linux64  8
-rw-r--r--  contrib/gcc/config/sparc/t-netbsd64  6
-rw-r--r--  contrib/gcc/config/sparc/t-sol2-64  2
-rw-r--r--  contrib/gcc/config/sparc/vxsim.h  3
-rw-r--r--  contrib/gcc/config/sparc/vxsparc64.h  2
-rw-r--r--  contrib/gcc/config/t-slibgcc-elf-ver  23
-rw-r--r--  contrib/gcc/config/t-slibgcc-nolc-override  1
-rw-r--r--  contrib/gcc/config/t-slibgcc-sld  20
119 files changed, 13103 insertions, 4175 deletions
diff --git a/contrib/gcc/config/alpha/alpha-protos.h b/contrib/gcc/config/alpha/alpha-protos.h
index 2ff0350..c6fdd04 100644
--- a/contrib/gcc/config/alpha/alpha-protos.h
+++ b/contrib/gcc/config/alpha/alpha-protos.h
@@ -59,7 +59,7 @@ extern int current_file_function_operand PARAMS ((rtx, enum machine_mode));
extern int direct_call_operand PARAMS ((rtx, enum machine_mode));
extern int local_symbolic_operand PARAMS ((rtx, enum machine_mode));
extern int small_symbolic_operand PARAMS ((rtx, enum machine_mode));
-extern int some_small_symbolic_mem_operand PARAMS ((rtx, enum machine_mode));
+extern int some_small_symbolic_operand PARAMS ((rtx, enum machine_mode));
extern int global_symbolic_operand PARAMS ((rtx, enum machine_mode));
extern int call_operand PARAMS ((rtx, enum machine_mode));
extern int symbolic_operand PARAMS ((rtx, enum machine_mode));
@@ -90,7 +90,7 @@ extern rtx alpha_legitimize_address PARAMS ((rtx, rtx, enum machine_mode));
extern rtx alpha_legitimize_reload_address PARAMS ((rtx, enum machine_mode,
int, int, int));
-extern rtx split_small_symbolic_mem_operand PARAMS ((rtx));
+extern rtx split_small_symbolic_operand PARAMS ((rtx));
extern void get_aligned_mem PARAMS ((rtx, rtx *, rtx *));
extern rtx get_unaligned_address PARAMS ((rtx, int));
@@ -163,6 +163,8 @@ extern rtx function_arg PARAMS ((CUMULATIVE_ARGS, enum machine_mode,
#endif
extern void alpha_start_function PARAMS ((FILE *, const char *, tree));
extern void alpha_end_function PARAMS ((FILE *, const char *, tree));
+extern void alpha_output_mi_thunk_osf PARAMS ((FILE *, tree,
+ HOST_WIDE_INT, tree));
extern void alpha_encode_section_info PARAMS ((tree));
#endif /* TREE CODE */
diff --git a/contrib/gcc/config/alpha/alpha.h b/contrib/gcc/config/alpha/alpha.h
index dd8d820..b2363bc 100644
--- a/contrib/gcc/config/alpha/alpha.h
+++ b/contrib/gcc/config/alpha/alpha.h
@@ -43,12 +43,6 @@ Boston, MA 02111-1307, USA. */
#define CPP_SUBTARGET_SPEC ""
#endif
-/* Set the spec to use for signed char. The default tests the above macro
- but DEC's compiler can't handle the conditional in a "constant"
- operand. */
-
-#define SIGNED_CHAR_SPEC "%{funsigned-char:-D__CHAR_UNSIGNED__}"
-
#define WORD_SWITCH_TAKES_ARG(STR) \
(!strcmp (STR, "rpath") || DEFAULT_WORD_SWITCH_TAKES_ARG(STR))
@@ -2079,7 +2073,8 @@ do { \
{"reg_no_subreg_operand", {REG}}, \
{"addition_operation", {PLUS}}, \
{"symbolic_operand", {SYMBOL_REF, LABEL_REF, CONST}}, \
- {"some_small_symbolic_mem_operand", {SET, PARALLEL}},
+ {"some_small_symbolic_operand", {SET, PARALLEL, PREFETCH, UNSPEC, \
+ UNSPEC_VOLATILE}},
/* Define the `__builtin_va_list' type for the ABI. */
#define BUILD_VA_LIST_TYPE(VALIST) \
@@ -2242,3 +2237,8 @@ do { \
/* Generate calls to memcpy, etc., not bcopy, etc. */
#define TARGET_MEM_FUNCTIONS 1
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+ alpha_output_mi_thunk_osf (FILE, THUNK_FNDECL, DELTA, FUNCTION)
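[Editor's note: the new ASM_OUTPUT_MI_THUNK hook emits the adjust-and-jump stub GCC uses for C++ multiple inheritance. A minimal C++ sketch of what such a thunk accomplishes — not the code alpha_output_mi_thunk_osf actually emits:

    #include <cstdio>

    struct A { virtual void f() {} };
    struct B { virtual void g() { std::puts("B::g"); } };
    struct C : A, B {
      void g() override { std::printf("C::g, this=%p\n", static_cast<void *>(this)); }
    };

    int main() {
      C c;
      B *b = &c;   // &c plus the offset of the B subobject
      // The B-in-C vtable slot for g() points at a thunk that adds a
      // (negative) DELTA to `this` -- the first argument -- and then
      // jumps to C::g.  That adjust-and-jump is all the stub does.
      b->g();      // reaches C::g with `this` == &c
      return 0;
    }
]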
diff --git a/contrib/gcc/config/alpha/alpha.md b/contrib/gcc/config/alpha/alpha.md
index b987de8..785a61d 100644
--- a/contrib/gcc/config/alpha/alpha.md
+++ b/contrib/gcc/config/alpha/alpha.md
@@ -39,6 +39,7 @@
(UNSPEC_LITERAL 11)
(UNSPEC_LITUSE 12)
(UNSPEC_SIBCALL 13)
+ (UNSPEC_SYMBOL 14)
])
;; UNSPEC_VOLATILE:
@@ -518,31 +519,14 @@ fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,multi"
(sign_extend:DI (match_dup 1)))]
"")
-;; Do addsi3 the way expand_binop would do if we didn't have one. This
-;; generates better code. We have the anonymous addsi3 pattern below in
-;; case combine wants to make it.
+;; Don't say we have addsi3 if optimizing. This generates better code. We
+;; have the anonymous addsi3 pattern below in case combine wants to make it.
(define_expand "addsi3"
[(set (match_operand:SI 0 "register_operand" "")
(plus:SI (match_operand:SI 1 "reg_or_0_operand" "")
(match_operand:SI 2 "add_operand" "")))]
- ""
-{
- if (optimize)
- {
- rtx op1 = gen_lowpart (DImode, operands[1]);
- rtx op2 = gen_lowpart (DImode, operands[2]);
-
- if (! cse_not_expected)
- {
- rtx tmp = gen_reg_rtx (DImode);
- emit_insn (gen_adddi3 (tmp, op1, op2));
- emit_move_insn (gen_lowpart (DImode, operands[0]), tmp);
- }
- else
- emit_insn (gen_adddi3 (gen_lowpart (DImode, operands[0]), op1, op2));
- DONE;
- }
-})
+ "! optimize"
+ "")
(define_insn "*addsi_internal"
[(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
@@ -581,6 +565,17 @@ fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,multi"
addl %r1,%2,%0
subl %r1,%n2,%0")
+(define_insn "*addsi_se2"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (sign_extend:DI
+ (subreg:SI (plus:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ")
+ (match_operand:DI 2 "sext_add_operand" "rI,O"))
+ 0)))]
+ ""
+ "@
+ addl %r1,%2,%0
+ subl %r1,%n2,%0")
+
(define_split
[(set (match_operand:DI 0 "register_operand" "")
(sign_extend:DI
@@ -844,24 +839,8 @@ fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,multi"
[(set (match_operand:SI 0 "register_operand" "")
(minus:SI (match_operand:SI 1 "reg_or_0_operand" "")
(match_operand:SI 2 "reg_or_8bit_operand" "")))]
- ""
-{
- if (optimize)
- {
- rtx op1 = gen_lowpart (DImode, operands[1]);
- rtx op2 = gen_lowpart (DImode, operands[2]);
-
- if (! cse_not_expected)
- {
- rtx tmp = gen_reg_rtx (DImode);
- emit_insn (gen_subdi3 (tmp, op1, op2));
- emit_move_insn (gen_lowpart (DImode, operands[0]), tmp);
- }
- else
- emit_insn (gen_subdi3 (gen_lowpart (DImode, operands[0]), op1, op2));
- DONE;
- }
-})
+ "! optimize"
+ "")
(define_insn "*subsi_internal"
[(set (match_operand:SI 0 "register_operand" "=r")
@@ -877,6 +856,15 @@ fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,multi"
""
"subl %r1,%2,%0")
+(define_insn "*subsi_se2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (subreg:SI (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "reg_or_8bit_operand" "rI"))
+ 0)))]
+ ""
+ "subl %r1,%2,%0")
+
(define_insn "subvsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
@@ -1610,23 +1598,20 @@ fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,multi"
}
[(set_attr "type" "iadd,shift")])
-;; ??? The following pattern is made by combine, but earlier phases
-;; (specifically flow) can't handle it. This occurs in jump.c. Deal
-;; with this in a better way at some point.
-;;(define_insn ""
-;; [(set (match_operand:DI 0 "register_operand" "=r")
-;; (sign_extend:DI
-;; (subreg:SI (ashift:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
-;; (match_operand:DI 2 "const_int_operand" "P"))
-;; 0)))]
-;; "INTVAL (operands[2]) >= 1 && INTVAL (operands[2]) <= 3"
-;;{
-;; if (operands[2] == const1_rtx)
-;; return "addl %r1,%r1,%0";
-;; else
-;; return "s%P2addl %r1,0,%0";
-;;}
-;; [(set_attr "type" "iadd")])
+(define_insn "*ashldi_se"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (subreg:SI (ashift:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "const_int_operand" "P"))
+ 0)))]
+ "INTVAL (operands[2]) >= 1 && INTVAL (operands[2]) <= 3"
+{
+ if (operands[2] == const1_rtx)
+ return "addl %r1,%r1,%0";
+ else
+ return "s%P2addl %r1,0,%0";
+}
+ [(set_attr "type" "iadd")])
(define_insn "lshrdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
@@ -5111,6 +5096,16 @@ fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,multi"
""
"call_pal 0x86"
[(set_attr "type" "ibr")])
+
+;; BUGCHK is documented common to OSF/1 and VMS PALcode.
+;; NT does not document anything at 0x81 -- presumably it would generate
+;; the equivalent of SIGILL, but this isn't that important.
+;; ??? Presuming unicosmk uses either OSF/1 or VMS PALcode.
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 0))]
+ "!TARGET_ABI_WINDOWS_NT"
+ "call_pal 0x81"
+ [(set_attr "type" "ibr")])
;; Finally, we have the basic data motion insns. The byte and word insns
;; are done via define_expand. Start with the floating-point insns, since
@@ -5292,10 +5287,11 @@ fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,multi"
itofs %1,%0"
[(set_attr "type" "ilog,iadd,iadd,ild,ist,fcpys,fld,fst,ftoi,itof")])
-(define_insn "*movsi_nt_vms"
+(define_insn "*movsi_nt_vms_nofix"
[(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,m,*f,*f,m")
(match_operand:SI 1 "input_operand" "rJ,K,L,s,m,rJ,*fJ,m,*f"))]
"(TARGET_ABI_WINDOWS_NT || TARGET_ABI_OPEN_VMS)
+ && !TARGET_FIX
&& (register_operand (operands[0], SImode)
|| reg_or_0_operand (operands[1], SImode))"
"@
@@ -5310,6 +5306,27 @@ fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,multi"
st%, %R1,%0"
[(set_attr "type" "ilog,iadd,iadd,ldsym,ild,ist,fcpys,fld,fst")])
+(define_insn "*movsi_nt_vms_fix"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,m,*f,*f,m,r,*f")
+ (match_operand:SI 1 "input_operand" "rJ,K,L,s,m,rJ,*fJ,m,*f,*f,r"))]
+ "(TARGET_ABI_WINDOWS_NT || TARGET_ABI_OPEN_VMS)
+ && TARGET_FIX
+ && (register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode))"
+ "@
+ bis $31,%1,%0
+ lda %0,%1
+ ldah %0,%h1
+ lda %0,%1
+ ldl %0,%1
+ stl %r1,%0
+ cpys %R1,%R1,%0
+ ld%, %0,%1
+ st%, %R1,%0
+ ftois %1,%0
+ itofs %1,%0"
+ [(set_attr "type" "ilog,iadd,iadd,ldsym,ild,ist,fcpys,fld,fst,ftoi,itof")])
+
(define_insn "*movhi_nobwx"
[(set (match_operand:HI 0 "register_operand" "=r,r")
(match_operand:HI 1 "input_operand" "rJ,n"))]
@@ -5501,10 +5518,10 @@ fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,multi"
"operands[2] = pic_offset_table_rtx;")
(define_split
- [(match_operand 0 "some_small_symbolic_mem_operand" "")]
+ [(match_operand 0 "some_small_symbolic_operand" "")]
"TARGET_EXPLICIT_RELOCS && reload_completed"
[(match_dup 0)]
- "operands[0] = split_small_symbolic_mem_operand (operands[0]);")
+ "operands[0] = split_small_symbolic_operand (operands[0]);")
(define_insn "movdi_er_high_g"
[(set (match_operand:DI 0 "register_operand" "=r")
@@ -5531,6 +5548,41 @@ fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,multi"
(const_int 0)] UNSPEC_LITERAL))]
"operands[2] = pic_offset_table_rtx;")
+;; With RTL inlining, at -O3, rtl is generated, stored, then actually
+;; compiled at the end of compilation. In the meantime, someone can
+;; re-encode-section-info on some symbol changing it e.g. from global
+;; to local-not-small. If this happens, we'd have emitted a plain
+;; load rather than a high+losum load and not recognize the insn.
+;;
+;; So if rtl inlining is in effect, we delay the global/not-global
+;; decision until rest_of_compilation by wrapping it in an UNSPEC_SYMBOL.
+
+(define_insn_and_split "movdi_er_maybe_g"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "symbolic_operand" "")]
+ UNSPEC_SYMBOL))]
+ "TARGET_EXPLICIT_RELOCS && flag_inline_functions"
+ "#"
+ ""
+ [(set (match_dup 0) (match_dup 1))]
+{
+ if (local_symbolic_operand (operands[1], Pmode)
+ && !small_symbolic_operand (operands[1], Pmode))
+ {
+ rtx subtarget = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
+ rtx tmp;
+
+ tmp = gen_rtx_HIGH (Pmode, operands[1]);
+ if (reload_completed)
+ tmp = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmp);
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget, tmp));
+
+ tmp = gen_rtx_LO_SUM (Pmode, subtarget, operands[1]);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], tmp));
+ DONE;
+ }
+})
+
(define_insn "*movdi_er_nofix"
[(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,r,r,m,*f,*f,Q")
(match_operand:DI 1 "input_operand" "rJ,K,L,T,s,m,rJ,*fJ,Q,*f"))]
@@ -6700,13 +6752,10 @@ fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,multi"
(set_attr "type" "multi")])
(define_insn "*exception_receiver_2"
- [(unspec_volatile [(match_operand:DI 0 "nonimmediate_operand" "r,m")]
- UNSPECV_EHR)]
+ [(unspec_volatile [(match_operand:DI 0 "memory_operand" "m")] UNSPECV_EHR)]
"TARGET_LD_BUGGY_LDGP"
- "@
- bis $31,%0,$29
- ldq $29,%0"
- [(set_attr "type" "ilog,ild")])
+ "ldq $29,%0"
+ [(set_attr "type" "ild")])
(define_expand "nonlocal_goto_receiver"
[(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)
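[Editor's note: the *addsi_se2 and *subsi_se2 patterns added above match a 64-bit add or subtract whose low 32 bits are then sign-extended — exactly what a single Alpha addl/subl computes. A self-checking sketch of that identity, assuming the usual two's-complement conversions:

    #include <cassert>
    #include <cstdint>

    // sign_extend (subreg:SI (plus:DI a b) 0): 64-bit add, truncate, extend.
    int64_t se_of_64bit_add(int64_t a, int64_t b) {
      return (int32_t)(uint32_t)(uint64_t)(a + b);
    }

    // sign_extend (plus:SI a b): what one `addl` instruction produces.
    int64_t se_of_32bit_add(int64_t a, int64_t b) {
      return (int32_t)((uint32_t)a + (uint32_t)b);
    }

    int main() {
      assert(se_of_64bit_add(0x123456789LL, 0x7fffffffLL)
             == se_of_32bit_add(0x123456789LL, 0x7fffffffLL));
      assert(se_of_64bit_add(-1, 1) == se_of_32bit_add(-1, 1));
      return 0;
    }
]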
diff --git a/contrib/gcc/config/alpha/linux.h b/contrib/gcc/config/alpha/linux.h
index 8a1b93f..912d7b26 100644
--- a/contrib/gcc/config/alpha/linux.h
+++ b/contrib/gcc/config/alpha/linux.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for Alpha Linux-based GNU systems.
- Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1998, 2002 Free Software Foundation, Inc.
Contributed by Richard Henderson.
This file is part of GNU CC.
@@ -25,7 +25,7 @@ Boston, MA 02111-1307, USA. */
#undef CPP_PREDEFINES
#define CPP_PREDEFINES \
-"-Dlinux -Dunix -Asystem=linux -D_LONGLONG -D__alpha__ " \
+"-D__gnu_linux__ -Dlinux -Dunix -Asystem=linux -D_LONGLONG -D__alpha__ " \
SUB_CPP_PREDEFINES
/* The GNU C++ standard library requires that these macros be defined. */
diff --git a/contrib/gcc/config/alpha/netbsd.h b/contrib/gcc/config/alpha/netbsd.h
index 7eacce2..e5551da 100644
--- a/contrib/gcc/config/alpha/netbsd.h
+++ b/contrib/gcc/config/alpha/netbsd.h
@@ -79,19 +79,5 @@ Boston, MA 02111-1307, USA. */
%{!shared:crtend%O%s} %{shared:crtendS%O%s}"
-/* Make gcc agree with <machine/ansi.h> */
-
-#undef WCHAR_TYPE
-#define WCHAR_TYPE "int"
-
-#undef WCHAR_UNSIGNED
-#define WCHAR_UNSIGNED 0
-
-#undef WCHAR_TYPE_SIZE
-#define WCHAR_TYPE_SIZE 32
-
-#undef WINT_TYPE
-#define WINT_TYPE "int"
-
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (NetBSD/alpha ELF)");
diff --git a/contrib/gcc/config/alpha/osf.h b/contrib/gcc/config/alpha/osf.h
index 250974c..efb0a16 100644
--- a/contrib/gcc/config/alpha/osf.h
+++ b/contrib/gcc/config/alpha/osf.h
@@ -47,7 +47,8 @@ Boston, MA 02111-1307, USA. */
#undef CPP_SUBTARGET_SPEC
#define CPP_SUBTARGET_SPEC \
-"%{pthread|threads:-D_REENTRANT} %{threads:-D_PTHREAD_USE_D4} %(cpp_xfloat)"
+"%{pthread|threads:-D_REENTRANT} %{threads:-D_PTHREAD_USE_D4} %(cpp_xfloat) \
+-D__EXTERN_PREFIX"
/* Under OSF4, -p and -pg require -lprof1, and -lprof1 requires -lpdf. */
@@ -56,12 +57,13 @@ Boston, MA 02111-1307, USA. */
%{threads: -lpthreads} %{pthread|threads: -lpthread -lmach -lexc} -lc"
/* Pass "-G 8" to ld because Alpha's CC does. Pass -O3 if we are
- optimizing, -O1 if we are not. Pass -shared, -non_shared or
+ optimizing, -O1 if we are not. Pass -S to silence `weak symbol
+ multiply defined' warnings. Pass -shared, -non_shared or
-call_shared as appropriate. Pass -hidden_symbol so that our
constructor and call-frame data structures are not accidentally
overridden. */
#define LINK_SPEC \
- "-G 8 %{O*:-O3} %{!O*:-O1} %{static:-non_shared} \
+ "-G 8 %{O*:-O3} %{!O*:-O1} -S %{static:-non_shared} \
%{!static:%{shared:-shared -hidden_symbol _GLOBAL_*} \
%{!shared:-call_shared}} %{pg} %{taso} %{rpath*}"
@@ -93,19 +95,18 @@ Boston, MA 02111-1307, USA. */
#define ASM_OLDAS_SPEC ""
-/* No point in running CPP on our assembler output. */
-#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_GAS) != 0
-/* Don't pass -g to GNU as, because some versions don't accept this option. */
-#define ASM_SPEC "%{malpha-as:-g %(asm_oldas)} -nocpp %{pg}"
-#else
/* In OSF/1 v3.2c, the assembler by default does not output file names which
causes mips-tfile to fail. Passing -g to the assembler fixes this problem.
??? Strictly speaking, we need -g only if the user specifies -g. Passing
it always means that we get slightly larger than necessary object files
if the user does not specify -g. If we don't pass -g, then mips-tfile
will need to be fixed to work in this case. Pass -O0 since some
- optimization are broken and don't help us anyway. */
-#define ASM_SPEC "%{!mgas:-g %(asm_oldas)} -nocpp %{pg} -O0"
+ optimization are broken and don't help us anyway. Pass -nocpp because
+ there's no point in running CPP on our assembler output. */
+#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_GAS) != 0
+#define ASM_SPEC "%{malpha-as:-g %(asm_oldas) -nocpp %{pg} -O0}"
+#else
+#define ASM_SPEC "%{!mgas:-g %(asm_oldas) -nocpp %{pg} -O0}"
#endif
/* Specify to run a post-processor, mips-tfile after the assembler
@@ -209,3 +210,7 @@ __enable_execute_stack (addr) \
/* Handle #pragma weak and #pragma pack. */
#undef HANDLE_SYSV_PRAGMA
#define HANDLE_SYSV_PRAGMA 1
+
+/* Handle #pragma extern_prefix. Technically only needed for Tru64 5.x,
+ but easier to manipulate preprocessor bits from here. */
+#define HANDLE_PRAGMA_EXTERN_PREFIX 1
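[Editor's note: HANDLE_PRAGMA_EXTERN_PREFIX enables the Tru64-style pragma that prepends a prefix to the linker-visible names of subsequent declarations. A hedged usage sketch — function names here are hypothetical, semantics as documented for the Tru64 compilers:

    extern "C" {
    #pragma extern_prefix "_P"
    int hypothetical_call(int);   // linker-visible name becomes _Phypothetical_call
    #pragma extern_prefix ""      // restore default external naming
    int plain_call(int);          // linker-visible name stays plain_call
    }
]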
diff --git a/contrib/gcc/config/alpha/t-vms b/contrib/gcc/config/alpha/t-vms
index 5057c31..516d3ec 100644
--- a/contrib/gcc/config/alpha/t-vms
+++ b/contrib/gcc/config/alpha/t-vms
@@ -20,3 +20,7 @@ vcrt0.o: $(CRT0_S) $(GCC_PASSES)
pcrt0.o: $(CRT0_S) $(GCC_PASSES)
decc -c /names=as_is $(srcdir)/config/alpha/vms-psxcrt0.c -o pcrt0.o
+MULTILIB_OPTIONS = mcpu=ev6
+MULTILIB_DIRNAMES = ev6
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/alpha/vms.h b/contrib/gcc/config/alpha/vms.h
index a01556e..ffc3275 100644
--- a/contrib/gcc/config/alpha/vms.h
+++ b/contrib/gcc/config/alpha/vms.h
@@ -251,6 +251,12 @@ typedef struct {int num_args; enum avms_arg_type atypes[6];} avms_arg_info;
alpha_write_verstamp (FILE); \
fprintf (FILE, "\t.set noreorder\n"); \
fprintf (FILE, "\t.set volatile\n"); \
+ if (TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX) \
+ { \
+ fprintf (FILE, "\t.arch %s\n", \
+ (TARGET_CPU_EV6 ? "ev6" \
+ : TARGET_MAX ? "pca56" : "ev56")); \
+ } \
ASM_OUTPUT_SOURCE_FILENAME (FILE, main_input_filename); \
}
@@ -385,22 +391,14 @@ do { \
#define LINK_EH_SPEC "vms-dwarf2eh.o%s "
#ifdef IN_LIBGCC2
-#include <libicb.h>
#include <pdscdef.h>
#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS, SUCCESS) \
do { \
- unsigned long handle; \
- PDSCDEF *pv; \
- INVO_CONTEXT_BLK invo; \
+ PDSCDEF *pv = *((PDSCDEF **) (CONTEXT)->reg [29]); \
\
- memset (&invo, 0, sizeof (INVO_CONTEXT_BLK)); \
- \
- invo.libicb$q_ireg [29] = *((long long *) (CONTEXT)->reg [29]); \
- invo.libicb$q_ireg [30] = (long long) (CONTEXT)->cfa; \
- handle = LIB$GET_INVO_HANDLE (&invo); \
- LIB$GET_INVO_CONTEXT (handle, &invo); \
- pv = (PDSCDEF *) invo.libicb$ph_procedure_descriptor; \
+ if (pv && ((long) pv & 0x7) == 0) /* low bits 0 means address */ \
+ pv = *(PDSCDEF **) pv; \
\
if (pv && ((pv->pdsc$w_flags & 0xf) == PDSC$K_KIND_FP_STACK)) \
{ \
@@ -426,6 +424,19 @@ do { \
\
goto SUCCESS; \
} \
+ else if (pv && ((pv->pdsc$w_flags & 0xf) == PDSC$K_KIND_FP_REGISTER)) \
+ { \
+ (FS)->cfa_offset = pv->pdsc$l_size; \
+ (FS)->cfa_reg = pv->pdsc$w_flags & PDSC$M_BASE_REG_IS_FP ? 29 : 30; \
+ (FS)->retaddr_column = 26; \
+ (FS)->cfa_how = CFA_REG_OFFSET; \
+ (FS)->regs.reg[26].loc.reg = pv->pdsc$b_save_ra; \
+ (FS)->regs.reg[26].how = REG_SAVED_REG; \
+ (FS)->regs.reg[29].loc.reg = pv->pdsc$b_save_fp; \
+ (FS)->regs.reg[29].how = REG_SAVED_REG; \
+ \
+ goto SUCCESS; \
+ } \
} while (0)
#endif
@@ -508,11 +519,17 @@ do { \
#define NAME__MAIN "__gccmain"
#define SYMBOL__MAIN __gccmain
+#define MD_EXEC_PREFIX "/gnu/lib/gcc-lib/"
+#define MD_STARTFILE_PREFIX "/gnu/lib/gcc-lib/"
+
/* Specify the list of include file directories. */
-#define INCLUDE_DEFAULTS \
-{ \
- { "/gnu_gxx_include", 0, 1, 1 }, \
- { "/gnu_cc_include", 0, 0, 0 }, \
- { "/gnu/include", 0, 0, 0 }, \
- { 0, 0, 0, 0 } \
+#define INCLUDE_DEFAULTS \
+{ \
+ { "/gnu/lib/gcc-lib/include", 0, 0, 0 }, \
+ { "/gnu_gxx_include", 0, 1, 1 }, \
+ { "/gnu_cc_include", 0, 0, 0 }, \
+ { "/gnu/include", 0, 0, 0 }, \
+ { 0, 0, 0, 0 } \
}
+
+#define LONGLONG_STANDALONE 1
diff --git a/contrib/gcc/config/alpha/x-vms b/contrib/gcc/config/alpha/x-vms
index f53f1c7..c98f03d 100644
--- a/contrib/gcc/config/alpha/x-vms
+++ b/contrib/gcc/config/alpha/x-vms
@@ -1,6 +1,8 @@
# Under VMS, directory names cannot contain dots.
version:=$(shell echo $(gcc_version) | sed -e 's/\./_/g')
+libsubdir=$(libdir)/gcc-lib
+
# Rules for linker and compiler wrappers. These are only useful on
# a VMS host.
EXTRA_PROGRAMS=ld.exe decc.exe
diff --git a/contrib/gcc/config/alpha/xm-vms.h b/contrib/gcc/config/alpha/xm-vms.h
index d728ec1..7bfceba 100644
--- a/contrib/gcc/config/alpha/xm-vms.h
+++ b/contrib/gcc/config/alpha/xm-vms.h
@@ -37,6 +37,9 @@ Boston, MA 02111-1307, USA. */
/* Open files in stream mode if not otherwise explicitly specified */
#define __UNIX_FOPEN 1
+/* Write to stdout using fputc to avoid record terminators in pipes */
+#define __UNIX_FWRITE 1
+
#define STDC_HEADERS 1
#define HOST_EXECUTABLE_SUFFIX ".exe"
diff --git a/contrib/gcc/config/arm/aof.h b/contrib/gcc/config/arm/aof.h
index 750bc08..f110ea2 100644
--- a/contrib/gcc/config/arm/aof.h
+++ b/contrib/gcc/config/arm/aof.h
@@ -120,6 +120,10 @@ do { \
(*ptr++) (); \
} while (0)
+/* We really want to put Thumb tables in a read-only data section, but
+ switching to another section during function output is not
+ possible. We could however do what the SPARC does and defer the
+ whole table generation until the end of the function. */
#define JUMP_TABLES_IN_TEXT_SECTION 1
#ifndef ARM_OS_NAME
@@ -322,8 +326,13 @@ do { \
/* Output of Dispatch Tables */
-#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
- fprintf ((STREAM), "\tb\t|L..%d|\n", (VALUE))
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ do { \
+ if (TARGET_ARM) \
+ fprintf ((STREAM), "\tb\t|L..%d|\n", (VALUE)); \
+ else \
+ fprintf ((STREAM), "\tDCD\t|L..%d| - |L..%d|\n", (VALUE), (REL)); \
+ } while (0)
#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
fprintf ((STREAM), "\tDCD\t|L..%d|\n", (VALUE))
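[Editor's note: for Thumb, the table entries become label differences (`DCD |L..case| - |L..table|`) rather than branch instructions, so the table is position-independent data. A sketch of the same idea using the GCC labels-as-values extension (&&label), which is an assumption of this example:

    #include <cstdio>

    static int classify(int i)
    {
      // Plays the role of the DCD difference words in the table above.
      const int offsets[] = { &&c0 - &&c0, &&c1 - &&c0, &&c2 - &&c0 };
      goto *(&&c0 + offsets[i]);   // table base + difference = case label
    c0: return 10;
    c1: return 20;
    c2: return 30;
    }

    int main() { std::printf("%d\n", classify(1)); return 0; }
]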
diff --git a/contrib/gcc/config/arm/aout.h b/contrib/gcc/config/arm/aout.h
index 64ca8b7..868eee0 100644
--- a/contrib/gcc/config/arm/aout.h
+++ b/contrib/gcc/config/arm/aout.h
@@ -181,8 +181,16 @@ Boston, MA 02111-1307, USA. */
#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
asm_fprintf (STREAM, "\t.word\t%LL%d\n", VALUE)
-#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
- asm_fprintf (STREAM, "\tb\t%LL%d\n", VALUE)
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
+ do \
+ { \
+ if (TARGET_ARM) \
+ asm_fprintf (STREAM, "\tb\t%LL%d\n", VALUE); \
+ else \
+ asm_fprintf (STREAM, "\t.word\t%LL%d-%LL%d\n", VALUE, REL); \
+ } \
+ while (0)
+
#undef ASM_OUTPUT_ASCII
#define ASM_OUTPUT_ASCII(STREAM, PTR, LEN) \
diff --git a/contrib/gcc/config/arm/arm.c b/contrib/gcc/config/arm/arm.c
index dd823a2..4ef7279 100644
--- a/contrib/gcc/config/arm/arm.c
+++ b/contrib/gcc/config/arm/arm.c
@@ -266,12 +266,9 @@ int thumb_code = 0;
PRINT_OPERAND_ADDRESS. */
enum machine_mode output_memory_reference_mode;
-/* Nonzero if the prologue must setup `fp'. */
-int current_function_anonymous_args;
-
/* The register number to be used for the PIC offset register. */
const char * arm_pic_register_string = NULL;
-int arm_pic_register = 9;
+int arm_pic_register = INVALID_REGNUM;
/* Set to 1 when a return insn is output, this means that the epilogue
is not needed. */
@@ -654,8 +651,8 @@ arm_override_options ()
/* If stack checking is disabled, we can use r10 as the PIC register,
which keeps r9 available. */
- if (flag_pic && !TARGET_APCS_STACK)
- arm_pic_register = 10;
+ if (flag_pic)
+ arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
if (TARGET_APCS_FLOAT)
warning ("passing floating point arguments in fp regs not yet supported");
@@ -716,18 +713,16 @@ arm_override_options ()
if (arm_pic_register_string != NULL)
{
- int pic_register;
-
+ int pic_register = decode_reg_name (arm_pic_register_string);
+
if (!flag_pic)
warning ("-mpic-register= is useless without -fpic");
- pic_register = decode_reg_name (arm_pic_register_string);
-
/* Prevent the user from choosing an obviously stupid PIC register. */
- if (pic_register < 0 || call_used_regs[pic_register]
- || pic_register == HARD_FRAME_POINTER_REGNUM
- || pic_register == STACK_POINTER_REGNUM
- || pic_register >= PC_REGNUM)
+ else if (pic_register < 0 || call_used_regs[pic_register]
+ || pic_register == HARD_FRAME_POINTER_REGNUM
+ || pic_register == STACK_POINTER_REGNUM
+ || pic_register >= PC_REGNUM)
error ("unable to use '%s' for PIC register", arm_pic_register_string);
else
arm_pic_register = pic_register;
@@ -902,14 +897,14 @@ use_return_insn (iscond)
func_type = arm_current_func_type ();
- /* Naked functions, volatile functiond and interrupt
- functions all need special consideration. */
- if (func_type & (ARM_FT_INTERRUPT | ARM_FT_VOLATILE | ARM_FT_NAKED))
+ /* Naked functions and volatile functions need special
+ consideration. */
+ if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
return 0;
/* As do variadic functions. */
if (current_function_pretend_args_size
- || current_function_anonymous_args
+ || cfun->machine->uses_anonymous_args
/* Of if the function calls __builtin_eh_return () */
|| ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
/* Or if there is no frame pointer and there is a stack adjustment. */
@@ -2102,9 +2097,6 @@ arm_encode_call_attribute (decl, flag)
int len = strlen (str);
char * newstr;
- if (TREE_CODE (decl) != FUNCTION_DECL)
- return;
-
/* Do not allow weak functions to be treated as short call. */
if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
return;
@@ -2318,7 +2310,10 @@ legitimize_pic_address (orig, mode, reg)
else
emit_insn (gen_pic_load_addr_thumb (address, orig));
- if (GET_CODE (orig) == LABEL_REF && NEED_GOT_RELOC)
+ if ((GET_CODE (orig) == LABEL_REF
+ || (GET_CODE (orig) == SYMBOL_REF &&
+ ENCODED_SHORT_CALL_ATTR_P (XSTR (orig, 0))))
+ && NEED_GOT_RELOC)
pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
else
{
@@ -5334,14 +5329,29 @@ is_jump_table (insn)
return NULL_RTX;
}
+#ifndef JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION 0
+#endif
+
static HOST_WIDE_INT
get_jump_table_size (insn)
rtx insn;
{
- rtx body = PATTERN (insn);
- int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
+ /* ADDR_VECs only take room if read-only data does into the text
+ section. */
+ if (JUMP_TABLES_IN_TEXT_SECTION
+#if !defined(READONLY_DATA_SECTION)
+ || 1
+#endif
+ )
+ {
+ rtx body = PATTERN (insn);
+ int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
+
+ return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
+ }
- return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
+ return 0;
}
/* Move a minipool fix MP from its current location to before MAX_MP.
@@ -7139,17 +7149,16 @@ arm_compute_save_reg_mask ()
/* Decide if we need to save the link register.
Interrupt routines have their own banked link register,
so they never need to save it.
- Otheriwse if we do not use the link register we do not need to save
+ Otherwise if we do not use the link register we do not need to save
it. If we are pushing other registers onto the stack however, we
can save an instruction in the epilogue by pushing the link register
now and then popping it back into the PC. This incurs extra memory
accesses though, so we only do it when optimising for size, and only
if we know that we will not need a fancy return sequence. */
- if (! IS_INTERRUPT (func_type)
- && (regs_ever_live [LR_REGNUM]
+ if (regs_ever_live [LR_REGNUM]
|| (save_reg_mask
&& optimize_size
- && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)))
+ && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL))
save_reg_mask |= 1 << LR_REGNUM;
if (cfun->machine->lr_save_eliminated)
@@ -7207,21 +7216,19 @@ output_return_instruction (operand, really_return, reverse)
live_regs_mask = arm_compute_save_reg_mask ();
- /* On some ARM architectures it is faster to use LDR rather than LDM to
- load a single register. On other architectures, the cost is the same.
- In 26 bit mode we have to use LDM in order to be able to restore the CPSR. */
- if ((live_regs_mask == (1 << LR_REGNUM))
- && ! TARGET_INTERWORK
- && ! IS_INTERRUPT (func_type)
- && (! really_return || TARGET_APCS_32))
+ if (live_regs_mask)
{
- if (! really_return)
- sprintf (instr, "ldr%s\t%%|lr, [%%|sp], #4", conditional);
+ const char * return_reg;
+
+ /* If we do not have any special requirements for function exit
+ (eg interworking, or ISR) then we can load the return address
+ directly into the PC. Otherwise we must load it into LR. */
+ if (really_return
+ && ! TARGET_INTERWORK)
+ return_reg = reg_names[PC_REGNUM];
else
- sprintf (instr, "ldr%s\t%%|pc, [%%|sp], #4", conditional);
- }
- else if (live_regs_mask)
- {
+ return_reg = reg_names[LR_REGNUM];
+
if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
/* There are two possible reasons for the IP register being saved.
Either a stack frame was created, in which case IP contains the
@@ -7233,96 +7240,91 @@ output_return_instruction (operand, really_return, reverse)
live_regs_mask |= (1 << SP_REGNUM);
}
- /* Generate the load multiple instruction to restore the registers. */
- if (frame_pointer_needed)
- sprintf (instr, "ldm%sea\t%%|fp, {", conditional);
- else
- sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
-
- for (reg = 0; reg <= SP_REGNUM; reg++)
- if (live_regs_mask & (1 << reg))
- {
- strcat (instr, "%|");
- strcat (instr, reg_names[reg]);
- strcat (instr, ", ");
- }
-
- if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
+ /* On some ARM architectures it is faster to use LDR rather than
+ LDM to load a single register. On other architectures, the
+ cost is the same. In 26 bit mode, or for exception handlers,
+ we have to use LDM to load the PC so that the CPSR is also
+ restored. */
+ for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
{
- /* If we are not restoring the LR register then we will
- have added one too many commas to the list above.
- Replace it with a closing brace. */
- instr [strlen (instr) - 2] = '}';
+ if (live_regs_mask == (unsigned int)(1 << reg))
+ break;
}
- else
+ if (reg <= LAST_ARM_REGNUM
+ && (reg != LR_REGNUM
+ || ! really_return
+ || (TARGET_APCS_32 && ! IS_INTERRUPT (func_type))))
{
- strcat (instr, "%|");
-
- /* At this point there should only be one or two registers left in
- live_regs_mask: always LR, and possibly PC if we created a stack
- frame. LR contains the return address. If we do not have any
- special requirements for function exit (eg interworking, or ISR)
- then we can load this value directly into the PC and save an
- instruction. */
- if (! TARGET_INTERWORK
- && ! IS_INTERRUPT (func_type)
- && really_return)
- strcat (instr, reg_names [PC_REGNUM]);
- else
- strcat (instr, reg_names [LR_REGNUM]);
-
- strcat (instr, (TARGET_APCS_32 || !really_return) ? "}" : "}^");
+ sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
+ (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
}
-
- if (really_return)
+ else
{
- /* See if we need to generate an extra instruction to
- perform the actual function return. */
- switch ((int) ARM_FUNC_TYPE (func_type))
- {
- case ARM_FT_ISR:
- case ARM_FT_FIQ:
- output_asm_insn (instr, & operand);
+ char *p;
+ int first = 1;
- strcpy (instr, "sub");
- strcat (instr, conditional);
- strcat (instr, "s\t%|pc, %|lr, #4");
- break;
+ /* Generate the load multiple instruction to restore the registers. */
+ if (frame_pointer_needed)
+ sprintf (instr, "ldm%sea\t%%|fp, {", conditional);
+ else
+ sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
- case ARM_FT_EXCEPTION:
- output_asm_insn (instr, & operand);
+ p = instr + strlen (instr);
- strcpy (instr, "mov");
- strcat (instr, conditional);
- strcat (instr, "s\t%|pc, %|lr");
- break;
+ for (reg = 0; reg <= SP_REGNUM; reg++)
+ if (live_regs_mask & (1 << reg))
+ {
+ int l = strlen (reg_names[reg]);
- case ARM_FT_INTERWORKED:
- output_asm_insn (instr, & operand);
+ if (first)
+ first = 0;
+ else
+ {
+ memcpy (p, ", ", 2);
+ p += 2;
+ }
- strcpy (instr, "bx");
- strcat (instr, conditional);
- strcat (instr, "\t%|lr");
- break;
+ memcpy (p, "%|", 2);
+ memcpy (p + 2, reg_names[reg], l);
+ p += l + 2;
+ }
+
+ if (live_regs_mask & (1 << LR_REGNUM))
+ {
+ int l = strlen (return_reg);
- default:
- /* The return has already been handled
- by loading the LR into the PC. */
- if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
+ if (! first)
{
- output_asm_insn (instr, & operand);
-
- strcpy (instr, "mov");
- strcat (instr, conditional);
- if (! TARGET_APCS_32)
- strcat (instr, "s");
- strcat (instr, "\t%|pc, %|lr");
+ memcpy (p, ", ", 2);
+ p += 2;
}
- break;
+
+ memcpy (p, "%|", 2);
+ memcpy (p + 2, return_reg, l);
+ strcpy (p + 2 + l, ((TARGET_APCS_32
+ && !IS_INTERRUPT (func_type))
+ || !really_return)
+ ? "}" : "}^");
}
+ else
+ strcpy (p, "}");
+ }
+
+ output_asm_insn (instr, & operand);
+
+ /* See if we need to generate an extra instruction to
+ perform the actual function return. */
+ if (really_return
+ && func_type != ARM_FT_INTERWORKED
+ && (live_regs_mask & (1 << LR_REGNUM)) != 0)
+ {
+ /* The return has already been handled
+ by loading the LR into the PC. */
+ really_return = 0;
}
}
- else if (really_return)
+
+ if (really_return)
{
switch ((int) ARM_FUNC_TYPE (func_type))
{
@@ -7340,18 +7342,19 @@ output_return_instruction (operand, really_return, reverse)
break;
default:
- sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
- conditional, TARGET_APCS_32 ? "" : "s");
+ /* ARMv5 implementations always provide BX, so interworking
+ is the default unless APCS-26 is in use. */
+ if ((insn_flags & FL_ARCH5) != 0 && TARGET_APCS_32)
+ sprintf (instr, "bx%s\t%%|lr", conditional);
+ else
+ sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
+ conditional, TARGET_APCS_32 ? "" : "s");
break;
}
+
+ output_asm_insn (instr, & operand);
}
- else
- /* Nothing to load off the stack, and
- no return instruction to generate. */
- return "";
- output_asm_insn (instr, & operand);
-
return "";
}
@@ -7457,9 +7460,9 @@ arm_output_function_prologue (f, frame_size)
current_function_args_size,
current_function_pretend_args_size, frame_size);
- asm_fprintf (f, "\t%@ frame_needed = %d, current_function_anonymous_args = %d\n",
+ asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
frame_pointer_needed,
- current_function_anonymous_args);
+ cfun->machine->uses_anonymous_args);
if (cfun->machine->lr_save_eliminated)
asm_fprintf (f, "\t%@ link register save eliminated.\n");
@@ -7479,8 +7482,9 @@ arm_output_epilogue (really_return)
int reg;
unsigned long saved_regs_mask;
unsigned long func_type;
- /* If we need this, then it will always be at least this much. */
- int floats_offset = 12;
+ /* Floats_offset is the offset from the "virtual" frame. In an APCS
+ frame that is $fp + 4 for a non-variadic function. */
+ int floats_offset = 0;
rtx operands[3];
int frame_size = get_frame_size ();
FILE * f = asm_out_file;
@@ -7517,6 +7521,9 @@ arm_output_epilogue (really_return)
saved_regs_mask = arm_compute_save_reg_mask ();
+ /* XXX We should adjust floats_offset for any anonymous args, and then
+ re-adjust vfp_offset below to compensate. */
+
/* Compute how far away the floats will be. */
for (reg = 0; reg <= LAST_ARM_REGNUM; reg ++)
if (saved_regs_mask & (1 << reg))
@@ -7524,6 +7531,8 @@ arm_output_epilogue (really_return)
if (frame_pointer_needed)
{
+ int vfp_offset = 4;
+
if (arm_fpu_arch == FP_SOFT2)
{
for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
@@ -7531,7 +7540,7 @@ arm_output_epilogue (really_return)
{
floats_offset += 12;
asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
- reg, FP_REGNUM, floats_offset);
+ reg, FP_REGNUM, floats_offset - vfp_offset);
}
}
else
@@ -7548,7 +7557,7 @@ arm_output_epilogue (really_return)
if (start_reg - reg == 3)
{
asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
- reg, FP_REGNUM, floats_offset);
+ reg, FP_REGNUM, floats_offset - vfp_offset);
start_reg = reg - 1;
}
}
@@ -7557,7 +7566,7 @@ arm_output_epilogue (really_return)
if (reg != start_reg)
asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
reg + 1, start_reg - reg,
- FP_REGNUM, floats_offset);
+ FP_REGNUM, floats_offset - vfp_offset);
start_reg = reg - 1;
}
}
@@ -7566,7 +7575,7 @@ arm_output_epilogue (really_return)
if (reg != start_reg)
asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
reg + 1, start_reg - reg,
- FP_REGNUM, floats_offset);
+ FP_REGNUM, floats_offset - vfp_offset);
}
/* saved_regs_mask should contain the IP, which at the time of stack
@@ -7660,7 +7669,7 @@ arm_output_epilogue (really_return)
to load use the LDR instruction - it is faster. */
if (saved_regs_mask == (1 << LR_REGNUM))
{
- /* The excpetion handler ignores the LR, so we do
+ /* The exception handler ignores the LR, so we do
not really need to load it off the stack. */
if (eh_ofs)
asm_fprintf (f, "\tadd\t%r, %r, #4\n", SP_REGNUM, SP_REGNUM);
@@ -7686,7 +7695,10 @@ arm_output_epilogue (really_return)
REGNO (eh_ofs));
#endif
- if (! really_return)
+ if (! really_return
+ || (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
+ && current_function_pretend_args_size == 0
+ && saved_regs_mask & (1 << PC_REGNUM)))
return "";
/* Generate the return instruction. */
@@ -7754,7 +7766,6 @@ arm_output_function_epilogue (file, frame_size)
abort ();
/* Reset the ARM-specific per-function variables. */
- current_function_anonymous_args = 0;
after_arm_reorg = 0;
}
}
@@ -8068,7 +8079,7 @@ arm_compute_initial_elimination_offset (from, to)
/* FIXME: Not sure about this. Maybe we should always return 0 ? */
return (frame_pointer_needed
&& current_function_needs_context
- && ! current_function_anonymous_args) ? 4 : 0;
+ && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
case STACK_POINTER_REGNUM:
/* If nothing has been pushed on the stack at all
@@ -8209,7 +8220,7 @@ arm_expand_prologue ()
else
{
/* Store the args on the stack. */
- if (current_function_anonymous_args)
+ if (cfun->machine->uses_anonymous_args)
insn = emit_multi_reg_push
((0xf0 >> (args_to_push / 4)) & 0xf);
else
@@ -8245,7 +8256,7 @@ arm_expand_prologue ()
if (args_to_push)
{
/* Push the argument registers, or reserve space for them. */
- if (current_function_anonymous_args)
+ if (cfun->machine->uses_anonymous_args)
insn = emit_multi_reg_push
((0xf0 >> (args_to_push / 4)) & 0xf);
else
@@ -8255,6 +8266,19 @@ arm_expand_prologue ()
RTX_FRAME_RELATED_P (insn) = 1;
}
+ /* If this is an interrupt service routine, and the link register is
+ going to be pushed, subtracting four now will mean that the
+ function return can be done with a single instruction. */
+ if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
+ && (live_regs_mask & (1 << LR_REGNUM)) != 0)
+ {
+ emit_insn (gen_rtx_SET (SImode,
+ gen_rtx_REG (SImode, LR_REGNUM),
+ gen_rtx_PLUS (SImode,
+ gen_rtx_REG (SImode, LR_REGNUM),
+ GEN_INT (-4))));
+ }
+
if (live_regs_mask)
{
insn = emit_multi_reg_push (live_regs_mask);
@@ -8609,7 +8633,9 @@ arm_assemble_integer (x, size, aligned_p)
if (NEED_GOT_RELOC && flag_pic && making_const_table &&
(GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
{
- if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
+ if (GET_CODE (x) == SYMBOL_REF
+ && (CONSTANT_POOL_ADDRESS_P (x)
+ || ENCODED_SHORT_CALL_ATTR_P (XSTR (x, 0))))
fputs ("(GOTOFF)", asm_out_file);
else if (GET_CODE (x) == LABEL_REF)
fputs ("(GOTOFF)", asm_out_file);
@@ -9139,20 +9165,8 @@ arm_hard_regno_mode_ok (regno, mode)
return (NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
if (regno <= LAST_ARM_REGNUM)
- /* We allow an SImode or smaller value to be stored in any
- general purpose register. This does not mean, for example
- that GCC will choose to store a variable in the stack pointer
- since it is a fixed register. But it is important to allow
- access to these special registers, so that they can be
- referenced from C code via the asm assembler alias, eg:
-
- register char * stack_ptr asm ("sp");
-
- For any mode requiring more than one register to hold the
- value we restrict the choice so that r13, r14, and r15
- cannot be part of the register set. */
- return (NUM_REGS (mode) <= 1)
- || (regno < (SP_REGNUM - (unsigned) NUM_REGS (mode)));
+ /* We allow any value to be stored in the general regisetrs. */
+ return 1;
if ( regno == FRAME_POINTER_REGNUM
|| regno == ARG_POINTER_REGNUM)
@@ -10187,7 +10201,6 @@ thumb_expand_prologue ()
if (regno > LAST_LO_REGNUM) /* Very unlikely */
{
rtx spare = gen_rtx (REG, SImode, IP_REGNUM);
- rtx insn;
/* Choose an arbitary, non-argument low register. */
reg = gen_rtx (REG, SImode, LAST_LO_REGNUM);
@@ -10312,7 +10325,7 @@ thumb_output_function_prologue (f, size)
if (current_function_pretend_args_size)
{
- if (current_function_anonymous_args)
+ if (cfun->machine->uses_anonymous_args)
{
int num_pushes;
diff --git a/contrib/gcc/config/arm/arm.h b/contrib/gcc/config/arm/arm.h
index 46f938e..5e8b5d9 100644
--- a/contrib/gcc/config/arm/arm.h
+++ b/contrib/gcc/config/arm/arm.h
@@ -81,8 +81,6 @@ extern struct rtx_def * pool_vector_label;
/* Set to 1 when a return insn is output, this means that the epilogue
is not needed. */
extern int return_used_this_function;
-/* Nonzero if the prologue must setup `fp'. */
-extern int current_function_anonymous_args;
/* Just in case configure has failed to define anything. */
#ifndef TARGET_CPU_DEFAULT
@@ -855,7 +853,7 @@ extern const char * structure_size_string;
regno <= LAST_ARM_FP_REGNUM; ++regno) \
fixed_regs[regno] = call_used_regs[regno] = 1; \
} \
- if (flag_pic) \
+ if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) \
{ \
fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
@@ -1029,6 +1027,13 @@ extern const char * structure_size_string;
16, 17, 18, 19, 20, 21, 22, 23, \
24, 25, 26 \
}
+
+/* Interrupt functions can only use registers that have already been
+ saved by the prologue, even if they would normally be
+ call-clobbered. */
+#define HARD_REGNO_RENAME_OK(SRC, DST) \
+ (! IS_INTERRUPT (cfun->machine->func_type) || \
+ regs_ever_live[DST])
/* Register and constant classes. */
@@ -1454,6 +1459,8 @@ typedef struct machine_function
int lr_save_eliminated;
/* Records the type of the current function. */
unsigned long func_type;
+ /* Record if the function has a variable argument list. */
+ int uses_anonymous_args;
}
machine_function;
@@ -1536,8 +1543,7 @@ typedef struct
that way. */
#define SETUP_INCOMING_VARARGS(CUM, MODE, TYPE, PRETEND_SIZE, NO_RTL) \
{ \
- extern int current_function_anonymous_args; \
- current_function_anonymous_args = 1; \
+ cfun->machine->uses_anonymous_args = 1; \
if ((CUM).nregs < NUM_ARG_REGS) \
(PRETEND_SIZE) = (NUM_ARG_REGS - (CUM).nregs) * UNITS_PER_WORD; \
}
@@ -1844,7 +1850,8 @@ typedef struct
#define THUMB_LEGITIMATE_CONSTANT_P(X) \
( GET_CODE (X) == CONST_INT \
|| GET_CODE (X) == CONST_DOUBLE \
- || CONSTANT_ADDRESS_P (X))
+ || CONSTANT_ADDRESS_P (X) \
+ || flag_pic)
#define LEGITIMATE_CONSTANT_P(X) \
(TARGET_ARM ? ARM_LEGITIMATE_CONSTANT_P (X) : THUMB_LEGITIMATE_CONSTANT_P (X))
@@ -1892,9 +1899,9 @@ typedef struct
or known to be defined in this file then encode a short call flag.
This macro is used inside the ENCODE_SECTION macro. */
#define ARM_ENCODE_CALL_TYPE(decl) \
- if (TREE_CODE (decl) == FUNCTION_DECL) \
+ if (TREE_CODE_CLASS (TREE_CODE (decl)) == 'd') \
{ \
- if (DECL_WEAK (decl)) \
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl)) \
arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR); \
else if (! TREE_PUBLIC (decl)) \
arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR); \
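[Editor's note: the per-function uses_anonymous_args flag set by SETUP_INCOMING_VARARGS (replacing the global current_function_anonymous_args) matters because a variadic function must spill its register-passed arguments (r0-r3 under the APCS) contiguously below the stack-passed ones, so that va_arg can walk a single linear image. A minimal illustration:

    #include <cstdarg>
    #include <cstdio>

    int sum(int n, ...) {          // n arrives in r0, extras in r1-r3 then stack
      va_list ap;
      va_start(ap, n);             // points into the spilled register save area
      int total = 0;
      for (int i = 0; i < n; i++)
        total += va_arg(ap, int);  // advances linearly across regs-then-stack
      va_end(ap);
      return total;
    }

    int main() { std::printf("%d\n", sum(5, 1, 2, 3, 4, 5)); return 0; }
]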
diff --git a/contrib/gcc/config/arm/arm.md b/contrib/gcc/config/arm/arm.md
index 3646fe5..91bbf61 100644
--- a/contrib/gcc/config/arm/arm.md
+++ b/contrib/gcc/config/arm/arm.md
@@ -135,8 +135,12 @@
(define_attr "neg_pool_range" "" (const_int 0))
; An assembler sequence may clobber the condition codes without us knowing.
+; If such an insn references the pool, then we have no way of knowing how,
+; so use the most conservative value for pool_range.
(define_asm_attributes
- [(set_attr "conds" "clob")])
+ [(set_attr "conds" "clob")
+ (set_attr "length" "4")
+ (set_attr "pool_range" "250")])
; TYPE attribute is used to detect floating point instructions which, if
; running on a co-processor can run in parallel with other, basic instructions
@@ -3959,7 +3963,7 @@
[(set_attr "length" "8")
(set_attr "type" "*,load,store2")
(set_attr "pool_range" "*,1020,*")
- (set_attr "neg_pool_range" "*,1012,*")]
+ (set_attr "neg_pool_range" "*,1008,*")]
)
;;; ??? This should have alternatives for constants.
@@ -4132,27 +4136,6 @@
}"
)
-(define_expand "movaddr"
- [(set (match_operand:SI 0 "s_register_operand" "")
- (match_operand:DI 1 "address_operand" ""))]
- "TARGET_ARM"
- ""
-)
-
-(define_insn "*movaddr_insn"
- [(set (match_operand:SI 0 "s_register_operand" "=r")
- (match_operand:DI 1 "address_operand" "p"))]
- "TARGET_ARM
- && reload_completed
- && (GET_CODE (operands[1]) == LABEL_REF
- || (GET_CODE (operands[1]) == CONST
- && GET_CODE (XEXP (operands[1], 0)) == PLUS
- && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
- && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT))"
- "adr%?\\t%0, %a1"
- [(set_attr "predicable" "yes")]
-)
-
;; When generating pic, we need to load the symbol offset into a register.
;; So that the optimizer does not confuse this with a normal symbol load
;; we use an unspec. The offset will be loaded from a constant pool entry,
@@ -5110,8 +5093,8 @@
(set_attr "predicable" "yes")
(set_attr "type"
"load,store2,*,store2,load,ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r")
- (set_attr "pool_range" "*,*,*,*,252,*,*,1024,*,*,*")
- (set_attr "neg_pool_range" "*,*,*,*,244,*,*,1012,*,*,*")]
+ (set_attr "pool_range" "*,*,*,*,1020,*,*,1024,*,*,*")
+ (set_attr "neg_pool_range" "*,*,*,*,1008,*,*,1008,*,*,*")]
)
;; Software floating point version. This is essentially the same as movdi.
@@ -5126,8 +5109,8 @@
"* return output_move_double (operands);"
[(set_attr "length" "8,8,8")
(set_attr "type" "*,load,store2")
- (set_attr "pool_range" "252")
- (set_attr "neg_pool_range" "244")]
+ (set_attr "pool_range" "1020")
+ (set_attr "neg_pool_range" "1008")]
)
;;; ??? This should have alternatives for constants.
@@ -5201,7 +5184,7 @@
(set_attr "predicable" "yes")
(set_attr "type" "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*")
(set_attr "pool_range" "*,*,1024,*,*,*,*")
- (set_attr "neg_pool_range" "*,*,1012,*,*,*,*")]
+ (set_attr "neg_pool_range" "*,*,1004,*,*,*,*")]
)
@@ -6655,7 +6638,8 @@
(use (match_operand 2 "" ""))
(clobber (reg:SI LR_REGNUM))]
"TARGET_THUMB
- && operands[2] == const0_rtx && (GET_CODE (operands[0]) == SYMBOL_REF)"
+ && GET_CODE (operands[0]) == SYMBOL_REF
+ && !arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
"bl\\t%a0"
[(set_attr "length" "4")
(set_attr "type" "call")]
@@ -6668,7 +6652,8 @@
(use (match_operand 3 "" ""))
(clobber (reg:SI LR_REGNUM))]
"TARGET_THUMB
- && operands[3] == const0_rtx && (GET_CODE (operands[1]) == SYMBOL_REF)"
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && !arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
"bl\\t%a1"
[(set_attr "length" "4")
(set_attr "type" "call")]
@@ -8297,6 +8282,7 @@
"TARGET_ARM
&& !BYTES_BIG_ENDIAN
&& !TARGET_MMU_TRAPS
+ && !arm_arch4
&& REGNO (operands[0]) != FRAME_POINTER_REGNUM
&& REGNO (operands[1]) != FRAME_POINTER_REGNUM
&& (GET_CODE (operands[2]) != REG
@@ -8315,6 +8301,7 @@
"TARGET_ARM
&& !BYTES_BIG_ENDIAN
&& !TARGET_MMU_TRAPS
+ && !arm_arch4
&& REGNO (operands[0]) != FRAME_POINTER_REGNUM
&& REGNO (operands[1]) != FRAME_POINTER_REGNUM
&& (GET_CODE (operands[2]) != REG
@@ -8479,6 +8466,7 @@
"TARGET_ARM
&& !BYTES_BIG_ENDIAN
&& !TARGET_MMU_TRAPS
+ && !arm_arch4
&& REGNO (operands[0]) != FRAME_POINTER_REGNUM
&& REGNO (operands[1]) != FRAME_POINTER_REGNUM
&& REGNO (operands[3]) != FRAME_POINTER_REGNUM"
@@ -8499,6 +8487,7 @@
"TARGET_ARM
&& !BYTES_BIG_ENDIAN
&& !TARGET_MMU_TRAPS
+ && !arm_arch4
&& REGNO (operands[0]) != FRAME_POINTER_REGNUM
&& REGNO (operands[1]) != FRAME_POINTER_REGNUM
&& REGNO (operands[3]) != FRAME_POINTER_REGNUM"
@@ -8562,6 +8551,7 @@
"TARGET_ARM
&& !BYTES_BIG_ENDIAN
&& !TARGET_MMU_TRAPS
+ && !arm_arch4
&& REGNO (operands[0]) != REGNO(operands[1])
&& (GET_CODE (operands[2]) != REG
|| REGNO(operands[0]) != REGNO (operands[2]))"
@@ -9136,11 +9126,28 @@
;; Miscellaneous Thumb patterns
-(define_insn "tablejump"
+(define_expand "tablejump"
+ [(parallel [(set (pc) (match_operand:SI 0 "register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))])]
+ "TARGET_THUMB"
+ "
+ if (flag_pic)
+ {
+ /* Hopefully, CSE will eliminate this copy. */
+ rtx reg1 = copy_addr_to_reg (gen_rtx_LABEL_REF (Pmode, operands[1]));
+ rtx reg2 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_addsi3 (reg2, operands[0], reg1));
+ operands[0] = reg2;
+ }
+ "
+)
+
+(define_insn "*thumb_tablejump"
[(set (pc) (match_operand:SI 0 "register_operand" "l*r"))
(use (label_ref (match_operand 1 "" "")))]
"TARGET_THUMB"
- "mov pc, %0"
+ "mov\\t%|pc, %0"
[(set_attr "length" "2")]
)
diff --git a/contrib/gcc/config/arm/coff.h b/contrib/gcc/config/arm/coff.h
index f53dace..c9f0d92 100644
--- a/contrib/gcc/config/arm/coff.h
+++ b/contrib/gcc/config/arm/coff.h
@@ -72,7 +72,9 @@ Boston, MA 02111-1307, USA. */
/* Define this macro if jump tables (for `tablejump' insns) should be
output in the text section, along with the assembler instructions.
Otherwise, the readonly data section is used. */
-#define JUMP_TABLES_IN_TEXT_SECTION 1
+/* We put ARM jump tables in the text section, because it makes the code
+ more efficient, but for Thumb it's better to put them out of band. */
+#define JUMP_TABLES_IN_TEXT_SECTION (TARGET_ARM)
#undef READONLY_DATA_SECTION
#define READONLY_DATA_SECTION rdata_section
diff --git a/contrib/gcc/config/arm/elf.h b/contrib/gcc/config/arm/elf.h
index c27daca..1cab2c4 100644
--- a/contrib/gcc/config/arm/elf.h
+++ b/contrib/gcc/config/arm/elf.h
@@ -103,7 +103,9 @@ Boston, MA 02111-1307, USA. */
/* Define this macro if jump tables (for `tablejump' insns) should be
output in the text section, along with the assembler instructions.
Otherwise, the readonly data section is used. */
-#define JUMP_TABLES_IN_TEXT_SECTION 1
+/* We put ARM jump tables in the text section, because it makes the code
+ more efficient, but for Thumb it's better to put them out of band. */
+#define JUMP_TABLES_IN_TEXT_SECTION (TARGET_ARM)
#ifndef LINK_SPEC
#define LINK_SPEC "%{mbig-endian:-EB} -X"
diff --git a/contrib/gcc/config/arm/linux-elf.h b/contrib/gcc/config/arm/linux-elf.h
index 74f7e7e..f1c86d2 100644
--- a/contrib/gcc/config/arm/linux-elf.h
+++ b/contrib/gcc/config/arm/linux-elf.h
@@ -1,5 +1,5 @@
/* Definitions for ARM running Linux-based GNU systems using ELF
- Copyright (C) 1993, 1994, 1997, 1998, 1999, 2000, 2001
+ Copyright (C) 1993, 1994, 1997, 1998, 1999, 2000, 2001, 2002
Free Software Foundation, Inc.
Contributed by Philip Blundell <philb@gnu.org>
@@ -42,6 +42,10 @@ Boston, MA 02111-1307, USA. */
#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
+/* The GNU C++ standard library requires that these macros be defined. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
+
/* Now we define the strings used to build the spec file. */
#define LIB_SPEC \
"%{shared: -lc} \
@@ -87,7 +91,7 @@ Boston, MA 02111-1307, USA. */
#undef CPP_PREDEFINES
#define CPP_PREDEFINES \
-"-Dunix -Dlinux -D__ELF__ \
+"-Dunix -D__gnu_linux__ -Dlinux -D__ELF__ \
-Asystem=unix -Asystem=posix"
/* Allow #sccs in preprocessor. */
diff --git a/contrib/gcc/config/arm/netbsd.h b/contrib/gcc/config/arm/netbsd.h
index 74f32e8..2be49d8 100644
--- a/contrib/gcc/config/arm/netbsd.h
+++ b/contrib/gcc/config/arm/netbsd.h
@@ -1,4 +1,4 @@
-/* NetBSD/arm (RiscBSD) version.
+/* NetBSD/arm a.out version.
Copyright (C) 1993, 1994, 1997, 1998 Free Software Foundation, Inc.
Contributed by Mark Brinicombe (amb@physig.ph.kcl.ac.uk)
@@ -77,15 +77,6 @@ Boston, MA 02111-1307, USA. */
#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE "int"
-#undef WCHAR_TYPE
-#define WCHAR_TYPE "int"
-
-#undef WCHAR_UNSIGNED
-#define WCHAR_UNSIGNED 0
-
-#undef WCHAR_TYPE_SIZE
-#define WCHAR_TYPE_SIZE 32
-
#define HANDLE_SYSV_PRAGMA
/* We don't have any limit on the length as our debugger is GDB. */
diff --git a/contrib/gcc/config/arm/rtems-elf.h b/contrib/gcc/config/arm/rtems-elf.h
index 954f72b..312c96f 100644
--- a/contrib/gcc/config/arm/rtems-elf.h
+++ b/contrib/gcc/config/arm/rtems-elf.h
@@ -1,5 +1,5 @@
/* Definitions for RTEMS based ARM systems using ELF
- Copyright (C) 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 2000, 2002 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -19,15 +19,10 @@ the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* Run-time Target Specification. */
-#undef TARGET_VERSION
+#undef TARGET_VERSION
#define TARGET_VERSION fputs (" (ARM/ELF RTEMS)", stderr);
#define HAS_INIT_SECTION
-#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Darm -Darm_elf -Drtems -D__rtems__ -D__ELF__ \
- -Asystem(rtems) -Acpu(arm) -Amachine(arm)"
-
-/*#undef INVOKE_main*/
-
-
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D__rtems__ -D__ELF__ -Asystem=rtems"
diff --git a/contrib/gcc/config/float-sparc.h b/contrib/gcc/config/float-sparc.h
index f3c2583..84250e5 100644
--- a/contrib/gcc/config/float-sparc.h
+++ b/contrib/gcc/config/float-sparc.h
@@ -66,7 +66,7 @@
#undef DBL_MAX_10_EXP
#define DBL_MAX_10_EXP 308
-#if defined(__sparcv9) || defined(__arch64__)
+#if defined(__sparcv9) || defined(__arch64__) || defined(__LONG_DOUBLE_128__)
/* Number of base-FLT_RADIX digits in the significand of a long double */
#undef LDBL_MANT_DIG
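With the widened guard, the 128-bit long double parameters apply whenever __LONG_DOUBLE_128__ is defined, not only on v9/64-bit compiles. A hedged sanity check of what the header then supplies, assuming the IEEE quad format whose significand is 113 bits:

    #include <float.h>

    /* Nonzero when float-sparc.h is in its 128-bit long double mode.  */
    int ldbl_is_quad = (LDBL_MANT_DIG == 113);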
diff --git a/contrib/gcc/config/i386/cygwin.h b/contrib/gcc/config/i386/cygwin.h
index 2a812a5..35f8209 100644
--- a/contrib/gcc/config/i386/cygwin.h
+++ b/contrib/gcc/config/i386/cygwin.h
@@ -423,6 +423,11 @@ extern void i386_pe_unique_section PARAMS ((TREE, int));
#undef ASM_COMMENT_START
#define ASM_COMMENT_START " #"
+/* DWARF2 Unwinding doesn't work with exception handling yet. To make it
+ work, we need to build a libgcc_s.dll, and dcrt0.o should be changed to
+ call __register_frame_info/__deregister_frame_info. */
+#define DWARF2_UNWIND_INFO 0
+
/* Don't assume anything about the header files. */
#define NO_IMPLICIT_EXTERN_C
diff --git a/contrib/gcc/config/i386/djgpp-rtems.h b/contrib/gcc/config/i386/djgpp-rtems.h
index b8f4908..551b666 100644
--- a/contrib/gcc/config/i386/djgpp-rtems.h
+++ b/contrib/gcc/config/i386/djgpp-rtems.h
@@ -1,7 +1,7 @@
/* Configuration for an i386 running RTEMS on top of MS-DOS with
DJGPP v2.x.
- Copyright (C) 1996,1999 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1999, 2002 Free Software Foundation, Inc.
Contributed by Joel Sherrill (joel@OARcorp.com).
This file is part of GNU CC.
@@ -21,20 +21,15 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#include "i386/djgpp.h"
-
/* Specify predefined symbols in preprocessor. */
#ifdef CPP_PREDEFINES
#undef CPP_PREDEFINES
#endif
-#define CPP_PREDEFINES "-Dunix -DGO32 -DDJGPP=2 -DMSDOS \
+#define CPP_PREDEFINES "-Dunix -DGO32 -DDJGPP=2 -DMSDOS -D__rtems__ \
-Asystem=unix -Asystem=msdos -Asystem=rtems"
/* Generate calls to memcpy, memcmp and memset. */
#ifndef TARGET_MEM_FUNCTIONS
#define TARGET_MEM_FUNCTIONS
#endif
-
-/* end of i386/djgpp-rtems.h */
-
diff --git a/contrib/gcc/config/i386/gnu.h b/contrib/gcc/config/i386/gnu.h
index 0214c05..b14328f 100644
--- a/contrib/gcc/config/i386/gnu.h
+++ b/contrib/gcc/config/i386/gnu.h
@@ -5,7 +5,7 @@
#undef CPP_PREDEFINES
#define CPP_PREDEFINES "-D__ELF__ -DMACH -Asystem=mach \
- -Dunix -Asystem=unix -Asystem=posix -D__GNU__ -Asystem=gnu"
+ -Dunix -Asystem=unix -Asystem=posix -D__gnu_hurd__ -D__GNU__ -Asystem=gnu"
#undef CPP_SPEC
#define CPP_SPEC "%(cpp_cpu) \
diff --git a/contrib/gcc/config/i386/i386-interix.h b/contrib/gcc/config/i386/i386-interix.h
index 962862f..76357f3 100644
--- a/contrib/gcc/config/i386/i386-interix.h
+++ b/contrib/gcc/config/i386/i386-interix.h
@@ -65,6 +65,7 @@ Boston, MA 02111-1307, USA. */
-D_M_IX86=300 -D_X86_=1 \
-D__stdcall=__attribute__((__stdcall__)) \
-D__cdecl=__attribute__((__cdecl__)) \
+ -D__declspec(x)=__attribute__((x)) \
-Asystem=unix -Asystem=interix"
#undef CPP_SPEC
@@ -237,6 +238,7 @@ Boston, MA 02111-1307, USA. */
#undef LD_INIT_SWITCH
#undef LD_FINI_SWITCH
+#define EH_FRAME_IN_DATA_SECTION
/* Note that there appears to be two different ways to support const
sections at the moment. You can either #define the symbol
@@ -410,10 +412,10 @@ extern void i386_pe_unique_section ();
#define UNIQUE_SECTION(DECL,RELOC) i386_pe_unique_section (DECL, RELOC)
#define SUPPORTS_ONE_ONLY 1
+#endif /* 0 */
/* Switch into a generic section. */
#define TARGET_ASM_NAMED_SECTION default_pe_asm_named_section
-#endif /* 0 */
/* DWARF2 Unwinding doesn't work with exception handling yet. */
#define DWARF2_UNWIND_INFO 0
@@ -421,3 +423,11 @@ extern void i386_pe_unique_section ();
/* Don't assume anything about the header files. */
#define NO_IMPLICIT_EXTERN_C
+/* MSVC returns structs of up to 8 bytes via registers. */
+
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#undef RETURN_IN_MEMORY
+#define RETURN_IN_MEMORY(TYPE) \
+ (TYPE_MODE (TYPE) == BLKmode || \
+ (AGGREGATE_TYPE_P (TYPE) && int_size_in_bytes(TYPE) > 8 ))
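Under this rule only BLKmode values and aggregates larger than 8 bytes take the hidden-pointer path; anything smaller comes back in registers, matching MSVC. A hedged pair of illustrative types (sizes assume the ILP32 Interix target):

    struct pair   { int x, y; };      /* 8 bytes: returned in registers */
    struct triple { int x, y, z; };   /* 12 bytes: returned in memory */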
diff --git a/contrib/gcc/config/i386/i386-protos.h b/contrib/gcc/config/i386/i386-protos.h
index 01c4d44..8321d47 100644
--- a/contrib/gcc/config/i386/i386-protos.h
+++ b/contrib/gcc/config/i386/i386-protos.h
@@ -169,6 +169,7 @@ extern int ix86_memory_move_cost PARAMS ((enum machine_mode, enum reg_class,
extern void ix86_set_move_mem_attrs PARAMS ((rtx, rtx, rtx, rtx, rtx));
extern void emit_i387_cw_initialization PARAMS ((rtx, rtx));
extern bool ix86_fp_jump_nontrivial_p PARAMS ((enum rtx_code));
+extern void x86_order_regs_for_local_alloc PARAMS ((void));
#ifdef TREE_CODE
diff --git a/contrib/gcc/config/i386/libgcc-x86_64-glibc.ver b/contrib/gcc/config/i386/libgcc-x86_64-glibc.ver
new file mode 100644
index 0000000..2ea321f
--- /dev/null
+++ b/contrib/gcc/config/i386/libgcc-x86_64-glibc.ver
@@ -0,0 +1,25 @@
+# In order to work around the very problems that force us to now generally
+# create a libgcc.so, glibc reexported a number of routines from libgcc.a.
+# By now choosing the same version tags for these specific routines, we
+# maintain enough binary compatibility to allow future versions of glibc
+# to defer implementation of these routines to libgcc.so via DT_AUXILIARY.
+
+%ifndef __x86_64__
+%inherit GCC_3.0 GLIBC_2.0
+GLIBC_2.0 {
+ # Sampling of DImode arithmetic used by (at least) i386 and m68k.
+ __divdi3
+ __moddi3
+ __udivdi3
+ __umoddi3
+
+ # Exception handling support functions used by most everyone.
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
+%endif
diff --git a/contrib/gcc/config/i386/linux-aout.h b/contrib/gcc/config/i386/linux-aout.h
index 302ee69..4b9cd5c 100644
--- a/contrib/gcc/config/i386/linux-aout.h
+++ b/contrib/gcc/config/i386/linux-aout.h
@@ -1,5 +1,5 @@
/* Definitions for Intel 386 running Linux-based GNU systems using a.out.
- Copyright (C) 1992, 1994, 1995, 1997, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1992, 1994, 1995, 1997, 1998, 2002 Free Software Foundation, Inc.
Contributed by H.J. Lu (hjl@nynexst.com)
This file is part of GNU CC.
@@ -31,7 +31,7 @@ Boston, MA 02111-1307, USA. */
/* Specify predefined symbols in preprocessor. */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Dunix -Dlinux -Asystem=posix"
+#define CPP_PREDEFINES "-Dunix -D__gnu_linux__ -Dlinux -Asystem=posix"
#undef CPP_SPEC
#define CPP_SPEC "%(cpp_cpu) %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE}"
diff --git a/contrib/gcc/config/i386/linux-oldld.h b/contrib/gcc/config/i386/linux-oldld.h
index 6102aa8..a95c205 100644
--- a/contrib/gcc/config/i386/linux-oldld.h
+++ b/contrib/gcc/config/i386/linux-oldld.h
@@ -1,6 +1,6 @@
/* Definitions for Intel 386 running Linux-based GNU systems with pre-BFD
a.out linkers.
- Copyright (C) 1995, 1997, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1995, 1997, 1998, 2002 Free Software Foundation, Inc.
Contributed by Michael Meissner (meissner@cygnus.com)
This file is part of GNU CC.
@@ -32,7 +32,7 @@ Boston, MA 02111-1307, USA. */
/* Specify predefined symbols in preprocessor. */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Dunix -Dlinux -Asystem=posix"
+#define CPP_PREDEFINES "-Dunix -D__gnu_linux__ -Dlinux -Asystem=posix"
#undef CPP_SPEC
#define CPP_SPEC "%(cpp_cpu) %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE}"
diff --git a/contrib/gcc/config/i386/linux.h b/contrib/gcc/config/i386/linux.h
index de634d3..b07bd03 100644
--- a/contrib/gcc/config/i386/linux.h
+++ b/contrib/gcc/config/i386/linux.h
@@ -85,7 +85,7 @@ Boston, MA 02111-1307, USA. */
#define WCHAR_TYPE_SIZE BITS_PER_WORD
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-D__ELF__ -Dunix -Dlinux -Asystem=posix"
+#define CPP_PREDEFINES "-D__ELF__ -Dunix -D__gnu_linux__ -Dlinux -Asystem=posix"
#undef CPP_SPEC
#ifdef USE_GNULIBC_1
diff --git a/contrib/gcc/config/i386/linux64.h b/contrib/gcc/config/i386/linux64.h
index 8d70972..4926999 100644
--- a/contrib/gcc/config/i386/linux64.h
+++ b/contrib/gcc/config/i386/linux64.h
@@ -1,5 +1,5 @@
/* Definitions for AMD x86-64 running Linux-based GNU systems with ELF format.
- Copyright (C) 2001 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002 Free Software Foundation, Inc.
Contributed by Jan Hubicka <jh@suse.cz>, based on linux.h.
This file is part of GNU CC.
@@ -25,7 +25,7 @@ Boston, MA 02111-1307, USA. */
#define TARGET_VERSION fprintf (stderr, " (x86-64 Linux/ELF)");
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-D__ELF__ -Dunix -Dlinux -Asystem(posix)"
+#define CPP_PREDEFINES "-D__ELF__ -Dunix -D__gnu_linux__ -Dlinux -Asystem(posix)"
#undef CPP_SPEC
#define CPP_SPEC "%(cpp_cpu) %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT} %{!m32:-D__LONG_MAX__=9223372036854775807L}"
@@ -39,10 +39,96 @@ Boston, MA 02111-1307, USA. */
done. */
#undef LINK_SPEC
-#define LINK_SPEC "%{!m32:-m elf_x86_64} %{m32:-m elf_i386} %{shared:-shared} \
+#define LINK_SPEC "%{!m32:-m elf_x86_64 -Y P,/usr/lib64} %{m32:-m elf_i386} \
+ %{shared:-shared} \
%{!shared: \
%{!static: \
%{rdynamic:-export-dynamic} \
- %{!dynamic-linker:-dynamic-linker /lib64/ld-linux-x86-64.so.2}} \
- %{static:-static}}"
+ %{m32:%{!dynamic-linker:-dynamic-linker /lib/ld-linux.so.2}} \
+ %{!m32:%{!dynamic-linker:-dynamic-linker /lib64/ld-linux-x86-64.so.2}}} \
+ %{static:-static}}"
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{m32:%{!shared: \
+ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \
+ %{!p:%{profile:gcrt1.o%s} %{!profile:crt1.o%s}}}} \
+ crti.o%s %{static:crtbeginT.o%s}\
+ %{!static:%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}}} \
+ %{!m32:%{!shared: \
+ %{pg:/usr/lib64/gcrt1.o%s} %{!pg:%{p:/usr/lib64/gcrt1.o%s} \
+ %{!p:%{profile:/usr/lib64/gcrt1.o%s} %{!profile:/usr/lib64/crt1.o%s}}}}\
+ /usr/lib64/crti.o%s %{static:crtbeginT.o%s} \
+ %{!static:%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}}}"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "\
+ %{m32:%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s} \
+ %{!m32:%{!shared:crtend.o%s} %{shared:crtendS.o%s} /usr/lib64/crtn.o%s}"
+
+#define MULTILIB_DEFAULTS { "m64" }
+
+/* Do code reading to identify a signal frame, and set the frame
+ state data appropriately. See unwind-dw2.c for the structs. */
+
+#ifdef IN_LIBGCC2
+#include <signal.h>
+#include <sys/ucontext.h>
+#endif
+
+#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS, SUCCESS) \
+ do { \
+ unsigned char *pc_ = (CONTEXT)->ra; \
+ struct sigcontext *sc_; \
+ long new_cfa_; \
+ \
+ /* movq __NR_rt_sigreturn, %rax ; syscall */ \
+ if (*(unsigned char *)(pc_+0) == 0x48 \
+ && *(unsigned long *)(pc_+1) == 0x050f0000000fc0c7) \
+ { \
+ struct ucontext *uc_ = (CONTEXT)->cfa; \
+ sc_ = (struct sigcontext *) &uc_->uc_mcontext; \
+ } \
+ else \
+ break; \
+ \
+ new_cfa_ = sc_->rsp; \
+ (FS)->cfa_how = CFA_REG_OFFSET; \
+ /* Register 7 is rsp */ \
+ (FS)->cfa_reg = 7; \
+ (FS)->cfa_offset = new_cfa_ - (long) (CONTEXT)->cfa; \
+ \
+ /* The SVR4 register numbering macros aren't usable in libgcc. */ \
+ (FS)->regs.reg[0].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[0].loc.offset = (long)&sc_->rax - new_cfa_; \
+ (FS)->regs.reg[1].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[1].loc.offset = (long)&sc_->rbx - new_cfa_; \
+ (FS)->regs.reg[2].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[2].loc.offset = (long)&sc_->rcx - new_cfa_; \
+ (FS)->regs.reg[3].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[3].loc.offset = (long)&sc_->rdx - new_cfa_; \
+ (FS)->regs.reg[4].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[4].loc.offset = (long)&sc_->rbp - new_cfa_; \
+ (FS)->regs.reg[5].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[5].loc.offset = (long)&sc_->rsi - new_cfa_; \
+ (FS)->regs.reg[6].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[6].loc.offset = (long)&sc_->rdi - new_cfa_; \
+ (FS)->regs.reg[8].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[8].loc.offset = (long)&sc_->r8 - new_cfa_; \
+ (FS)->regs.reg[9].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[9].loc.offset = (long)&sc_->r9 - new_cfa_; \
+ (FS)->regs.reg[10].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[10].loc.offset = (long)&sc_->r10 - new_cfa_; \
+ (FS)->regs.reg[11].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[11].loc.offset = (long)&sc_->r11 - new_cfa_; \
+ (FS)->regs.reg[12].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[12].loc.offset = (long)&sc_->r12 - new_cfa_; \
+ (FS)->regs.reg[13].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[13].loc.offset = (long)&sc_->r13 - new_cfa_; \
+ (FS)->regs.reg[14].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[14].loc.offset = (long)&sc_->r14 - new_cfa_; \
+ (FS)->regs.reg[15].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[15].loc.offset = (long)&sc_->r15 - new_cfa_; \
+ (FS)->retaddr_column = 16; \
+ goto SUCCESS; \
+ } while (0)
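The byte test above matches the x86-64 rt_sigreturn trampoline, movq $15,%rax; syscall, encoded as 48 c7 c0 0f 00 00 00 0f 05: byte 0 is compared separately, and bytes 1-8 read as a little-endian unsigned long equal 0x050f0000000fc0c7. A standalone sketch verifying that encoding (assumes a little-endian host with an 8-byte unsigned long; not part of the change itself):

    #include <stdio.h>
    #include <string.h>

    int
    main (void)
    {
      /* movq $__NR_rt_sigreturn, %rax ; syscall  (__NR_rt_sigreturn == 15) */
      unsigned char tramp[9] = { 0x48, 0xc7, 0xc0, 0x0f, 0x00, 0x00, 0x00,
                                 0x0f, 0x05 };
      unsigned long w;

      memcpy (&w, tramp + 1, sizeof w);
      printf ("%s\n", tramp[0] == 0x48 && w == 0x050f0000000fc0c7UL
                      ? "matches" : "no match");
      return 0;
    }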
diff --git a/contrib/gcc/config/i386/netbsd-elf.h b/contrib/gcc/config/i386/netbsd-elf.h
index 7ff3c21..30267df 100644
--- a/contrib/gcc/config/i386/netbsd-elf.h
+++ b/contrib/gcc/config/i386/netbsd-elf.h
@@ -51,18 +51,6 @@ Boston, MA 02111-1307, USA. */
#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE "int"
-#undef WCHAR_TYPE
-#define WCHAR_TYPE "int"
-
-#undef WCHAR_UNSIGNED
-#define WCHAR_UNSIGNED 0
-
-#undef WCHAR_TYPE_SIZE
-#define WCHAR_TYPE_SIZE 32
-
-#undef WINT_TYPE
-#define WINT_TYPE "int"
-
#undef ASM_APP_ON
#define ASM_APP_ON "#APP\n"
diff --git a/contrib/gcc/config/i386/netbsd.h b/contrib/gcc/config/i386/netbsd.h
index 659a4f0..1d95de2 100644
--- a/contrib/gcc/config/i386/netbsd.h
+++ b/contrib/gcc/config/i386/netbsd.h
@@ -22,15 +22,6 @@
#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE "int"
-#undef WCHAR_TYPE
-#define WCHAR_TYPE "int"
-
-#undef WCHAR_UNSIGNED
-#define WCHAR_UNSIGNED 0
-
-#undef WCHAR_TYPE_SIZE
-#define WCHAR_TYPE_SIZE 32
-
#undef ASM_APP_ON
#define ASM_APP_ON "#APP\n"
diff --git a/contrib/gcc/config/i386/rtems.h b/contrib/gcc/config/i386/rtems.h
index 9101332..0fda632 100644
--- a/contrib/gcc/config/i386/rtems.h
+++ b/contrib/gcc/config/i386/rtems.h
@@ -1,5 +1,5 @@
/* Definitions for rtems targeting an Intel i386 using coff.
- Copyright (C) 1996, 1997, 2000 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 2000, 2002 Free Software Foundation, Inc.
Contributed by Joel Sherrill (joel@OARcorp.com).
This file is part of GNU CC.
@@ -19,19 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#include "i386/i386-coff.h"
-
/* Specify predefined symbols in preprocessor. */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Drtems -D__rtems__ -Asystem=rtems"
-
-/* Generate calls to memcpy, memcmp and memset. */
-#ifndef TARGET_MEM_FUNCTIONS
-#define TARGET_MEM_FUNCTIONS
-#endif
-
-/* Get machine-independent configuration parameters for RTEMS. */
-#include <rtems.h>
-
-/* end of i386/rtems.h */
+#define CPP_PREDEFINES "-D__rtems__ -Asystem=rtems"
diff --git a/contrib/gcc/config/i386/rtemself.h b/contrib/gcc/config/i386/rtemself.h
index 6e31f56..7e831b9 100644
--- a/contrib/gcc/config/i386/rtemself.h
+++ b/contrib/gcc/config/i386/rtemself.h
@@ -1,8 +1,6 @@
-/* Definitions for Intel 386 running Linux-based GNU systems with ELF format.
- Copyright (C) 1994, 1995, 1996, 1997, 1998, 2000
- Free Software Foundation, Inc.
- Contributed by Eric Youngdale.
- Modified for stabs-in-ELF by H.J. Lu.
+/* Definitions for rtems targeting an ix86 using ELF.
+ Copyright (C) 1996, 1997, 2000, 2001, 2002 Free Software Foundation, Inc.
+ Contributed by Joel Sherrill (joel@OARcorp.com).
This file is part of GNU CC.
@@ -21,65 +19,13 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#define LINUX_DEFAULT_ELF
+/* Specify predefined symbols in preprocessor. */
-#undef TARGET_VERSION
-#define TARGET_VERSION fprintf (stderr, " (i386 RTEMS with ELF)");
+#include <i386/i386elf.h>
-/* The svr4 ABI for the i386 says that records and unions are returned
- in memory. */
-#undef DEFAULT_PCC_STRUCT_RETURN
-#define DEFAULT_PCC_STRUCT_RETURN 1
-
-#undef DBX_REGISTER_NUMBER
-#define DBX_REGISTER_NUMBER(n) svr4_dbx_register_map[n]
-
-/* Output assembler code to FILE to increment profiler label # LABELNO
- for profiling a function entry. */
-
-#undef FUNCTION_PROFILER
-#define FUNCTION_PROFILER(FILE, LABELNO) \
-{ \
- if (flag_pic) \
- { \
- fprintf (FILE, "\tleal %sP%d@GOTOFF(%%ebx),%%edx\n", \
- LPREFIX, (LABELNO)); \
- fprintf (FILE, "\tcall *mcount@GOT(%%ebx)\n"); \
- } \
- else \
- { \
- fprintf (FILE, "\tmovl $%sP%d,%%edx\n", LPREFIX, (LABELNO)); \
- fprintf (FILE, "\tcall mcount\n"); \
- } \
-}
-
-#undef SIZE_TYPE
-#define SIZE_TYPE "unsigned int"
-
-#undef PTRDIFF_TYPE
-#define PTRDIFF_TYPE "int"
-
-#undef WCHAR_TYPE
-#define WCHAR_TYPE "long int"
-
-#undef WCHAR_TYPE_SIZE
-#define WCHAR_TYPE_SIZE BITS_PER_WORD
-
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Drtems -D__rtems__ -Asystem=rtems"
-
-/* A C statement (sans semicolon) to output to the stdio stream
- FILE the assembler definition of uninitialized global DECL named
- NAME whose size is SIZE bytes and alignment is ALIGN bytes.
- Try to use asm_output_aligned_bss to implement this macro. */
-
-#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
- asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
-
-#undef STARTFILE_SPEC
-#define STARTFILE_SPEC "crt0.o%s crti.o%s crtbegin.o%s"
-
-#undef ENDFILE_SPEC
-#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+#define CPP_PREDEFINES "-D__rtems__ -Asystem=rtems \
+ -D__ELF__ -D__i386__ -D__USE_INIT_FINI__"
-/* end of i386/rtemself.h */
+#undef CPP_SPEC
+#define CPP_SPEC "%(cpp_cpu) %{msoft-float:-D_SOFT_FLOAT}"
diff --git a/contrib/gcc/config/i386/sco5.h b/contrib/gcc/config/i386/sco5.h
index 211ca36..36a04d5 100644
--- a/contrib/gcc/config/i386/sco5.h
+++ b/contrib/gcc/config/i386/sco5.h
@@ -20,9 +20,6 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#include "i386/i386.h" /* Base i386 target definitions */
-#include "i386/att.h" /* Use AT&T i386 assembler syntax */
-
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (i386, SCO OpenServer 5 Syntax)");
diff --git a/contrib/gcc/config/i386/sol2.h b/contrib/gcc/config/i386/sol2.h
index fb9d745..5fa5fcd 100644
--- a/contrib/gcc/config/i386/sol2.h
+++ b/contrib/gcc/config/i386/sol2.h
@@ -39,6 +39,8 @@ Boston, MA 02111-1307, USA. */
#define ASM_SPEC \
"%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s"
+#define CMOV_SUN_AS_SYNTAX 1
+
#else /* GAS_REJECTS_MINUS_S */
/* Same as above, except for -s, unsupported by GNU as. */
@@ -73,10 +75,11 @@ Boston, MA 02111-1307, USA. */
#undef WINT_TYPE_SIZE
#define WINT_TYPE_SIZE BITS_PER_WORD
-/* Add "sun" to the list of symbols defined for SVR4. */
+#define HANDLE_PRAGMA_REDEFINE_EXTNAME 1
+
#undef CPP_PREDEFINES
#define CPP_PREDEFINES \
- "-Dunix -D__svr4__ -D__SVR4 -Dsun -Asystem=svr4"
+ "-Dunix -D__svr4__ -D__SVR4 -Dsun -D__PRAGMA_REDEFINE_EXTNAME -Asystem=svr4"
/* Solaris 2/Intel as chokes on #line directives. */
#undef CPP_SPEC
@@ -154,3 +157,6 @@ Boston, MA 02111-1307, USA. */
#undef LOCAL_LABEL_PREFIX
#define LOCAL_LABEL_PREFIX "."
+
+/* The Solaris assembler does not support .quad. Do not use it. */
+#undef ASM_QUAD
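HANDLE_PRAGMA_REDEFINE_EXTNAME, together with the __PRAGMA_REDEFINE_EXTNAME predefine, lets the Solaris system headers remap a source-level symbol onto a different link-time name. A hedged example of the pragma this enables; the open/open64 pair is the classic Solaris large-file mapping, shown as an illustration rather than quoted from this change:

    #pragma redefine_extname open open64
    int open (const char *path, int oflag, ...);
    /* Calls to open () now resolve to open64 at link time.  */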
diff --git a/contrib/gcc/config/i386/t-interix b/contrib/gcc/config/i386/t-interix
index 3f78f78..adcf593 100644
--- a/contrib/gcc/config/i386/t-interix
+++ b/contrib/gcc/config/i386/t-interix
@@ -4,5 +4,3 @@ LIB1ASMFUNCS = _chkstk
interix.o: $(srcdir)/config/i386/interix.c
$(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/i386/interix.c
-# System headers will track gcc's needs.
-USER_H=
diff --git a/contrib/gcc/config/i386/t-linux64 b/contrib/gcc/config/i386/t-linux64
new file mode 100644
index 0000000..46a7caa
--- /dev/null
+++ b/contrib/gcc/config/i386/t-linux64
@@ -0,0 +1,15 @@
+# On x86-64 we do not need any exports for glibc for 64-bit libgcc_s, so
+# override the settings from t-slibgcc-elf-ver and t-linux.
+SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver \
+ $(srcdir)/config/i386/libgcc-x86_64-glibc.ver
+
+MULTILIB_OPTIONS = m64/m32
+MULTILIB_DIRNAMES = 64 32
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o
+
+SHLIB_SLIBDIR_SUFFIXES = 64:64 32:
diff --git a/contrib/gcc/config/i386/t-rtems-i386 b/contrib/gcc/config/i386/t-rtems-i386
index d301ed9..b57f4fd 100644
--- a/contrib/gcc/config/i386/t-rtems-i386
+++ b/contrib/gcc/config/i386/t-rtems-i386
@@ -12,3 +12,43 @@ crtn.o: $(srcdir)/config/i386/sol2-cn.asm $(GCC_PASSES)
sed -e '/^!/d' <$(srcdir)/config/i386/sol2-cn.asm >crtn.s
$(GCC_FOR_TARGET) -c -o crtn.o crtn.s
+# We want fine-grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+LIB2FUNCS_EXTRA = xp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifdef __LITTLE_ENDIAN__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >>dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifdef __LITTLE_ENDIAN__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >>fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+xp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define EXTENDED_FLOAT_STUBS' > xp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> xp-bit.c
+
+MULTILIB_OPTIONS = mcpu=i486/mcpu=pentium/mcpu=pentiumpro/mcpu=k6/mcpu=athlon \
+msoft-float mno-fp-ret-in-387
+MULTILIB_DIRNAMES= m486 mpentium mpentiumpro k6 athlon soft-float nofp
+MULTILIB_MATCHES = msoft-float=mno-m80387
+MULTILIB_EXCEPTIONS = \
+mno-fp-ret-in-387 \
+mcpu=i486/*mno-fp-ret-in-387* \
+mcpu=pentium/*msoft-float* mcpu=pentium/*mno-fp-ret-in-387* \
+mcpu=pentiumpro/*msoft-float* mcpu=pentiumpro/*mno-fp-ret-in-387* \
+mcpu=k6/*msoft-float* mcpu=k6/*mno-fp-ret-in-387* \
+mcpu=athlon/*msoft-float* mcpu=athlon/*mno-fp-ret-in-387*
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
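The dp-bit.c, fp-bit.c, and xp-bit.c rules synthesize their sources by prepending a few #defines to config/fp-bit.c. For instance, the generated dp-bit.c begins with exactly:

    #ifdef __LITTLE_ENDIAN__
    #define FLOAT_BIT_ORDER_MISMATCH
    #endif
    /* ...followed by the verbatim contents of config/fp-bit.c.  */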
diff --git a/contrib/gcc/config/i386/x86-64.h b/contrib/gcc/config/i386/x86-64.h
index c6a8376..56e4684 100644
--- a/contrib/gcc/config/i386/x86-64.h
+++ b/contrib/gcc/config/i386/x86-64.h
@@ -73,7 +73,6 @@ Boston, MA 02111-1307, USA. */
This is used to align code labels according to Intel recommendations. */
-#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
do { \
if ((LOG) != 0) { \
@@ -81,7 +80,6 @@ Boston, MA 02111-1307, USA. */
else fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \
} \
} while (0)
-#endif
/* i386 System V Release 4 uses DWARF debugging info.
diff --git a/contrib/gcc/config/ia64/aix.h b/contrib/gcc/config/ia64/aix.h
index 1e57c2b..fee0732 100644
--- a/contrib/gcc/config/ia64/aix.h
+++ b/contrib/gcc/config/ia64/aix.h
@@ -86,9 +86,9 @@ Boston, MA 02111-1307, USA. */
#undef CPP_PREDEFINES
#define CPP_PREDEFINES "\
--D__ia64 -D__ia64__ -D_AIX -D_AIX64 -D_LONGLONG -Dunix \
--D__LP64__ -D__ELF__ -Asystem=unix -Asystem=aix -Acpu=ia64 -Amachine=ia64 \
--D__64BIT__ -D_LONG_LONG -D_IA64 -D__int128=__size128_t"
+ -D_AIX -D_AIX64 -D_LONGLONG -Dunix \
+ -Asystem=unix -Asystem=aix \
+ -D__64BIT__ -D_LONG_LONG -D_IA64 -D__int128=__size128_t"
/* The GNU C++ standard library requires that these macros be defined. */
#undef CPLUSPLUS_CPP_SPEC
@@ -100,10 +100,6 @@ Boston, MA 02111-1307, USA. */
-D__LONG_MAX__=9223372036854775807L \
%{cpp_cpu}"
-/* ia64-specific options for gas */
-#undef ASM_SPEC
-#define ASM_SPEC "-x %{mconstant-gp} %{mauto-pic}"
-
/* Define this for shared library support. */
#undef LINK_SPEC
@@ -115,14 +111,8 @@ Boston, MA 02111-1307, USA. */
%{!dynamic-linker:-dynamic-linker /usr/lib/ia64l64/libc.so.1}} \
%{static:-static}}"
-#define DONT_USE_BUILTIN_SETJMP
#define JMP_BUF_SIZE 85
-/* Output any profiling code before the prologue. */
-
-#undef PROFILE_BEFORE_PROLOGUE
-#define PROFILE_BEFORE_PROLOGUE 1
-
/* A C statement or compound statement to output to FILE some assembler code to
call the profiling subroutine `mcount'.
diff --git a/contrib/gcc/config/ia64/elf.h b/contrib/gcc/config/ia64/elf.h
index af8c7a6..af6d917 100644
--- a/contrib/gcc/config/ia64/elf.h
+++ b/contrib/gcc/config/ia64/elf.h
@@ -17,10 +17,12 @@
#if ((TARGET_CPU_DEFAULT | TARGET_DEFAULT) & MASK_GNU_AS) != 0
/* GNU AS. */
-#define ASM_SPEC \
- "%{mno-gnu-as:-N so} %{!mno-gnu-as:-x} %{mconstant-gp} %{mauto-pic}"
+#undef ASM_EXTRA_SPEC
+#define ASM_EXTRA_SPEC \
+ "%{mno-gnu-as:-N so} %{!mno-gnu-as:-x}"
#else
/* Intel ias. */
+#undef ASM_SPEC
#define ASM_SPEC \
"%{!mgnu-as:-N so} %{mgnu-as:-x} %{mconstant-gp:-M const_gp}\
%{mauto-pic:-M no_plabel}"
diff --git a/contrib/gcc/config/ia64/freebsd.h b/contrib/gcc/config/ia64/freebsd.h
index 6140128..57bb391 100644
--- a/contrib/gcc/config/ia64/freebsd.h
+++ b/contrib/gcc/config/ia64/freebsd.h
@@ -19,7 +19,6 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
-#undef LINK_SPEC
#define LINK_SPEC \
"%{p:%e`-p' not supported; use `-pg' and gprof(1)} \
%{Wl,*:%*} \
@@ -32,9 +31,6 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
%{!dynamic-linker:-dynamic-linker /usr/libexec/ld-elf.so.1}} \
%{static:-Bstatic}}"
-#undef ASM_SPEC
-#define ASM_SPEC "-x %{mconstant-gp} %{mauto-pic}"
-
/************************[ Target stuff ]***********************************/
@@ -57,10 +53,4 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
#define TARGET_ELF 1
-#define DONT_USE_BUILTIN_SETJMP
#define JMP_BUF_SIZE 76
-
-/* Output any profiling code before the prologue. */
-
-#undef PROFILE_BEFORE_PROLOGUE
-#define PROFILE_BEFORE_PROLOGUE 1
diff --git a/contrib/gcc/config/ia64/hpux.h b/contrib/gcc/config/ia64/hpux.h
index 89b2902..d46acf2 100644
--- a/contrib/gcc/config/ia64/hpux.h
+++ b/contrib/gcc/config/ia64/hpux.h
@@ -27,9 +27,9 @@ Boston, MA 02111-1307, USA. */
#undef CPP_PREDEFINES
#define CPP_PREDEFINES "\
- -D__IA64__ -D__ia64 -D__ia64__ -D__hpux -D__hpux__ -Dhpux -Dunix \
- -D__BIG_ENDIAN__ -D_LONGLONG -D__ELF__ \
- -Asystem=hpux -Asystem=posix -Asystem=unix -Acpu=ia64 -Amachine=ia64 \
+ -D__IA64__ -D__hpux -D__hpux__ -Dhpux -Dunix \
+ -D__BIG_ENDIAN__ -D_LONGLONG \
+ -Asystem=hpux -Asystem=posix -Asystem=unix \
-D_UINT128_T"
/* -D__fpreg=long double is needed to compensate for the lack of __fpreg
@@ -40,15 +40,14 @@ Boston, MA 02111-1307, USA. */
#undef CPP_SPEC
#define CPP_SPEC "\
%{mcpu=itanium:-D__itanium__} \
- %{mlp64:-D__LP64__ -D__LONG_MAX__=9223372036854775807L} \
+ %{mlp64:-D__LP64__ -D_LP64 -D__LONG_MAX__=9223372036854775807L} \
%{!ansi:%{!std=c*:%{!std=i*: -D_HPUX_SOURCE -D__STDC_EXT__}}} \
-D__fpreg=long\\ double \
-D__float80=long\\ double \
-D__float128=long\\ double"
-#undef ASM_SPEC
-#define ASM_SPEC "-x %{mconstant-gp} %{mauto-pic} \
- %{milp32:-milp32} %{mlp64:-mlp64}"
+#undef ASM_EXTRA_SPEC
+#define ASM_EXTRA_SPEC "%{milp32:-milp32} %{mlp64:-mlp64}"
#undef ENDFILE_SPEC
@@ -84,7 +83,6 @@ Boston, MA 02111-1307, USA. */
#define POINTERS_EXTEND_UNSIGNED -1
-#define DONT_USE_BUILTIN_SETJMP
#define JMP_BUF_SIZE (8 * 76)
#undef CONST_SECTION_ASM_OP
diff --git a/contrib/gcc/config/ia64/ia64-protos.h b/contrib/gcc/config/ia64/ia64-protos.h
index 43538cc..659adc5 100644
--- a/contrib/gcc/config/ia64/ia64-protos.h
+++ b/contrib/gcc/config/ia64/ia64-protos.h
@@ -113,6 +113,9 @@ extern int ia64_function_arg_partial_nregs PARAMS((CUMULATIVE_ARGS *,
extern void ia64_function_arg_advance PARAMS((CUMULATIVE_ARGS *,
enum machine_mode,
tree, int));
+extern int ia64_function_arg_pass_by_reference PARAMS((CUMULATIVE_ARGS *,
+ enum machine_mode,
+ tree, int));
extern int ia64_return_in_memory PARAMS((tree));
extern void ia64_asm_output_external PARAMS((FILE *, tree, const char *));
@@ -122,6 +125,7 @@ extern void ia64_encode_section_info PARAMS((tree));
extern int ia64_register_move_cost PARAMS((enum machine_mode, enum reg_class,
enum reg_class));
extern int ia64_epilogue_uses PARAMS((int));
+extern int ia64_eh_uses PARAMS((int));
extern void emit_safe_across_calls PARAMS((FILE *));
extern void ia64_init_builtins PARAMS((void));
extern void ia64_override_options PARAMS((void));
diff --git a/contrib/gcc/config/ia64/ia64.c b/contrib/gcc/config/ia64/ia64.c
index 7ca060b..91dd396 100644
--- a/contrib/gcc/config/ia64/ia64.c
+++ b/contrib/gcc/config/ia64/ia64.c
@@ -24,7 +24,6 @@ Boston, MA 02111-1307, USA. */
#include "system.h"
#include "rtl.h"
#include "tree.h"
-#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
@@ -46,6 +45,7 @@ Boston, MA 02111-1307, USA. */
#include "timevar.h"
#include "target.h"
#include "target-def.h"
+#include "tm_p.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
ASM_OUTPUT_LABELREF. */
@@ -138,7 +138,6 @@ static rtx ia64_expand_compare_and_swap PARAMS ((enum machine_mode, int,
static rtx ia64_expand_lock_test_and_set PARAMS ((enum machine_mode,
tree, rtx));
static rtx ia64_expand_lock_release PARAMS ((enum machine_mode, tree, rtx));
-const struct attribute_spec ia64_attribute_table[];
static bool ia64_assemble_integer PARAMS ((rtx, unsigned int, int));
static void ia64_output_function_prologue PARAMS ((FILE *, HOST_WIDE_INT));
static void ia64_output_function_epilogue PARAMS ((FILE *, HOST_WIDE_INT));
@@ -156,6 +155,14 @@ static int ia64_variable_issue PARAMS ((FILE *, int, rtx, int));
static rtx ia64_cycle_display PARAMS ((int, rtx));
+/* Table of valid machine attributes. */
+static const struct attribute_spec ia64_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ { "syscall_linkage", 0, 0, false, true, true, NULL },
+ { NULL, 0, 0, false, false, false, NULL }
+};
+
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
@@ -1137,7 +1144,8 @@ ia64_expand_call (retval, addr, nextarg, sibcall_p)
rtx nextarg;
int sibcall_p;
{
- rtx insn, b0, pfs, gp_save, narg_rtx;
+ rtx insn, b0, pfs, gp_save, narg_rtx, dest;
+ bool indirect_p;
int narg;
addr = XEXP (addr, 0);
@@ -1164,61 +1172,36 @@ ia64_expand_call (retval, addr, nextarg, sibcall_p)
return;
}
- if (sibcall_p)
+ indirect_p = ! symbolic_operand (addr, VOIDmode);
+
+ if (sibcall_p || (TARGET_CONST_GP && !indirect_p))
gp_save = NULL_RTX;
else
gp_save = ia64_gp_save_reg (setjmp_operand (addr, VOIDmode));
+ if (gp_save)
+ emit_move_insn (gp_save, pic_offset_table_rtx);
+
/* If this is an indirect call, then we have the address of a descriptor. */
- if (! symbolic_operand (addr, VOIDmode))
+ if (indirect_p)
{
- rtx dest;
-
- if (! sibcall_p)
- emit_move_insn (gp_save, pic_offset_table_rtx);
-
dest = force_reg (DImode, gen_rtx_MEM (DImode, addr));
emit_move_insn (pic_offset_table_rtx,
gen_rtx_MEM (DImode, plus_constant (addr, 8)));
-
- if (sibcall_p)
- insn = gen_sibcall_pic (dest, narg_rtx, b0, pfs);
- else if (! retval)
- insn = gen_call_pic (dest, narg_rtx, b0);
- else
- insn = gen_call_value_pic (retval, dest, narg_rtx, b0);
- emit_call_insn (insn);
-
- if (! sibcall_p)
- emit_move_insn (pic_offset_table_rtx, gp_save);
- }
- else if (TARGET_CONST_GP)
- {
- if (sibcall_p)
- insn = gen_sibcall_nopic (addr, narg_rtx, b0, pfs);
- else if (! retval)
- insn = gen_call_nopic (addr, narg_rtx, b0);
- else
- insn = gen_call_value_nopic (retval, addr, narg_rtx, b0);
- emit_call_insn (insn);
}
else
- {
- if (sibcall_p)
- emit_call_insn (gen_sibcall_pic (addr, narg_rtx, b0, pfs));
- else
- {
- emit_move_insn (gp_save, pic_offset_table_rtx);
+ dest = addr;
- if (! retval)
- insn = gen_call_pic (addr, narg_rtx, b0);
- else
- insn = gen_call_value_pic (retval, addr, narg_rtx, b0);
- emit_call_insn (insn);
+ if (sibcall_p)
+ insn = gen_sibcall_pic (dest, narg_rtx, b0, pfs);
+ else if (! retval)
+ insn = gen_call_pic (dest, narg_rtx, b0);
+ else
+ insn = gen_call_value_pic (retval, dest, narg_rtx, b0);
+ emit_call_insn (insn);
- emit_move_insn (pic_offset_table_rtx, gp_save);
- }
- }
+ if (gp_save)
+ emit_move_insn (pic_offset_table_rtx, gp_save);
}
/* Begin the assembly file. */
@@ -2040,7 +2023,7 @@ ia64_expand_prologue ()
/* We don't need an alloc instruction if we've used no outputs or locals. */
if (current_frame_info.n_local_regs == 0
&& current_frame_info.n_output_regs == 0
- && current_frame_info.n_input_regs <= current_function_args_info.words)
+ && current_frame_info.n_input_regs <= current_function_args_info.int_regs)
{
/* If there is no alloc, but there are input registers used, then we
need a .regstk directive. */
@@ -2873,7 +2856,7 @@ hfa_element_mode (type, nested)
return VOIDmode;
case ARRAY_TYPE:
- return TYPE_MODE (TREE_TYPE (type));
+ return hfa_element_mode (TREE_TYPE (type), 1);
case RECORD_TYPE:
case UNION_TYPE:
@@ -3181,14 +3164,14 @@ ia64_function_arg_advance (cum, mode, type, named)
FR registers, then FP values must also go in general registers. This can
happen when we have a SFmode HFA. */
else if (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS)
- return;
+ cum->int_regs = cum->words;
/* If there is a prototype, then FP values go in a FR register when
   named, and in a GR register when unnamed. */
else if (cum->prototype)
{
if (! named)
- return;
+ cum->int_regs = cum->words;
else
/* ??? Complex types should not reach here. */
cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
@@ -3196,10 +3179,24 @@ ia64_function_arg_advance (cum, mode, type, named)
/* If there is no prototype, then FP values go in both FR and GR
registers. */
else
- /* ??? Complex types should not reach here. */
- cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
+ {
+ /* ??? Complex types should not reach here. */
+ cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
+ cum->int_regs = cum->words;
+ }
+}
- return;
+/* Variable sized types are passed by reference. */
+/* ??? At present this is a GCC extension to the IA-64 ABI. */
+
+int
+ia64_function_arg_pass_by_reference (cum, mode, type, named)
+ CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ tree type;
+ int named ATTRIBUTE_UNUSED;
+{
+ return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
}
/* Implement va_start. */
@@ -3232,6 +3229,13 @@ ia64_va_arg (valist, type)
{
tree t;
+ /* Variable sized types are passed by reference. */
+ if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ {
+ rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
+ return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
+ }
+
/* Arguments with alignment larger than 8 bytes start at the next even
boundary. */
if (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
@@ -4765,6 +4769,7 @@ group_barrier_needed_p (insn)
/* We play dependency tricks with the epilogue in order
to get proper schedules. Undo this for dv analysis. */
case CODE_FOR_epilogue_deallocate_stack:
+ case CODE_FOR_prologue_allocate_stack:
pat = XVECEXP (pat, 0, 0);
break;
@@ -5236,21 +5241,22 @@ ia64_single_set (insn)
x = COND_EXEC_CODE (x);
if (GET_CODE (x) == SET)
return x;
- ret = single_set_2 (insn, x);
- if (ret == NULL && GET_CODE (x) == PARALLEL)
- {
- /* Special case here prologue_allocate_stack and
- epilogue_deallocate_stack. Although it is not a classical
- single set, the second set is there just to protect it
- from moving past FP-relative stack accesses. */
- if (XVECLEN (x, 0) == 2
- && GET_CODE (XVECEXP (x, 0, 0)) == SET
- && GET_CODE (XVECEXP (x, 0, 1)) == SET
- && GET_CODE (SET_DEST (XVECEXP (x, 0, 1))) == REG
- && SET_DEST (XVECEXP (x, 0, 1)) == SET_SRC (XVECEXP (x, 0, 1))
- && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
- ret = XVECEXP (x, 0, 0);
+
+ /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
+     Although they are not classical single sets, the second set is there just
+ to protect it from moving past FP-relative stack accesses. */
+ switch (recog_memoized (insn))
+ {
+ case CODE_FOR_prologue_allocate_stack:
+ case CODE_FOR_epilogue_deallocate_stack:
+ ret = XVECEXP (x, 0, 0);
+ break;
+
+ default:
+ ret = single_set_2 (insn, x);
+ break;
}
+
return ret;
}
@@ -5348,6 +5354,7 @@ ia64_adjust_cost (insn, link, dep_insn, cost)
if (reg_overlap_mentioned_p (SET_DEST (set), addr))
return cost + 1;
}
+
if ((dep_class == ITANIUM_CLASS_IALU
|| dep_class == ITANIUM_CLASS_ILOG
|| dep_class == ITANIUM_CLASS_LD)
@@ -5355,25 +5362,28 @@ ia64_adjust_cost (insn, link, dep_insn, cost)
|| insn_class == ITANIUM_CLASS_MMSHF
|| insn_class == ITANIUM_CLASS_MMSHFI))
return 3;
+
if (dep_class == ITANIUM_CLASS_FMAC
&& (insn_class == ITANIUM_CLASS_FMISC
|| insn_class == ITANIUM_CLASS_FCVTFX
|| insn_class == ITANIUM_CLASS_XMPY))
return 7;
+
if ((dep_class == ITANIUM_CLASS_FMAC
|| dep_class == ITANIUM_CLASS_FMISC
|| dep_class == ITANIUM_CLASS_FCVTFX
|| dep_class == ITANIUM_CLASS_XMPY)
&& insn_class == ITANIUM_CLASS_STF)
return 8;
+
+ /* Intel docs say only LD, ST, IALU, ILOG, ISHF consumers have latency 4,
+ but HP engineers say any non-MM operation. */
if ((dep_class == ITANIUM_CLASS_MMMUL
|| dep_class == ITANIUM_CLASS_MMSHF
|| dep_class == ITANIUM_CLASS_MMSHFI)
- && (insn_class == ITANIUM_CLASS_LD
- || insn_class == ITANIUM_CLASS_ST
- || insn_class == ITANIUM_CLASS_IALU
- || insn_class == ITANIUM_CLASS_ILOG
- || insn_class == ITANIUM_CLASS_ISHF))
+ && insn_class != ITANIUM_CLASS_MMMUL
+ && insn_class != ITANIUM_CLASS_MMSHF
+ && insn_class != ITANIUM_CLASS_MMSHFI)
return 4;
return cost;
@@ -5475,32 +5485,6 @@ ia64_emit_insn_before (insn, before)
emit_insn_before (insn, before);
}
-#if 0
-/* Generate a nop insn of the given type. Note we never generate L type
- nops. */
-
-static rtx
-gen_nop_type (t)
- enum attr_type t;
-{
- switch (t)
- {
- case TYPE_M:
- return gen_nop_m ();
- case TYPE_I:
- return gen_nop_i ();
- case TYPE_B:
- return gen_nop_b ();
- case TYPE_F:
- return gen_nop_f ();
- case TYPE_X:
- return gen_nop_x ();
- default:
- abort ();
- }
-}
-#endif
-
/* When rotating a bundle out of the issue window, insert a bundle selector
insn in front of it. DUMP is the scheduling dump file or NULL. START
is either 0 or 3, depending on whether we want to emit a bundle selector
@@ -5565,8 +5549,8 @@ cycle_end_fill_slots (dump)
if (slot > sched_data.split)
abort ();
if (dump)
- fprintf (dump, "// Packet needs %s, have %s\n", type_names[packet->t[slot]],
- type_names[t]);
+ fprintf (dump, "// Packet needs %s, have %s\n",
+ type_names[packet->t[slot]], type_names[t]);
sched_data.types[slot] = packet->t[slot];
sched_data.insns[slot] = 0;
sched_data.stopbit[slot] = 0;
@@ -5578,15 +5562,22 @@ cycle_end_fill_slots (dump)
slot++;
}
+
/* Do _not_ use T here. If T == TYPE_A, then we'd risk changing the
actual slot type later. */
sched_data.types[slot] = packet->t[slot];
sched_data.insns[slot] = tmp_insns[i];
sched_data.stopbit[slot] = 0;
slot++;
+
/* TYPE_L instructions always fill up two slots. */
if (t == TYPE_L)
- slot++;
+ {
+ sched_data.types[slot] = packet->t[slot];
+ sched_data.insns[slot] = 0;
+ sched_data.stopbit[slot] = 0;
+ slot++;
+ }
}
/* This isn't right - there's no need to pad out until the forced split;
@@ -5629,6 +5620,8 @@ rotate_one_bundle (dump)
memmove (sched_data.insns,
sched_data.insns + 3,
sched_data.cur * sizeof *sched_data.insns);
+ sched_data.packet
+ = &packets[(sched_data.packet->t2 - bundle) * NR_BUNDLES];
}
else
{
@@ -6060,6 +6053,7 @@ static void
maybe_rotate (dump)
FILE *dump;
{
+ cycle_end_fill_slots (dump);
if (sched_data.cur == 6)
rotate_two_bundles (dump);
else if (sched_data.cur >= 3)
@@ -6074,12 +6068,6 @@ static int prev_cycle;
value of sched_data.first_slot. */
static int prev_first;
-/* The last insn that has been scheduled. At the start of a new cycle
- we know that we can emit new insns after it; the main scheduling code
- has already emitted a cycle_display insn after it and is using that
- as its current last insn. */
-static rtx last_issued;
-
/* Emit NOPs to fill the delay between PREV_CYCLE and CLOCK_VAR. Used to
pad out the delay between MM (shifts, etc.) and integer operations. */
@@ -6090,12 +6078,13 @@ nop_cycles_until (clock_var, dump)
{
int prev_clock = prev_cycle;
int cycles_left = clock_var - prev_clock;
+ bool did_stop = false;
/* Finish the previous cycle; pad it out with NOPs. */
if (sched_data.cur == 3)
{
- rtx t = gen_insn_group_barrier (GEN_INT (3));
- last_issued = emit_insn_after (t, last_issued);
+ sched_emit_insn (gen_insn_group_barrier (GEN_INT (3)));
+ did_stop = true;
maybe_rotate (dump);
}
else if (sched_data.cur > 0)
@@ -6114,12 +6103,9 @@ nop_cycles_until (clock_var, dump)
int i;
for (i = sched_data.cur; i < split; i++)
{
- rtx t;
-
- t = gen_nop_type (sched_data.packet->t[i]);
- last_issued = emit_insn_after (t, last_issued);
- sched_data.types[i] = sched_data.packet->t[sched_data.cur];
- sched_data.insns[i] = last_issued;
+ rtx t = sched_emit_insn (gen_nop_type (sched_data.packet->t[i]));
+ sched_data.types[i] = sched_data.packet->t[i];
+ sched_data.insns[i] = t;
sched_data.stopbit[i] = 0;
}
sched_data.cur = split;
@@ -6131,12 +6117,9 @@ nop_cycles_until (clock_var, dump)
int i;
for (i = sched_data.cur; i < 6; i++)
{
- rtx t;
-
- t = gen_nop_type (sched_data.packet->t[i]);
- last_issued = emit_insn_after (t, last_issued);
- sched_data.types[i] = sched_data.packet->t[sched_data.cur];
- sched_data.insns[i] = last_issued;
+ rtx t = sched_emit_insn (gen_nop_type (sched_data.packet->t[i]));
+ sched_data.types[i] = sched_data.packet->t[i];
+ sched_data.insns[i] = t;
sched_data.stopbit[i] = 0;
}
sched_data.cur = 6;
@@ -6146,8 +6129,8 @@ nop_cycles_until (clock_var, dump)
if (need_stop || sched_data.cur == 6)
{
- rtx t = gen_insn_group_barrier (GEN_INT (3));
- last_issued = emit_insn_after (t, last_issued);
+ sched_emit_insn (gen_insn_group_barrier (GEN_INT (3)));
+ did_stop = true;
}
maybe_rotate (dump);
}
@@ -6155,24 +6138,22 @@ nop_cycles_until (clock_var, dump)
cycles_left--;
while (cycles_left > 0)
{
- rtx t = gen_bundle_selector (GEN_INT (0));
- last_issued = emit_insn_after (t, last_issued);
- t = gen_nop_type (TYPE_M);
- last_issued = emit_insn_after (t, last_issued);
- t = gen_nop_type (TYPE_I);
- last_issued = emit_insn_after (t, last_issued);
+ sched_emit_insn (gen_bundle_selector (GEN_INT (0)));
+ sched_emit_insn (gen_nop_type (TYPE_M));
+ sched_emit_insn (gen_nop_type (TYPE_I));
if (cycles_left > 1)
{
- t = gen_insn_group_barrier (GEN_INT (2));
- last_issued = emit_insn_after (t, last_issued);
+ sched_emit_insn (gen_insn_group_barrier (GEN_INT (2)));
cycles_left--;
}
- t = gen_nop_type (TYPE_I);
- last_issued = emit_insn_after (t, last_issued);
- t = gen_insn_group_barrier (GEN_INT (3));
- last_issued = emit_insn_after (t, last_issued);
+ sched_emit_insn (gen_nop_type (TYPE_I));
+ sched_emit_insn (gen_insn_group_barrier (GEN_INT (3)));
+ did_stop = true;
cycles_left--;
}
+
+ if (did_stop)
+ init_insn_group_barriers ();
}
/* We are about to begin issuing insns for this clock cycle.
@@ -6198,31 +6179,34 @@ ia64_internal_sched_reorder (dump, sched_verbose, ready, pn_ready,
dump_current_packet (dump);
}
+  /* Work around the pipeline flush that will occur if the results of
+ an MM instruction are accessed before the result is ready. Intel
+ documentation says this only happens with IALU, ISHF, ILOG, LD,
+ and ST consumers, but experimental evidence shows that *any* non-MM
+     type instruction will incur the flush. */
if (reorder_type == 0 && clock_var > 0 && ia64_final_schedule)
{
for (insnp = ready; insnp < e_ready; insnp++)
{
- rtx insn = *insnp;
+ rtx insn = *insnp, link;
enum attr_itanium_class t = ia64_safe_itanium_class (insn);
- if (t == ITANIUM_CLASS_IALU || t == ITANIUM_CLASS_ISHF
- || t == ITANIUM_CLASS_ILOG
- || t == ITANIUM_CLASS_LD || t == ITANIUM_CLASS_ST)
- {
- rtx link;
- for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
- if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT
- && REG_NOTE_KIND (link) != REG_DEP_ANTI)
+
+ if (t == ITANIUM_CLASS_MMMUL
+ || t == ITANIUM_CLASS_MMSHF
+ || t == ITANIUM_CLASS_MMSHFI)
+ continue;
+
+ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == 0)
+ {
+ rtx other = XEXP (link, 0);
+ enum attr_itanium_class t0 = ia64_safe_itanium_class (other);
+ if (t0 == ITANIUM_CLASS_MMSHF || t0 == ITANIUM_CLASS_MMMUL)
{
- rtx other = XEXP (link, 0);
- enum attr_itanium_class t0 = ia64_safe_itanium_class (other);
- if (t0 == ITANIUM_CLASS_MMSHF
- || t0 == ITANIUM_CLASS_MMMUL)
- {
- nop_cycles_until (clock_var, sched_verbose ? dump : NULL);
- goto out;
- }
+ nop_cycles_until (clock_var, sched_verbose ? dump : NULL);
+ goto out;
}
- }
+ }
}
}
out:
@@ -6486,8 +6470,6 @@ ia64_variable_issue (dump, sched_verbose, insn, can_issue_more)
{
enum attr_type t = ia64_safe_type (insn);
- last_issued = insn;
-
if (sched_data.last_was_stop)
{
int t = sched_data.first_slot;
@@ -6833,13 +6815,33 @@ ia64_epilogue_uses (regno)
}
}
-/* Table of valid machine attributes. */
-const struct attribute_spec ia64_attribute_table[] =
+/* Return true if REGNO is used by the frame unwinder. */
+
+int
+ia64_eh_uses (regno)
+ int regno;
{
- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
- { "syscall_linkage", 0, 0, false, true, true, NULL },
- { NULL, 0, 0, false, false, false, NULL }
-};
+ if (! reload_completed)
+ return 0;
+
+ if (current_frame_info.reg_save_b0
+ && regno == current_frame_info.reg_save_b0)
+ return 1;
+ if (current_frame_info.reg_save_pr
+ && regno == current_frame_info.reg_save_pr)
+ return 1;
+ if (current_frame_info.reg_save_ar_pfs
+ && regno == current_frame_info.reg_save_ar_pfs)
+ return 1;
+ if (current_frame_info.reg_save_ar_unat
+ && regno == current_frame_info.reg_save_ar_unat)
+ return 1;
+ if (current_frame_info.reg_save_ar_lc
+ && regno == current_frame_info.reg_save_ar_lc)
+ return 1;
+
+ return 0;
+}
/* For ia64, SYMBOL_REF_FLAG set means that it is a function.
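Among the ia64.c changes above, the hfa_element_mode fix recurses through nested arrays instead of returning the mode of the immediate element type, so multi-dimensional float arrays still classify as homogeneous floating-point aggregates (HFAs). A hedged illustration (the type name is assumed):

    /* With the recursive fix, every leaf element is SFmode, so this
       remains an HFA and is returned in FP registers.  */
    struct mat2
    {
      float m[2][2];
    };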
diff --git a/contrib/gcc/config/ia64/ia64.h b/contrib/gcc/config/ia64/ia64.h
index 1900717..f69983b 100644
--- a/contrib/gcc/config/ia64/ia64.h
+++ b/contrib/gcc/config/ia64/ia64.h
@@ -31,12 +31,19 @@ Boston, MA 02111-1307, USA. */
/* Run-time target specifications */
-#define CPP_CPU_SPEC "\
- -Acpu=ia64 -Amachine=ia64 \
- %{!ansi:%{!std=c*:%{!std=i*:-Dia64}}} -D__ia64 -D__ia64__"
+#define EXTRA_SPECS \
+ { "cpp_cpu", CPP_CPU_SPEC }, \
+ { "asm_extra", ASM_EXTRA_SPEC },
+
+#define CPP_CPU_SPEC " \
+ -Acpu=ia64 -Amachine=ia64 -D__ia64 -D__ia64__ %{!milp32:-D_LP64 -D__LP64__} \
+ -D__ELF__"
#define CC1_SPEC "%(cc1_cpu) "
+#define ASM_EXTRA_SPEC ""
+
+
/* This declaration should be present. */
extern int target_flags;
@@ -203,6 +210,7 @@ extern const char *ia64_fixed_range_string;
defines in other tm.h files. */
#define CPP_SPEC \
"%{mcpu=itanium:-D__itanium__} %{mbig-endian:-D__BIG_ENDIAN__} \
+ %(cpp_cpu) \
-D__LONG_MAX__=9223372036854775807L"
/* This is always "long" so it doesn't "change" in ILP32 vs. LP64. */
@@ -340,7 +348,7 @@ while (0)
/* By default, the C++ compiler will use function addresses in the
vtable entries. Setting this non-zero tells the compiler to use
function descriptors instead. The value of this macro says how
- many words wide the descriptor is (normally 2). It is assumed
+ many words wide the descriptor is (normally 2). It is assumed
that the address of a function descriptor may be treated as a
pointer to a function. */
#define TARGET_VTABLE_USES_DESCRIPTORS 2
@@ -397,7 +405,7 @@ while (0)
/* Register Basics */
-/* Number of hardware registers known to the compiler.
+/* Number of hardware registers known to the compiler.
We have 128 general registers, 128 floating point registers,
64 predicate registers, 8 branch registers, one frame pointer,
and several "application" registers. */
@@ -459,7 +467,7 @@ while (0)
f0: constant 0.0
f1: constant 1.0
p0: constant true
- fp: eliminable frame pointer */
+ fp: eliminable frame pointer */
/* The last 16 stacked regs are reserved for the 8 input and 8 output
registers. */
@@ -529,12 +537,12 @@ while (0)
1, 1, 1, 1, 1, 0, 1 \
}
-/* Like `CALL_USED_REGISTERS' but used to overcome a historical
+/* Like `CALL_USED_REGISTERS' but used to overcome a historical
problem which makes CALL_USED_REGISTERS *always* include
- all the FIXED_REGISTERS. Until this problem has been
+ all the FIXED_REGISTERS. Until this problem has been
resolved this macro can be used to overcome this situation.
- In particular, block_propagate() requires this list
- be acurate, or we can remove registers which should be live.
+ In particular, block_propagate() requires this list
+   be accurate, or we can remove registers which should be live.
This macro is used in regs_invalidated_by_call. */
#define CALL_REALLY_USED_REGISTERS \
@@ -1151,6 +1159,14 @@ enum reg_class
in it. */
#define ARG_POINTER_REGNUM R_GR(0)
+/* Due to the way varargs and argument spilling happen, the argument
+ pointer is not 16-byte aligned like the stack pointer. */
+#define INIT_EXPANDERS \
+ do { \
+ if (cfun && cfun->emit->regno_pointer_align) \
+ REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) = 64; \
+ } while (0)
+
/* The register number for the return address register. For IA-64, this
is not actually a pointer as the name suggests, but that's a name that
gen_rtx_REG already takes care to keep unique. We modify
@@ -1258,7 +1274,8 @@ enum reg_class
pointer is passed in whatever way is appropriate for passing a pointer to
that type. */
-#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) 0
+#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) \
+ ia64_function_arg_pass_by_reference (&CUM, MODE, TYPE, NAMED)
/* A C type for declaring a variable that is used as the first argument of
`FUNCTION_ARG' and other related values. For some target machines, the type
@@ -1267,6 +1284,7 @@ enum reg_class
typedef struct ia64_args
{
int words; /* # words of arguments so far */
+ int int_regs; /* # GR registers used so far */
int fp_regs; /* # FR registers used so far */
int prototype; /* whether function prototyped */
} CUMULATIVE_ARGS;
@@ -1277,6 +1295,7 @@ typedef struct ia64_args
#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
do { \
(CUM).words = 0; \
+ (CUM).int_regs = 0; \
(CUM).fp_regs = 0; \
(CUM).prototype = ((FNTYPE) && TYPE_ARG_TYPES (FNTYPE)) || (LIBNAME); \
} while (0)
@@ -1290,6 +1309,7 @@ do { \
#define INIT_CUMULATIVE_INCOMING_ARGS(CUM, FNTYPE, LIBNAME) \
do { \
(CUM).words = 0; \
+ (CUM).int_regs = 0; \
(CUM).fp_regs = 0; \
(CUM).prototype = 1; \
} while (0)
@@ -1355,7 +1375,7 @@ do { \
#define FUNCTION_VALUE_REGNO_P(REGNO) \
(((REGNO) >= GR_RET_FIRST && (REGNO) <= GR_RET_LAST) \
- || ((REGNO) >= FR_RET_FIRST && (REGNO) <= FR_RET_LAST))
+ || ((REGNO) >= FR_RET_FIRST && (REGNO) <= FR_RET_LAST))
/* How Large Values are Returned */
@@ -1404,6 +1424,10 @@ do { \
#define EPILOGUE_USES(REGNO) ia64_epilogue_uses (REGNO)
+/* Nonzero for registers used by the exception handling mechanism. */
+
+#define EH_USES(REGNO) ia64_eh_uses (REGNO)
+
/* Output at beginning of assembler file. */
#define ASM_FILE_START(FILE) \
@@ -1722,7 +1746,7 @@ do { \
|| (CLASS) == GR_AND_FR_REGS ? 4 : 10)
/* A C expression for the cost of a branch instruction. A value of 1 is the
- default; other values are interpreted relative to that. Used by the
+ default; other values are interpreted relative to that. Used by the
if-conversion code as max instruction count. */
/* ??? This requires investigation. The primary effect might be how
many additional insn groups we run into, vs how good the dynamic
@@ -2273,7 +2297,7 @@ do { \
fprintf (FILE, "[.%s%d:]\n", PREFIX, NUM)
/* Use section-relative relocations for debugging offsets. Unlike other
- targets that fake this by putting the section VMA at 0, IA-64 has
+ targets that fake this by putting the section VMA at 0, IA-64 has
proper relocations for them. */
#define ASM_OUTPUT_DWARF_OFFSET(FILE, SIZE, LABEL) \
do { \
@@ -2527,4 +2551,11 @@ enum fetchop_code {
IA64_ADD_OP, IA64_SUB_OP, IA64_OR_OP, IA64_AND_OP, IA64_XOR_OP, IA64_NAND_OP
};
+#define DONT_USE_BUILTIN_SETJMP
+
+/* Output any profiling code before the prologue. */
+
+#undef PROFILE_BEFORE_PROLOGUE
+#define PROFILE_BEFORE_PROLOGUE 1
+
/* End of ia64.h */
diff --git a/contrib/gcc/config/ia64/ia64.md b/contrib/gcc/config/ia64/ia64.md
index c88e8b0..7b11c06 100644
--- a/contrib/gcc/config/ia64/ia64.md
+++ b/contrib/gcc/config/ia64/ia64.md
@@ -4848,7 +4848,7 @@
[(set (match_operand:DI 0 "register_operand" "=r,r,r")
(plus:DI (match_operand:DI 1 "register_operand" "%r,r,a")
(match_operand:DI 2 "gr_reg_or_22bit_operand" "r,I,J")))
- (set (match_operand:DI 3 "register_operand" "=r,r,r")
+ (set (match_operand:DI 3 "register_operand" "+r,r,r")
(match_dup 3))]
""
"@
@@ -5045,6 +5045,37 @@
[(set_attr "itanium_class" "stop_bit")
(set_attr "predicable" "no")])
+(define_expand "trap"
+ [(trap_if (const_int 1) (const_int 0))]
+ ""
+ "")
+
+;; ??? We don't have a match-any slot type. Setting the type to unknown
+;; produces worse code than setting the slot type to A.
+
+(define_insn "*trap"
+ [(trap_if (const_int 1) (match_operand 0 "const_int_operand" ""))]
+ ""
+ "break %0"
+ [(set_attr "itanium_class" "chk_s")])
+
+(define_expand "conditional_trap"
+ [(trap_if (match_operand 0 "" "") (match_operand 1 "" ""))]
+ ""
+{
+ operands[0] = ia64_expand_compare (GET_CODE (operands[0]), VOIDmode);
+})
+
+(define_insn "*conditional_trap"
+ [(trap_if (match_operator 0 "predicate_operator"
+ [(match_operand:BI 1 "register_operand" "c")
+ (const_int 0)])
+ (match_operand 2 "const_int_operand" ""))]
+ ""
+ "(%J0) break %2"
+ [(set_attr "itanium_class" "chk_s")
+ (set_attr "predicable" "no")])
+
(define_insn "break_f"
[(unspec_volatile [(const_int 0)] 3)]
""
diff --git a/contrib/gcc/config/ia64/linux.h b/contrib/gcc/config/ia64/linux.h
index 1889ef6..3091852 100644
--- a/contrib/gcc/config/ia64/linux.h
+++ b/contrib/gcc/config/ia64/linux.h
@@ -11,12 +11,8 @@
/* ??? Maybe this should be in sysv4.h? */
#define CPP_PREDEFINES "\
--D__ia64 -D__ia64__ -D__linux -D__linux__ -D_LONGLONG -Dlinux -Dunix \
--D__LP64__ -D__ELF__ -Asystem=linux -Acpu=ia64 -Amachine=ia64"
-
-/* ??? ia64 gas doesn't accept standard svr4 assembler options? */
-#undef ASM_SPEC
-#define ASM_SPEC "-x %{mconstant-gp} %{mauto-pic}"
+ -D__gnu_linux__ -D__linux -D__linux__ -D_LONGLONG \
+ -Dlinux -Dunix -Asystem=linux"
/* Need to override linux.h STARTFILE_SPEC, since it has crtbeginT.o in. */
#undef STARTFILE_SPEC
@@ -46,14 +42,8 @@
%{static:-static}}"
-#define DONT_USE_BUILTIN_SETJMP
#define JMP_BUF_SIZE 76
-/* Output any profiling code before the prologue. */
-
-#undef PROFILE_BEFORE_PROLOGUE
-#define PROFILE_BEFORE_PROLOGUE 1
-
/* Override linux.h LINK_EH_SPEC definition.
Signalize that because we have fde-glibc, we don't need all C shared libs
linked against -lgcc_s. */
@@ -98,10 +88,16 @@
(CONTEXT)->pfs_loc = &(sc_->sc_ar_pfs); \
(CONTEXT)->lc_loc = &(sc_->sc_ar_lc); \
(CONTEXT)->unat_loc = &(sc_->sc_ar_unat); \
+ (CONTEXT)->br_loc[0] = &(sc_->sc_br[0]); \
+ (CONTEXT)->bsp = sc_->sc_ar_bsp; \
(CONTEXT)->pr = sc_->sc_pr; \
(CONTEXT)->psp = sc_->sc_gr[12]; \
+ (CONTEXT)->gp = sc_->sc_gr[1]; \
+ /* The signal frame doesn't have an associated register stack \
+ frame other than what we adjust for below. */ \
+ (FS)->no_reg_stack_frame = 1; \
\
- /* Don't touch the branch registers. The kernel doesn't \
+ /* Don't touch the branch registers other than b0. The kernel doesn't \
pass the preserved branch registers in the sigcontext but \
leaves them intact, so there's no need to do anything \
with them here. */ \
diff --git a/contrib/gcc/config/ia64/sysv4.h b/contrib/gcc/config/ia64/sysv4.h
index 1b5d469..c53a1dc 100644
--- a/contrib/gcc/config/ia64/sysv4.h
+++ b/contrib/gcc/config/ia64/sysv4.h
@@ -22,6 +22,11 @@
#undef ASCII_DATA_ASM_OP
#define ASCII_DATA_ASM_OP "\tstring\t"
+/* ia64-specific options for gas
+ ??? ia64 gas doesn't accept standard svr4 assembler options? */
+#undef ASM_SPEC
+#define ASM_SPEC "-x %{mconstant-gp} %{mauto-pic} %(asm_extra)"
+
/* ??? Unfortunately, .lcomm doesn't work, because it puts things in either
.bss or .sbss, and we can't control the decision of which is used. When
I use .lcomm, I get a cryptic "Section group has no member" error from
diff --git a/contrib/gcc/config/ia64/unwind-ia64.c b/contrib/gcc/config/ia64/unwind-ia64.c
index 99923aa..ca91539 100644
--- a/contrib/gcc/config/ia64/unwind-ia64.c
+++ b/contrib/gcc/config/ia64/unwind-ia64.c
@@ -35,6 +35,10 @@
#include "tsystem.h"
#include "unwind.h"
#include "unwind-ia64.h"
+#include "ia64intrin.h"
+
+/* This isn't thread safe, but nice for occasional tests. */
+#undef ENABLE_MALLOC_CHECKING
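+/* To enable the checks, turn the #undef above into a #define;
+   uw_frame_state_for will then abort if any unw_reg_state or
+   unw_labeled_state allocations are leaked. */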
#ifndef __USING_SJLJ_EXCEPTIONS__
#define UNW_VER(x) ((x) >> 48)
@@ -121,13 +125,24 @@ struct unw_reg_info
int when; /* when the register gets saved */
};
+struct unw_reg_state {
+ struct unw_reg_state *next; /* next (outer) element on state stack */
+ struct unw_reg_info reg[UNW_NUM_REGS]; /* register save locations */
+};
+
+struct unw_labeled_state {
+ struct unw_labeled_state *next; /* next labeled state (or NULL) */
+ unsigned long label; /* label for this state */
+ struct unw_reg_state saved_state;
+};
+
typedef struct unw_state_record
{
unsigned int first_region : 1; /* is this the first region? */
unsigned int done : 1; /* are we done scanning descriptors? */
unsigned int any_spills : 1; /* got any register spills? */
unsigned int in_body : 1; /* are we inside a body? */
-
+ unsigned int no_reg_stack_frame : 1; /* Don't adjust bsp for input/local regs */
unsigned char *imask; /* imask of spill_mask record or NULL */
unsigned long pr_val; /* predicate values */
unsigned long pr_mask; /* predicate mask */
@@ -141,11 +156,8 @@ typedef struct unw_state_record
unsigned char gr_save_loc; /* next general register to use for saving */
unsigned char return_link_reg; /* branch register for return link */
- struct unw_reg_state {
- struct unw_reg_state *next;
- unsigned long label; /* label of this state record */
- struct unw_reg_info reg[UNW_NUM_REGS];
- } curr, *stack, *reg_state_list;
+ struct unw_labeled_state *labeled_states; /* list of all labeled states */
+ struct unw_reg_state curr; /* current state */
_Unwind_Personality_Fn personality;
@@ -184,9 +196,12 @@ struct _Unwind_Context
void *lsda; /* language specific data area */
/* Preserved state. */
- unsigned long *bsp_loc; /* previous bsp save location */
+ unsigned long *bsp_loc; /* previous bsp save location
+ Appears to be write-only? */
unsigned long *bspstore_loc;
- unsigned long *pfs_loc;
+ unsigned long *pfs_loc; /* Save location for pfs in the current
+ frame (the one corresponding to sp).
+ The target contains the cfm for the
+ caller. */
unsigned long *pri_unat_loc;
unsigned long *unat_loc;
unsigned long *lc_loc;
@@ -226,28 +241,196 @@ static unsigned char const save_order[] =
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
-/* Unwind decoder routines */
+/* MASK is a bitmap describing the allocation state of emergency buffers,
+   with a set bit marking a free buffer.  Return the allocated index (>= 0)
+   on success, or a value < 0 on failure. */
-static void
-push (struct unw_state_record *sr)
+static inline int
+atomic_alloc (unsigned int *mask)
+{
+ unsigned int old = *mask, ret, new;
+
+ while (1)
+ {
+ if (old == 0)
+ return -1;
+ ret = old & -old;
+ new = old & ~ret;
+ new = __sync_val_compare_and_swap (mask, old, new);
+ if (old == new)
+ break;
+ old = new;
+ }
+
+ return __builtin_ffs (ret) - 1;
+}
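+
+/* Example: with *mask == 0x6 (buffers 1 and 2 free), old & -old isolates
+   the lowest set bit, 0x2; the CAS publishes the new mask 0x4; and the
+   function returns __builtin_ffs (0x2) - 1 == 1. */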
+
+/* Similarly, free an emergency buffer. */
+
+static inline void
+atomic_free (unsigned int *mask, int bit)
+{
+ __sync_xor_and_fetch (mask, 1 << bit);
+}
+
+
+#define SIZE(X) (sizeof(X) / sizeof(*(X)))
+#define MASK_FOR(X) ((2U << (SIZE (X) - 1)) - 1)
+#define PTR_IN(X, P) ((P) >= (X) && (P) < (X) + SIZE (X))
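+/* For instance, MASK_FOR (emergency_labeled_state) below evaluates to
+   (2U << 7) - 1 == 0xff, marking all eight emergency buffers free. */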
+
+static struct unw_reg_state emergency_reg_state[32];
+static int emergency_reg_state_free = MASK_FOR (emergency_reg_state);
+
+static struct unw_labeled_state emergency_labeled_state[8];
+static int emergency_labeled_state_free = MASK_FOR (emergency_labeled_state);
+
+#ifdef ENABLE_MALLOC_CHECKING
+static int reg_state_alloced;
+static int labeled_state_alloced;
+#endif
+
+/* Allocation and deallocation of structures. */
+
+static struct unw_reg_state *
+alloc_reg_state (void)
{
struct unw_reg_state *rs;
+#ifdef ENABLE_MALLOC_CHECKING
+ reg_state_alloced++;
+#endif
+
rs = malloc (sizeof (struct unw_reg_state));
+ if (!rs)
+ {
+ int n = atomic_alloc (&emergency_reg_state_free);
+ if (n >= 0)
+ rs = &emergency_reg_state[n];
+ }
+
+ return rs;
+}
+
+static void
+free_reg_state (struct unw_reg_state *rs)
+{
+#ifdef ENABLE_MALLOC_CHECKING
+ reg_state_alloced--;
+#endif
+
+ if (PTR_IN (emergency_reg_state, rs))
+ atomic_free (&emergency_reg_state_free, rs - emergency_reg_state);
+ else
+ free (rs);
+}
+
+static struct unw_labeled_state *
+alloc_label_state (void)
+{
+ struct unw_labeled_state *ls;
+
+#ifdef ENABLE_MALLOC_CHECKING
+ labeled_state_alloced++;
+#endif
+
+ ls = malloc (sizeof (struct unw_labeled_state));
+ if (!ls)
+ {
+ int n = atomic_alloc (&emergency_labeled_state_free);
+ if (n >= 0)
+ ls = &emergency_labeled_state[n];
+ }
+
+ return ls;
+}
+
+static void
+free_label_state (struct unw_labeled_state *ls)
+{
+#ifdef ENABLE_MALLOC_CHECKING
+ labeled_state_alloced--;
+#endif
+
+ if (PTR_IN (emergency_labeled_state, ls))
+ atomic_free (&emergency_labeled_state_free, ls - emergency_labeled_state);
+ else
+ free (ls);
+}
+
+/* Routines to manipulate the state stack.  SR->curr always acts as the
+   top of the stack: push snapshots it into a fresh node hung off
+   curr.next, and pop copies that node back over curr. */
+
+static void
+push (struct unw_state_record *sr)
+{
+ struct unw_reg_state *rs = alloc_reg_state ();
memcpy (rs, &sr->curr, sizeof (*rs));
- rs->next = sr->stack;
- sr->stack = rs;
+ sr->curr.next = rs;
}
static void
pop (struct unw_state_record *sr)
{
- struct unw_reg_state *rs;
+ struct unw_reg_state *rs = sr->curr.next;
+
+ if (!rs)
+ abort ();
+ memcpy (&sr->curr, rs, sizeof (*rs));
+ free_reg_state (rs);
+}
+
+/* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
+
+static struct unw_reg_state *
+dup_state_stack (struct unw_reg_state *rs)
+{
+ struct unw_reg_state *copy, *prev = NULL, *first = NULL;
+
+ while (rs)
+ {
+ copy = alloc_reg_state ();
+ memcpy (copy, rs, sizeof (*copy));
+ if (first)
+ prev->next = copy;
+ else
+ first = copy;
+ rs = rs->next;
+ prev = copy;
+ }
+
+ return first;
+}
+
+/* Free all stacked register states (but not RS itself). */
+
+static void
+free_state_stack (struct unw_reg_state *rs)
+{
+ struct unw_reg_state *p, *next;
- rs = sr->stack;
- sr->stack = rs->next;
- free (rs);
+ for (p = rs->next; p != NULL; p = next)
+ {
+ next = p->next;
+ free_reg_state (p);
+ }
+ rs->next = NULL;
+}
+
+/* Free all labeled states. */
+
+static void
+free_label_states (struct unw_labeled_state *ls)
+{
+ struct unw_labeled_state *next;
+
+ for (; ls ; ls = next)
+ {
+ next = ls->next;
+
+ free_state_stack (&ls->saved_state);
+ free_label_state (ls);
+ }
}
+
+/* Unwind decoder routines */
static enum unw_register_index __attribute__((const))
decode_abreg (unsigned char abreg, int memory)
@@ -295,8 +478,8 @@ alloc_spill_area (unsigned long *offp, unsigned long regsize,
if (reg->where == UNW_WHERE_SPILL_HOME)
{
reg->where = UNW_WHERE_PSPREL;
- reg->val = 0x10 - *offp;
- *offp += regsize;
+ *offp -= regsize;
+ reg->val = *offp;
}
}
}
@@ -330,7 +513,7 @@ finish_prologue (struct unw_state_record *sr)
/* First, resolve implicit register save locations
(see Section "11.4.2.3 Rules for Using Unwind Descriptors", rule 3). */
- for (i = 0; i < (int) sizeof(save_order); ++i)
+ for (i = 0; i < (int) sizeof (save_order); ++i)
{
reg = sr->curr.reg + save_order[i];
if (reg->where == UNW_WHERE_GR_SAVE)
@@ -363,8 +546,8 @@ finish_prologue (struct unw_state_record *sr)
mask = *cp++;
kind = (mask >> 2*(3-(t & 3))) & 3;
if (kind > 0)
- spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
- sr->region_start + t);
+ spill_next_when (&regs[kind - 1], sr->curr.reg + limit[kind - 1],
+ sr->region_start + t);
}
}
@@ -372,12 +555,12 @@ finish_prologue (struct unw_state_record *sr)
if (sr->any_spills)
{
off = sr->spill_offset;
- alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2,
- sr->curr.reg + UNW_REG_F31);
- alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1,
- sr->curr.reg + UNW_REG_B5);
- alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4,
- sr->curr.reg + UNW_REG_R7);
+ alloc_spill_area (&off, 16, sr->curr.reg + UNW_REG_F2,
+ sr->curr.reg + UNW_REG_F31);
+ alloc_spill_area (&off, 8, sr->curr.reg + UNW_REG_B1,
+ sr->curr.reg + UNW_REG_B5);
+ alloc_spill_area (&off, 8, sr->curr.reg + UNW_REG_R4,
+ sr->curr.reg + UNW_REG_R7);
}
}
@@ -392,23 +575,24 @@ desc_prologue (int body, unw_word rlen, unsigned char mask,
int i;
if (!(sr->in_body || sr->first_region))
- finish_prologue(sr);
+ finish_prologue (sr);
sr->first_region = 0;
/* Check if we're done. */
- if (body && sr->when_target < sr->region_start + sr->region_len)
+ if (sr->when_target < sr->region_start + sr->region_len)
{
sr->done = 1;
return;
}
for (i = 0; i < sr->epilogue_count; ++i)
- pop(sr);
+ pop (sr);
+
sr->epilogue_count = 0;
sr->epilogue_start = UNW_WHEN_NEVER;
if (!body)
- push(sr);
+ push (sr);
sr->region_start += sr->region_len;
sr->region_len = rlen;
@@ -494,7 +678,8 @@ desc_frgr_mem (unsigned char grmask, unw_word frmask,
{
if ((frmask & 1) != 0)
{
- set_reg (sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
+ enum unw_register_index base = i < 4 ? UNW_REG_F2 : UNW_REG_F16 - 4;
+ set_reg (sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
sr->region_start + sr->region_len - 1, 0);
sr->any_spills = 1;
}
@@ -631,13 +816,15 @@ desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
static inline void
desc_copy_state (unw_word label, struct unw_state_record *sr)
{
- struct unw_reg_state *rs;
+ struct unw_labeled_state *ls;
- for (rs = sr->reg_state_list; rs; rs = rs->next)
+ for (ls = sr->labeled_states; ls; ls = ls->next)
{
- if (rs->label == label)
- {
- memcpy (&sr->curr, rs, sizeof(sr->curr));
+ if (ls->label == label)
+ {
+ free_state_stack (&sr->curr);
+ memcpy (&sr->curr, &ls->saved_state, sizeof (sr->curr));
+ sr->curr.next = dup_state_stack (ls->saved_state.next);
return;
}
}
@@ -647,13 +834,15 @@ desc_copy_state (unw_word label, struct unw_state_record *sr)
static inline void
desc_label_state (unw_word label, struct unw_state_record *sr)
{
- struct unw_reg_state *rs;
+ struct unw_labeled_state *ls = alloc_label_state ();
- rs = malloc (sizeof (struct unw_reg_state));
- memcpy (rs, &sr->curr, sizeof (*rs));
- rs->label = label;
- rs->next = sr->reg_state_list;
- sr->reg_state_list = rs;
+ ls->label = label;
+ memcpy (&ls->saved_state, &sr->curr, sizeof (ls->saved_state));
+ ls->saved_state.next = dup_state_stack (sr->curr.next);
+
+ /* Insert into list of labeled states. */
+ ls->next = sr->labeled_states;
+ sr->labeled_states = ls;
}
/*
@@ -1461,8 +1650,11 @@ uw_frame_state_for (struct _Unwind_Context *context, _Unwind_FrameState *fs)
unsigned long *unw, header, length;
unsigned char *insn, *insn_end;
unsigned long segment_base;
+ struct unw_reg_info *r;
memset (fs, 0, sizeof (*fs));
+ for (r = fs->curr.reg; r < fs->curr.reg + UNW_NUM_REGS; ++r)
+ r->when = UNW_WHEN_NEVER;
context->lsda = 0;
ent = _Unwind_FindTableEntry ((void *) context->rp,
@@ -1518,6 +1710,14 @@ uw_frame_state_for (struct _Unwind_Context *context, _Unwind_FrameState *fs)
while (!fs->done && insn < insn_end)
insn = unw_decode (insn, fs->in_body, fs);
+ free_label_states (fs->labeled_states);
+ free_state_stack (&fs->curr);
+
+#ifdef ENABLE_MALLOC_CHECKING
+ if (reg_state_alloced || labeled_state_alloced)
+ abort ();
+#endif
+
/* If we're in the epilogue, sp has been restored and all values
on the memory stack below psp also have been restored. */
if (fs->when_target > fs->epilogue_start)
@@ -1578,7 +1778,7 @@ uw_update_reg_address (struct _Unwind_Context *context,
/* Note that while RVAL can only be 1-5 from normal descriptors,
we may want to look at B0 due to having manually unwound a
signal frame. */
- if (rval >= 0 && rval <= 5)
+ if (rval <= 5)
addr = context->br_loc[rval];
else
abort ();
@@ -1677,8 +1877,7 @@ uw_update_reg_address (struct _Unwind_Context *context,
context->psp = *(unsigned long *)addr;
break;
- case UNW_REG_RNAT:
- case UNW_NUM_REGS:
+ default:
abort ();
}
}
@@ -1720,7 +1919,10 @@ uw_update_context (struct _Unwind_Context *context, _Unwind_FrameState *fs)
/* Unwind BSP for the local registers allocated this frame. */
/* ??? What to do with stored BSP or BSPSTORE registers. */
- if (fs->when_target > fs->curr.reg[UNW_REG_PFS].when)
+ /* We assert that we are either at a call site, or we have
+ just unwound through a signal frame. In either case
+ pfs_loc is valid. */
+ if (!fs->no_reg_stack_frame)
{
unsigned long pfs = *context->pfs_loc;
unsigned long sol = (pfs >> 7) & 0x7f;
diff --git a/contrib/gcc/config/libgcc-glibc.ver b/contrib/gcc/config/libgcc-glibc.ver
new file mode 100644
index 0000000..837c1a7
--- /dev/null
+++ b/contrib/gcc/config/libgcc-glibc.ver
@@ -0,0 +1,23 @@
+# In order to work around the very problems that force us to now generally
+# create a libgcc.so, glibc reexported a number of routines from libgcc.a.
+# By now choosing the same version tags for these specific routines, we
+# maintain enough binary compatibility to allow future versions of glibc
+# to defer implementation of these routines to libgcc.so via DT_AUXILIARY.
+
+%inherit GCC_3.0 GLIBC_2.0
+GLIBC_2.0 {
+ # Sampling of DImode arithmetic used by (at least) i386 and m68k.
+ __divdi3
+ __moddi3
+ __udivdi3
+ __umoddi3
+
+ # Exception handling support functions used by most everyone.
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
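+
+# A target makefile fragment typically points the libgcc shared-library
+# link at this map with something like:
+#   SHLIB_MAPFILES = $(srcdir)/config/libgcc-glibc.ver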
diff --git a/contrib/gcc/config/netbsd.h b/contrib/gcc/config/netbsd.h
index fba341b..227942c 100644
--- a/contrib/gcc/config/netbsd.h
+++ b/contrib/gcc/config/netbsd.h
@@ -109,3 +109,16 @@
/* Handle #pragma weak and #pragma pack. */
#define HANDLE_SYSV_PRAGMA
+
+
+/* Define some types that are the same on all NetBSD platforms,
+ making them agree with <machine/ansi.h>. */
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#undef WINT_TYPE
+#define WINT_TYPE "int"
diff --git a/contrib/gcc/config/rs6000/aix.h b/contrib/gcc/config/rs6000/aix.h
index 2738a37..d8dde5b 100644
--- a/contrib/gcc/config/rs6000/aix.h
+++ b/contrib/gcc/config/rs6000/aix.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for IBM RS/6000 POWER running AIX.
- Copyright (C) 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -27,6 +27,9 @@ Boston, MA 02111-1307, USA. */
collect has a chance to see them, so scan the object files directly. */
#define COLLECT_EXPORT_LIST
+/* Handle #pragma weak and #pragma pack. */
+#define HANDLE_SYSV_PRAGMA
+
/* This is the only version of nm that collect2 can work with. */
#define REAL_NM_FILE_NAME "/usr/ucb/nm"
diff --git a/contrib/gcc/config/rs6000/aix43.h b/contrib/gcc/config/rs6000/aix43.h
index 93e186c..7aa8707 100644
--- a/contrib/gcc/config/rs6000/aix43.h
+++ b/contrib/gcc/config/rs6000/aix43.h
@@ -199,10 +199,6 @@ do { \
%{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\
%{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}}"
-/* Since there are separate multilibs for pthreads, determine the
- thread model based on the command-line arguments. */
-#define THREAD_MODEL_SPEC "%{pthread:posix}%{!pthread:single}"
-
/* AIX 4.3 typedefs ptrdiff_t as "long" while earlier releases used "int". */
#undef PTRDIFF_TYPE
diff --git a/contrib/gcc/config/rs6000/aix51.h b/contrib/gcc/config/rs6000/aix51.h
index ae01440..121c7ba 100644
--- a/contrib/gcc/config/rs6000/aix51.h
+++ b/contrib/gcc/config/rs6000/aix51.h
@@ -202,10 +202,6 @@ do { \
%{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\
%{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}}"
-/* Since there are separate multilibs for pthreads, determine the
- thread model based on the command-line arguments. */
-#define THREAD_MODEL_SPEC "%{pthread:posix}%{!pthread:single}"
-
/* AIX V5 typedefs ptrdiff_t as "long" while earlier releases used "int". */
#undef PTRDIFF_TYPE
@@ -213,10 +209,13 @@ do { \
/* __WCHAR_TYPE__ is dynamic, so do not define it statically. */
#define NO_BUILTIN_WCHAR_TYPE
-#undef WCHAR_TYPE
-#undef WCHAR_TYPE_SIZE
+
+/* Type used for wchar_t, as a string used in a declaration. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE (!TARGET_64BIT ? "short unsigned int" : "unsigned int")
/* Width of wchar_t in bits. */
+#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32)
#define MAX_WCHAR_TYPE_SIZE 32
diff --git a/contrib/gcc/config/rs6000/altivec.h b/contrib/gcc/config/rs6000/altivec.h
index 4d5b0a3..85869dc 100644
--- a/contrib/gcc/config/rs6000/altivec.h
+++ b/contrib/gcc/config/rs6000/altivec.h
@@ -35,18 +35,29 @@ Boston, MA 02111-1307, USA. */
/* Required by Motorola specs. */
#define __VEC__ 10206
+#ifndef __ALTIVEC__
#define __ALTIVEC__ 1
+#endif
#define __vector __attribute__((vector_size(16)))
-/* Dummy prototype. */
-extern void __altivec_link_error_invalid_argument ();
-
/* You are allowed to undef this for C++ compatibility. */
#define vector __vector
+#define bool signed
+#define pixel short
+#define __pixel short
+
+/* Dummy prototype. */
+extern int __altivec_link_error_invalid_argument ();
+
/* Helper macros. */
+#define __CR6_EQ 0
+#define __CR6_EQ_REV 1
+#define __CR6_LT 2
+#define __CR6_LT_REV 3
+
#define __bin_args_eq(xtype, x, ytype, y) \
(__builtin_types_compatible_p (xtype, typeof (x)) \
&& __builtin_types_compatible_p (ytype, typeof (y)))
@@ -61,1398 +72,7784 @@ extern void __altivec_link_error_invalid_argument ();
#define __ch(x, y, z) __builtin_choose_expr (x, y, z)
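+
+/* Sketch: outside C++, the C entry points built from these helpers look
+   roughly like
+     __ch (__bin_args_eq (vector float, (a1), vector float, (a2)),
+           __builtin_altivec_vaddfp ((vector float) (a1), (vector float) (a2)),
+           __altivec_link_error_invalid_argument ())
+   so an unsupported argument combination degrades into a link error. */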
+
+/* These are easy: each alias takes exactly the same arguments as its
+   generic form. */
+
+#define vec_vaddcuw vec_addc
+#define vec_vand vec_and
+#define vec_vandc vec_andc
+#define vec_vrfip vec_ceil
+#define vec_vcmpbfp vec_cmpb
+#define vec_vcmpgefp vec_cmpge
+#define vec_vctsxs vec_cts
+#define vec_vctuxs vec_ctu
+#define vec_vexptefp vec_expte
+#define vec_vrfim vec_floor
+#define vec_lvx vec_ld
+#define vec_lvxl vec_ldl
+#define vec_vlogefp vec_loge
+#define vec_vmaddfp vec_madd
+#define vec_vmhaddshs vec_madds
+#define vec_vmladduhm vec_mladd
+#define vec_vmhraddshs vec_mradds
+#define vec_vnmsubfp vec_nmsub
+#define vec_vnor vec_nor
+#define vec_vor vec_or
+#define vec_vpkpx vec_packpx
+#define vec_vperm vec_perm
+#define vec_vrefp vec_re
+#define vec_vrfin vec_round
+#define vec_vrsqrtefp vec_rsqrte
+#define vec_vsel vec_sel
+#define vec_vsldoi vec_sld
+#define vec_vsl vec_sll
+#define vec_vslo vec_slo
+#define vec_vspltisb vec_splat_s8
+#define vec_vspltish vec_splat_s16
+#define vec_vspltisw vec_splat_s32
+#define vec_vsr vec_srl
+#define vec_vsro vec_sro
+#define vec_stvx vec_st
+#define vec_stvxl vec_stl
+#define vec_vsubcuw vec_subc
+#define vec_vsum2sws vec_sum2s
+#define vec_vsumsws vec_sums
+#define vec_vrfiz vec_trunc
+#define vec_vxor vec_xor
+
#ifdef __cplusplus
-/* C++ stuff here. */
+/* Prototypes for builtins that take literals and must always be
+ inlined. */
+inline vector float vec_ctf (vector unsigned int, const char) __attribute__ ((always_inline));
+inline vector float vec_ctf (vector signed int, const char) __attribute__ ((always_inline));
+inline vector signed int vec_cts (vector float, const char) __attribute__ ((always_inline));
+inline vector unsigned int vec_ctu (vector float, const char) __attribute__ ((always_inline));
+inline void vec_dss (const char) __attribute__ ((always_inline));
+inline void vec_dst (void *, int, const char) __attribute__ ((always_inline));
+inline void vec_dstst (void *, int, const char) __attribute__ ((always_inline));
+inline void vec_dststt (void *, int, const char) __attribute__ ((always_inline));
+inline void vec_dstt (void *, int, const char) __attribute__ ((always_inline));
+inline vector float vec_sld (vector float, vector float, const char) __attribute__ ((always_inline));
+inline vector signed int vec_sld (vector signed int, vector signed int, const char) __attribute__ ((always_inline));
+inline vector unsigned int vec_sld (vector unsigned int, vector unsigned int, const char) __attribute__ ((always_inline));
+inline vector signed short vec_sld (vector signed short, vector signed short, const char) __attribute__ ((always_inline));
+inline vector unsigned short vec_sld (vector unsigned short, vector unsigned short, const char) __attribute__ ((always_inline));
+inline vector signed char vec_sld (vector signed char, vector signed char, const char) __attribute__ ((always_inline));
+inline vector unsigned char vec_sld (vector unsigned char, vector unsigned char, const char) __attribute__ ((always_inline));
+inline vector signed char vec_splat (vector signed char, const char) __attribute__ ((always_inline));
+inline vector unsigned char vec_splat (vector unsigned char, const char) __attribute__ ((always_inline));
+inline vector signed short vec_splat (vector signed short, const char) __attribute__ ((always_inline));
+inline vector unsigned short vec_splat (vector unsigned short, const char) __attribute__ ((always_inline));
+inline vector float vec_splat (vector float, const char) __attribute__ ((always_inline));
+inline vector signed int vec_splat (vector signed int, const char) __attribute__ ((always_inline));
+inline vector unsigned int vec_splat (vector unsigned int, const char) __attribute__ ((always_inline));
+inline vector signed char vec_splat_s8 (const char) __attribute__ ((always_inline));
+inline vector signed short vec_splat_s16 (const char) __attribute__ ((always_inline));
+inline vector signed int vec_splat_s32 (const char) __attribute__ ((always_inline));
+inline vector unsigned char vec_splat_u8 (const char) __attribute__ ((always_inline));
+inline vector unsigned short vec_splat_u16 (const char) __attribute__ ((always_inline));
+inline vector unsigned int vec_splat_u32 (const char) __attribute__ ((always_inline));
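+
+/* E.g. vec_splat (v, 3) or vec_splat_s8 (-16): the const char operand
+   must reach the builtin as a compile-time literal, which is why these
+   are forced always_inline. */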
+
+/* vec_abs */
+
+inline vector signed char
+vec_abs (vector signed char a1)
+{
+ return __builtin_altivec_abs_v16qi (a1);
+}
+
+inline vector signed short
+vec_abs (vector signed short a1)
+{
+ return __builtin_altivec_abs_v8hi (a1);
+}
+
+inline vector signed int
+vec_abs (vector signed int a1)
+{
+ return __builtin_altivec_abs_v4si (a1);
+}
+
+inline vector float
+vec_abs (vector float a1)
+{
+ return __builtin_altivec_abs_v4sf (a1);
+}
+
+/* vec_abss */
+
+inline vector signed char
+vec_abss (vector signed char a1)
+{
+ return __builtin_altivec_abss_v16qi (a1);
+}
+
+inline vector signed short
+vec_abss (vector signed short a1)
+{
+ return __builtin_altivec_abss_v8hi (a1);
+}
+
+inline vector signed int
+vec_abss (vector signed int a1)
+{
+ return __builtin_altivec_abss_v4si (a1);
+}
+
+/* vec_add */
+
+inline vector signed char
+vec_add (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_add (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_add (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_add (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_add (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_add (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_add (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_add (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed int
+vec_add (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_add (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_add (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_add (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_add (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vaddfp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_vaddfp */
+
+inline vector float
+vec_vaddfp (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vaddfp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_vadduwm */
+
+inline vector signed int
+vec_vadduwm (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vadduwm (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vadduwm (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vadduwm (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vadduhm */
+
+inline vector signed short
+vec_vadduhm (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vadduhm (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vadduhm (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vadduhm (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vaddubm */
+
+inline vector signed char
+vec_vaddubm (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vaddubm (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vaddubm (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vaddubm (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_addc */
+
+inline vector unsigned int
+vec_addc (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vaddcuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_adds */
+
+inline vector unsigned char
+vec_adds (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_adds (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_adds (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed char
+vec_adds (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vaddsbs ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned short
+vec_adds (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_adds (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_adds (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed short
+vec_adds (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vaddshs ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned int
+vec_adds (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_adds (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_adds (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_adds (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vaddsws ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vaddsws */
+
+inline vector signed int
+vec_vaddsws (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vaddsws ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vadduws */
+
+inline vector unsigned int
+vec_vadduws (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vadduws (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vadduws (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vaddshs */
+
+inline vector signed short
+vec_vaddshs (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vaddshs ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vadduhs */
+
+inline vector unsigned short
+vec_vadduhs (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vadduhs (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vadduhs (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vaddsbs */
+
+inline vector signed char
+vec_vaddsbs (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vaddsbs ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_vaddubs */
+
+inline vector unsigned char
+vec_vaddubs (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vaddubs (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vaddubs (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_and */
+
+inline vector float
+vec_and (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_and (vector float a1, vector signed int a2)
+{
+ return (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_and (vector signed int a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_and (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_and (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_and (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_and (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_and (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_and (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_and (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_and (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_and (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_and (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_and (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_and (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_andc */
+
+inline vector float
+vec_andc (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_andc (vector float a1, vector signed int a2)
+{
+ return (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_andc (vector signed int a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_andc (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_andc (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_andc (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_andc (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_andc (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_andc (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_andc (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_andc (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_andc (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_andc (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_andc (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_andc (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_avg */
+
+inline vector unsigned char
+vec_avg (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vavgub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed char
+vec_avg (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vavgsb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned short
+vec_avg (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vavguh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed short
+vec_avg (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vavgsh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned int
+vec_avg (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vavguw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_avg (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vavgsw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vavgsw */
+
+inline vector signed int
+vec_vavgsw (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vavgsw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vavguw */
+
+inline vector unsigned int
+vec_vavguw (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vavguw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vavgsh */
+
+inline vector signed short
+vec_vavgsh (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vavgsh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vavguh */
+
+inline vector unsigned short
+vec_vavguh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vavguh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vavgsb */
+
+inline vector signed char
+vec_vavgsb (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vavgsb ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_vavgub */
+
+inline vector unsigned char
+vec_vavgub (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vavgub ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_ceil */
+
+inline vector float
+vec_ceil (vector float a1)
+{
+ return (vector float) __builtin_altivec_vrfip ((vector float) a1);
+}
+
+/* vec_cmpb */
+
+inline vector signed int
+vec_cmpb (vector float a1, vector float a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpbfp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_cmpeq */
+
+inline vector signed char
+vec_cmpeq (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed char
+vec_cmpeq (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_cmpeq (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed short
+vec_cmpeq (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed int
+vec_cmpeq (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_cmpeq (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_cmpeq (vector float a1, vector float a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpeqfp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_vcmpeqfp */
+
+inline vector signed int
+vec_vcmpeqfp (vector float a1, vector float a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpeqfp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_vcmpequw */
+
+inline vector signed int
+vec_vcmpequw (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_vcmpequw (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vcmpequh */
+
+inline vector signed short
+vec_vcmpequh (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed short
+vec_vcmpequh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vcmpequb */
+
+inline vector signed char
+vec_vcmpequb (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed char
+vec_vcmpequb (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_cmpge */
+
+inline vector signed int
+vec_cmpge (vector float a1, vector float a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpgefp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_cmpgt */
+
+inline vector signed char
+vec_cmpgt (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed char
+vec_cmpgt (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_cmpgt (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed short
+vec_cmpgt (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed int
+vec_cmpgt (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_cmpgt (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_cmpgt (vector float a1, vector float a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_vcmpgtfp */
+
+inline vector signed int
+vec_vcmpgtfp (vector float a1, vector float a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_vcmpgtsw */
+
+inline vector signed int
+vec_vcmpgtsw (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vcmpgtuw */
+
+inline vector signed int
+vec_vcmpgtuw (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vcmpgtsh */
+
+inline vector signed short
+vec_vcmpgtsh (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vcmpgtuh */
+
+inline vector signed short
+vec_vcmpgtuh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vcmpgtsb */
+
+inline vector signed char
+vec_vcmpgtsb (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_vcmpgtub */
+
+inline vector signed char
+vec_vcmpgtub (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_cmple */
+
+inline vector signed int
+vec_cmple (vector float a1, vector float a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpgefp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_cmplt */
+
+inline vector signed char
+vec_cmplt (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed char
+vec_cmplt (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_cmplt (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed short
+vec_cmplt (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed int
+vec_cmplt (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_cmplt (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_cmplt (vector float a1, vector float a2)
+{
+ return (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_ctf */
+
+inline vector float
+vec_ctf (vector unsigned int a1, const char a2)
+{
+ return (vector float) __builtin_altivec_vcfux ((vector signed int) a1, a2);
+}
+
+inline vector float
+vec_ctf (vector signed int a1, const char a2)
+{
+ return (vector float) __builtin_altivec_vcfsx ((vector signed int) a1, a2);
+}
+
+/* vec_vcfsx */
+
+inline vector float
+vec_vcfsx (vector signed int a1, const char a2)
+{
+ return (vector float) __builtin_altivec_vcfsx ((vector signed int) a1, a2);
+}
+
+/* vec_vcfux */
+
+inline vector float
+vec_vcfux (vector unsigned int a1, const char a2)
+{
+ return (vector float) __builtin_altivec_vcfux ((vector signed int) a1, a2);
+}
+
+/* vec_cts */
+
+inline vector signed int
+vec_cts (vector float a1, const char a2)
+{
+ return (vector signed int) __builtin_altivec_vctsxs ((vector float) a1, a2);
+}
+
+/* vec_ctu */
+
+inline vector unsigned int
+vec_ctu (vector float a1, const char a2)
+{
+ return (vector unsigned int) __builtin_altivec_vctuxs ((vector float) a1, a2);
+}
+
+/* vec_dss */
+
+inline void
+vec_dss (const char a1)
+{
+ __builtin_altivec_dss (a1);
+}
+
+/* vec_dssall */
+
+inline void
+vec_dssall ()
+{
+ __builtin_altivec_dssall ();
+}
+
+/* vec_dst */
+
+inline void
+vec_dst (void *a1, int a2, const char a3)
+{
+ __builtin_altivec_dst ((void *) a1, a2, a3);
+}
+
+/* vec_dstst */
+
+inline void
+vec_dstst (void *a1, int a2, const char a3)
+{
+ __builtin_altivec_dstst ((void *) a1, a2, a3);
+}
+
+/* vec_dststt */
+
+inline void
+vec_dststt (void *a1, int a2, const char a3)
+{
+ __builtin_altivec_dststt ((void *) a1, a2, a3);
+}
+
+/* vec_dstt */
+
+inline void
+vec_dstt (void *a1, int a2, const char a3)
+{
+ __builtin_altivec_dstt ((void *) a1, a2, a3);
+}
+
+/* vec_expte */
+
+inline vector float
+vec_expte (vector float a1)
+{
+ return (vector float) __builtin_altivec_vexptefp ((vector float) a1);
+}
+
+/* vec_floor */
+
+inline vector float
+vec_floor (vector float a1)
+{
+ return (vector float) __builtin_altivec_vrfim ((vector float) a1);
+}
+
+/* vec_ld */
+
+inline vector float
+vec_ld (int a1, vector float *a2)
+{
+ return (vector float) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector float
+vec_ld (int a1, float *a2)
+{
+ return (vector float) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector signed int
+vec_ld (int a1, vector signed int *a2)
+{
+ return (vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector signed int
+vec_ld (int a1, signed int *a2)
+{
+ return (vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector unsigned int
+vec_ld (int a1, vector unsigned int *a2)
+{
+ return (vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector unsigned int
+vec_ld (int a1, unsigned int *a2)
+{
+ return (vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector signed short
+vec_ld (int a1, vector signed short *a2)
+{
+ return (vector signed short) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector signed short
+vec_ld (int a1, signed short *a2)
+{
+ return (vector signed short) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector unsigned short
+vec_ld (int a1, vector unsigned short *a2)
+{
+ return (vector unsigned short) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector unsigned short
+vec_ld (int a1, unsigned short *a2)
+{
+ return (vector unsigned short) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector signed char
+vec_ld (int a1, vector signed char *a2)
+{
+ return (vector signed char) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector signed char
+vec_ld (int a1, signed char *a2)
+{
+ return (vector signed char) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_ld (int a1, vector unsigned char *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_ld (int a1, unsigned char *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+/* vec_lde */
+
+inline vector signed char
+vec_lde (int a1, signed char *a2)
+{
+ return (vector signed char) __builtin_altivec_lvebx (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lde (int a1, unsigned char *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvebx (a1, (void *) a2);
+}
+
+inline vector signed short
+vec_lde (int a1, signed short *a2)
+{
+ return (vector signed short) __builtin_altivec_lvehx (a1, (void *) a2);
+}
+
+inline vector unsigned short
+vec_lde (int a1, unsigned short *a2)
+{
+ return (vector unsigned short) __builtin_altivec_lvehx (a1, (void *) a2);
+}
+
+inline vector float
+vec_lde (int a1, float *a2)
+{
+ return (vector float) __builtin_altivec_lvewx (a1, (void *) a2);
+}
+
+inline vector signed int
+vec_lde (int a1, signed int *a2)
+{
+ return (vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
+}
+
+inline vector unsigned int
+vec_lde (int a1, unsigned int *a2)
+{
+ return (vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
+}
+
+/* vec_lvewx */
+
+inline vector float
+vec_lvewx (int a1, float *a2)
+{
+ return (vector float) __builtin_altivec_lvewx (a1, (void *) a2);
+}
+
+inline vector signed int
+vec_lvewx (int a1, signed int *a2)
+{
+ return (vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
+}
+
+inline vector unsigned int
+vec_lvewx (int a1, unsigned int *a2)
+{
+ return (vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
+}
+
+/* vec_lvehx */
+
+inline vector signed short
+vec_lvehx (int a1, signed short *a2)
+{
+ return (vector signed short) __builtin_altivec_lvehx (a1, (void *) a2);
+}
+
+inline vector unsigned short
+vec_lvehx (int a1, unsigned short *a2)
+{
+ return (vector unsigned short) __builtin_altivec_lvehx (a1, (void *) a2);
+}
+
+/* vec_lvebx */
+
+inline vector signed char
+vec_lvebx (int a1, signed char *a2)
+{
+ return (vector signed char) __builtin_altivec_lvebx (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvebx (int a1, unsigned char *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvebx (a1, (void *) a2);
+}
+
+/* vec_ldl */
+
+inline vector float
+vec_ldl (int a1, vector float *a2)
+{
+ return (vector float) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector float
+vec_ldl (int a1, float *a2)
+{
+ return (vector float) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector signed int
+vec_ldl (int a1, vector signed int *a2)
+{
+ return (vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector signed int
+vec_ldl (int a1, signed int *a2)
+{
+ return (vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector unsigned int
+vec_ldl (int a1, vector unsigned int *a2)
+{
+ return (vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector unsigned int
+vec_ldl (int a1, unsigned int *a2)
+{
+ return (vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector signed short
+vec_ldl (int a1, vector signed short *a2)
+{
+ return (vector signed short) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector signed short
+vec_ldl (int a1, signed short *a2)
+{
+ return (vector signed short) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector unsigned short
+vec_ldl (int a1, vector unsigned short *a2)
+{
+ return (vector unsigned short) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector unsigned short
+vec_ldl (int a1, unsigned short *a2)
+{
+ return (vector unsigned short) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector signed char
+vec_ldl (int a1, vector signed char *a2)
+{
+ return (vector signed char) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector signed char
+vec_ldl (int a1, signed char *a2)
+{
+ return (vector signed char) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_ldl (int a1, vector unsigned char *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_ldl (int a1, unsigned char *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
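+/* Note: vec_ldl maps to lvxl, which loads exactly like vec_ld but
+   marks the fetched cache line least-recently-used, hinting that the
+   data is transient and may be evicted first.  */
+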
+/* vec_loge */
+
+inline vector float
+vec_loge (vector float a1)
+{
+ return (vector float) __builtin_altivec_vlogefp ((vector float) a1);
+}
+
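+/* Note: vlogefp computes a base-2 logarithm estimate, so vec_loge
+   approximates log2 of each element, not the natural logarithm the
+   name might suggest.  */
+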
+/* vec_lvsl */
+
+inline vector unsigned char
+vec_lvsl (int a1, unsigned char *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvsl (int a1, signed char *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvsl (int a1, unsigned short *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvsl (int a1, signed short *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvsl (int a1, unsigned int *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvsl (int a1, signed int *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvsl (int a1, float *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+}
+
+/* vec_lvsr */
+
+inline vector unsigned char
+vec_lvsr (int a1, unsigned char *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvsr (int a1, signed char *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvsr (int a1, unsigned short *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvsr (int a1, signed short *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvsr (int a1, unsigned int *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvsr (int a1, signed int *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+}
+
+inline vector unsigned char
+vec_lvsr (int a1, float *a2)
+{
+ return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+}
+
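+/* Usage sketch (illustrative): vec_lvsl yields the permute control
+   vector for the classic misaligned-load idiom, together with vec_ld
+   and vec_perm (the latter is defined further below).  A hypothetical
+   helper:
+
+     vector unsigned char
+     load_unaligned (unsigned char *p)
+     {
+       vector unsigned char lo = vec_ld (0, p);
+       vector unsigned char hi = vec_ld (15, p);
+       return vec_perm (lo, hi, vec_lvsl (0, p));
+     }
+
+   vec_lvsr gives the mirror-image control vector used by the
+   corresponding misaligned-store sequence.  */
+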
+/* vec_madd */
+
+inline vector float
+vec_madd (vector float a1, vector float a2, vector float a3)
+{
+ return (vector float) __builtin_altivec_vmaddfp ((vector float) a1, (vector float) a2, (vector float) a3);
+}
+
+/* vec_madds */
+
+inline vector signed short
+vec_madds (vector signed short a1, vector signed short a2, vector signed short a3)
+{
+ return (vector signed short) __builtin_altivec_vmhaddshs ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+}
+
+/* vec_max */
+
+inline vector unsigned char
+vec_max (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_max (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_max (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed char
+vec_max (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vmaxsb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned short
+vec_max (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_max (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_max (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed short
+vec_max (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vmaxsh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned int
+vec_max (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_max (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_max (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_max (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vmaxsw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_max (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vmaxfp ((vector float) a1, (vector float) a2);
+}
+
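+/* Note: as the overloads above show, a mixed signed/unsigned operand
+   pair resolves to the unsigned maximum (vmaxub/vmaxuh/vmaxuw), so
+   both inputs are compared as unsigned values.  */
+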
+/* vec_vmaxfp */
+
+inline vector float
+vec_vmaxfp (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vmaxfp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_vmaxsw */
+
+inline vector signed int
+vec_vmaxsw (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vmaxsw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vmaxuw */
+
+inline vector unsigned int
+vec_vmaxuw (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vmaxuw (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vmaxuw (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vmaxsh */
+
+inline vector signed short
+vec_vmaxsh (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vmaxsh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vmaxuh */
+
+inline vector unsigned short
+vec_vmaxuh (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vmaxuh (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vmaxuh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vmaxsb */
+
+inline vector signed char
+vec_vmaxsb (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vmaxsb ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_vmaxub */
+
+inline vector unsigned char
+vec_vmaxub (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vmaxub (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vmaxub (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_mergeh */
+
+inline vector signed char
+vec_mergeh (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_mergeh (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_mergeh (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_mergeh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector float
+vec_mergeh (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_mergeh (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_mergeh (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vmrghw */
+
+inline vector float
+vec_vmrghw (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_vmrghw (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vmrghw (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vmrghh */
+
+inline vector signed short
+vec_vmrghh (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vmrghh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vmrghb */
+
+inline vector signed char
+vec_vmrghb (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vmrghb (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_mergel */
+
+inline vector signed char
+vec_mergel (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_mergel (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_mergel (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_mergel (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector float
+vec_mergel (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_mergel (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_mergel (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+}
+
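+/* Usage sketch (illustrative): two rounds of mergeh/mergel implement
+   the classic 4x4 float-matrix transpose, where r0..r3 are the rows
+   and c0..c3 the resulting columns (all vector float):
+
+     t0 = vec_mergeh (r0, r2);  t1 = vec_mergeh (r1, r3);
+     t2 = vec_mergel (r0, r2);  t3 = vec_mergel (r1, r3);
+     c0 = vec_mergeh (t0, t1);  c1 = vec_mergel (t0, t1);
+     c2 = vec_mergeh (t2, t3);  c3 = vec_mergel (t2, t3);  */
+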
+/* vec_vmrglw */
+
+inline vector float
+vec_vmrglw (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_vmrglw (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vmrglw (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vmrglh */
+
+inline vector signed short
+vec_vmrglh (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vmrglh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vmrglb */
+
+inline vector signed char
+vec_vmrglb (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vmrglb (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_mfvscr */
+
+inline vector unsigned short
+vec_mfvscr ()
+{
+ return (vector unsigned short) __builtin_altivec_mfvscr ();
+}
+
+/* vec_min */
+
+inline vector unsigned char
+vec_min (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_min (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_min (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed char
+vec_min (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vminsb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned short
+vec_min (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_min (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_min (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed short
+vec_min (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vminsh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned int
+vec_min (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_min (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_min (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_min (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vminsw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_min (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vminfp ((vector float) a1, (vector float) a2);
+}
+
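+/* Usage sketch (illustrative): vec_min composes with vec_max into a
+   branch-free clamp, e.g. for vector signed short x, lo, hi:
+
+     vector signed short clamped = vec_min (vec_max (x, lo), hi);  */
+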
+/* vec_vminfp */
+
+inline vector float
+vec_vminfp (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vminfp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_vminsw */
+
+inline vector signed int
+vec_vminsw (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vminsw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vminuw */
+
+inline vector unsigned int
+vec_vminuw (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vminuw (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vminuw (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vminsh */
+
+inline vector signed short
+vec_vminsh (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vminsh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vminuh */
+
+inline vector unsigned short
+vec_vminuh (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vminuh (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vminuh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vminsb */
+
+inline vector signed char
+vec_vminsb (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vminsb ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_vminub */
+
+inline vector unsigned char
+vec_vminub (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vminub (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vminub (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_mladd */
+
+inline vector signed short
+vec_mladd (vector signed short a1, vector signed short a2, vector signed short a3)
+{
+ return (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+}
+
+inline vector signed short
+vec_mladd (vector signed short a1, vector unsigned short a2, vector unsigned short a3)
+{
+ return (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+}
+
+inline vector signed short
+vec_mladd (vector unsigned short a1, vector signed short a2, vector signed short a3)
+{
+ return (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+}
+
+inline vector unsigned short
+vec_mladd (vector unsigned short a1, vector unsigned short a2, vector unsigned short a3)
+{
+ return (vector unsigned short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+}
+
+/* vec_mradds */
+
+inline vector signed short
+vec_mradds (vector signed short a1, vector signed short a2, vector signed short a3)
+{
+ return (vector signed short) __builtin_altivec_vmhraddshs ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+}
+
+/* vec_msum */
+
+inline vector unsigned int
+vec_msum (vector unsigned char a1, vector unsigned char a2, vector unsigned int a3)
+{
+ return (vector unsigned int) __builtin_altivec_vmsumubm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3);
+}
+
+inline vector signed int
+vec_msum (vector signed char a1, vector unsigned char a2, vector signed int a3)
+{
+ return (vector signed int) __builtin_altivec_vmsummbm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3);
+}
+
+inline vector unsigned int
+vec_msum (vector unsigned short a1, vector unsigned short a2, vector unsigned int a3)
+{
+ return (vector unsigned int) __builtin_altivec_vmsumuhm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+}
+
+inline vector signed int
+vec_msum (vector signed short a1, vector signed short a2, vector signed int a3)
+{
+ return (vector signed int) __builtin_altivec_vmsumshm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+}
+
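+/* Usage sketch (illustrative): vec_msum reduces byte or halfword
+   products into word lanes, the first step of a dot product.  With
+   vector unsigned char a, b and a running vector unsigned int acc:
+
+     acc = vec_msum (a, b, acc);
+
+   each word of acc accumulates the four corresponding byte products. */
+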
+/* vec_vmsumshm */
+
+inline vector signed int
+vec_vmsumshm (vector signed short a1, vector signed short a2, vector signed int a3)
+{
+ return (vector signed int) __builtin_altivec_vmsumshm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+}
+
+/* vec_vmsumuhm */
+
+inline vector unsigned int
+vec_vmsumuhm (vector unsigned short a1, vector unsigned short a2, vector unsigned int a3)
+{
+ return (vector unsigned int) __builtin_altivec_vmsumuhm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+}
+
+/* vec_vmsummbm */
+
+inline vector signed int
+vec_vmsummbm (vector signed char a1, vector unsigned char a2, vector signed int a3)
+{
+ return (vector signed int) __builtin_altivec_vmsummbm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3);
+}
+
+/* vec_vmsumubm */
+
+inline vector unsigned int
+vec_vmsumubm (vector unsigned char a1, vector unsigned char a2, vector unsigned int a3)
+{
+ return (vector unsigned int) __builtin_altivec_vmsumubm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3);
+}
+
+/* vec_msums */
+
+inline vector unsigned int
+vec_msums (vector unsigned short a1, vector unsigned short a2, vector unsigned int a3)
+{
+ return (vector unsigned int) __builtin_altivec_vmsumuhs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+}
+
+inline vector signed int
+vec_msums (vector signed short a1, vector signed short a2, vector signed int a3)
+{
+ return (vector signed int) __builtin_altivec_vmsumshs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+}
+
+/* vec_vmsumshs */
+
+inline vector signed int
+vec_vmsumshs (vector signed short a1, vector signed short a2, vector signed int a3)
+{
+ return (vector signed int) __builtin_altivec_vmsumshs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+}
+
+/* vec_vmsumuhs */
+
+inline vector unsigned int
+vec_vmsumuhs (vector unsigned short a1, vector unsigned short a2, vector unsigned int a3)
+{
+ return (vector unsigned int) __builtin_altivec_vmsumuhs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+}
+
+/* vec_mtvscr */
+
+inline void
+vec_mtvscr (vector signed int a1)
+{
+ __builtin_altivec_mtvscr ((vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (vector unsigned int a1)
+{
+ __builtin_altivec_mtvscr ((vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (vector signed short a1)
+{
+ __builtin_altivec_mtvscr ((vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (vector unsigned short a1)
+{
+ __builtin_altivec_mtvscr ((vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (vector signed char a1)
+{
+ __builtin_altivec_mtvscr ((vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (vector unsigned char a1)
+{
+ __builtin_altivec_mtvscr ((vector signed int) a1);
+}
+
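+/* Usage sketch (illustrative): vec_mfvscr/vec_mtvscr pair up to save
+   and restore the vector status and control register, e.g. around code
+   that must not disturb the saturation (SAT) bit:
+
+     vector unsigned short saved = vec_mfvscr ();
+     ...saturating arithmetic...
+     vec_mtvscr (saved);  */
+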
+/* vec_mule */
+
+inline vector unsigned short
+vec_mule (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmuleub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_mule (vector signed char a1, vector signed char a2)
+{
+ return (vector signed short) __builtin_altivec_vmulesb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned int
+vec_mule (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmuleuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed int
+vec_mule (vector signed short a1, vector signed short a2)
+{
+ return (vector signed int) __builtin_altivec_vmulesh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vmulesh */
+
+inline vector signed int
+vec_vmulesh (vector signed short a1, vector signed short a2)
+{
+ return (vector signed int) __builtin_altivec_vmulesh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vmuleuh */
+
+inline vector unsigned int
+vec_vmuleuh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmuleuh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vmuleub */
+
+inline vector unsigned short
+vec_vmuleub (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmuleub ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_mulo */
+
+inline vector unsigned short
+vec_mulo (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmuloub ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_mulo (vector signed char a1, vector signed char a2)
+{
+ return (vector signed short) __builtin_altivec_vmulosb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned int
+vec_mulo (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmulouh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed int
+vec_mulo (vector signed short a1, vector signed short a2)
+{
+ return (vector signed int) __builtin_altivec_vmulosh ((vector signed short) a1, (vector signed short) a2);
+}
+
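+/* Usage sketch (illustrative): vec_mule and vec_mulo return the even-
+   and odd-indexed products; merging them restores element order.  A
+   hypothetical helper for the first eight full 16-bit products:
+
+     vector signed short
+     products_lo (vector signed char a, vector signed char b)
+     {
+       return vec_mergeh (vec_mule (a, b), vec_mulo (a, b));
+     }  */
+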
+/* vec_vmulosh */
+
+inline vector signed int
+vec_vmulosh (vector signed short a1, vector signed short a2)
+{
+ return (vector signed int) __builtin_altivec_vmulosh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vmulouh */
+
+inline vector unsigned int
+vec_vmulouh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned int) __builtin_altivec_vmulouh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vmulosb */
+
+inline vector signed short
+vec_vmulosb (vector signed char a1, vector signed char a2)
+{
+ return (vector signed short) __builtin_altivec_vmulosb ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_vmuloub */
+
+inline vector unsigned short
+vec_vmuloub (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned short) __builtin_altivec_vmuloub ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_nmsub */
+
+inline vector float
+vec_nmsub (vector float a1, vector float a2, vector float a3)
+{
+ return (vector float) __builtin_altivec_vnmsubfp ((vector float) a1, (vector float) a2, (vector float) a3);
+}
+
+/* vec_nor */
+
+inline vector float
+vec_nor (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_nor (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_nor (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_nor (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_nor (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_nor (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_nor (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_or */
+
+inline vector float
+vec_or (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_or (vector float a1, vector signed int a2)
+{
+ return (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_or (vector signed int a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_or (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_or (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_or (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_or (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_or (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_or (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_or (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_or (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_or (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_or (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_or (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_or (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+}
+
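+/* Usage sketch (illustrative): vec_nor with a repeated operand is the
+   usual bitwise complement idiom:
+
+     vector unsigned int not_v = vec_nor (v, v);  */
+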
+/* vec_pack */
+
+inline vector signed char
+vec_pack (vector signed short a1, vector signed short a2)
+{
+ return (vector signed char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned char
+vec_pack (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed short
+vec_pack (vector signed int a1, vector signed int a2)
+{
+ return (vector signed short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_pack (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vpkuwum */
+
+inline vector signed short
+vec_vpkuwum (vector signed int a1, vector signed int a2)
+{
+ return (vector signed short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_vpkuwum (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vpkuhum */
+
+inline vector signed char
+vec_vpkuhum (vector signed short a1, vector signed short a2)
+{
+ return (vector signed char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned char
+vec_vpkuhum (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_packpx */
+
+inline vector signed short
+vec_packpx (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector signed short) __builtin_altivec_vpkpx ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_packs */
+
+inline vector unsigned char
+vec_packs (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed char
+vec_packs (vector signed short a1, vector signed short a2)
+{
+ return (vector signed char) __builtin_altivec_vpkshss ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_packs (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_packs (vector signed int a1, vector signed int a2)
+{
+ return (vector signed short) __builtin_altivec_vpkswss ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vpkswss */
+
+inline vector signed short
+vec_vpkswss (vector signed int a1, vector signed int a2)
+{
+ return (vector signed short) __builtin_altivec_vpkswss ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vpkuwus */
+
+inline vector unsigned short
+vec_vpkuwus (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vpkshss */
+
+inline vector signed char
+vec_vpkshss (vector signed short a1, vector signed short a2)
+{
+ return (vector signed char) __builtin_altivec_vpkshss ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vpkuhus */
+
+inline vector unsigned char
+vec_vpkuhus (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_packsu */
+
+inline vector unsigned char
+vec_packsu (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned char
+vec_packsu (vector signed short a1, vector signed short a2)
+{
+ return (vector unsigned char) __builtin_altivec_vpkshus ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_packsu (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_packsu (vector signed int a1, vector signed int a2)
+{
+ return (vector unsigned short) __builtin_altivec_vpkswus ((vector signed int) a1, (vector signed int) a2);
+}
+
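+/* Usage sketch (illustrative): vec_packs narrows with signed or
+   unsigned saturation, while vec_packsu saturates signed inputs to an
+   unsigned result, e.g. clamping signed halfword pixels into bytes:
+
+     vector unsigned char pixels = vec_packsu (hi_half, lo_half);
+
+   where hi_half and lo_half are hypothetical vector signed short
+   values whose elements are clamped to 0..255.  */
+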
+/* vec_vpkswus */
+
+inline vector unsigned short
+vec_vpkswus (vector signed int a1, vector signed int a2)
+{
+ return (vector unsigned short) __builtin_altivec_vpkswus ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vpkshus */
+
+inline vector unsigned char
+vec_vpkshus (vector signed short a1, vector signed short a2)
+{
+ return (vector unsigned char) __builtin_altivec_vpkshus ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_perm */
+
+inline vector float
+vec_perm (vector float a1, vector float a2, vector unsigned char a3)
+{
+ return (vector float) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+}
+
+inline vector signed int
+vec_perm (vector signed int a1, vector signed int a2, vector unsigned char a3)
+{
+ return (vector signed int) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+}
+
+inline vector unsigned int
+vec_perm (vector unsigned int a1, vector unsigned int a2, vector unsigned char a3)
+{
+ return (vector unsigned int) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+}
+
+inline vector signed short
+vec_perm (vector signed short a1, vector signed short a2, vector unsigned char a3)
+{
+ return (vector signed short) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+}
+
+inline vector unsigned short
+vec_perm (vector unsigned short a1, vector unsigned short a2, vector unsigned char a3)
+{
+ return (vector unsigned short) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+}
+
+inline vector signed char
+vec_perm (vector signed char a1, vector signed char a2, vector unsigned char a3)
+{
+ return (vector signed char) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+}
+
+inline vector unsigned char
+vec_perm (vector unsigned char a1, vector unsigned char a2, vector unsigned char a3)
+{
+ return (vector unsigned char) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+}
+
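+/* Note: each byte of the a3 control vector selects one byte from the
+   32-byte concatenation of a1 and a2; indices 0..15 pick from a1 and
+   16..31 from a2 (only the low five bits of each control byte are
+   used).  */
+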
+/* vec_re */
+
+inline vector float
+vec_re (vector float a1)
+{
+ return (vector float) __builtin_altivec_vrefp ((vector float) a1);
+}
+
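+/* Usage sketch (illustrative): vrefp is only a ~12-bit estimate; one
+   Newton-Raphson step with vec_madd/vec_nmsub (defined above) refines
+   it to near full single precision.  A hypothetical helper, where one
+   holds (1.0, 1.0, 1.0, 1.0):
+
+     vector float
+     recip_nr (vector float b, vector float one)
+     {
+       vector float y0 = vec_re (b);
+       return vec_madd (y0, vec_nmsub (y0, b, one), y0);
+     }  */
+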
+/* vec_rl */
+
+inline vector signed char
+vec_rl (vector signed char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_rl (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_rl (vector signed short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_rl (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed int
+vec_rl (vector signed int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_rl (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vrlw */
+
+inline vector signed int
+vec_vrlw (vector signed int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vrlw (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vrlh */
+
+inline vector signed short
+vec_vrlh (vector signed short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vrlh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vrlb */
+
+inline vector signed char
+vec_vrlb (vector signed char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vrlb (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_round */
+
+inline vector float
+vec_round (vector float a1)
+{
+ return (vector float) __builtin_altivec_vrfin ((vector float) a1);
+}
+
+/* vec_rsqrte */
+
+inline vector float
+vec_rsqrte (vector float a1)
+{
+ return (vector float) __builtin_altivec_vrsqrtefp ((vector float) a1);
+}
+
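+/* Note: vrsqrtefp is likewise a ~12-bit estimate; the usual refinement
+   is one Newton-Raphson step, y1 = y0 + 0.5*y0*(1 - x*y0*y0), built
+   from vec_madd and vec_nmsub as in the vec_re sketch above.  */
+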
+/* vec_sel */
+
+inline vector float
+vec_sel (vector float a1, vector float a2, vector signed int a3)
+{
+ return (vector float) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector float
+vec_sel (vector float a1, vector float a2, vector unsigned int a3)
+{
+ return (vector float) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector signed int
+vec_sel (vector signed int a1, vector signed int a2, vector signed int a3)
+{
+ return (vector signed int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector signed int
+vec_sel (vector signed int a1, vector signed int a2, vector unsigned int a3)
+{
+ return (vector signed int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector unsigned int
+vec_sel (vector unsigned int a1, vector unsigned int a2, vector signed int a3)
+{
+ return (vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector unsigned int
+vec_sel (vector unsigned int a1, vector unsigned int a2, vector unsigned int a3)
+{
+ return (vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector signed short
+vec_sel (vector signed short a1, vector signed short a2, vector signed short a3)
+{
+ return (vector signed short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector signed short
+vec_sel (vector signed short a1, vector signed short a2, vector unsigned short a3)
+{
+ return (vector signed short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector unsigned short
+vec_sel (vector unsigned short a1, vector unsigned short a2, vector signed short a3)
+{
+ return (vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector unsigned short
+vec_sel (vector unsigned short a1, vector unsigned short a2, vector unsigned short a3)
+{
+ return (vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector signed char
+vec_sel (vector signed char a1, vector signed char a2, vector signed char a3)
+{
+ return (vector signed char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector signed char
+vec_sel (vector signed char a1, vector signed char a2, vector unsigned char a3)
+{
+ return (vector signed char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector unsigned char
+vec_sel (vector unsigned char a1, vector unsigned char a2, vector signed char a3)
+{
+ return (vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
+inline vector unsigned char
+vec_sel (vector unsigned char a1, vector unsigned char a2, vector unsigned char a3)
+{
+ return (vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+}
+
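+/* Usage sketch (illustrative): vec_sel picks bits from a2 where the
+   corresponding bit of a3 is 1 and from a1 where it is 0, so a
+   comparison mask (all-ones or all-zeros per element) yields an
+   element-wise blend:
+
+     vector float blended = vec_sel (a, b, mask);
+
+   with hypothetical vector float a, b and vector unsigned int mask. */
+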
+/* vec_sl */
+
+inline vector signed char
+vec_sl (vector signed char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_sl (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_sl (vector signed short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_sl (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed int
+vec_sl (vector signed int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_sl (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2);
+}
+
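+/* Note: for vec_sl each element of a1 is shifted left by the value of
+   the corresponding element of a2 modulo the element width in bits
+   (8, 16 or 32), matching the vslb/vslh/vslw instructions.  */
+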
+/* vec_vslw */
+
+inline vector signed int
+vec_vslw (vector signed int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vslw (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vslh */
+
+inline vector signed short
+vec_vslh (vector signed short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vslh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vslb */
+
+inline vector signed char
+vec_vslb (vector signed char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vslb (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_sld */
+
+inline vector float
+vec_sld (vector float a1, vector float a2, const char a3)
+{
+ return (vector float) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+}
+
+inline vector signed int
+vec_sld (vector signed int a1, vector signed int a2, const char a3)
+{
+ return (vector signed int) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+}
+
+inline vector unsigned int
+vec_sld (vector unsigned int a1, vector unsigned int a2, const char a3)
+{
+ return (vector unsigned int) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+}
+
+inline vector signed short
+vec_sld (vector signed short a1, vector signed short a2, const char a3)
+{
+ return (vector signed short) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+}
+
+inline vector unsigned short
+vec_sld (vector unsigned short a1, vector unsigned short a2, const char a3)
+{
+ return (vector unsigned short) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+}
+
+inline vector signed char
+vec_sld (vector signed char a1, vector signed char a2, const char a3)
+{
+ return (vector signed char) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+}
+
+inline vector unsigned char
+vec_sld (vector unsigned char a1, vector unsigned char a2, const char a3)
+{
+ return (vector unsigned char) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+}
+
+/* vec_sll */
+
+inline vector signed int
+vec_sll (vector signed int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_sll (vector signed int a1, vector unsigned short a2)
+{
+ return (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_sll (vector signed int a1, vector unsigned char a2)
+{
+ return (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_sll (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_sll (vector unsigned int a1, vector unsigned short a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_sll (vector unsigned int a1, vector unsigned char a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_sll (vector signed short a1, vector unsigned int a2)
+{
+ return (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_sll (vector signed short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_sll (vector signed short a1, vector unsigned char a2)
+{
+ return (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_sll (vector unsigned short a1, vector unsigned int a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_sll (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_sll (vector unsigned short a1, vector unsigned char a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_sll (vector signed char a1, vector unsigned int a2)
+{
+ return (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_sll (vector signed char a1, vector unsigned short a2)
+{
+ return (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_sll (vector signed char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_sll (vector unsigned char a1, vector unsigned int a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_sll (vector unsigned char a1, vector unsigned short a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_sll (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_slo */
+
+inline vector float
+vec_slo (vector float a1, vector signed char a2)
+{
+ return (vector float) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_slo (vector float a1, vector unsigned char a2)
+{
+ return (vector float) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_slo (vector signed int a1, vector signed char a2)
+{
+ return (vector signed int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_slo (vector signed int a1, vector unsigned char a2)
+{
+ return (vector signed int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_slo (vector unsigned int a1, vector signed char a2)
+{
+ return (vector unsigned int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_slo (vector unsigned int a1, vector unsigned char a2)
+{
+ return (vector unsigned int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_slo (vector signed short a1, vector signed char a2)
+{
+ return (vector signed short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_slo (vector signed short a1, vector unsigned char a2)
+{
+ return (vector signed short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_slo (vector unsigned short a1, vector signed char a2)
+{
+ return (vector unsigned short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_slo (vector unsigned short a1, vector unsigned char a2)
+{
+ return (vector unsigned short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_slo (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_slo (vector signed char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_slo (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_slo (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+}
+
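+/* Editorial note, not in the original header: the vec_sll overloads
+   above shift the entire 128-bit register left by a bit count that the
+   hardware reads from the low-order three bits of the second operand
+   (every byte of it is expected to carry the same count), while the
+   vec_slo overloads shift left by whole octets taken from bits 121:124
+   of the second operand.  A hypothetical sketch:
+
+     vector unsigned char sh = vec_splat_u8 (3);
+     v = vec_sll (v, sh);      shifts the whole register left by 3 bits
+*/
+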
+/* vec_splat */
+
+inline vector signed char
+vec_splat (vector signed char a1, const char a2)
+{
+ return (vector signed char) __builtin_altivec_vspltb ((vector signed char) a1, a2);
+}
+
+inline vector unsigned char
+vec_splat (vector unsigned char a1, const char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vspltb ((vector signed char) a1, a2);
+}
+
+inline vector signed short
+vec_splat (vector signed short a1, const char a2)
+{
+ return (vector signed short) __builtin_altivec_vsplth ((vector signed short) a1, a2);
+}
+
+inline vector unsigned short
+vec_splat (vector unsigned short a1, const char a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsplth ((vector signed short) a1, a2);
+}
+
+inline vector float
+vec_splat (vector float a1, const char a2)
+{
+ return (vector float) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+}
+
+inline vector signed int
+vec_splat (vector signed int a1, const char a2)
+{
+ return (vector signed int) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+}
+
+inline vector unsigned int
+vec_splat (vector unsigned int a1, const char a2)
+{
+ return (vector unsigned int) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+}
+
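+/* Editorial note, not in the original header: vec_splat replicates the
+   element of a1 selected by the literal a2 into every element of the
+   result.  A hypothetical sketch, assuming this compiler's parenthesized
+   vector-constant syntax:
+
+     vector signed short t = (vector signed short) (0, 1, 2, 3, 4, 5, 6, 7);
+     vector signed short r = vec_splat (t, 2);     every element is now 2
+*/
+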
+/* vec_vspltw */
+
+inline vector float
+vec_vspltw (vector float a1, const char a2)
+{
+ return (vector float) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+}
+
+inline vector signed int
+vec_vspltw (vector signed int a1, const char a2)
+{
+ return (vector signed int) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+}
+
+inline vector unsigned int
+vec_vspltw (vector unsigned int a1, const char a2)
+{
+ return (vector unsigned int) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+}
+
+/* vec_vsplth */
+
+inline vector signed short
+vec_vsplth (vector signed short a1, const char a2)
+{
+ return (vector signed short) __builtin_altivec_vsplth ((vector signed short) a1, a2);
+}
+
+inline vector unsigned short
+vec_vsplth (vector unsigned short a1, const char a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsplth ((vector signed short) a1, a2);
+}
+
+/* vec_vspltb */
+
+inline vector signed char
+vec_vspltb (vector signed char a1, const char a2)
+{
+ return (vector signed char) __builtin_altivec_vspltb ((vector signed char) a1, a2);
+}
+
+inline vector unsigned char
+vec_vspltb (vector unsigned char a1, const char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vspltb ((vector signed char) a1, a2);
+}
+
+/* vec_splat_s8 */
+
+inline vector signed char
+vec_splat_s8 (const char a1)
+{
+ return (vector signed char) __builtin_altivec_vspltisb (a1);
+}
+
+/* vec_splat_s16 */
+
+inline vector signed short
+vec_splat_s16 (const char a1)
+{
+ return (vector signed short) __builtin_altivec_vspltish (a1);
+}
+
+/* vec_splat_s32 */
+
+inline vector signed int
+vec_splat_s32 (const char a1)
+{
+ return (vector signed int) __builtin_altivec_vspltisw (a1);
+}
+
+/* vec_splat_u8 */
+
+inline vector unsigned char
+vec_splat_u8 (const char a1)
+{
+ return (vector unsigned char) __builtin_altivec_vspltisb (a1);
+}
+
+/* vec_splat_u16 */
+
+inline vector unsigned short
+vec_splat_u16 (const char a1)
+{
+ return (vector unsigned short) __builtin_altivec_vspltish (a1);
+}
+
+/* vec_splat_u32 */
+
+inline vector unsigned int
+vec_splat_u32 (const char a1)
+{
+ return (vector unsigned int) __builtin_altivec_vspltisw (a1);
+}
+
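+/* Editorial note, not in the original header: the vspltis* instructions
+   behind these take a 5-bit signed immediate, so a1 must be a literal in
+   the range -16..15; the unsigned variants merely reinterpret the
+   sign-extended result, e.g. vec_splat_u8 (-1) yields all-0xFF bytes.  */
+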
+/* vec_sr */
+
+inline vector signed char
+vec_sr (vector signed char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_sr (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_sr (vector signed short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_sr (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed int
+vec_sr (vector signed int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_sr (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2);
+}
+
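+/* Editorial note, not in the original header: unlike vec_srl further
+   below, which shifts the whole register, vec_sr shifts each element
+   right logically by the corresponding element of a2, taken modulo the
+   element width in bits.  A hypothetical sketch:
+
+     vector unsigned int v = vec_splat_u32 (8);
+     v = vec_sr (v, vec_splat_u32 (2));      each element becomes 2
+*/
+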
+/* vec_vsrw */
+
+inline vector signed int
+vec_vsrw (vector signed int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vsrw (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vsrh */
+
+inline vector signed short
+vec_vsrh (vector signed short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vsrh (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vsrb */
+
+inline vector signed char
+vec_vsrb (vector signed char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vsrb (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_sra */
+
+inline vector signed char
+vec_sra (vector signed char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_sra (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_sra (vector signed short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_sra (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed int
+vec_sra (vector signed int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_sra (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2);
+}
+
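+/* Editorial note, not in the original header: vec_sra is the algebraic
+   shift, replicating each element's sign bit, where vec_sr above shifts
+   in zeros.  A hypothetical sketch:
+
+     vector signed int v = vec_splat_s32 (-16);
+     v = vec_sra (v, vec_splat_u32 (2));     each element becomes -4
+*/
+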
+/* vec_vsraw */
+
+inline vector signed int
+vec_vsraw (vector signed int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vsraw (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vsrah */
+
+inline vector signed short
+vec_vsrah (vector signed short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vsrah (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vsrab */
+
+inline vector signed char
+vec_vsrab (vector signed char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vsrab (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_srl */
+
+inline vector signed int
+vec_srl (vector signed int a1, vector unsigned int a2)
+{
+ return (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_srl (vector signed int a1, vector unsigned short a2)
+{
+ return (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_srl (vector signed int a1, vector unsigned char a2)
+{
+ return (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_srl (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_srl (vector unsigned int a1, vector unsigned short a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_srl (vector unsigned int a1, vector unsigned char a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_srl (vector signed short a1, vector unsigned int a2)
+{
+ return (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_srl (vector signed short a1, vector unsigned short a2)
+{
+ return (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_srl (vector signed short a1, vector unsigned char a2)
+{
+ return (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_srl (vector unsigned short a1, vector unsigned int a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_srl (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_srl (vector unsigned short a1, vector unsigned char a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_srl (vector signed char a1, vector unsigned int a2)
+{
+ return (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_srl (vector signed char a1, vector unsigned short a2)
+{
+ return (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_srl (vector signed char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_srl (vector unsigned char a1, vector unsigned int a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_srl (vector unsigned char a1, vector unsigned short a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_srl (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_sro */
+
+inline vector float
+vec_sro (vector float a1, vector signed char a2)
+{
+ return (vector float) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_sro (vector float a1, vector unsigned char a2)
+{
+ return (vector float) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_sro (vector signed int a1, vector signed char a2)
+{
+ return (vector signed int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_sro (vector signed int a1, vector unsigned char a2)
+{
+ return (vector signed int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_sro (vector unsigned int a1, vector signed char a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_sro (vector unsigned int a1, vector unsigned char a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_sro (vector signed short a1, vector signed char a2)
+{
+ return (vector signed short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_sro (vector signed short a1, vector unsigned char a2)
+{
+ return (vector signed short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_sro (vector unsigned short a1, vector signed char a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_sro (vector unsigned short a1, vector unsigned char a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_sro (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_sro (vector signed char a1, vector unsigned char a2)
+{
+ return (vector signed char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_sro (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_sro (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_st */
+
+inline void
+vec_st (vector float a1, int a2, void *a3)
+{
+ __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (vector signed int a1, int a2, void *a3)
+{
+ __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (vector unsigned int a1, int a2, void *a3)
+{
+ __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (vector signed short a1, int a2, void *a3)
+{
+ __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (vector unsigned short a1, int a2, void *a3)
+{
+ __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (vector signed char a1, int a2, void *a3)
+{
+ __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (vector unsigned char a1, int a2, void *a3)
+{
+ __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+}
+
+/* vec_ste */
+
+inline void
+vec_ste (vector signed char a1, int a2, void *a3)
+{
+ __builtin_altivec_stvebx ((vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (vector unsigned char a1, int a2, void *a3)
+{
+ __builtin_altivec_stvebx ((vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (vector signed short a1, int a2, void *a3)
+{
+ __builtin_altivec_stvehx ((vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (vector unsigned short a1, int a2, void *a3)
+{
+ __builtin_altivec_stvehx ((vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (vector float a1, int a2, void *a3)
+{
+ __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (vector signed int a1, int a2, void *a3)
+{
+ __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (vector unsigned int a1, int a2, void *a3)
+{
+ __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+}
+
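+/* Editorial note, not in the original header: vec_st expands to stvx,
+   which stores the full 16-byte vector and ignores the low four bits of
+   the effective address a2 + a3, so the destination should be 16-byte
+   aligned; vec_ste stores only the single element selected by the
+   effective address.  A hypothetical sketch, assuming an aligned buffer:
+
+     float buf[4] __attribute__ ((aligned (16)));
+     vec_st (v, 0, (void *) buf);
+*/
+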
+/* vec_stvewx */
+
+inline void
+vec_stvewx (vector float a1, int a2, void *a3)
+{
+ __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvewx (vector signed int a1, int a2, void *a3)
+{
+ __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvewx (vector unsigned int a1, int a2, void *a3)
+{
+ __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+}
+
+/* vec_stvehx */
+
+inline void
+vec_stvehx (vector signed short a1, int a2, void *a3)
+{
+ __builtin_altivec_stvehx ((vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvehx (vector unsigned short a1, int a2, void *a3)
+{
+ __builtin_altivec_stvehx ((vector signed short) a1, a2, (void *) a3);
+}
+
+/* vec_stvebx */
+
+inline void
+vec_stvebx (vector signed char a1, int a2, void *a3)
+{
+ __builtin_altivec_stvebx ((vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvebx (vector unsigned char a1, int a2, void *a3)
+{
+ __builtin_altivec_stvebx ((vector signed char) a1, a2, (void *) a3);
+}
+
+/* vec_stl */
+
+inline void
+vec_stl (vector float a1, int a2, void *a3)
+{
+ __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (vector signed int a1, int a2, void *a3)
+{
+ __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (vector unsigned int a1, int a2, void *a3)
+{
+ __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (vector signed short a1, int a2, void *a3)
+{
+ __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (vector unsigned short a1, int a2, void *a3)
+{
+ __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (vector signed char a1, int a2, void *a3)
+{
+ __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (vector unsigned char a1, int a2, void *a3)
+{
+ __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+}
+
+/* vec_sub */
+
+inline vector signed char
+vec_sub (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_sub (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_sub (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_sub (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed short
+vec_sub (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_sub (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_sub (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_sub (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed int
+vec_sub (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_sub (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_sub (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_sub (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_sub (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vsubfp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_vsubfp */
+
+inline vector float
+vec_vsubfp (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vsubfp ((vector float) a1, (vector float) a2);
+}
+
+/* vec_vsubuwm */
+
+inline vector signed int
+vec_vsubuwm (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vsubuwm (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vsubuwm (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vsubuwm (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vsubuhm */
+
+inline vector signed short
+vec_vsubuhm (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vsubuhm (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vsubuhm (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vsubuhm (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vsububm */
+
+inline vector signed char
+vec_vsububm (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vsububm (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vsububm (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vsububm (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_subc */
+
+inline vector unsigned int
+vec_subc (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubcuw ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_subs */
+
+inline vector unsigned char
+vec_subs (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_subs (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_subs (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector signed char
+vec_subs (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vsubsbs ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned short
+vec_subs (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_subs (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_subs (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector signed short
+vec_subs (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vsubshs ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned int
+vec_subs (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_subs (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_subs (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_subs (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vsubsws ((vector signed int) a1, (vector signed int) a2);
+}
+
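+/* Editorial note, not in the original header: vec_sub above wraps
+   modulo the element width, while vec_subs saturates, clamping signed
+   results to the type's extremes and unsigned results at zero; e.g.
+   subtracting 2 from 1 with unsigned char elements yields 0 in every
+   element rather than wrapping to 255.  */
+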
+/* vec_vsubsws */
+
+inline vector signed int
+vec_vsubsws (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vsubsws ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vsubuws */
+
+inline vector unsigned int
+vec_vsubuws (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vsubuws (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_vsubuws (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_vsubshs */
+
+inline vector signed short
+vec_vsubshs (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vsubshs ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vsubuhs */
+
+inline vector unsigned short
+vec_vsubuhs (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vsubuhs (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+}
+
+inline vector unsigned short
+vec_vsubuhs (vector unsigned short a1, vector unsigned short a2)
+{
+  return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+}
+
+/* vec_vsubsbs */
+
+inline vector signed char
+vec_vsubsbs (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vsubsbs ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_vsububs */
+
+inline vector unsigned char
+vec_vsububs (vector signed char a1, vector unsigned char a2)
+{
+  return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vsububs (vector unsigned char a1, vector signed char a2)
+{
+  return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+}
+
+inline vector unsigned char
+vec_vsububs (vector unsigned char a1, vector unsigned char a2)
+{
+  return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+}
+
+/* vec_sum4s */
+
+inline vector unsigned int
+vec_sum4s (vector unsigned char a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsum4ubs ((vector signed char) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_sum4s (vector signed char a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vsum4sbs ((vector signed char) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_sum4s (vector signed short a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vsum4shs ((vector signed short) a1, (vector signed int) a2);
+}
+
+/* vec_vsum4shs */
+
+inline vector signed int
+vec_vsum4shs (vector signed short a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vsum4shs ((vector signed short) a1, (vector signed int) a2);
+}
+
+/* vec_vsum4sbs */
+
+inline vector signed int
+vec_vsum4sbs (vector signed char a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vsum4sbs ((vector signed char) a1, (vector signed int) a2);
+}
+
+/* vec_vsum4ubs */
+
+inline vector unsigned int
+vec_vsum4ubs (vector unsigned char a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vsum4ubs ((vector signed char) a1, (vector signed int) a2);
+}
+
+/* vec_sum2s */
+
+inline vector signed int
+vec_sum2s (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vsum2sws ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_sums */
+
+inline vector signed int
+vec_sums (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vsumsws ((vector signed int) a1, (vector signed int) a2);
+}
+
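+/* Editorial note, not in the original header: these are the saturating
+   partial-sum operations.  vec_sum4s adds each aligned group of four
+   bytes (or pair of halfwords) of a1 to the corresponding word of a2,
+   vec_sum2s forms one partial sum per doubleword, and vec_sums adds all
+   four words of a1 to the last word of a2, leaving the total in element
+   3.  */
+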
+/* vec_trunc */
+
+inline vector float
+vec_trunc (vector float a1)
+{
+ return (vector float) __builtin_altivec_vrfiz ((vector float) a1);
+}
+
+/* vec_unpackh */
+
+inline vector signed short
+vec_unpackh (vector signed char a1)
+{
+ return (vector signed short) __builtin_altivec_vupkhsb ((vector signed char) a1);
+}
+
+inline vector signed int
+vec_unpackh (vector signed short a1)
+{
+ return (vector signed int) __builtin_altivec_vupkhsh ((vector signed short) a1);
+}
+
+/* vec_vupkhsh */
+
+inline vector signed int
+vec_vupkhsh (vector signed short a1)
+{
+ return (vector signed int) __builtin_altivec_vupkhsh ((vector signed short) a1);
+}
+
+/* vec_vupkhpx */
+
+inline vector unsigned int
+vec_vupkhpx (vector signed short a1)
+{
+ return (vector unsigned int) __builtin_altivec_vupkhpx ((vector signed short) a1);
+}
+
+/* vec_vupkhsb */
+
+inline vector signed short
+vec_vupkhsb (vector signed char a1)
+{
+ return (vector signed short) __builtin_altivec_vupkhsb ((vector signed char) a1);
+}
+
+/* vec_unpackl */
+
+inline vector signed short
+vec_unpackl (vector signed char a1)
+{
+ return (vector signed short) __builtin_altivec_vupklsb ((vector signed char) a1);
+}
+
+inline vector signed int
+vec_unpackl (vector signed short a1)
+{
+  return (vector signed int) __builtin_altivec_vupklsh ((vector signed short) a1);
+}
+
+/* vec_vupklpx */
+
+inline vector unsigned int
+vec_vupklpx (vector signed short a1)
+{
+  return (vector unsigned int) __builtin_altivec_vupklpx ((vector signed short) a1);
+}
+
+/* vec_vupklsh */
+
+inline vector signed int
+vec_vupklsh (vector signed short a1)
+{
+ return (vector signed int) __builtin_altivec_vupklsh ((vector signed short) a1);
+}
+
+/* vec_vupklsb */
+
+inline vector signed short
+vec_vupklsb (vector signed char a1)
+{
+ return (vector signed short) __builtin_altivec_vupklsb ((vector signed char) a1);
+}
+
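+/* Editorial note, not in the original header: "high" and "low" in the
+   unpack operations above refer to the big-endian halves of the source
+   vector, each element being sign-extended to twice its width; the
+   vupkhpx/vupklpx forms instead expand 1:5:5:5 pixel fields into 8-bit
+   channels.  */
+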
+/* vec_xor */
+
+inline vector float
+vec_xor (vector float a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_xor (vector float a1, vector signed int a2)
+{
+ return (vector float) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector float
+vec_xor (vector signed int a1, vector float a2)
+{
+ return (vector float) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed int
+vec_xor (vector signed int a1, vector signed int a2)
+{
+ return (vector signed int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_xor (vector signed int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_xor (vector unsigned int a1, vector signed int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned int
+vec_xor (vector unsigned int a1, vector unsigned int a2)
+{
+ return (vector unsigned int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed short
+vec_xor (vector signed short a1, vector signed short a2)
+{
+ return (vector signed short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_xor (vector signed short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_xor (vector unsigned short a1, vector signed short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned short
+vec_xor (vector unsigned short a1, vector unsigned short a2)
+{
+ return (vector unsigned short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector signed char
+vec_xor (vector signed char a1, vector signed char a2)
+{
+ return (vector signed char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_xor (vector signed char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_xor (vector unsigned char a1, vector signed char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+inline vector unsigned char
+vec_xor (vector unsigned char a1, vector unsigned char a2)
+{
+ return (vector unsigned char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2);
+}
+
+/* vec_all_eq */
+
+inline int
+vec_all_eq (vector signed char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_LT, a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_eq (vector signed char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_LT, a1, a2);
+}
+
+inline int
+vec_all_eq (vector unsigned char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_LT, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_eq (vector unsigned char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_LT, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_eq (vector signed short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_eq (vector signed short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_eq (vector unsigned short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_eq (vector unsigned short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_eq (vector signed int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_eq (vector signed int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_eq (vector unsigned int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_eq (vector unsigned int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_eq (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpeqfp_p (__CR6_LT, a1, a2);
+}
+
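+/* Editorial note, not in the original header: the *_p builtins execute
+   the recording form of the compare and test the CR6 field it sets.
+   Throughout these predicates, __CR6_LT means "every element compared
+   true", __CR6_EQ means "no element compared true", and the _REV codes
+   test the complementary bit, yielding the vec_any_* forms.  Thus
+   vec_all_ge (a1, a2) is computed as "no element satisfies a2 > a1".  */
+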
+/* vec_all_ge */
+
+inline int
+vec_all_ge (vector signed char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_all_ge (vector unsigned char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_all_ge (vector unsigned char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_all_ge (vector signed char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_all_ge (vector signed short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_all_ge (vector unsigned short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_all_ge (vector unsigned short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_all_ge (vector signed short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_all_ge (vector signed int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_all_ge (vector unsigned int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_all_ge (vector unsigned int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_all_ge (vector signed int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_all_ge (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgefp_p (__CR6_EQ, a1, a2);
+}
+
+/* vec_all_gt */
+
+inline int
+vec_all_gt (vector signed char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_gt (vector unsigned char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_gt (vector unsigned char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_gt (vector signed char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_gt (vector signed short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_gt (vector unsigned short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_gt (vector unsigned short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_gt (vector signed short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_gt (vector signed int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_gt (vector unsigned int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_gt (vector unsigned int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_gt (vector signed int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_gt (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgtfp_p (__CR6_LT, a1, a2);
+}
+
+/* vec_all_in */
+
+inline int
+vec_all_in (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpbfp_p (__CR6_EQ, a1, a2);
+}
+
+/* vec_all_le */
+
+inline int
+vec_all_le (vector signed char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_le (vector unsigned char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_le (vector unsigned char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_le (vector signed char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_le (vector signed short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_le (vector unsigned short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_le (vector unsigned short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_le (vector signed short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_le (vector signed int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_le (vector unsigned int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_le (vector unsigned int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_le (vector signed int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_le (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgefp_p (__CR6_LT, a2, a1);
+}
+
+/* vec_all_lt */
+
+inline int
+vec_all_lt (vector signed char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_all_lt (vector unsigned char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_all_lt (vector unsigned char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_all_lt (vector signed char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_all_lt (vector signed short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_all_lt (vector unsigned short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_all_lt (vector unsigned short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_all_lt (vector signed short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_all_lt (vector signed int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_all_lt (vector unsigned int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_all_lt (vector unsigned int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_all_lt (vector signed int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_all_lt (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgtfp_p (__CR6_LT, a2, a1);
+}
+
+/* vec_all_nan */
+
+inline int
+vec_all_nan (vector float a1)
+{
+ return __builtin_altivec_vcmpeqfp_p (__CR6_EQ, a1, a1);
+}
+
+/* vec_all_ne */
+
+inline int
+vec_all_ne (vector signed char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_ne (vector signed char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_ne (vector unsigned char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_ne (vector unsigned char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_all_ne (vector signed short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_ne (vector signed short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_ne (vector unsigned short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_ne (vector unsigned short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_all_ne (vector signed int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_ne (vector signed int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_ne (vector unsigned int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_ne (vector unsigned int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_all_ne (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpeqfp_p (__CR6_EQ, a1, a2);
+}
+
+/* vec_all_nge */
+
+inline int
+vec_all_nge (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgefp_p (__CR6_EQ, a1, a2);
+}
+
+/* vec_all_ngt */
+
+inline int
+vec_all_ngt (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgtfp_p (__CR6_EQ, a1, a2);
+}
+
+/* vec_all_nle */
+
+inline int
+vec_all_nle (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgefp_p (__CR6_EQ, a2, a1);
+}
+
+/* vec_all_nlt */
+
+inline int
+vec_all_nlt (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgtfp_p (__CR6_EQ, a2, a1);
+}
+
+/* vec_all_numeric */
+
+inline int
+vec_all_numeric (vector float a1)
+{
+  return __builtin_altivec_vcmpeqfp_p (__CR6_LT, a1, a1);
+}
+
+/* vec_any_eq */
+
+inline int
+vec_any_eq (vector signed char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_eq (vector signed char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_eq (vector unsigned char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_eq (vector unsigned char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_eq (vector signed short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_eq (vector signed short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_eq (vector unsigned short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_eq (vector unsigned short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_eq (vector signed int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_eq (vector signed int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_eq (vector unsigned int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_eq (vector unsigned int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_eq (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, a1, a2);
+}
+
+/* vec_any_ge */
+
+inline int
+vec_any_ge (vector signed char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_any_ge (vector unsigned char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_any_ge (vector unsigned char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_any_ge (vector signed char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_any_ge (vector signed short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_any_ge (vector unsigned short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_any_ge (vector unsigned short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_any_ge (vector signed short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_any_ge (vector signed int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_any_ge (vector unsigned int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_any_ge (vector unsigned int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_any_ge (vector signed int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_any_ge (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, a1, a2);
+}
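+
+/* Only the float comparison has a native greater-than-or-equal
+   instruction (vcmpgefp); the integer vec_any_ge variants are
+   synthesized as "not all a2 > a1", which is why the operands are
+   swapped and __CR6_LT_REV is used above.  */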
+
+/* vec_any_gt */
+
+inline int
+vec_any_gt (vector signed char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_gt (vector unsigned char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_gt (vector unsigned char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_gt (vector signed char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_gt (vector signed short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_gt (vector unsigned short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_gt (vector unsigned short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_gt (vector signed short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_gt (vector signed int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_gt (vector unsigned int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_gt (vector unsigned int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_gt (vector signed int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_gt (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, a1, a2);
+}
+
+/* vec_any_le */
+
+inline int
+vec_any_le (vector signed char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_le (vector unsigned char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_le (vector unsigned char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_le (vector signed char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_le (vector signed short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_le (vector unsigned short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_le (vector unsigned short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_le (vector signed short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_le (vector signed int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_le (vector unsigned int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_le (vector unsigned int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_le (vector signed int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_le (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a2, a1);
+}
+
+/* vec_any_lt */
+
+inline int
+vec_any_lt (vector signed char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_any_lt (vector unsigned char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_any_lt (vector unsigned char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_any_lt (vector signed char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (vector signed char) a2, (vector signed char) a1);
+}
+
+inline int
+vec_any_lt (vector signed short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_any_lt (vector unsigned short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_any_lt (vector unsigned short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_any_lt (vector signed short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (vector signed short) a2, (vector signed short) a1);
+}
+
+inline int
+vec_any_lt (vector signed int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_any_lt (vector unsigned int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_any_lt (vector unsigned int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_any_lt (vector signed int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (vector signed int) a2, (vector signed int) a1);
+}
+
+inline int
+vec_any_lt (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, a2, a1);
+}
+
+/* vec_any_nan */
+
+inline int
+vec_any_nan (vector float a1)
+{
+ return __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, a1, a1);
+}
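+
+/* This exploits NaN != NaN: comparing a1 with itself and testing
+   "not all equal" (__CR6_LT_REV) is nonzero exactly when some
+   element is a NaN.  Illustrative sketch (sanitize is a hypothetical
+   user helper):
+
+     if (vec_any_nan (v))
+       v = sanitize (v);
+*/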
+
+/* vec_any_ne */
+
+inline int
+vec_any_ne (vector signed char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_ne (vector signed char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_ne (vector unsigned char a1, vector signed char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_ne (vector unsigned char a1, vector unsigned char a2)
+{
+ return __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) a1, (vector signed char) a2);
+}
+
+inline int
+vec_any_ne (vector signed short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_ne (vector signed short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_ne (vector unsigned short a1, vector signed short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_ne (vector unsigned short a1, vector unsigned short a2)
+{
+ return __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) a1, (vector signed short) a2);
+}
+
+inline int
+vec_any_ne (vector signed int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_ne (vector signed int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_ne (vector unsigned int a1, vector signed int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_ne (vector unsigned int a1, vector unsigned int a2)
+{
+ return __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) a1, (vector signed int) a2);
+}
+
+inline int
+vec_any_ne (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, a1, a2);
+}
+
+/* vec_any_nge */
+
+inline int
+vec_any_nge (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a1, a2);
+}
+
+/* vec_any_ngt */
+
+inline int
+vec_any_ngt (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, a1, a2);
+}
+
+/* vec_any_nle */
+
+inline int
+vec_any_nle (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a2, a1);
+}
+
+/* vec_any_nlt */
+
+inline int
+vec_any_nlt (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, a2, a1);
+}
+
+/* vec_any_numeric */
+
+inline int
+vec_any_numeric (vector float a1)
+{
+ return __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, a1, a1);
+}
+
+/* vec_any_out */
+
+inline int
+vec_any_out (vector float a1, vector float a2)
+{
+ return __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, a1, a2);
+}
+
+/* vec_step */
+
+template<typename _Tp>
+struct __vec_step_help
+{
+ // All proper vector types will specialize _S_elem.
+};
+
+template<>
+struct __vec_step_help<vector signed short>
+{
+ static const int _S_elem = 8;
+};
+
+template<>
+struct __vec_step_help<vector unsigned short>
+{
+ static const int _S_elem = 8;
+};
+
+template<>
+struct __vec_step_help<vector signed int>
+{
+ static const int _S_elem = 4;
+};
+
+template<>
+struct __vec_step_help<vector unsigned int>
+{
+ static const int _S_elem = 4;
+};
+
+template<>
+struct __vec_step_help<vector unsigned char>
+{
+ static const int _S_elem = 16;
+};
+
+template<>
+struct __vec_step_help<vector signed char>
+{
+ static const int _S_elem = 16;
+};
+
+template<>
+struct __vec_step_help<vector float>
+{
+ static const int _S_elem = 4;
+};
+
+#define vec_step(t) __vec_step_help<t>::_S_elem
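+
+/* vec_step thus reduces at compile time to the element count of the
+   given vector type (16 bytes divided by the element size).
+   Illustrative sketch only (src, n and process are hypothetical):
+
+     int step = vec_step (vector float);
+     for (i = 0; i < n; i += step)
+       process (src + i);
+*/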
#else /* not C++ */
-/* Hairy macros that implement the AltiVec high-level programming
- interface for C. */
+/* "... and so I think no man in a century will suffer as greatly as
+ you will." */
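+
+/* Each macro below is a chain of __ch selectors: every case pairs a
+   compile-time type test (__un_args_eq and __bin_args_eq wrap
+   __builtin_types_compatible_p) with the expression to use when it
+   matches, so the chain behaves like overload resolution.  A call
+   whose argument types match no case falls through to the undefined
+   __altivec_link_error_invalid_argument, turning the mistake into a
+   link-time error.  */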
+
+#define vec_abs(a) \
+ __ch (__un_args_eq (vector signed char, (a)), \
+ ((vector signed char) __builtin_altivec_abs_v16qi ((vector signed char) (a))), \
+ __ch (__un_args_eq (vector signed short, (a)), \
+ ((vector signed short) __builtin_altivec_abs_v8hi ((vector signed short) (a))), \
+ __ch (__un_args_eq (vector signed int, (a)), \
+ ((vector signed int) __builtin_altivec_abs_v4si ((vector signed int) (a))), \
+ __ch (__un_args_eq (vector float, (a)), \
+ ((vector float) __builtin_altivec_abs_v4sf ((vector float) (a))), \
+ __altivec_link_error_invalid_argument ()))))
+
+#define vec_abss(a) \
+ __ch (__un_args_eq (vector signed char, (a)), \
+ ((vector signed char) __builtin_altivec_abss_v16qi ((vector signed char) (a))), \
+ __ch (__un_args_eq (vector signed short, (a)), \
+ ((vector signed short) __builtin_altivec_abss_v8hi ((vector signed short) (a))), \
+ __ch (__un_args_eq (vector signed int, (a)), \
+ ((vector signed int) __builtin_altivec_abss_v4si ((vector signed int) (a))), \
+ __altivec_link_error_invalid_argument ())))
+
+#define vec_step(t) \
+ __ch (__builtin_types_compatible_p (t, vector signed int), 4, \
+ __ch (__builtin_types_compatible_p (t, vector unsigned int), 4, \
+ __ch (__builtin_types_compatible_p (t, vector signed short), 8, \
+ __ch (__builtin_types_compatible_p (t, vector unsigned short), 8, \
+ __ch (__builtin_types_compatible_p (t, vector signed char), 16, \
+ __ch (__builtin_types_compatible_p (t, vector unsigned char), 16, \
+ __ch (__builtin_types_compatible_p (t, vector float), 4, \
+ __altivec_link_error_invalid_argument ())))))))
+
+#define vec_vaddubm(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
+ __altivec_link_error_invalid_argument ()))))
+
+#define vec_vadduhm(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
+ __altivec_link_error_invalid_argument ()))))
+
+#define vec_vadduwm(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
+ __altivec_link_error_invalid_argument ()))))
+
+#define vec_vaddfp(a1, a2) \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vaddfp ((vector float) (a1), (vector float) (a2))), \
+ __altivec_link_error_invalid_argument ())
#define vec_add(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector float) __builtin_altivec_vaddfp ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vaddfp ((vector float) (a1), (vector float) (a2))), \
__altivec_link_error_invalid_argument ())))))))))))))
-#define vec_addc(a1, a2) __builtin_altivec_vaddcuw (a1, a2)
+#define vec_addc(a1, a2) \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vaddcuw ((vector signed int) (a1), (vector signed int) (a2))), \
+ __altivec_link_error_invalid_argument ())
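+
+/* vec_addc produces the per-element carry-out (0 or 1) of the
+   unsigned 32-bit additions; together with vec_add it is the
+   building block for multi-word vector arithmetic.  Illustrative
+   sketch:
+
+     vector unsigned int sum   = vec_add  (a, b);
+     vector unsigned int carry = vec_addc (a, b);
+*/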
#define vec_adds(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vaddsbs ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vaddshs ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vaddsws ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vaddsbs ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vaddshs ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vadduws ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vadduws ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vadduws ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vaddsws ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))))))))))
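+
+/* vec_adds saturates instead of wrapping: the signed forms clamp to
+   the type's minimum and maximum and the unsigned forms to
+   0..maximum, which is why the chain above selects
+   vaddsbs/vaddshs/vaddsws or vaddubs/vadduhs/vadduws according to
+   the argument types.  */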
+#define vec_vaddsws(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vaddsws ((vector signed int) (a1), (vector signed int) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vadduws(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vadduws ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vadduws ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vadduws ((vector signed int) (a1), (vector signed int) (a2))), \
+ __altivec_link_error_invalid_argument ())))
+
+#define vec_vaddshs(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vaddshs ((vector signed short) (a1), (vector signed short) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vadduhs(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) (a1), (vector signed short) (a2))), \
+ __altivec_link_error_invalid_argument ())))
+
+#define vec_vaddsbs(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vaddsbs ((vector signed char) (a1), (vector signed char) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vaddubs(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) (a1), (vector signed char) (a2))), \
+ __altivec_link_error_invalid_argument ())))
+
#define vec_and(a1, a2) \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector signed int, a2), \
- (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector float, a2), \
- (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector signed int, (a2)), \
+ ((vector float) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vand ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ())))))))))))))))
#define vec_andc(a1, a2) \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector signed int, a2), \
- (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector float, a2), \
- (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
- __altivec_link_error_invalid_argument ())))))))))))))))
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+  ((vector float) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector signed int, (a2)), \
+  ((vector float) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector float, (a2)), \
+  ((vector float) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector unsigned int, (a2)), \
+  ((vector float) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector float, (a2)), \
+  ((vector float) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+  ((vector signed int) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+  ((vector unsigned int) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+  ((vector unsigned int) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+  ((vector unsigned int) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+  ((vector signed short) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+  ((vector unsigned short) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+  ((vector unsigned short) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+  ((vector unsigned short) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+  ((vector signed char) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+  ((vector unsigned char) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+  ((vector unsigned char) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+  ((vector unsigned char) __builtin_altivec_vandc ((vector signed int) (a1), (vector signed int) (a2))), \
+  __altivec_link_error_invalid_argument ())))))))))))))))))
#define vec_avg(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vavgub ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vavgsb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vavguh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vavgsh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vavguw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vavgsw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vavgub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vavgsb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vavguh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vavgsh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vavguw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vavgsw ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))))
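+
+/* vec_avg is a rounded average computed in extended precision,
+   (a + b + 1) >> 1 per element, so the intermediate sum cannot
+   overflow.  Illustrative sketch (row0 and row1 are hypothetical):
+
+     vector unsigned char blended = vec_avg (row0, row1);
+*/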
-#define vec_ceil(a1) __builtin_altivec_vrfip (a1)
+#define vec_vavgsw(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vavgsw ((vector signed int) (a1), (vector signed int) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vavguw(a1, a2) \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vavguw ((vector signed int) (a1), (vector signed int) (a2))), \
+ __altivec_link_error_invalid_argument ())
-#define vec_cmpb(a1, a2) __builtin_altivec_vcmpbfp (a1, a2)
+
+#define vec_vavgsh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vavgsh ((vector signed short) (a1), (vector signed short) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vavguh(a1, a2) \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vavguh ((vector signed short) (a1), (vector signed short) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vavgsb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vavgsb ((vector signed char) (a1), (vector signed char) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vavgub(a1, a2) \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vavgub ((vector signed char) (a1), (vector signed char) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_ceil(a1) __builtin_altivec_vrfip ((a1))
+
+#define vec_cmpb(a1, a2) __builtin_altivec_vcmpbfp ((a1), (a2))
#define vec_cmpeq(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpeqfp ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vcmpequb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vcmpequb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vcmpequh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vcmpequh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vcmpequw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vcmpequw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector signed int) __builtin_altivec_vcmpeqfp ((vector float) (a1), (vector float) (a2))), \
__altivec_link_error_invalid_argument ())))))))
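+
+/* The vec_cmp* macros yield element masks (all 1s where the
+   comparison holds, all 0s elsewhere) rather than an int, which is
+   why every case above produces a vector type; such masks are
+   typically consumed by vec_sel.  Illustrative sketch of a
+   per-element maximum:
+
+     mask = vec_cmpgt (a, b);
+     max  = vec_sel (b, a, mask);
+*/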
-#define vec_cmpge(a1, a2) __builtin_altivec_vcmpgefp (a1, a2)
+#define vec_vcmpeqfp(a1, a2) \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector signed int) __builtin_altivec_vcmpeqfp ((vector float) (a1), (vector float) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vcmpequw(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vcmpequw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vcmpequw ((vector signed int) (a1), (vector signed int) (a2))), \
+ __altivec_link_error_invalid_argument ()))
+
+#define vec_vcmpequh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vcmpequh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vcmpequh ((vector signed short) (a1), (vector signed short) (a2))), \
+ __altivec_link_error_invalid_argument ()))
+
+#define vec_vcmpequb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vcmpequb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vcmpequb ((vector signed char) (a1), (vector signed char) (a2))), \
+ __altivec_link_error_invalid_argument ()))
+
+#define vec_cmpge(a1, a2) (vector signed int) __builtin_altivec_vcmpgefp ((a1), (a2))
#define vec_cmpgt(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector signed int) __builtin_altivec_vcmpgtfp ((vector float) (a1), (vector float) (a2))), \
__altivec_link_error_invalid_argument ())))))))
-#define vec_cmple(a1, a2) __builtin_altivec_vcmpgefp (a1, a2)
+#define vec_vcmpgtfp(a1, a2) \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector signed int) __builtin_altivec_vcmpgtfp ((vector float) (a1), (vector float) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vcmpgtsw(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) (a1), (vector signed int) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vcmpgtuw(a1, a2) \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) (a1), (vector signed int) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vcmpgtsh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) (a1), (vector signed short) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vcmpgtuh(a1, a2) \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) (a1), (vector signed short) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vcmpgtsb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) (a1), (vector signed char) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vcmpgtub(a1, a2) \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) (a1), (vector signed char) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_cmple(a1, a2) (vector signed int) __builtin_altivec_vcmpgefp ((a2), (a1))
#define vec_cmplt(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+  ((vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) (a2), (vector signed char) (a1))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+  ((vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) (a2), (vector signed char) (a1))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+  ((vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) (a2), (vector signed short) (a1))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+  ((vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) (a2), (vector signed short) (a1))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+  ((vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) (a2), (vector signed int) (a1))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+  ((vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) (a2), (vector signed int) (a1))), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+  ((vector signed int) __builtin_altivec_vcmpgtfp ((vector float) (a2), (vector float) (a1))), \
__altivec_link_error_invalid_argument ())))))))
#define vec_ctf(a1, a2) \
-__ch (__bin_args_eq (vector unsigned int, a1, const char, a2), \
- (vector float) __builtin_altivec_vcfux ((vector signed int) a1, (const char) a2), \
-__ch (__bin_args_eq (vector signed int, a1, const char, a2), \
- (vector float) __builtin_altivec_vcfsx ((vector signed int) a1, (const char) a2), \
+__ch (__bin_args_eq (vector unsigned int, (a1), int, (a2)), \
+ ((vector float) __builtin_altivec_vcfux ((vector signed int) (a1), (const char) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), int, (a2)), \
+ ((vector float) __builtin_altivec_vcfsx ((vector signed int) (a1), (const char) (a2))), \
__altivec_link_error_invalid_argument ()))
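+
+/* Usage sketch (illustrative): the second operand of vec_ctf is a
+   5-bit literal scale; each element is converted to float and then
+   divided by 2**scale, so
+
+     vector signed int fx;
+     vector float f = vec_ctf (fx, 16);
+
+   reinterprets fx as 16.16 fixed point.  */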
-#define vec_cts(a1, a2) __builtin_altivec_vctsxs (a1, a2)
+#define vec_vcfsx(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), int, (a2)), \
+ ((vector float) __builtin_altivec_vcfsx ((vector signed int) (a1), (const char) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_vcfux(a1, a2) \
+__ch (__bin_args_eq (vector unsigned int, (a1), int, (a2)), \
+ ((vector float) __builtin_altivec_vcfux ((vector signed int) (a1), (const char) (a2))), \
+ __altivec_link_error_invalid_argument ())
+
+#define vec_cts(a1, a2) __builtin_altivec_vctsxs ((a1), (a2))
-#define vec_ctu(a1, a2) __builtin_altivec_vctuxs (a1, a2)
+#define vec_ctu(a1, a2) (vector unsigned int) __builtin_altivec_vctuxs ((a1), (a2))
-#define vec_dss(a1) __builtin_altivec_dss (a1)
+#define vec_dss(a1) __builtin_altivec_dss ((a1))
#define vec_dssall() __builtin_altivec_dssall ()
-#define vec_dst(a1, a2, a3) __builtin_altivec_dst (a1, a2, a3)
+#define vec_dst(a1, a2, a3) __builtin_altivec_dst ((a1), (a2), (a3))
-#define vec_dstst(a1, a2, a3) __builtin_altivec_dstst (a1, a2, a3)
+#define vec_dstst(a1, a2, a3) __builtin_altivec_dstst ((a1), (a2), (a3))
-#define vec_dststt(a1, a2, a3) __builtin_altivec_dststt (a1, a2, a3)
+#define vec_dststt(a1, a2, a3) __builtin_altivec_dststt ((a1), (a2), (a3))
-#define vec_dstt(a1, a2, a3) __builtin_altivec_dstt (a1, a2, a3)
+#define vec_dstt(a1, a2, a3) __builtin_altivec_dstt ((a1), (a2), (a3))
-#define vec_expte(a1) __builtin_altivec_vexptefp (a1)
+#define vec_expte(a1) __builtin_altivec_vexptefp ((a1))
#define vec_floor(a1) __builtin_altivec_vrfim (a1)
#define vec_ld(a, b) \
-__ch (__un_args_eq (vector unsigned char *, b), \
- (vector unsigned char) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (unsigned char *, b), \
- (vector unsigned char) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (vector signed char *, b), \
- (vector signed char) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (signed char *, b), \
- (vector signed char) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (vector unsigned short *, b), \
- (vector unsigned short) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (unsigned short *, b), \
- (vector unsigned short) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (vector signed short *, b), \
- (vector signed short) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (signed short *, b), \
- (vector signed short) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (vector unsigned int *, b), \
- (vector unsigned int) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (unsigned int *, b), \
- (vector unsigned int) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (vector signed int *, b), \
- (vector signed int) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (signed int *, b), \
- (vector signed int) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (vector float *, b), \
- (vector float) __builtin_altivec_lvx (a, b), \
-__ch (__un_args_eq (float *, b), \
- (vector float) __builtin_altivec_lvx (a, b), \
-__altivec_link_error_invalid_argument ()))))))))))))))
+__ch (__un_args_eq (vector unsigned char *, (b)), \
+ ((vector unsigned char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector unsigned char [], (b)), \
+ ((vector unsigned char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (unsigned char *, (b)), \
+ ((vector unsigned char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (unsigned char [], (b)), \
+ ((vector unsigned char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector signed char *, (b)), \
+ ((vector signed char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector signed char [], (b)), \
+ ((vector signed char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (signed char *, (b)), \
+ ((vector signed char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (signed char [], (b)), \
+ ((vector signed char) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector unsigned short *, (b)), \
+ ((vector unsigned short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector unsigned short [], (b)), \
+ ((vector unsigned short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (unsigned short *, (b)), \
+ ((vector unsigned short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (unsigned short [], (b)), \
+ ((vector unsigned short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector signed short *, (b)), \
+ ((vector signed short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector signed short [], (b)), \
+ ((vector signed short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (signed short *, (b)), \
+ ((vector signed short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (signed short [], (b)), \
+ ((vector signed short) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector unsigned int *, (b)), \
+ ((vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector unsigned int [], (b)), \
+ ((vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (unsigned int *, (b)), \
+ ((vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (unsigned int [], (b)), \
+ ((vector unsigned int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector signed int *, (b)), \
+ ((vector signed int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector signed int [], (b)), \
+ ((vector signed int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (signed int *, (b)), \
+ ((vector signed int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (signed int [], (b)), \
+ ((vector signed int) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector float *, (b)), \
+ ((vector float) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (vector float [], (b)), \
+ ((vector float) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (float *, (b)), \
+ ((vector float) __builtin_altivec_lvx ((a), (b))), \
+__ch (__un_args_eq (float [], (b)), \
+ ((vector float) __builtin_altivec_lvx ((a), (b))), \
+__altivec_link_error_invalid_argument ()))))))))))))))))))))))))))))
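+
+/* Usage sketch (illustrative): lvx ignores the low four bits of the
+   effective address, so vec_ld can only load whole, 16-byte-aligned
+   quadwords:
+
+     float buf[64] __attribute__ ((aligned (16)));
+     vector float v = vec_ld (16, buf);
+
+   loads the second quadword of buf (elements 4..7).  */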
#define vec_lde(a, b) \
-__ch (__un_args_eq (unsigned char *, b), \
- (vector unsigned char) __builtin_altivec_lvebx (a, b), \
-__ch (__un_args_eq (signed char *, b), \
- (vector signed char) __builtin_altivec_lvebx (a, b), \
-__ch (__un_args_eq (unsigned short *, b), \
- (vector unsigned short) __builtin_altivec_lvehx (a, b), \
-__ch (__un_args_eq (signed short *, b), \
- (vector signed short) __builtin_altivec_lvehx (a, b), \
-__ch (__un_args_eq (unsigned int *, b), \
- (vector unsigned int) __builtin_altivec_lvewx (a, b), \
-__ch (__un_args_eq (signed int *, b), \
- (vector signed int) __builtin_altivec_lvewx (a, b), \
-__altivec_link_error_invalid_argument ()))))))
-
-#define vec_ldl(a, b) \
-__ch (__un_args_eq (vector unsigned char *, b), \
- (vector unsigned char) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (unsigned char *, b), \
- (vector unsigned char) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (vector signed char *, b), \
- (vector signed char) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (signed char *, b), \
- (vector signed char) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (vector unsigned short *, b), \
- (vector unsigned short) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (unsigned short *, b), \
- (vector unsigned short) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (vector signed short *, b), \
- (vector signed short) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (signed short *, b), \
- (vector signed short) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (vector unsigned int *, b), \
- (vector unsigned int) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (unsigned int *, b), \
- (vector unsigned int) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (vector signed int *, b), \
- (vector signed int) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (signed int *, b), \
- (vector signed int) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (vector float *, b), \
- (vector float) __builtin_altivec_lvxl (a, b), \
-__ch (__un_args_eq (float *, b), \
- (vector float) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (unsigned char *, (b)), \
+ ((vector unsigned char) __builtin_altivec_lvebx ((a), (b))), \
+__ch (__un_args_eq (unsigned char [], (b)), \
+ ((vector unsigned char) __builtin_altivec_lvebx ((a), (b))), \
+__ch (__un_args_eq (signed char *, (b)), \
+ ((vector signed char) __builtin_altivec_lvebx ((a), (b))), \
+__ch (__un_args_eq (signed char [], (b)), \
+ ((vector signed char) __builtin_altivec_lvebx ((a), (b))), \
+__ch (__un_args_eq (unsigned short *, (b)), \
+ ((vector unsigned short) __builtin_altivec_lvehx ((a), (b))), \
+__ch (__un_args_eq (unsigned short [], (b)), \
+ ((vector unsigned short) __builtin_altivec_lvehx ((a), (b))), \
+__ch (__un_args_eq (signed short *, (b)), \
+ ((vector signed short) __builtin_altivec_lvehx ((a), (b))), \
+__ch (__un_args_eq (signed short [], (b)), \
+ ((vector signed short) __builtin_altivec_lvehx ((a), (b))), \
+__ch (__un_args_eq (unsigned int *, (b)), \
+ ((vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (unsigned int [], (b)), \
+ ((vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (signed int *, (b)), \
+ ((vector signed int) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (signed int [], (b)), \
+ ((vector signed int) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (float *, (b)), \
+ ((vector float) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (float [], (b)), \
+ ((vector float) __builtin_altivec_lvewx ((a), (b))), \
__altivec_link_error_invalid_argument ()))))))))))))))
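+
+/* Usage sketch (illustrative): vec_lde loads one element into the lane
+   its address occupies within the enclosing quadword, leaving the
+   other lanes undefined:
+
+     unsigned short s[8] __attribute__ ((aligned (16)));
+     vector unsigned short v = vec_lde (6, s);
+
+   places s[3] in lane 3 via lvehx.  */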
-#define vec_loge(a1) __builtin_altivec_vlogefp (a1)
-
-#define vec_lvsl(a1, a2) __builtin_altivec_lvsl (a1, a2)
-
-#define vec_lvsr(a1, a2) __builtin_altivec_lvsr (a1, a2)
+#define vec_lvewx(a, b) \
+__ch (__un_args_eq (unsigned int *, (b)), \
+ ((vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (unsigned int [], (b)), \
+ ((vector unsigned int) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (signed int *, (b)), \
+ ((vector signed int) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (signed int [], (b)), \
+ ((vector signed int) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (float *, (b)), \
+ ((vector float) __builtin_altivec_lvewx ((a), (b))), \
+__ch (__un_args_eq (float [], (b)), \
+ ((vector float) __builtin_altivec_lvewx ((a), (b))), \
+__altivec_link_error_invalid_argument ()))))))
-#define vec_madd(a1, a2, a3) __builtin_altivec_vmaddfp (a1, a2, a3)
+#define vec_lvehx(a, b) \
+__ch (__un_args_eq (unsigned short *, (b)), \
+ ((vector unsigned short) __builtin_altivec_lvehx ((a), (b))), \
+__ch (__un_args_eq (unsigned short [], (b)), \
+ ((vector unsigned short) __builtin_altivec_lvehx ((a), (b))), \
+__ch (__un_args_eq (signed short *, (b)), \
+ ((vector signed short) __builtin_altivec_lvehx ((a), (b))), \
+__ch (__un_args_eq (signed short [], (b)), \
+ ((vector signed short) __builtin_altivec_lvehx ((a), (b))), \
+__altivec_link_error_invalid_argument ()))))
+
+#define vec_lvebx(a, b) \
+__ch (__un_args_eq (unsigned char *, (b)), \
+ ((vector unsigned char) __builtin_altivec_lvebx ((a), (b))), \
+__ch (__un_args_eq (unsigned char [], (b)), \
+ ((vector unsigned char) __builtin_altivec_lvebx ((a), (b))), \
+__ch (__un_args_eq (signed char *, (b)), \
+ ((vector signed char) __builtin_altivec_lvebx ((a), (b))), \
+__ch (__un_args_eq (signed char [], (b)), \
+ ((vector signed char) __builtin_altivec_lvebx ((a), (b))), \
+__altivec_link_error_invalid_argument ()))))
-#define vec_madds(a1, a2, a3) __builtin_altivec_vmhaddshs (a1, a2, a3)
+#define vec_ldl(a, b) \
+__ch (__un_args_eq (vector unsigned char *, (b)), \
+ ((vector unsigned char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector unsigned char [], (b)), \
+ ((vector unsigned char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (unsigned char *, (b)), \
+ ((vector unsigned char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (unsigned char [], (b)), \
+ ((vector unsigned char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector signed char *, (b)), \
+ ((vector signed char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector signed char [], (b)), \
+ ((vector signed char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (signed char *, (b)), \
+ ((vector signed char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (signed char [], (b)), \
+ ((vector signed char) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector unsigned short *, (b)), \
+ ((vector unsigned short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector unsigned short [], (b)), \
+ ((vector unsigned short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (unsigned short *, (b)), \
+ ((vector unsigned short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (unsigned short [], (b)), \
+ ((vector unsigned short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector signed short *, (b)), \
+ ((vector signed short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector signed short [], (b)), \
+ ((vector signed short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (signed short *, (b)), \
+ ((vector signed short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (signed short [], (b)), \
+ ((vector signed short) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector unsigned int *, (b)), \
+ ((vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector unsigned int [], (b)), \
+ ((vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (unsigned int *, (b)), \
+ ((vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (unsigned int [], (b)), \
+ ((vector unsigned int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector signed int *, (b)), \
+ ((vector signed int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector signed int [], (b)), \
+ ((vector signed int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (signed int *, (b)), \
+ ((vector signed int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (signed int [], (b)), \
+ ((vector signed int) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector float *, (b)), \
+ ((vector float) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (vector float [], (b)), \
+ ((vector float) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (float *, (b)), \
+ ((vector float) __builtin_altivec_lvxl ((a), (b))), \
+__ch (__un_args_eq (float [], (b)), \
+ ((vector float) __builtin_altivec_lvxl ((a), (b))), \
+__altivec_link_error_invalid_argument ()))))))))))))))))))))))))))))
+
+#define vec_loge(a1) __builtin_altivec_vlogefp ((a1))
+
+#define vec_lvsl(a1, a2) ((vector unsigned char) __builtin_altivec_lvsl ((a1), (a2)))
+
+#define vec_lvsr(a1, a2) ((vector unsigned char) __builtin_altivec_lvsr ((a1), (a2)))
+
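+/* Usage sketch (illustrative): lvsl/lvsr produce the permute control
+   vector for the classic misaligned-load idiom; for a possibly
+   unaligned float *p,
+
+     vector unsigned char perm = vec_lvsl (0, p);
+     vector float lo = vec_ld (0, p);
+     vector float hi = vec_ld (15, p);
+     vector float v  = vec_perm (lo, hi, perm);
+
+   assembles the 16 bytes starting at p.  */
+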
+#define vec_madd(a1, a2, a3) (__builtin_altivec_vmaddfp ((a1), (a2), (a3)))
+
+#define vec_madds(a1, a2, a3) __builtin_altivec_vmhaddshs ((a1), (a2), (a3))
#define vec_max(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vmaxsb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vmaxsh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vmaxsw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector float) __builtin_altivec_vmaxfp ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vmaxsb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vmaxsh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vmaxsw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vmaxfp ((vector float) (a1), (vector float) (a2))), \
__altivec_link_error_invalid_argument ())))))))))))))
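+
+/* Illustrative note: in the mixed-signedness cases above the compare
+   is unsigned and so is the result, e.g.
+
+     vector signed char a;
+     vector unsigned char b;
+     vector unsigned char m = vec_max (a, b);
+
+   uses vmaxub whichever operand carries the sign.  */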
+#define vec_vmaxfp(a1, a2) \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vmaxfp ((vector float) (a1), (vector float) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmaxsw(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vmaxsw ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmaxuw(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())))
+
+#define vec_vmaxsh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vmaxsh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmaxuh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())))
+
+#define vec_vmaxsb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vmaxsb ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmaxub(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ())))
+
#define vec_mergeh(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector float) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vmrghb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vmrghb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vmrghh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmrghh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ())))))))
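+
+/* Usage sketch (illustrative): vec_mergeh interleaves the high halves
+   of its operands, vec_mergel (below) the low halves; for halfword
+   vectors a = {a0,...,a7} and b = {b0,...,b7},
+
+     vec_mergeh (a, b) is {a0,b0,a1,b1,a2,b2,a3,b3}
+     vec_mergel (a, b) is {a4,b4,a5,b5,a6,b6,a7,b7}.  */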
+#define vec_vmrghw(a1, a2) \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())))
+
+#define vec_vmrghh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vmrghh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmrghh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_vmrghb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vmrghb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vmrghb ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
#define vec_mergel(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector float) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vmrglb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vmrglb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vmrglh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmrglh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ())))))))
-#define vec_mfvscr() __builtin_altivec_mfvscr ()
+#define vec_vmrglw(a1, a2) \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())))
+
+#define vec_vmrglh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vmrglh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmrglh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_vmrglb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vmrglb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vmrglb ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_mfvscr() ((vector unsigned short) __builtin_altivec_mfvscr ())
#define vec_min(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vminsb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vminsh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vminsw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector float) __builtin_altivec_vminfp ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vminub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vminub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vminub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vminsb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vminuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vminuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vminuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vminsh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vminuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vminuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vminuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vminsw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vminfp ((vector float) (a1), (vector float) (a2))), \
__altivec_link_error_invalid_argument ())))))))))))))
+#define vec_vminfp(a1, a2) \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vminfp ((vector float) (a1), (vector float) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vminsw(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vminsw ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vminuw(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vminuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vminuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vminuw ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())))
+
+#define vec_vminsh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vminsh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vminuh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vminuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vminuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vminuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())))
+
+#define vec_vminsb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vminsb ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vminub(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vminub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vminub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vminub ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ())))
+
#define vec_mladd(a1, a2, a3) \
-__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, vector signed short, a3), \
- (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3), \
-__ch (__tern_args_eq (vector signed short, a1, vector unsigned short, a2, vector unsigned short, a3), \
- (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3), \
-__ch (__tern_args_eq (vector unsigned short, a1, vector signed short, a2, vector signed short, a3), \
- (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3), \
-__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, vector unsigned short, a3), \
- (vector unsigned short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3), \
+__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector signed short, (a3)), \
+ ((vector signed short) __builtin_altivec_vmladduhm ((vector signed short) (a1), (vector signed short) (a2), (vector signed short) (a3))), \
+__ch (__tern_args_eq (vector signed short, (a1), vector unsigned short, (a2), vector unsigned short, (a3)), \
+ ((vector signed short) __builtin_altivec_vmladduhm ((vector signed short) (a1), (vector signed short) (a2), (vector signed short) (a3))), \
+__ch (__tern_args_eq (vector unsigned short, (a1), vector signed short, (a2), vector signed short, (a3)), \
+ ((vector signed short) __builtin_altivec_vmladduhm ((vector signed short) (a1), (vector signed short) (a2), (vector signed short) (a3))), \
+__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned short, (a3)), \
+ ((vector unsigned short) __builtin_altivec_vmladduhm ((vector signed short) (a1), (vector signed short) (a2), (vector signed short) (a3))), \
__altivec_link_error_invalid_argument ()))))
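+
+/* Usage sketch (illustrative): vec_mladd is a modulo-2**16 multiply-add;
+   each halfword of the result is the low 16 bits of a1*a2 + a3:
+
+     vector unsigned short x, scale, bias;
+     vector unsigned short y = vec_mladd (x, scale, bias);  */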
-#define vec_mradds(a1, a2, a3) __builtin_altivec_vmhraddshs (a1, a2, a3)
+#define vec_mradds(a1, a2, a3) __builtin_altivec_vmhraddshs ((a1), (a2), (a3))
#define vec_msum(a1, a2, a3) \
-__ch (__tern_args_eq (vector unsigned char, a1, vector unsigned char, a2, vector unsigned int, a3), \
- (vector unsigned int) __builtin_altivec_vmsumubm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector signed char, a1, vector unsigned char, a2, vector signed int, a3), \
- (vector signed int) __builtin_altivec_vmsummbm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, vector unsigned int, a3), \
- (vector unsigned int) __builtin_altivec_vmsumuhm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, vector signed int, a3), \
- (vector signed int) __builtin_altivec_vmsumshm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), vector unsigned int, (a3)), \
+ ((vector unsigned int) __builtin_altivec_vmsumubm ((vector signed char) (a1), (vector signed char) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector signed char, (a1), vector unsigned char, (a2), vector signed int, (a3)), \
+ ((vector signed int) __builtin_altivec_vmsummbm ((vector signed char) (a1), (vector signed char) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned int, (a3)), \
+ ((vector unsigned int) __builtin_altivec_vmsumuhm ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector signed int, (a3)), \
+ ((vector signed int) __builtin_altivec_vmsumshm ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
__altivec_link_error_invalid_argument ()))))
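+
+/* Usage sketch (illustrative): vec_msum forms a partial dot product;
+   each word of the result is the matching word of a3 plus the products
+   of the byte (or halfword) pairs that share that word, so an 8-bit
+   dot product accumulates as
+
+     vector unsigned char u, v;
+     vector unsigned int acc = vec_splat_u32 (0);
+     acc = vec_msum (u, v, acc);  */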
+#define vec_vmsumshm(a1, a2, a3) \
+__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector signed int, (a3)), \
+ ((vector signed int) __builtin_altivec_vmsumshm ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmsumuhm(a1, a2, a3) \
+__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned int, (a3)), \
+ ((vector unsigned int) __builtin_altivec_vmsumuhm ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmsummbm(a1, a2, a3) \
+__ch (__tern_args_eq (vector signed char, (a1), vector unsigned char, (a2), vector signed int, (a3)), \
+ ((vector signed int) __builtin_altivec_vmsummbm ((vector signed char) (a1), (vector signed char) (a2), (vector signed int) (a3))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmsumubm(a1, a2, a3) \
+__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), vector unsigned int, (a3)), \
+ ((vector unsigned int) __builtin_altivec_vmsumubm ((vector signed char) (a1), (vector signed char) (a2), (vector signed int) (a3))), \
+__altivec_link_error_invalid_argument ())
+
#define vec_msums(a1, a2, a3) \
-__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, vector unsigned int, a3), \
- (vector unsigned int) __builtin_altivec_vmsumuhs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, vector signed int, a3), \
- (vector signed int) __builtin_altivec_vmsumshs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned int, (a3)), \
+ ((vector unsigned int) __builtin_altivec_vmsumuhs ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector signed int, (a3)), \
+ ((vector signed int) __builtin_altivec_vmsumshs ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
__altivec_link_error_invalid_argument ()))
+#define vec_vmsumshs(a1, a2, a3) \
+__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector signed int, (a3)), \
+ ((vector signed int) __builtin_altivec_vmsumshs ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmsumuhs(a1, a2, a3) \
+__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned int, (a3)), \
+ ((vector unsigned int) __builtin_altivec_vmsumuhs ((vector signed short) (a1), (vector signed short) (a2), (vector signed int) (a3))), \
+__altivec_link_error_invalid_argument ())
+
#define vec_mtvscr(a1) \
-__ch (__un_args_eq (vector signed int, a1), \
- __builtin_altivec_mtvscr ((vector signed int) a1), \
-__ch (__un_args_eq (vector unsigned int, a1), \
- __builtin_altivec_mtvscr ((vector signed int) a1), \
-__ch (__un_args_eq (vector signed short, a1), \
- __builtin_altivec_mtvscr ((vector signed int) a1), \
-__ch (__un_args_eq (vector unsigned short, a1), \
- __builtin_altivec_mtvscr ((vector signed int) a1), \
-__ch (__un_args_eq (vector signed char, a1), \
- __builtin_altivec_mtvscr ((vector signed int) a1), \
-__ch (__un_args_eq (vector unsigned char, a1), \
- __builtin_altivec_mtvscr ((vector signed int) a1), \
+__ch (__un_args_eq (vector signed int, (a1)), \
+ __builtin_altivec_mtvscr ((vector signed int) (a1)), \
+__ch (__un_args_eq (vector unsigned int, (a1)), \
+ __builtin_altivec_mtvscr ((vector signed int) (a1)), \
+__ch (__un_args_eq (vector signed short, (a1)), \
+ __builtin_altivec_mtvscr ((vector signed int) (a1)), \
+__ch (__un_args_eq (vector unsigned short, (a1)), \
+ __builtin_altivec_mtvscr ((vector signed int) (a1)), \
+__ch (__un_args_eq (vector signed char, (a1)), \
+ __builtin_altivec_mtvscr ((vector signed int) (a1)), \
+__ch (__un_args_eq (vector unsigned char, (a1)), \
+ __builtin_altivec_mtvscr ((vector signed int) (a1)), \
__altivec_link_error_invalid_argument ()))))))
#define vec_mule(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned short) __builtin_altivec_vmuleub ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed short) __builtin_altivec_vmulesb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned int) __builtin_altivec_vmuleuh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vmulesh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmuleub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed short) __builtin_altivec_vmulesb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmuleuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed int) __builtin_altivec_vmulesh ((vector signed short) (a1), (vector signed short) (a2))), \
__altivec_link_error_invalid_argument ()))))
+#define vec_vmulesh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed int) __builtin_altivec_vmulesh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmuleuh(a1, a2) \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmuleuh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmulesb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed short) __builtin_altivec_vmulesb ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmuleub(a1, a2) \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmuleub ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ())
+
#define vec_mulo(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned short) __builtin_altivec_vmuloub ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed short) __builtin_altivec_vmulosb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned int) __builtin_altivec_vmulouh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vmulosh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmuloub ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed short) __builtin_altivec_vmulosb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmulouh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed int) __builtin_altivec_vmulosh ((vector signed short) (a1), (vector signed short) (a2))), \
__altivec_link_error_invalid_argument ()))))
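+
+/* Usage sketch (illustrative): vec_mule and vec_mulo are widening
+   multiplies over the even- and odd-numbered elements; together they
+   give the full products of two halfword vectors:
+
+     vector signed short a, b;
+     vector signed int even = vec_mule (a, b);
+     vector signed int odd  = vec_mulo (a, b);  */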
-#define vec_nmsub(a1, a2, a3) __builtin_altivec_vnmsubfp (a1, a2, a3)
+#define vec_vmulosh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed int) __builtin_altivec_vmulosh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmulouh(a1, a2) \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vmulouh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmulosb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed short) __builtin_altivec_vmulosb ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vmuloub(a1, a2) \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vmuloub ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_nmsub(a1, a2, a3) \
+__ch (__tern_args_eq (vector float, (a1), vector float, (a2), vector float, (a3)), \
+ ((vector float) __builtin_altivec_vnmsubfp ((vector float) (a1), (vector float) (a2), (vector float) (a3))), \
+__altivec_link_error_invalid_argument ())
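+
+/* Usage sketch (illustrative): vnmsubfp computes -(a1*a2 - a3) per
+   element, so
+
+     vector float a, b, c;
+     vector float r = vec_nmsub (a, b, c);
+
+   leaves r = -(a*b - c), the negated counterpart of vec_madd.  */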
#define vec_nor(a1, a2) \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector float) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vnor ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ())))))))
#define vec_or(a1, a2) \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector signed int, a2), \
- (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector float, a2), \
- (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector signed int, (a2)), \
+ ((vector float) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vor ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ())))))))))))))))
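+/* Usage sketch (illustrative; assumes -maltivec): vec_or dispatches on
+   the argument types, so mixing signed and unsigned operands yields an
+   unsigned result.  Values here come from the splat-immediate macros
+   defined below.
+
+     vector signed int   m = vec_splat_s32 (-1);
+     vector unsigned int o = vec_splat_u32 (1);
+     vector unsigned int r = vec_or (m, o);
+*/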
#define vec_pack(a1, a2) \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed char) __builtin_altivec_vpkuhum ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vpkuhum ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed short) __builtin_altivec_vpkuwum ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vpkuwum ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))
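+/* Usage sketch (illustrative): vec_pack is a modulo pack; each 32-bit
+   element keeps only its low 16 bits, so two unsigned int vectors pack
+   into one unsigned short vector.
+
+     vector unsigned int   hi = vec_splat_u32 (7);
+     vector unsigned int   lo = vec_splat_u32 (9);
+     vector unsigned short p  = vec_pack (hi, lo);
+*/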
-#define vec_packpx(a1, a2) __builtin_altivec_vpkpx (a1, a2)
+#define vec_vpkuwum(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed short) __builtin_altivec_vpkuwum ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vpkuwum ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_vpkuhum(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed char) __builtin_altivec_vpkuhum ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vpkuhum ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_packpx(a1, a2) __builtin_altivec_vpkpx ((a1), (a2))
#define vec_packs(a1, a2) \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed char) __builtin_altivec_vpkshss ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed short) __builtin_altivec_vpkswss ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed char) __builtin_altivec_vpkshss ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed short) __builtin_altivec_vpkswss ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))
+#define vec_vpkswss(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed short) __builtin_altivec_vpkswss ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vpkuwus(a1, a2) \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vpkshss(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed char) __builtin_altivec_vpkshss ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vpkuhus(a1, a2) \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())
+
#define vec_packsu(a1, a2) \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector unsigned char) __builtin_altivec_vpkshus ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector unsigned short) __builtin_altivec_vpkswus ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vpkshus ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vpkswus ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))
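+/* Usage sketch (illustrative): unlike vec_pack, vec_packsu saturates to
+   the unsigned range of the narrower type, so negative signed inputs
+   clamp to zero.
+
+     vector signed int     v = vec_splat_s32 (-4);
+     vector unsigned short u = vec_packsu (v, v);
+*/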
-#define vec_perm(a1, a2, a3, a4) \
-__ch (__tern_args_eq (vector float, a1, vector float, a2, vector unsigned char, a3), \
- (vector float) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
-__ch (__tern_args_eq (vector signed int, a1, vector signed int, a2, vector unsigned char, a3), \
- (vector signed int) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
-__ch (__tern_args_eq (vector unsigned int, a1, vector unsigned int, a2, vector unsigned char, a3), \
- (vector unsigned int) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
-__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, vector unsigned char, a3), \
- (vector signed short) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
-__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, vector unsigned char, a3), \
- (vector unsigned short) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
-__ch (__tern_args_eq (vector signed char, a1, vector signed char, a2, vector unsigned char, a3), \
- (vector signed char) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
-__ch (__tern_args_eq (vector unsigned char, a1, vector unsigned char, a2, vector unsigned char, a3), \
- (vector unsigned char) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
+#define vec_vpkswus(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vpkswus ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vpkshus(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vpkshus ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_perm(a1, a2, a3) \
+__ch (__tern_args_eq (vector float, (a1), vector float, (a2), vector unsigned char, (a3)), \
+ ((vector float) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
+__ch (__tern_args_eq (vector signed int, (a1), vector signed int, (a2), vector unsigned char, (a3)), \
+ ((vector signed int) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
+__ch (__tern_args_eq (vector unsigned int, (a1), vector unsigned int, (a2), vector unsigned char, (a3)), \
+ ((vector unsigned int) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
+__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector unsigned char, (a3)), \
+ ((vector signed short) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
+__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned char, (a3)), \
+ ((vector unsigned short) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
+__ch (__tern_args_eq (vector signed char, (a1), vector signed char, (a2), vector unsigned char, (a3)), \
+ ((vector signed char) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
+__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), vector unsigned char, (a3)), \
+ ((vector unsigned char) __builtin_altivec_vperm_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed char) (a3))), \
__altivec_link_error_invalid_argument ())))))))
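+/* Usage sketch (illustrative; a, b and the control vector are assumed
+   initialized elsewhere): vec_perm takes three operands, and each byte
+   of the control vector indexes into the 32-byte concatenation of the
+   first two.
+
+     vector float         a, b;
+     vector unsigned char sel;
+     vector float         r = vec_perm (a, b, sel);
+*/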
-#define vec_re(a1) __builtin_altivec_vrefp (a1)
+#define vec_re(a1) __builtin_altivec_vrefp ((a1))
#define vec_rl(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vrlb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vrlb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vrlh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vrlh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vrlw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vrlw ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))))
-#define vec_round(a1) __builtin_altivec_vrfin (a1)
+#define vec_vrlw(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vrlw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vrlw ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_vrlh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vrlh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vrlh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_vrlb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vrlb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vrlb ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ()))
-#define vec_rsqrte(a1) __builtin_altivec_vrsqrtefp (a1)
+
+#define vec_round(a1) __builtin_altivec_vrfin ((a1))
+
+#define vec_rsqrte(a1) __builtin_altivec_vrsqrtefp ((a1))
#define vec_sel(a1, a2, a3) \
-__ch (__tern_args_eq (vector float, a1, vector float, a2, vector signed int, a3), \
- (vector float) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector float, a1, vector float, a2, vector unsigned int, a3), \
- (vector float) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector signed int, a1, vector signed int, a2, vector signed int, a3), \
- (vector signed int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector signed int, a1, vector signed int, a2, vector unsigned int, a3), \
- (vector signed int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector unsigned int, a1, vector unsigned int, a2, vector signed int, a3), \
- (vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector unsigned int, a1, vector unsigned int, a2, vector unsigned int, a3), \
- (vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, vector signed short, a3), \
- (vector signed short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, vector unsigned short, a3), \
- (vector signed short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, vector signed short, a3), \
- (vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, vector unsigned short, a3), \
- (vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector signed char, a1, vector signed char, a2, vector signed char, a3), \
- (vector signed char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector signed char, a1, vector signed char, a2, vector unsigned char, a3), \
- (vector signed char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector unsigned char, a1, vector unsigned char, a2, vector signed char, a3), \
- (vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
-__ch (__tern_args_eq (vector unsigned char, a1, vector unsigned char, a2, vector unsigned char, a3), \
- (vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector float, (a1), vector float, (a2), vector signed int, (a3)), \
+ ((vector float) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector float, (a1), vector float, (a2), vector unsigned int, (a3)), \
+ ((vector float) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector signed int, (a1), vector signed int, (a2), vector signed int, (a3)), \
+ ((vector signed int) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector signed int, (a1), vector signed int, (a2), vector unsigned int, (a3)), \
+ ((vector signed int) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector unsigned int, (a1), vector unsigned int, (a2), vector signed int, (a3)), \
+ ((vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector unsigned int, (a1), vector unsigned int, (a2), vector unsigned int, (a3)), \
+ ((vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector signed short, (a3)), \
+ ((vector signed short) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), vector unsigned short, (a3)), \
+ ((vector signed short) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector signed short, (a3)), \
+ ((vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), vector unsigned short, (a3)), \
+ ((vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector signed char, (a1), vector signed char, (a2), vector signed char, (a3)), \
+ ((vector signed char) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector signed char, (a1), vector signed char, (a2), vector unsigned char, (a3)), \
+ ((vector signed char) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), vector signed char, (a3)), \
+ ((vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
+__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), vector unsigned char, (a3)), \
+ ((vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) (a1), (vector signed int) (a2), (vector signed int) (a3))), \
__altivec_link_error_invalid_argument ()))))))))))))))
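+/* Usage sketch (illustrative; operands assumed initialized elsewhere):
+   vec_sel picks each result bit from the second operand where the mask
+   bit is 1 and from the first operand where it is 0.
+
+     vector float        f, t;
+     vector unsigned int mask;
+     vector float        r = vec_sel (f, t, mask);
+*/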
#define vec_sl(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vslb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vslb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vslh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vslh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vslw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vslw ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))))
+#define vec_vslw(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vslw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vslw ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_vslh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vslh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vslh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_vslb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vslb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vslb ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
#define vec_sld(a1, a2, a3) \
-__ch (__tern_args_eq (vector float, a1, vector float, a2, const char, a3), \
- (vector float) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
-__ch (__tern_args_eq (vector signed int, a1, vector signed int, a2, const char, a3), \
- (vector signed int) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
-__ch (__tern_args_eq (vector unsigned int, a1, vector unsigned int, a2, const char, a3), \
- (vector unsigned int) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
-__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, const char, a3), \
- (vector signed short) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
-__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, const char, a3), \
- (vector unsigned short) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
-__ch (__tern_args_eq (vector signed char, a1, vector signed char, a2, const char, a3), \
- (vector signed char) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
-__ch (__tern_args_eq (vector unsigned char, a1, vector unsigned char, a2, const char, a3), \
- (vector unsigned char) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
+__ch (__tern_args_eq (vector float, (a1), vector float, (a2), int, (a3)), \
+ ((vector float) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
+__ch (__tern_args_eq (vector signed int, (a1), vector signed int, (a2), int, (a3)), \
+ ((vector signed int) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
+__ch (__tern_args_eq (vector unsigned int, (a1), vector unsigned int, (a2), int, (a3)), \
+ ((vector unsigned int) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
+__ch (__tern_args_eq (vector signed short, (a1), vector signed short, (a2), int, (a3)), \
+ ((vector signed short) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
+__ch (__tern_args_eq (vector unsigned short, (a1), vector unsigned short, (a2), int, (a3)), \
+ ((vector unsigned short) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
+__ch (__tern_args_eq (vector signed char, (a1), vector signed char, (a2), int, (a3)), \
+ ((vector signed char) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
+__ch (__tern_args_eq (vector unsigned char, (a1), vector unsigned char, (a2), int, (a3)), \
+ ((vector unsigned char) __builtin_altivec_vsldoi_4si ((vector signed int) (a1), (vector signed int) (a2), (const char) (a3))), \
__altivec_link_error_invalid_argument ())))))))
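+/* Usage sketch (illustrative): the shift count for vec_sld must be a
+   literal in the range 0..15; the result takes 16 bytes starting at
+   that byte offset within the concatenation of the two operands.
+
+     vector signed int a, b;
+     vector signed int r = vec_sld (a, b, 4);
+*/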
#define vec_sll(a1, a2) \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned short, a2), \
- (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned char, a2), \
- (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned int, a2), \
- (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned char, a2), \
- (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned int, a2), \
- (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned char, a2), \
- (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned int, a2), \
- (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned short, a2), \
- (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned int, a2), \
- (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned short, a2), \
- (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned short, (a2)), \
+ ((vector signed int) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned char, (a2)), \
+ ((vector signed int) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned int, (a2)), \
+ ((vector signed short) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned char, (a2)), \
+ ((vector signed short) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned int, (a2)), \
+ ((vector signed char) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned short, (a2)), \
+ ((vector signed char) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsl ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))))))))))))))))
#define vec_slo(a1, a2) \
-__ch (__bin_args_eq (vector float, a1, vector signed char, a2), \
- (vector float) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector unsigned char, a2), \
- (vector float) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed char, a2), \
- (vector unsigned int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned char, a2), \
- (vector unsigned int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed char, a2), \
- (vector signed short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned char, a2), \
- (vector signed short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed char, a2), \
- (vector unsigned short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned char, a2), \
- (vector unsigned short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector unsigned char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, (a1), vector signed char, (a2)), \
+ ((vector float) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector unsigned char, (a2)), \
+ ((vector float) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed char, (a2)), \
+ ((vector signed int) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned char, (a2)), \
+ ((vector signed int) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed char, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed char, (a2)), \
+ ((vector signed short) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned char, (a2)), \
+ ((vector signed short) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed char, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vslo ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))))))))))))
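+/* Usage sketch (illustrative): vec_sl shifts each element by its own
+   count, while vec_sll shifts the whole 128-bit register left by a bit
+   count taken from the low-order bits and vec_slo shifts it left by
+   whole octets.
+
+     vector unsigned int  v, n;
+     vector unsigned char b;
+     vector unsigned int  per_elt = vec_sl (v, n);
+     vector unsigned int  by_bits = vec_sll (v, n);
+     vector unsigned int  by_byte = vec_slo (v, b);
+*/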
#define vec_splat(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, const char, a2), \
- (vector signed char) __builtin_altivec_vspltb ((vector signed char) a1, (const char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, const char, a2), \
- (vector unsigned char) __builtin_altivec_vspltb ((vector signed char) a1, (const char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, const char, a2), \
- (vector signed short) __builtin_altivec_vsplth ((vector signed short) a1, (const char) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, const char, a2), \
- (vector unsigned short) __builtin_altivec_vsplth ((vector signed short) a1, (const char) a2), \
-__ch (__bin_args_eq (vector float, a1, const char, a2), \
- (vector float) __builtin_altivec_vspltw ((vector signed int) a1, (const char) a2), \
-__ch (__bin_args_eq (vector signed int, a1, const char, a2), \
- (vector signed int) __builtin_altivec_vspltw ((vector signed int) a1, (const char) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, const char, a2), \
- (vector unsigned int) __builtin_altivec_vspltw ((vector signed int) a1, (const char) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), int, (a2)), \
+  ((vector signed char) __builtin_altivec_vspltb ((vector signed char) (a1), (const char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), int, (a2)), \
+  ((vector unsigned char) __builtin_altivec_vspltb ((vector signed char) (a1), (const char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), int, (a2)), \
+  ((vector signed short) __builtin_altivec_vsplth ((vector signed short) (a1), (const char) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), int, (a2)), \
+  ((vector unsigned short) __builtin_altivec_vsplth ((vector signed short) (a1), (const char) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), int, (a2)), \
+  ((vector float) __builtin_altivec_vspltw ((vector signed int) (a1), (const char) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), int, (a2)), \
+  ((vector signed int) __builtin_altivec_vspltw ((vector signed int) (a1), (const char) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), int, (a2)), \
+  ((vector unsigned int) __builtin_altivec_vspltw ((vector signed int) (a1), (const char) (a2))), \
__altivec_link_error_invalid_argument ())))))))
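+/* Usage sketch (illustrative; v assumed initialized elsewhere):
+   vec_splat replicates the element selected by a literal index across
+   the whole vector.
+
+     vector signed short v;
+     vector signed short all2 = vec_splat (v, 2);
+*/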
-#define vec_splat_s8(a1) __builtin_altivec_vspltisb (a1)
+#define vec_vspltw(a1, a2) \
+__ch (__bin_args_eq (vector float, (a1), int, (a2)), \
+  ((vector float) __builtin_altivec_vspltw ((vector signed int) (a1), (const char) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), int, (a2)), \
+  ((vector signed int) __builtin_altivec_vspltw ((vector signed int) (a1), (const char) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), int, (a2)), \
+  ((vector unsigned int) __builtin_altivec_vspltw ((vector signed int) (a1), (const char) (a2))), \
+__altivec_link_error_invalid_argument ())))
+
+#define vec_vsplth(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), int, (a2)), \
+  ((vector signed short) __builtin_altivec_vsplth ((vector signed short) (a1), (const char) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), int, (a2)), \
+  ((vector unsigned short) __builtin_altivec_vsplth ((vector signed short) (a1), (const char) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_vspltb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), int, (a2)), \
+  ((vector signed char) __builtin_altivec_vspltb ((vector signed char) (a1), (const char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), int, (a2)), \
+  ((vector unsigned char) __builtin_altivec_vspltb ((vector signed char) (a1), (const char) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_splat_s8(a1) __builtin_altivec_vspltisb ((a1))
-#define vec_splat_s16(a1) __builtin_altivec_vspltish (a1)
+#define vec_splat_s16(a1) __builtin_altivec_vspltish ((a1))
-#define vec_splat_s32(a1) __builtin_altivec_vspltisw (a1)
+#define vec_splat_s32(a1) __builtin_altivec_vspltisw ((a1))
-#define vec_splat_u8(a1) __builtin_altivec_vspltisb (a1)
+#define vec_splat_u8(a1) ((vector unsigned char) __builtin_altivec_vspltisb ((a1)))
-#define vec_splat_u16(a1) __builtin_altivec_vspltish (a1)
+#define vec_splat_u16(a1) ((vector unsigned short) __builtin_altivec_vspltish ((a1)))
-#define vec_splat_u32(a1) __builtin_altivec_vspltisw (a1)
+#define vec_splat_u32(a1) ((vector unsigned int) __builtin_altivec_vspltisw ((a1)))
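+/* Usage sketch (illustrative): the vec_splat_[su]* macros take a 5-bit
+   signed literal, so the argument must lie in -16..15; the unsigned
+   variants cast the same splat to the matching unsigned vector type.
+
+     vector signed int    m16 = vec_splat_s32 (-16);
+     vector unsigned char z   = vec_splat_u8 (0);
+*/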
#define vec_sr(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vsrb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsrb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vsrh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsrh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vsrw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsrw ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))))
+#define vec_vsrw(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vsrw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsrw ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_vsrh(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vsrh ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsrh ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_vsrb(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vsrb ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsrb ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
#define vec_sra(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vsrab ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsrab ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vsrah ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsrah ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vsraw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsraw ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))))
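+/* Usage sketch (illustrative): vec_sr is a zero-fill element shift,
+   while vec_sra propagates each element's sign bit.
+
+     vector signed int   x  = vec_splat_s32 (-8);
+     vector unsigned int sh = vec_splat_u32 (1);
+     vector signed int   lg = vec_sr (x, sh);
+     vector signed int   ar = vec_sra (x, sh);
+*/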
+#define vec_vsraw(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vsraw ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsraw ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_vsrah(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vsrah ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsrah ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_vsrab(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vsrab ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsrab ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ()))
+
#define vec_srl(a1, a2) \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned short, a2), \
- (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned char, a2), \
- (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned int, a2), \
- (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned char, a2), \
- (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned int, a2), \
- (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned char, a2), \
- (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned int, a2), \
- (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned short, a2), \
- (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned int, a2), \
- (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned short, a2), \
- (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector signed int) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned short, (a2)), \
+ ((vector signed int) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned char, (a2)), \
+ ((vector signed int) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned int, (a2)), \
+ ((vector signed short) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector signed short) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned char, (a2)), \
+ ((vector signed short) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned int, (a2)), \
+ ((vector signed char) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned short, (a2)), \
+ ((vector signed char) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsr ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))))))))))))))))
#define vec_sro(a1, a2) \
-__ch (__bin_args_eq (vector float, a1, vector signed char, a2), \
- (vector float) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector unsigned char, a2), \
- (vector float) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed char, a2), \
- (vector unsigned int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned char, a2), \
- (vector unsigned int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed char, a2), \
- (vector signed short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned char, a2), \
- (vector signed short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed char, a2), \
- (vector unsigned short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned char, a2), \
- (vector unsigned short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector unsigned char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, (a1), vector signed char, (a2)), \
+ ((vector float) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector unsigned char, (a2)), \
+ ((vector float) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed char, (a2)), \
+ ((vector signed int) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned char, (a2)), \
+ ((vector signed int) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed char, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed char, (a2)), \
+ ((vector signed short) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned char, (a2)), \
+ ((vector signed short) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed char, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector signed char) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsro ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))))))))))))
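Note the division of labor between the two whole-register shifts: vec_srl (vsr) shifts the full 128-bit value right by 0-7 bits, vec_sro (vsro) by whole octets, so a general shift chains the two. An illustrative sketch (hypothetical helper; assumes every byte of amount carries the same count, e.g. produced with vec_splat):

    static vector unsigned char
    shift_right_128 (vector unsigned char v, vector unsigned char amount)
    {
      /* vsro consumes the octet part of the count, vsr the remaining
         0-7 bits; together they shift v right by the full count.  */
      return vec_srl (vec_sro (v, amount), amount);
    }
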
#define vec_st(a1, a2, a3) \
- __builtin_altivec_stvx ((vector signed int) a1, a2, a3)
+ __builtin_altivec_stvx ((vector signed int) (a1), (a2), (a3))
#define vec_stl(a1, a2, a3) \
- __builtin_altivec_stvxl ((vector signed int) a1, a2, a3)
+ __builtin_altivec_stvxl ((vector signed int) (a1), (a2), (a3))
#define vec_ste(a, b, c) \
-__ch (__un_args_eq (vector unsigned char, a), \
- __builtin_altivec_stvebx ((vector signed char) a, b, c), \
-__ch (__un_args_eq (vector signed char, a), \
- __builtin_altivec_stvebx ((vector signed char) a, b, c), \
-__ch (__un_args_eq (vector unsigned short, a), \
- __builtin_altivec_stvehx ((vector signed short) a, b, c), \
-__ch (__un_args_eq (vector signed short, a), \
- __builtin_altivec_stvehx ((vector signed short) a, b, c), \
-__ch (__un_args_eq (vector unsigned int, a), \
- __builtin_altivec_stvewx ((vector signed int) a, b, c), \
-__ch (__un_args_eq (vector signed int, a), \
- __builtin_altivec_stvewx ((vector signed int) a, b, c), \
-__ch (__un_args_eq (vector float, a), \
- __builtin_altivec_stvewx ((vector signed int) a, b, c), \
+__ch (__un_args_eq (vector unsigned char, (a)), \
+ __builtin_altivec_stvebx ((vector signed char) (a), (b), (c)), \
+__ch (__un_args_eq (vector signed char, (a)), \
+ __builtin_altivec_stvebx ((vector signed char) (a), (b), (c)), \
+__ch (__un_args_eq (vector unsigned short, (a)), \
+ __builtin_altivec_stvehx ((vector signed short) (a), (b), (c)), \
+__ch (__un_args_eq (vector signed short, (a)), \
+ __builtin_altivec_stvehx ((vector signed short) (a), (b), (c)), \
+__ch (__un_args_eq (vector unsigned int, (a)), \
+ __builtin_altivec_stvewx ((vector signed int) (a), (b), (c)), \
+__ch (__un_args_eq (vector signed int, (a)), \
+ __builtin_altivec_stvewx ((vector signed int) (a), (b), (c)), \
+__ch (__un_args_eq (vector float, (a)), \
+ __builtin_altivec_stvewx ((vector signed int) (a), (b), (c)), \
__altivec_link_error_invalid_argument ())))))))
+#define vec_stvewx(a, b, c) \
+__ch (__un_args_eq (vector unsigned int, (a)), \
+ __builtin_altivec_stvewx ((vector signed int) (a), (b), (c)), \
+__ch (__un_args_eq (vector signed int, (a)), \
+ __builtin_altivec_stvewx ((vector signed int) (a), (b), (c)), \
+__ch (__un_args_eq (vector float, (a)), \
+ __builtin_altivec_stvewx ((vector signed int) (a), (b), (c)), \
+__altivec_link_error_invalid_argument ())))
+
+#define vec_stvehx(a, b, c) \
+__ch (__un_args_eq (vector unsigned short, (a)), \
+ __builtin_altivec_stvehx ((vector signed short) (a), (b), (c)), \
+__ch (__un_args_eq (vector signed short, (a)), \
+ __builtin_altivec_stvehx ((vector signed short) (a), (b), (c)), \
+__altivec_link_error_invalid_argument ()))
+
+#define vec_stvebx(a, b, c) \
+__ch (__un_args_eq (vector unsigned char, (a)), \
+ __builtin_altivec_stvebx ((vector signed char) (a), (b), (c)), \
+__ch (__un_args_eq (vector signed char, (a)), \
+ __builtin_altivec_stvebx ((vector signed char) (a), (b), (c)), \
+__altivec_link_error_invalid_argument ()))
+
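vec_st writes the whole 16-byte vector while vec_ste writes a single element selected by the effective address; the new vec_stvebx/vec_stvehx/vec_stvewx names again fix the element width explicitly. A brief sketch (hypothetical helper; p + offset is assumed 16-byte aligned, as stvx ignores the low address bits):

    static void
    store_both (vector float v, int offset, float *p)
    {
      vec_st (v, offset, p);    /* stvx: full vector store */
      vec_ste (v, offset, p);   /* stvewx: one word, chosen by the EA */
    }
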
#define vec_sub(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector float) __builtin_altivec_vsubfp ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vsubfp ((vector float) (a1), (vector float) (a2))), \
__altivec_link_error_invalid_argument ())))))))))))))
-#define vec_subc(a1, a2) __builtin_altivec_vsubcuw (a1, a2)
+#define vec_vsubfp(a1, a2) \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ ((vector float) __builtin_altivec_vsubfp ((vector float) (a1), (vector float) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vsubuwm(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ()))))
+
+#define vec_vsubuhm(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ()))))
+
+#define vec_vsububm(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsububm ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ()))))
+
+#define vec_subc(a1, a2) ((vector unsigned int) __builtin_altivec_vsubcuw ((vector unsigned int) (a1), (vector unsigned int) (a2)))
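vec_subc yields the not-borrow word of an unsigned 32-bit subtraction (1 where a1 >= a2, else 0), the building block for multi-word arithmetic. A sketch (hypothetical helper):

    static vector unsigned int
    no_borrow (vector unsigned int a1, vector unsigned int a2)
    {
      /* vsubcuw: per-element carry-out of a1 - a2.  */
      return vec_subc (a1, a2);
    }
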
#define vec_subs(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vsubsbs ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vsubshs ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vsubsws ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsububs ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsububs ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsububs ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vsubsbs ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vsubshs ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vsubsws ((vector signed int) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ()))))))))))))
+#define vec_vsubsws(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vsubsws ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vsubuws(a1, a2) \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())))
+
+#define vec_vsubshs(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ ((vector signed short) __builtin_altivec_vsubshs ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vsubuhs(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) (a1), (vector signed short) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ ((vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) (a1), (vector signed short) (a2))), \
+__altivec_link_error_invalid_argument ())))
+
+#define vec_vsubsbs(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ ((vector signed char) __builtin_altivec_vsubsbs ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vsububs(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsububs ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsububs ((vector signed char) (a1), (vector signed char) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ ((vector unsigned char) __builtin_altivec_vsububs ((vector signed char) (a1), (vector signed char) (a2))), \
+__altivec_link_error_invalid_argument ())))
+
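The contrast with vec_sub above is wrap-around versus clamping: for unsigned chars, 0 - 1 wraps to 0xFF under vec_sub but saturates to 0 under vec_subs. Illustrative sketch (hypothetical helper):

    static vector unsigned char
    clamped_diff (vector unsigned char a, vector unsigned char b)
    {
      /* vsububs: differences below zero clamp at zero.  */
      return vec_subs (a, b);
    }
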
#define vec_sum4s(a1, a2) \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vsum4ubs ((vector signed char) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vsum4sbs ((vector signed char) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vsum4shs ((vector signed short) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned int, (a2)), \
+ ((vector unsigned int) __builtin_altivec_vsum4ubs ((vector signed char) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vsum4sbs ((vector signed char) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed int, (a2)), \
+ ((vector signed int) __builtin_altivec_vsum4shs ((vector signed short) (a1), (vector signed int) (a2))), \
__altivec_link_error_invalid_argument ())))
-#define vec_sum2s(a1, a2) __builtin_altivec_vsum2sws (a1, a2)
-#define vec_sums(a1, a2) __builtin_altivec_vsumsws (a1, a2)
-#define vec_trunc(a1) __builtin_altivec_vrfiz (a1)
+#define vec_vsum4shs(a1, a2) \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed int, (a2)), \
+      ((vector signed int) __builtin_altivec_vsum4shs ((vector signed short) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vsum4sbs(a1, a2) \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed int, (a2)), \
+      ((vector signed int) __builtin_altivec_vsum4sbs ((vector signed char) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vsum4ubs(a1, a2) \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned int, (a2)), \
+      ((vector unsigned int) __builtin_altivec_vsum4ubs ((vector signed char) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_sum2s(a1, a2) __builtin_altivec_vsum2sws ((a1), (a2))
+
+#define vec_sums(a1, a2) __builtin_altivec_vsumsws ((a1), (a2))
+
+#define vec_trunc(a1) __builtin_altivec_vrfiz ((a1))
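vec_sums reduces across the register: vsumsws adds all four words of its first operand to word 3 of the second and leaves the saturated total in word 3. A sketch (hypothetical helper; zero is assumed to be an all-zero vector):

    static vector signed int
    sum_across (vector signed int v, vector signed int zero)
    {
      /* Result element 3 holds the saturated sum of v's elements.  */
      return vec_sums (v, zero);
    }
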
#define vec_unpackh(a1) \
-__ch (__un_args_eq (vector signed char, a1), \
- (vector signed short) __builtin_altivec_vupkhsb ((vector signed char) a1), \
-__ch (__un_args_eq (vector signed short, a1), \
- (vector unsigned int) __builtin_altivec_vupkhpx ((vector signed short) a1), \
-__ch (__un_args_eq (vector signed short, a1), \
- (vector signed int) __builtin_altivec_vupkhsh ((vector signed short) a1), \
+__ch (__un_args_eq (vector signed char, (a1)), \
+ ((vector signed short) __builtin_altivec_vupkhsb ((vector signed char) (a1))), \
+__ch (__un_args_eq (vector signed short, (a1)), \
+ ((vector unsigned int) __builtin_altivec_vupkhpx ((vector signed short) (a1))), \
+__ch (__un_args_eq (vector signed short, (a1)), \
+ ((vector signed int) __builtin_altivec_vupkhsh ((vector signed short) (a1))), \
__altivec_link_error_invalid_argument ())))
+#define vec_vupkhsh(a1) \
+__ch (__un_args_eq (vector signed short, (a1)), \
+ ((vector signed int) __builtin_altivec_vupkhsh ((vector signed short) (a1))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vupkhpx(a1) \
+__ch (__un_args_eq (vector signed short, (a1)), \
+ ((vector unsigned int) __builtin_altivec_vupkhpx ((vector signed short) (a1))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vupkhsb(a1) \
+__ch (__un_args_eq (vector signed char, (a1)), \
+ ((vector signed short) __builtin_altivec_vupkhsb ((vector signed char) (a1))), \
+__altivec_link_error_invalid_argument ())
+
#define vec_unpackl(a1) \
-__ch (__un_args_eq (vector signed char, a1), \
- (vector signed short) __builtin_altivec_vupklsb ((vector signed char) a1), \
-__ch (__un_args_eq (vector signed short, a1), \
- (vector unsigned int) __builtin_altivec_vupklpx ((vector signed short) a1), \
-__ch (__un_args_eq (vector signed short, a1), \
- (vector signed int) __builtin_altivec_vupklsh ((vector signed short) a1), \
+__ch (__un_args_eq (vector signed char, (a1)), \
+ ((vector signed short) __builtin_altivec_vupklsb ((vector signed char) (a1))), \
+__ch (__un_args_eq (vector signed short, (a1)), \
+ ((vector unsigned int) __builtin_altivec_vupklpx ((vector signed short) (a1))), \
+__ch (__un_args_eq (vector signed short, (a1)), \
+ ((vector signed int) __builtin_altivec_vupklsh ((vector signed short) (a1))), \
__altivec_link_error_invalid_argument ())))
+#define vec_vupklsh(a1) \
+__ch (__un_args_eq (vector signed short, (a1)), \
+ ((vector signed int) __builtin_altivec_vupklsh ((vector signed short) (a1))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vupklpx(a1) \
+__ch (__un_args_eq (vector signed short, (a1)), \
+ ((vector unsigned int) __builtin_altivec_vupklpx ((vector signed short) (a1))), \
+__altivec_link_error_invalid_argument ())
+
+#define vec_vupklsb(a1) \
+__ch (__un_args_eq (vector signed char, (a1)), \
+ ((vector signed short) __builtin_altivec_vupklsb ((vector signed char) (a1))), \
+__altivec_link_error_invalid_argument ())
+
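Note that in both vec_unpackh and vec_unpackl the two signed-short arms overlap, and __ch takes the first match, so a signed short argument always resolves to the pixel form (vupkhpx/vupklpx); the new per-instruction names are the only way to reach vupkhsh/vupklsh. Sketch (hypothetical helper):

    static vector signed int
    widen_high (vector signed short v)
    {
      /* vupkhsh requested by name: sign-extends the four high
         halfwords to words.  */
      return vec_vupkhsh (v);
    }
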
#define vec_xor(a1, a2) \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector float) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector signed int, a2), \
- (vector float) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector float, a2), \
- (vector float) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector unsigned int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector unsigned int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector unsigned short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector unsigned short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector unsigned char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector unsigned char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
- __altivec_link_error_invalid_argument ())))))))))))))))
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+      ((vector float) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector float, (a1), vector unsigned int, (a2)), \
+      ((vector float) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector float, (a2)), \
+      ((vector float) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+      ((vector signed int) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+      ((vector unsigned int) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+      ((vector unsigned int) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+      ((vector unsigned int) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+      ((vector signed short) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+      ((vector unsigned short) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+      ((vector unsigned short) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+      ((vector unsigned short) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+      ((vector signed char) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+      ((vector unsigned char) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+      ((vector unsigned char) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+      ((vector unsigned char) __builtin_altivec_vxor ((vector signed int) (a1), (vector signed int) (a2))), \
+__altivec_link_error_invalid_argument ()))))))))))))))))
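One common use of vec_xor is the register-zeroing idiom, since x ^ x is zero for any x. Sketch (hypothetical helper):

    static vector signed int
    zero_vec (vector signed int x)
    {
      return vec_xor (x, x);   /* vxor vD,vA,vA clears vD */
    }
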
+
+/* Predicates. */
#define vec_all_eq(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpeqfp_p ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ __builtin_altivec_vcmpeqfp_p (__CR6_LT, (vector float) (a1), (vector float) (a2)), \
__altivec_link_error_invalid_argument ())))))))))))))
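The _p builtins return an int read from CR6 after the dotted compare; __CR6_LT selects the all-elements-true bit and __CR6_EQ the no-element-true bit, which is how one compare instruction serves every all_/any_ predicate. Sketch (hypothetical helper):

    static int
    vectors_equal (vector float a, vector float b)
    {
      /* vcmpeqfp. sets CR6; __CR6_LT asks "all equal?".  */
      return vec_all_eq (a, b);
    }
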
#define vec_all_ge(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpgefp_p ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+      __builtin_altivec_vcmpgefp_p (__CR6_LT, (vector float) (a1), (vector float) (a2)), \
__altivec_link_error_invalid_argument ())))))))))))))
#define vec_all_gt(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpgtfp_p ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ __builtin_altivec_vcmpgtfp_p (__CR6_LT, (vector float) (a1), (vector float) (a2)), \
__altivec_link_error_invalid_argument ())))))))))))))
-#define vec_all_in(a1, a2) __builtin_altivec_vcmpbfp_p (a1, a2)
+#define vec_all_in(a1, a2) __builtin_altivec_vcmpbfp_p (__CR6_EQ, (a1), (a2))
#define vec_all_le(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpgefp_p ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ __builtin_altivec_vcmpgefp_p (__CR6_LT, (vector float) (a2), (vector float) (a1)), \
__altivec_link_error_invalid_argument ())))))))))))))
#define vec_all_lt(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpgtfp_p ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ __builtin_altivec_vcmpgtfp_p (__CR6_LT, (vector float) (a2), (vector float) (a1)), \
__altivec_link_error_invalid_argument ())))))))))))))
-#define vec_all_nan(a1) __builtin_altivec_vcmpeqfp_p (a1)
+#define vec_all_nan(a1) __builtin_altivec_vcmpeqfp_p (__CR6_EQ, (a1), (a1))
#define vec_all_ne(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpeqfp_p ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ __builtin_altivec_vcmpeqfp_p (__CR6_EQ, (vector float) (a1), (vector float) (a2)), \
__altivec_link_error_invalid_argument ())))))))))))))
-#define vec_all_nge(a1, a2) __builtin_altivec_vcmpgefp_p (a1, a2)
+#define vec_all_nge(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a1), (a2))
-#define vec_all_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (a1, a2)
+#define vec_all_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a1), (a2))
-#define vec_all_nle(a1, a2) __builtin_altivec_vcmpgefp_p (a1, a2)
+#define vec_all_nle(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a2), (a1))
-#define vec_all_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (a1, a2)
+#define vec_all_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a2), (a1))
-#define vec_all_numeric(a1) __builtin_altivec_vcmpeqfp_p (a1)
+#define vec_all_numeric(a1) __builtin_altivec_vcmpeqfp_p (__CR6_LT, (a1), (a1))
#define vec_any_eq(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpeqfp_p ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, (vector float) (a1), (vector float) (a2)), \
__altivec_link_error_invalid_argument ())))))))))))))
#define vec_any_ge(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpgefp_p ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, (vector float) (a1), (vector float) (a2)), \
__altivec_link_error_invalid_argument ())))))))))))))
#define vec_any_gt(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpgtfp_p ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, (vector float) (a1), (vector float) (a2)), \
__altivec_link_error_invalid_argument ())))))))))))))
#define vec_any_le(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpgefp_p ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ __builtin_altivec_vcmpgefp_p (__CR6_EQ_REV, (vector float) (a2), (vector float) (a1)), \
__altivec_link_error_invalid_argument ())))))))))))))
#define vec_any_lt(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpgtfp_p ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpgtub_p (__CR6_EQ_REV, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpgtsb_p (__CR6_EQ_REV, (vector signed char) (a2), (vector signed char) (a1)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpgtuh_p (__CR6_EQ_REV, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpgtsh_p (__CR6_EQ_REV, (vector signed short) (a2), (vector signed short) (a1)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpgtuw_p (__CR6_EQ_REV, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpgtsw_p (__CR6_EQ_REV, (vector signed int) (a2), (vector signed int) (a1)), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ __builtin_altivec_vcmpgtfp_p (__CR6_EQ_REV, (vector float) (a2), (vector float) (a1)), \
__altivec_link_error_invalid_argument ())))))))))))))
-#define vec_any_nan(a1) __builtin_altivec_vcmpeqfp_p (a1)
+#define vec_any_nan(a1) __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, (a1), (a1))
#define vec_any_ne(a1, a2) \
-__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
- (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
- (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
- (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
-__ch (__bin_args_eq (vector float, a1, vector float, a2), \
- (vector signed int) __builtin_altivec_vcmpeqfp_p ((vector float) a1, (vector float) a2), \
+__ch (__bin_args_eq (vector signed char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector signed char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
+ __builtin_altivec_vcmpequb_p (__CR6_LT_REV, (vector signed char) (a1), (vector signed char) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector signed short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
+ __builtin_altivec_vcmpequh_p (__CR6_LT_REV, (vector signed short) (a1), (vector signed short) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector signed int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
+ __builtin_altivec_vcmpequw_p (__CR6_LT_REV, (vector signed int) (a1), (vector signed int) (a2)), \
+__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
+ __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, (vector float) (a1), (vector float) (a2)), \
__altivec_link_error_invalid_argument ())))))))))))))
-#define vec_any_nge(a1, a2) __builtin_altivec_vcmpgefp_p (a1, a2)
+#define vec_any_nge(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a1), (a2))
-#define vec_any_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (a1, a2)
+#define vec_any_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a1), (a2))
-#define vec_any_nle(a1, a2) __builtin_altivec_vcmpgefp_p (a1, a2)
+#define vec_any_nle(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a2), (a1))
-#define vec_any_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (a1, a2)
+#define vec_any_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a2), (a1))
-#define vec_any_numeric(a1) __builtin_altivec_vcmpeqfp_p (a1)
+#define vec_any_numeric(a1) __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, (a1), (a1))
-#define vec_any_out(a1, a2) __builtin_altivec_vcmpbfp_p (a1, a2)
+#define vec_any_out(a1, a2) __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, (a1), (a2))
#endif /* __cplusplus */
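
The hunks above give every vec_all_* / vec_any_* predicate an explicit CR6
selector as the builtin's first argument: __CR6_LT tests "all vector
elements true", __CR6_EQ tests "all elements false", and the *_REV forms
test the complements ("not all true" / "not all false").  A minimal sketch
of how the rewritten predicates behave under that encoding -- illustrative
only, with the function names (all_positive, has_nan) invented here:

    #include <altivec.h>

    /* 1 iff every element of v is strictly positive.  Per the macros
       above, vec_all_gt on two signed int vectors expands to
       __builtin_altivec_vcmpgtsw_p (__CR6_LT, v, zero), i.e. vcmpgtsw.
       followed by a read of the CR6 "all true" bit.  */
    int
    all_positive (vector signed int v)
    {
      vector signed int zero = vec_splat_s32 (0);
      return vec_all_gt (v, zero);
    }

    /* 1 iff at least one element of x is a NaN; vec_any_nan above is
       vcmpeqfp. of x against itself with the reversed "all true" test.  */
    int
    has_nan (vector float x)
    {
      return vec_any_nan (x);
    }
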
diff --git a/contrib/gcc/config/rs6000/crtsavres.asm b/contrib/gcc/config/rs6000/crtsavres.asm
new file mode 100644
index 0000000..0c65182
--- /dev/null
+++ b/contrib/gcc/config/rs6000/crtsavres.asm
@@ -0,0 +1,407 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001 Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause
+ * the resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why
+ * the executable file might be covered by the GNU General Public License.
+ */
+
+/* Register save/restore routines used by GCC-generated prologue and epilogue code.  */
+
+ .file "crtsavres.asm"
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifndef __powerpc64__
+
+/* Routines for saving floating point registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the floating point save area. */
+
+FUNC_START(_savefpr_14) stfd 14,-144(11) /* save fp registers */
+FUNC_START(_savefpr_15) stfd 15,-136(11)
+FUNC_START(_savefpr_16) stfd 16,-128(11)
+FUNC_START(_savefpr_17) stfd 17,-120(11)
+FUNC_START(_savefpr_18) stfd 18,-112(11)
+FUNC_START(_savefpr_19) stfd 19,-104(11)
+FUNC_START(_savefpr_20) stfd 20,-96(11)
+FUNC_START(_savefpr_21) stfd 21,-88(11)
+FUNC_START(_savefpr_22) stfd 22,-80(11)
+FUNC_START(_savefpr_23) stfd 23,-72(11)
+FUNC_START(_savefpr_24) stfd 24,-64(11)
+FUNC_START(_savefpr_25) stfd 25,-56(11)
+FUNC_START(_savefpr_26) stfd 26,-48(11)
+FUNC_START(_savefpr_27) stfd 27,-40(11)
+FUNC_START(_savefpr_28) stfd 28,-32(11)
+FUNC_START(_savefpr_29) stfd 29,-24(11)
+FUNC_START(_savefpr_30) stfd 30,-16(11)
+FUNC_START(_savefpr_31) stfd 31,-8(11)
+ blr
+FUNC_END(_savefpr_31)
+FUNC_END(_savefpr_30)
+FUNC_END(_savefpr_29)
+FUNC_END(_savefpr_28)
+FUNC_END(_savefpr_27)
+FUNC_END(_savefpr_26)
+FUNC_END(_savefpr_25)
+FUNC_END(_savefpr_24)
+FUNC_END(_savefpr_23)
+FUNC_END(_savefpr_22)
+FUNC_END(_savefpr_21)
+FUNC_END(_savefpr_20)
+FUNC_END(_savefpr_19)
+FUNC_END(_savefpr_18)
+FUNC_END(_savefpr_17)
+FUNC_END(_savefpr_16)
+FUNC_END(_savefpr_15)
+FUNC_END(_savefpr_14)
+
+/* Routines for saving integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer save area. */
+
+FUNC_START(_savegpr_14) stw 14,-72(11) /* save gp registers */
+FUNC_START(_savegpr_15) stw 15,-68(11)
+FUNC_START(_savegpr_16) stw 16,-64(11)
+FUNC_START(_savegpr_17) stw 17,-60(11)
+FUNC_START(_savegpr_18) stw 18,-56(11)
+FUNC_START(_savegpr_19) stw 19,-52(11)
+FUNC_START(_savegpr_20) stw 20,-48(11)
+FUNC_START(_savegpr_21) stw 21,-44(11)
+FUNC_START(_savegpr_22) stw 22,-40(11)
+FUNC_START(_savegpr_23) stw 23,-36(11)
+FUNC_START(_savegpr_24) stw 24,-32(11)
+FUNC_START(_savegpr_25) stw 25,-28(11)
+FUNC_START(_savegpr_26) stw 26,-24(11)
+FUNC_START(_savegpr_27) stw 27,-20(11)
+FUNC_START(_savegpr_28) stw 28,-16(11)
+FUNC_START(_savegpr_29) stw 29,-12(11)
+FUNC_START(_savegpr_30) stw 30,-8(11)
+FUNC_START(_savegpr_31) stw 31,-4(11)
+ blr
+FUNC_END(_savegpr_31)
+FUNC_END(_savegpr_30)
+FUNC_END(_savegpr_29)
+FUNC_END(_savegpr_28)
+FUNC_END(_savegpr_27)
+FUNC_END(_savegpr_26)
+FUNC_END(_savegpr_25)
+FUNC_END(_savegpr_24)
+FUNC_END(_savegpr_23)
+FUNC_END(_savegpr_22)
+FUNC_END(_savegpr_21)
+FUNC_END(_savegpr_20)
+FUNC_END(_savegpr_19)
+FUNC_END(_savegpr_18)
+FUNC_END(_savegpr_17)
+FUNC_END(_savegpr_16)
+FUNC_END(_savegpr_15)
+FUNC_END(_savegpr_14)
+
+/* Routines for restoring floating point registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the floating point save area. */
+
+FUNC_START(_restfpr_14) lfd 14,-144(11) /* restore fp registers */
+FUNC_START(_restfpr_15) lfd 15,-136(11)
+FUNC_START(_restfpr_16) lfd 16,-128(11)
+FUNC_START(_restfpr_17) lfd 17,-120(11)
+FUNC_START(_restfpr_18) lfd 18,-112(11)
+FUNC_START(_restfpr_19) lfd 19,-104(11)
+FUNC_START(_restfpr_20) lfd 20,-96(11)
+FUNC_START(_restfpr_21) lfd 21,-88(11)
+FUNC_START(_restfpr_22) lfd 22,-80(11)
+FUNC_START(_restfpr_23) lfd 23,-72(11)
+FUNC_START(_restfpr_24) lfd 24,-64(11)
+FUNC_START(_restfpr_25) lfd 25,-56(11)
+FUNC_START(_restfpr_26) lfd 26,-48(11)
+FUNC_START(_restfpr_27) lfd 27,-40(11)
+FUNC_START(_restfpr_28) lfd 28,-32(11)
+FUNC_START(_restfpr_29) lfd 29,-24(11)
+FUNC_START(_restfpr_30) lfd 30,-16(11)
+FUNC_START(_restfpr_31) lfd 31,-8(11)
+ blr
+FUNC_END(_restfpr_31)
+FUNC_END(_restfpr_30)
+FUNC_END(_restfpr_29)
+FUNC_END(_restfpr_28)
+FUNC_END(_restfpr_27)
+FUNC_END(_restfpr_26)
+FUNC_END(_restfpr_25)
+FUNC_END(_restfpr_24)
+FUNC_END(_restfpr_23)
+FUNC_END(_restfpr_22)
+FUNC_END(_restfpr_21)
+FUNC_END(_restfpr_20)
+FUNC_END(_restfpr_19)
+FUNC_END(_restfpr_18)
+FUNC_END(_restfpr_17)
+FUNC_END(_restfpr_16)
+FUNC_END(_restfpr_15)
+FUNC_END(_restfpr_14)
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer save area. */
+
+FUNC_START(_restgpr_14) lwz 14,-72(11) /* restore gp registers */
+FUNC_START(_restgpr_15) lwz 15,-68(11)
+FUNC_START(_restgpr_16) lwz 16,-64(11)
+FUNC_START(_restgpr_17) lwz 17,-60(11)
+FUNC_START(_restgpr_18) lwz 18,-56(11)
+FUNC_START(_restgpr_19) lwz 19,-52(11)
+FUNC_START(_restgpr_20) lwz 20,-48(11)
+FUNC_START(_restgpr_21) lwz 21,-44(11)
+FUNC_START(_restgpr_22) lwz 22,-40(11)
+FUNC_START(_restgpr_23) lwz 23,-36(11)
+FUNC_START(_restgpr_24) lwz 24,-32(11)
+FUNC_START(_restgpr_25) lwz 25,-28(11)
+FUNC_START(_restgpr_26) lwz 26,-24(11)
+FUNC_START(_restgpr_27) lwz 27,-20(11)
+FUNC_START(_restgpr_28) lwz 28,-16(11)
+FUNC_START(_restgpr_29) lwz 29,-12(11)
+FUNC_START(_restgpr_30) lwz 30,-8(11)
+FUNC_START(_restgpr_31) lwz 31,-4(11)
+ blr
+FUNC_END(_restgpr_31)
+FUNC_END(_restgpr_30)
+FUNC_END(_restgpr_29)
+FUNC_END(_restgpr_28)
+FUNC_END(_restgpr_27)
+FUNC_END(_restgpr_26)
+FUNC_END(_restgpr_25)
+FUNC_END(_restgpr_24)
+FUNC_END(_restgpr_23)
+FUNC_END(_restgpr_22)
+FUNC_END(_restgpr_21)
+FUNC_END(_restgpr_20)
+FUNC_END(_restgpr_19)
+FUNC_END(_restgpr_18)
+FUNC_END(_restgpr_17)
+FUNC_END(_restgpr_16)
+FUNC_END(_restgpr_15)
+FUNC_END(_restgpr_14)
+
+/* Routines for restoring floating point registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the floating point save area. */
+/* In addition to restoring the fp registers, these routines return to */
+/* the caller's caller.  */
+
+FUNC_START(_restfpr_14_x) lfd 14,-144(11) /* restore fp registers */
+FUNC_START(_restfpr_15_x) lfd 15,-136(11)
+FUNC_START(_restfpr_16_x) lfd 16,-128(11)
+FUNC_START(_restfpr_17_x) lfd 17,-120(11)
+FUNC_START(_restfpr_18_x) lfd 18,-112(11)
+FUNC_START(_restfpr_19_x) lfd 19,-104(11)
+FUNC_START(_restfpr_20_x) lfd 20,-96(11)
+FUNC_START(_restfpr_21_x) lfd 21,-88(11)
+FUNC_START(_restfpr_22_x) lfd 22,-80(11)
+FUNC_START(_restfpr_23_x) lfd 23,-72(11)
+FUNC_START(_restfpr_24_x) lfd 24,-64(11)
+FUNC_START(_restfpr_25_x) lfd 25,-56(11)
+FUNC_START(_restfpr_26_x) lfd 26,-48(11)
+FUNC_START(_restfpr_27_x) lfd 27,-40(11)
+FUNC_START(_restfpr_28_x) lfd 28,-32(11)
+FUNC_START(_restfpr_29_x) lfd 29,-24(11)
+FUNC_START(_restfpr_30_x) lfd 30,-16(11)
+FUNC_START(_restfpr_31_x) lwz 0,4(11)
+ lfd 31,-8(11)
+ mtlr 0
+ mr 1,11
+ blr
+FUNC_END(_restfpr_31_x)
+FUNC_END(_restfpr_30_x)
+FUNC_END(_restfpr_29_x)
+FUNC_END(_restfpr_28_x)
+FUNC_END(_restfpr_27_x)
+FUNC_END(_restfpr_26_x)
+FUNC_END(_restfpr_25_x)
+FUNC_END(_restfpr_24_x)
+FUNC_END(_restfpr_23_x)
+FUNC_END(_restfpr_22_x)
+FUNC_END(_restfpr_21_x)
+FUNC_END(_restfpr_20_x)
+FUNC_END(_restfpr_19_x)
+FUNC_END(_restfpr_18_x)
+FUNC_END(_restfpr_17_x)
+FUNC_END(_restfpr_16_x)
+FUNC_END(_restfpr_15_x)
+FUNC_END(_restfpr_14_x)
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer save area. */
+/* In addition to restoring the integer registers, these routines return */
+/* to the caller's caller.  */
+
+FUNC_START(_restgpr_14_x) lwz 14,-72(11) /* restore gp registers */
+FUNC_START(_restgpr_15_x) lwz 15,-68(11)
+FUNC_START(_restgpr_16_x) lwz 16,-64(11)
+FUNC_START(_restgpr_17_x) lwz 17,-60(11)
+FUNC_START(_restgpr_18_x) lwz 18,-56(11)
+FUNC_START(_restgpr_19_x) lwz 19,-52(11)
+FUNC_START(_restgpr_20_x) lwz 20,-48(11)
+FUNC_START(_restgpr_21_x) lwz 21,-44(11)
+FUNC_START(_restgpr_22_x) lwz 22,-40(11)
+FUNC_START(_restgpr_23_x) lwz 23,-36(11)
+FUNC_START(_restgpr_24_x) lwz 24,-32(11)
+FUNC_START(_restgpr_25_x) lwz 25,-28(11)
+FUNC_START(_restgpr_26_x) lwz 26,-24(11)
+FUNC_START(_restgpr_27_x) lwz 27,-20(11)
+FUNC_START(_restgpr_28_x) lwz 28,-16(11)
+FUNC_START(_restgpr_29_x) lwz 29,-12(11)
+FUNC_START(_restgpr_30_x) lwz 30,-8(11)
+FUNC_START(_restgpr_31_x) lwz 0,4(11)
+ lwz 31,-4(11)
+ mtlr 0
+ mr 1,11
+ blr
+FUNC_END(_restgpr_31_x)
+FUNC_END(_restgpr_30_x)
+FUNC_END(_restgpr_29_x)
+FUNC_END(_restgpr_28_x)
+FUNC_END(_restgpr_27_x)
+FUNC_END(_restgpr_26_x)
+FUNC_END(_restgpr_25_x)
+FUNC_END(_restgpr_24_x)
+FUNC_END(_restgpr_23_x)
+FUNC_END(_restgpr_22_x)
+FUNC_END(_restgpr_21_x)
+FUNC_END(_restgpr_20_x)
+FUNC_END(_restgpr_19_x)
+FUNC_END(_restgpr_18_x)
+FUNC_END(_restgpr_17_x)
+FUNC_END(_restgpr_16_x)
+FUNC_END(_restgpr_15_x)
+FUNC_END(_restgpr_14_x)
+
+#else /* __powerpc64__ */
+
+ .section ".text"
+ .align 2
+
+/* Routines for saving floating point registers, called by the compiler. */
+
+.fsav:
+FUNC_START(_savef14) stfd 14,-144(1) /* save fp registers */
+FUNC_START(_savef15) stfd 15,-136(1)
+FUNC_START(_savef16) stfd 16,-128(1)
+FUNC_START(_savef17) stfd 17,-120(1)
+FUNC_START(_savef18) stfd 18,-112(1)
+FUNC_START(_savef19) stfd 19,-104(1)
+FUNC_START(_savef20) stfd 20,-96(1)
+FUNC_START(_savef21) stfd 21,-88(1)
+FUNC_START(_savef22) stfd 22,-80(1)
+FUNC_START(_savef23) stfd 23,-72(1)
+FUNC_START(_savef24) stfd 24,-64(1)
+FUNC_START(_savef25) stfd 25,-56(1)
+FUNC_START(_savef26) stfd 26,-48(1)
+FUNC_START(_savef27) stfd 27,-40(1)
+FUNC_START(_savef28) stfd 28,-32(1)
+FUNC_START(_savef29) stfd 29,-24(1)
+FUNC_START(_savef30) stfd 30,-16(1)
+FUNC_START(_savef31) stfd 31,-8(1)
+ blr
+.LTfsav:
+ .long 0
+ .byte 0,12,0,0,0,0,0,0
+ .long 0
+ .long .LTfsav-.fsav
+ .short 4
+ .ascii "fsav"
+FUNC_END(_savef31)
+FUNC_END(_savef30)
+FUNC_END(_savef29)
+FUNC_END(_savef28)
+FUNC_END(_savef27)
+FUNC_END(_savef26)
+FUNC_END(_savef25)
+FUNC_END(_savef24)
+FUNC_END(_savef23)
+FUNC_END(_savef22)
+FUNC_END(_savef21)
+FUNC_END(_savef20)
+FUNC_END(_savef19)
+FUNC_END(_savef18)
+FUNC_END(_savef17)
+FUNC_END(_savef16)
+FUNC_END(_savef15)
+FUNC_END(_savef14)
+
+/* Routines for restoring floating point registers, called by the compiler. */
+
+.fres:
+FUNC_START(_restf14) lfd 14,-144(1) /* restore fp registers */
+FUNC_START(_restf15) lfd 15,-136(1)
+FUNC_START(_restf16) lfd 16,-128(1)
+FUNC_START(_restf17) lfd 17,-120(1)
+FUNC_START(_restf18) lfd 18,-112(1)
+FUNC_START(_restf19) lfd 19,-104(1)
+FUNC_START(_restf20) lfd 20,-96(1)
+FUNC_START(_restf21) lfd 21,-88(1)
+FUNC_START(_restf22) lfd 22,-80(1)
+FUNC_START(_restf23) lfd 23,-72(1)
+FUNC_START(_restf24) lfd 24,-64(1)
+FUNC_START(_restf25) lfd 25,-56(1)
+FUNC_START(_restf26) lfd 26,-48(1)
+FUNC_START(_restf27) lfd 27,-40(1)
+FUNC_START(_restf28) lfd 28,-32(1)
+FUNC_START(_restf29) lfd 29,-24(1)
+FUNC_START(_restf30) lfd 30,-16(1)
+FUNC_START(_restf31) lfd 31,-8(1)
+ blr
+.LTfres:
+ .long 0
+ .byte 0,12,0,0,0,0,0,0
+ .long 0
+ .long .LTfres-.fres
+ .short 4
+ .ascii "fres"
+FUNC_END(_restf31)
+FUNC_END(_restf30)
+FUNC_END(_restf29)
+FUNC_END(_restf28)
+FUNC_END(_restf27)
+FUNC_END(_restf26)
+FUNC_END(_restf25)
+FUNC_END(_restf24)
+FUNC_END(_restf23)
+FUNC_END(_restf22)
+FUNC_END(_restf21)
+FUNC_END(_restf20)
+FUNC_END(_restf19)
+FUNC_END(_restf18)
+FUNC_END(_restf17)
+FUNC_END(_restf16)
+FUNC_END(_restf15)
+FUNC_END(_restf14)
+
+#endif /* __powerpc64__ */
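
The new file is a set of fall-through ladders: a function compiled for
size calls _savegpr_N / _savefpr_N once to spill registers N through 31,
each entry point being a single store that falls into the next, with one
shared blr at the bottom; the _x restore variants additionally reload the
saved LR word from 4(r11), restore it with mtlr, pop the frame with
mr 1,11, and return on the caller's behalf.  The slot offsets follow a
fixed formula counted back from r11 (4-byte GPR slots, 8-byte FPR slots),
which this small self-check illustrates -- a sketch with invented helper
names, not part of the patch:

    #include <assert.h>

    /* _savegpr_N stores GPR N at gpr_slot (N) bytes off r11;
       _savefpr_N stores FPR N at fpr_slot (N) bytes off r11.  */
    static int gpr_slot (int n) { return -4 * (32 - n); }
    static int fpr_slot (int n) { return -8 * (32 - n); }

    int
    main (void)
    {
      assert (gpr_slot (14) == -72 && gpr_slot (31) == -4);   /* stw 14,-72(11) ... stw 31,-4(11) */
      assert (fpr_slot (14) == -144 && fpr_slot (31) == -8);  /* stfd 14,-144(11) ... stfd 31,-8(11) */
      return 0;
    }

Sharing one ladder per register class trades a call for the string of
stores a function would otherwise emit inline, which is why the compiler
emits calls to these routines mainly when optimizing for space.
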
diff --git a/contrib/gcc/config/rs6000/darwin.h b/contrib/gcc/config/rs6000/darwin.h
index cb6b4b77..81c24e7 100644
--- a/contrib/gcc/config/rs6000/darwin.h
+++ b/contrib/gcc/config/rs6000/darwin.h
@@ -35,6 +35,9 @@ Boston, MA 02111-1307, USA. */
#define TARGET_TOC 0
#define TARGET_NO_TOC 1
+/* Handle #pragma weak and #pragma pack. */
+#define HANDLE_SYSV_PRAGMA
+
 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
 off. */
@@ -57,8 +60,8 @@ Boston, MA 02111-1307, USA. */
#undef FRAME_POINTER_REGNUM
#define FRAME_POINTER_REGNUM 30
-#undef PIC_OFFSET_TABLE_REGNUM
-#define PIC_OFFSET_TABLE_REGNUM 31
+#undef RS6000_PIC_OFFSET_TABLE_REGNUM
+#define RS6000_PIC_OFFSET_TABLE_REGNUM 31
/* Pad the outgoing args area to 16 bytes instead of the usual 8. */
@@ -218,7 +221,10 @@ Boston, MA 02111-1307, USA. */
&& TYPE_FIELDS (STRUCT) != 0 \
&& DECL_MODE (TYPE_FIELDS (STRUCT)) == DFmode \
? MAX (MAX ((COMPUTED), (SPECIFIED)), 64) \
+ : (TARGET_ALTIVEC && TREE_CODE (STRUCT) == VECTOR_TYPE) \
+ ? MAX (MAX ((COMPUTED), (SPECIFIED)), 128) \
: MAX ((COMPUTED), (SPECIFIED)))
+
/* XXX: Darwin supports neither .quad nor .llong, but it doesn't
   support 64-bit PowerPC either, so this just keeps things happy. */
#define DOUBLE_INT_ASM_OP "\t.quad\t"
@@ -227,3 +233,7 @@ Boston, MA 02111-1307, USA. */
space/speed. */
#undef MAX_LONG_TYPE_SIZE
#define MAX_LONG_TYPE_SIZE 32
+
+/* For binary compatibility with 2.95: Darwin C APIs use bool from
+ stdbool.h, which was an int-sized enum in 2.95. */
+#define BOOL_TYPE_SIZE INT_TYPE_SIZE
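A stand-alone model of the alignment rule the ROUND_TYPE_ALIGN hunk above encodes, with flags standing in for the TYPE_FIELDS/DFmode and VECTOR_TYPE tests; the function and driver are illustrative, not GCC code, and alignments are in bits as in the macro:

/* Sketch: double-first structs get at least 64-bit alignment, and
   (with AltiVec enabled) vector types at least 128-bit; otherwise
   the larger of the computed and specified alignments wins. */
#include <stdio.h>

static unsigned
round_type_align (int first_field_is_double, int is_altivec_vector,
                  unsigned computed, unsigned specified)
{
  unsigned align = computed > specified ? computed : specified;
  if (first_field_is_double && align < 64)
    align = 64;
  else if (is_altivec_vector && align < 128)
    align = 128;
  return align;
}

int main (void)
{
  printf ("%u %u\n",
          round_type_align (1, 0, 32, 32),   /* prints 64 */
          round_type_align (0, 1, 32, 32));  /* prints 128 */
  return 0;
}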
diff --git a/contrib/gcc/config/rs6000/eabi.asm b/contrib/gcc/config/rs6000/eabi.asm
index 85f2e1b..0808e9c 100644
--- a/contrib/gcc/config/rs6000/eabi.asm
+++ b/contrib/gcc/config/rs6000/eabi.asm
@@ -3,7 +3,6 @@
*
* Copyright (C) 1995, 1996, 1998, 2000, 2001 Free Software Foundation, Inc.
* Written By Michael Meissner
- * 64-bit support written by David Edelsohn
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -297,365 +296,4 @@ FUNC_START(__eabi_uconvert)
FUNC_END(__eabi_uconvert)
-/* Routines for saving floating point registers, called by the compiler. */
-/* Called with r11 pointing to the stack header word of the caller of the */
-/* function, just beyond the end of the floating point save area. */
-
-FUNC_START(_savefpr_14) stfd 14,-144(11) /* save fp registers */
-FUNC_START(_savefpr_15) stfd 15,-136(11)
-FUNC_START(_savefpr_16) stfd 16,-128(11)
-FUNC_START(_savefpr_17) stfd 17,-120(11)
-FUNC_START(_savefpr_18) stfd 18,-112(11)
-FUNC_START(_savefpr_19) stfd 19,-104(11)
-FUNC_START(_savefpr_20) stfd 20,-96(11)
-FUNC_START(_savefpr_21) stfd 21,-88(11)
-FUNC_START(_savefpr_22) stfd 22,-80(11)
-FUNC_START(_savefpr_23) stfd 23,-72(11)
-FUNC_START(_savefpr_24) stfd 24,-64(11)
-FUNC_START(_savefpr_25) stfd 25,-56(11)
-FUNC_START(_savefpr_26) stfd 26,-48(11)
-FUNC_START(_savefpr_27) stfd 27,-40(11)
-FUNC_START(_savefpr_28) stfd 28,-32(11)
-FUNC_START(_savefpr_29) stfd 29,-24(11)
-FUNC_START(_savefpr_30) stfd 30,-16(11)
-FUNC_START(_savefpr_31) stfd 31,-8(11)
- blr
-FUNC_END(_savefpr_31)
-FUNC_END(_savefpr_30)
-FUNC_END(_savefpr_29)
-FUNC_END(_savefpr_28)
-FUNC_END(_savefpr_27)
-FUNC_END(_savefpr_26)
-FUNC_END(_savefpr_25)
-FUNC_END(_savefpr_24)
-FUNC_END(_savefpr_23)
-FUNC_END(_savefpr_22)
-FUNC_END(_savefpr_21)
-FUNC_END(_savefpr_20)
-FUNC_END(_savefpr_19)
-FUNC_END(_savefpr_18)
-FUNC_END(_savefpr_17)
-FUNC_END(_savefpr_16)
-FUNC_END(_savefpr_15)
-FUNC_END(_savefpr_14)
-
-/* Routines for saving integer registers, called by the compiler. */
-/* Called with r11 pointing to the stack header word of the caller of the */
-/* function, just beyond the end of the integer save area. */
-
-FUNC_START(_savegpr_14) stw 14,-72(11) /* save gp registers */
-FUNC_START(_savegpr_15) stw 15,-68(11)
-FUNC_START(_savegpr_16) stw 16,-64(11)
-FUNC_START(_savegpr_17) stw 17,-60(11)
-FUNC_START(_savegpr_18) stw 18,-56(11)
-FUNC_START(_savegpr_19) stw 19,-52(11)
-FUNC_START(_savegpr_20) stw 20,-48(11)
-FUNC_START(_savegpr_21) stw 21,-44(11)
-FUNC_START(_savegpr_22) stw 22,-40(11)
-FUNC_START(_savegpr_23) stw 23,-36(11)
-FUNC_START(_savegpr_24) stw 24,-32(11)
-FUNC_START(_savegpr_25) stw 25,-28(11)
-FUNC_START(_savegpr_26) stw 26,-24(11)
-FUNC_START(_savegpr_27) stw 27,-20(11)
-FUNC_START(_savegpr_28) stw 28,-16(11)
-FUNC_START(_savegpr_29) stw 29,-12(11)
-FUNC_START(_savegpr_30) stw 30,-8(11)
-FUNC_START(_savegpr_31) stw 31,-4(11)
- blr
-FUNC_END(_savegpr_31)
-FUNC_END(_savegpr_30)
-FUNC_END(_savegpr_29)
-FUNC_END(_savegpr_28)
-FUNC_END(_savegpr_27)
-FUNC_END(_savegpr_26)
-FUNC_END(_savegpr_25)
-FUNC_END(_savegpr_24)
-FUNC_END(_savegpr_23)
-FUNC_END(_savegpr_22)
-FUNC_END(_savegpr_21)
-FUNC_END(_savegpr_20)
-FUNC_END(_savegpr_19)
-FUNC_END(_savegpr_18)
-FUNC_END(_savegpr_17)
-FUNC_END(_savegpr_16)
-FUNC_END(_savegpr_15)
-FUNC_END(_savegpr_14)
-
-/* Routines for restoring floating point registers, called by the compiler. */
-/* Called with r11 pointing to the stack header word of the caller of the */
-/* function, just beyond the end of the floating point save area. */
-
-FUNC_START(_restfpr_14) lfd 14,-144(11) /* restore fp registers */
-FUNC_START(_restfpr_15) lfd 15,-136(11)
-FUNC_START(_restfpr_16) lfd 16,-128(11)
-FUNC_START(_restfpr_17) lfd 17,-120(11)
-FUNC_START(_restfpr_18) lfd 18,-112(11)
-FUNC_START(_restfpr_19) lfd 19,-104(11)
-FUNC_START(_restfpr_20) lfd 20,-96(11)
-FUNC_START(_restfpr_21) lfd 21,-88(11)
-FUNC_START(_restfpr_22) lfd 22,-80(11)
-FUNC_START(_restfpr_23) lfd 23,-72(11)
-FUNC_START(_restfpr_24) lfd 24,-64(11)
-FUNC_START(_restfpr_25) lfd 25,-56(11)
-FUNC_START(_restfpr_26) lfd 26,-48(11)
-FUNC_START(_restfpr_27) lfd 27,-40(11)
-FUNC_START(_restfpr_28) lfd 28,-32(11)
-FUNC_START(_restfpr_29) lfd 29,-24(11)
-FUNC_START(_restfpr_30) lfd 30,-16(11)
-FUNC_START(_restfpr_31) lfd 31,-8(11)
- blr
-FUNC_END(_restfpr_31)
-FUNC_END(_restfpr_30)
-FUNC_END(_restfpr_29)
-FUNC_END(_restfpr_28)
-FUNC_END(_restfpr_27)
-FUNC_END(_restfpr_26)
-FUNC_END(_restfpr_25)
-FUNC_END(_restfpr_24)
-FUNC_END(_restfpr_23)
-FUNC_END(_restfpr_22)
-FUNC_END(_restfpr_21)
-FUNC_END(_restfpr_20)
-FUNC_END(_restfpr_19)
-FUNC_END(_restfpr_18)
-FUNC_END(_restfpr_17)
-FUNC_END(_restfpr_16)
-FUNC_END(_restfpr_15)
-FUNC_END(_restfpr_14)
-
-/* Routines for restoring integer registers, called by the compiler. */
-/* Called with r11 pointing to the stack header word of the caller of the */
-/* function, just beyond the end of the integer restore area. */
-
-FUNC_START(_restgpr_14) lwz 14,-72(11) /* restore gp registers */
-FUNC_START(_restgpr_15) lwz 15,-68(11)
-FUNC_START(_restgpr_16) lwz 16,-64(11)
-FUNC_START(_restgpr_17) lwz 17,-60(11)
-FUNC_START(_restgpr_18) lwz 18,-56(11)
-FUNC_START(_restgpr_19) lwz 19,-52(11)
-FUNC_START(_restgpr_20) lwz 20,-48(11)
-FUNC_START(_restgpr_21) lwz 21,-44(11)
-FUNC_START(_restgpr_22) lwz 22,-40(11)
-FUNC_START(_restgpr_23) lwz 23,-36(11)
-FUNC_START(_restgpr_24) lwz 24,-32(11)
-FUNC_START(_restgpr_25) lwz 25,-28(11)
-FUNC_START(_restgpr_26) lwz 26,-24(11)
-FUNC_START(_restgpr_27) lwz 27,-20(11)
-FUNC_START(_restgpr_28) lwz 28,-16(11)
-FUNC_START(_restgpr_29) lwz 29,-12(11)
-FUNC_START(_restgpr_30) lwz 30,-8(11)
-FUNC_START(_restgpr_31) lwz 31,-4(11)
- blr
-FUNC_END(_restgpr_31)
-FUNC_END(_restgpr_30)
-FUNC_END(_restgpr_29)
-FUNC_END(_restgpr_28)
-FUNC_END(_restgpr_27)
-FUNC_END(_restgpr_26)
-FUNC_END(_restgpr_25)
-FUNC_END(_restgpr_24)
-FUNC_END(_restgpr_23)
-FUNC_END(_restgpr_22)
-FUNC_END(_restgpr_21)
-FUNC_END(_restgpr_20)
-FUNC_END(_restgpr_19)
-FUNC_END(_restgpr_18)
-FUNC_END(_restgpr_17)
-FUNC_END(_restgpr_16)
-FUNC_END(_restgpr_15)
-FUNC_END(_restgpr_14)
-
-/* Routines for restoring floating point registers, called by the compiler. */
-/* Called with r11 pointing to the stack header word of the caller of the */
-/* function, just beyond the end of the floating point save area. */
-/* In addition to restoring the fp registers, it will return to the caller's */
-/* caller */
-
-FUNC_START(_restfpr_14_x) lfd 14,-144(11) /* restore fp registers */
-FUNC_START(_restfpr_15_x) lfd 15,-136(11)
-FUNC_START(_restfpr_16_x) lfd 16,-128(11)
-FUNC_START(_restfpr_17_x) lfd 17,-120(11)
-FUNC_START(_restfpr_18_x) lfd 18,-112(11)
-FUNC_START(_restfpr_19_x) lfd 19,-104(11)
-FUNC_START(_restfpr_20_x) lfd 20,-96(11)
-FUNC_START(_restfpr_21_x) lfd 21,-88(11)
-FUNC_START(_restfpr_22_x) lfd 22,-80(11)
-FUNC_START(_restfpr_23_x) lfd 23,-72(11)
-FUNC_START(_restfpr_24_x) lfd 24,-64(11)
-FUNC_START(_restfpr_25_x) lfd 25,-56(11)
-FUNC_START(_restfpr_26_x) lfd 26,-48(11)
-FUNC_START(_restfpr_27_x) lfd 27,-40(11)
-FUNC_START(_restfpr_28_x) lfd 28,-32(11)
-FUNC_START(_restfpr_29_x) lfd 29,-24(11)
-FUNC_START(_restfpr_30_x) lfd 30,-16(11)
-FUNC_START(_restfpr_31_x) lwz 0,4(11)
- lfd 31,-8(11)
- mtlr 0
- mr 1,11
- blr
-FUNC_END(_restfpr_31_x)
-FUNC_END(_restfpr_30_x)
-FUNC_END(_restfpr_29_x)
-FUNC_END(_restfpr_28_x)
-FUNC_END(_restfpr_27_x)
-FUNC_END(_restfpr_26_x)
-FUNC_END(_restfpr_25_x)
-FUNC_END(_restfpr_24_x)
-FUNC_END(_restfpr_23_x)
-FUNC_END(_restfpr_22_x)
-FUNC_END(_restfpr_21_x)
-FUNC_END(_restfpr_20_x)
-FUNC_END(_restfpr_19_x)
-FUNC_END(_restfpr_18_x)
-FUNC_END(_restfpr_17_x)
-FUNC_END(_restfpr_16_x)
-FUNC_END(_restfpr_15_x)
-FUNC_END(_restfpr_14_x)
-
-/* Routines for restoring integer registers, called by the compiler. */
-/* Called with r11 pointing to the stack header word of the caller of the */
-/* function, just beyond the end of the integer restore area. */
-
-FUNC_START(_restgpr_14_x) lwz 14,-72(11) /* restore gp registers */
-FUNC_START(_restgpr_15_x) lwz 15,-68(11)
-FUNC_START(_restgpr_16_x) lwz 16,-64(11)
-FUNC_START(_restgpr_17_x) lwz 17,-60(11)
-FUNC_START(_restgpr_18_x) lwz 18,-56(11)
-FUNC_START(_restgpr_19_x) lwz 19,-52(11)
-FUNC_START(_restgpr_20_x) lwz 20,-48(11)
-FUNC_START(_restgpr_21_x) lwz 21,-44(11)
-FUNC_START(_restgpr_22_x) lwz 22,-40(11)
-FUNC_START(_restgpr_23_x) lwz 23,-36(11)
-FUNC_START(_restgpr_24_x) lwz 24,-32(11)
-FUNC_START(_restgpr_25_x) lwz 25,-28(11)
-FUNC_START(_restgpr_26_x) lwz 26,-24(11)
-FUNC_START(_restgpr_27_x) lwz 27,-20(11)
-FUNC_START(_restgpr_28_x) lwz 28,-16(11)
-FUNC_START(_restgpr_29_x) lwz 29,-12(11)
-FUNC_START(_restgpr_30_x) lwz 30,-8(11)
-FUNC_START(_restgpr_31_x) lwz 0,4(11)
- lwz 31,-4(11)
- mtlr 0
- mr 1,11
- blr
-FUNC_END(_restgpr_31_x)
-FUNC_END(_restgpr_30_x)
-FUNC_END(_restgpr_29_x)
-FUNC_END(_restgpr_28_x)
-FUNC_END(_restgpr_27_x)
-FUNC_END(_restgpr_26_x)
-FUNC_END(_restgpr_25_x)
-FUNC_END(_restgpr_24_x)
-FUNC_END(_restgpr_23_x)
-FUNC_END(_restgpr_22_x)
-FUNC_END(_restgpr_21_x)
-FUNC_END(_restgpr_20_x)
-FUNC_END(_restgpr_19_x)
-FUNC_END(_restgpr_18_x)
-FUNC_END(_restgpr_17_x)
-FUNC_END(_restgpr_16_x)
-FUNC_END(_restgpr_15_x)
-FUNC_END(_restgpr_14_x)
-
-#else /* __powerpc64__ */
-
- .section ".text"
- .align 2
-
-/* Routines for saving floating point registers, called by the compiler. */
-
-.fsav:
-FUNC_START(_savef14) stfd 14,-144(1) /* save fp registers */
-FUNC_START(_savef15) stfd 15,-136(1)
-FUNC_START(_savef16) stfd 16,-128(1)
-FUNC_START(_savef17) stfd 17,-120(1)
-FUNC_START(_savef18) stfd 18,-112(1)
-FUNC_START(_savef19) stfd 19,-104(1)
-FUNC_START(_savef20) stfd 20,-96(1)
-FUNC_START(_savef21) stfd 21,-88(1)
-FUNC_START(_savef22) stfd 22,-80(1)
-FUNC_START(_savef23) stfd 23,-72(1)
-FUNC_START(_savef24) stfd 24,-64(1)
-FUNC_START(_savef25) stfd 25,-56(1)
-FUNC_START(_savef26) stfd 26,-48(1)
-FUNC_START(_savef27) stfd 27,-40(1)
-FUNC_START(_savef28) stfd 28,-32(1)
-FUNC_START(_savef29) stfd 29,-24(1)
-FUNC_START(_savef30) stfd 30,-16(1)
-FUNC_START(_savef31) stfd 31,-8(1)
- blr
-.LTfsav:
- .long 0
- .byte 0,12,0,0,0,0,0,0
- .long 0
- .long .LTfsav-.fsav
- .short 4
- .ascii "fsav"
-FUNC_END(_savef31)
-FUNC_END(_savef30)
-FUNC_END(_savef29)
-FUNC_END(_savef28)
-FUNC_END(_savef27)
-FUNC_END(_savef26)
-FUNC_END(_savef25)
-FUNC_END(_savef24)
-FUNC_END(_savef23)
-FUNC_END(_savef22)
-FUNC_END(_savef21)
-FUNC_END(_savef20)
-FUNC_END(_savef19)
-FUNC_END(_savef18)
-FUNC_END(_savef17)
-FUNC_END(_savef16)
-FUNC_END(_savef15)
-FUNC_END(_savef14)
-
-/* Routines for restoring floating point registers, called by the compiler. */
-
-.fres:
-FUNC_START(_restf14) lfd 14,-144(1) /* restore fp registers */
-FUNC_START(_restf15) lfd 15,-136(1)
-FUNC_START(_restf16) lfd 16,-128(1)
-FUNC_START(_restf17) lfd 17,-120(1)
-FUNC_START(_restf18) lfd 18,-112(1)
-FUNC_START(_restf19) lfd 19,-104(1)
-FUNC_START(_restf20) lfd 20,-96(1)
-FUNC_START(_restf21) lfd 21,-88(1)
-FUNC_START(_restf22) lfd 22,-80(1)
-FUNC_START(_restf23) lfd 23,-72(1)
-FUNC_START(_restf24) lfd 24,-64(1)
-FUNC_START(_restf25) lfd 25,-56(1)
-FUNC_START(_restf26) lfd 26,-48(1)
-FUNC_START(_restf27) lfd 27,-40(1)
-FUNC_START(_restf28) lfd 28,-32(1)
-FUNC_START(_restf29) lfd 29,-24(1)
-FUNC_START(_restf30) lfd 30,-16(1)
-FUNC_START(_restf31) lfd 31,-8(1)
- blr
-.LTfres:
- .long 0
- .byte 0,12,0,0,0,0,0,0
- .long 0
- .long .LTfres-.fres
- .short 4
- .ascii "fres"
-FUNC_END(_restf31)
-FUNC_END(_restf30)
-FUNC_END(_restf29)
-FUNC_END(_restf28)
-FUNC_END(_restf27)
-FUNC_END(_restf26)
-FUNC_END(_restf25)
-FUNC_END(_restf24)
-FUNC_END(_restf23)
-FUNC_END(_restf22)
-FUNC_END(_restf21)
-FUNC_END(_restf20)
-FUNC_END(_restf19)
-FUNC_END(_restf18)
-FUNC_END(_restf17)
-FUNC_END(_restf16)
-FUNC_END(_restf15)
-FUNC_END(_restf14)
-
#endif
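The out-of-line save/restore routines deleted above (they move to the new crtsavres.asm earlier in this patch) all follow one slot formula relative to r11; a small arithmetic check, not GCC code:

/* GPR n is saved at -4*(32-n)(r11) and FPR n at -8*(32-n)(r11):
   register 14 lands at -72/-144 and register 31 at -4/-8, matching
   the stw/stfd offsets in the routines above. */
#include <stdio.h>

int main (void)
{
  int n;
  for (n = 14; n <= 31; n++)
    printf ("gpr%d: %d  fpr%d: %d\n",
            n, -4 * (32 - n), n, -8 * (32 - n));
  return 0;
}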
diff --git a/contrib/gcc/config/rs6000/gnu.h b/contrib/gcc/config/rs6000/gnu.h
new file mode 100644
index 0000000..32bd906
--- /dev/null
+++ b/contrib/gcc/config/rs6000/gnu.h
@@ -0,0 +1,38 @@
+/* Definitions of target machine for GNU compiler,
+ for powerpc machines running GNU.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#undef CPP_OS_DEFAULT_SPEC
+#define CPP_OS_DEFAULT_SPEC "%(cpp_os_gnu)"
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "%(startfile_gnu)"
+
+#undef ENDFILE_DEFAULT_SPEC
+#define ENDFILE_DEFAULT_SPEC "%(endfile_gnu)"
+
+#undef LINK_START_DEFAULT_SPEC
+#define LINK_START_DEFAULT_SPEC "%(link_start_gnu)"
+
+#undef LINK_OS_DEFAULT_SPEC
+#define LINK_OS_DEFAULT_SPEC "%(link_os_gnu)"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC GNU)");
diff --git a/contrib/gcc/config/rs6000/linux64.h b/contrib/gcc/config/rs6000/linux64.h
index d014afe..40b64dc 100644
--- a/contrib/gcc/config/rs6000/linux64.h
+++ b/contrib/gcc/config/rs6000/linux64.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for 64 bit powerpc linux.
- Copyright (C) 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -87,10 +87,6 @@ Boston, MA 02111-1307, USA. */
#undef JUMP_TABLES_IN_TEXT_SECTION
#define JUMP_TABLES_IN_TEXT_SECTION 1
-/* Define cutoff for using external functions to save floating point. */
-#undef FP_SAVE_INLINE
-#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) == 62 || (FIRST_REG) == 63)
-
/* 64-bit PowerPC Linux always has GPR13 fixed. */
#define FIXED_R13 1
@@ -142,9 +138,29 @@ Boston, MA 02111-1307, USA. */
#define LINK_OS_DEFAULT_SPEC "%(link_os_linux)"
#undef LINK_OS_LINUX_SPEC
+#ifndef CROSS_COMPILE
+#define LINK_OS_LINUX_SPEC "-m elf64ppc %{!shared: %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib64/ld.so.1}}}"
+#else
#define LINK_OS_LINUX_SPEC "-m elf64ppc %{!shared: %{!static: \
%{rdynamic:-export-dynamic} \
- %{!dynamic-linker:-dynamic-linker /lib/ld.so.1}}}"
+ %{!dynamic-linker:-dynamic-linker ld.so.1}}}"
+#endif
+
+#ifndef CROSS_COMPILE
+#undef STARTFILE_LINUX_SPEC
+#define STARTFILE_LINUX_SPEC "\
+%{!shared: %{pg:/usr/lib64/gcrt1.o%s} %{!pg:%{p:/usr/lib64/gcrt1.o%s} \
+ %{!p:/usr/lib64/crt1.o%s}}} /usr/lib64/crti.o%s \
+%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+#endif
+
+#ifndef CROSS_COMPILE
+#undef ENDFILE_LINUX_SPEC
+#define ENDFILE_LINUX_SPEC "\
+%{!shared:crtend.o%s} %{shared:crtendS.o%s} /usr/lib64/crtn.o%s"
+#endif
#undef TOC_SECTION_ASM_OP
#define TOC_SECTION_ASM_OP "\t.section\t\".toc\",\"aw\""
@@ -208,17 +224,6 @@ Boston, MA 02111-1307, USA. */
&& ! DECL_WEAK (DECL)) \
SYMBOL_REF_FLAG (XEXP (DECL_RTL (DECL), 0)) = 1;
-/* This macro gets just the user-specified name
- out of the string in a SYMBOL_REF. Discard
- a leading * or @. */
-#define STRIP_NAME_ENCODING(VAR,SYMBOL_NAME) \
-do { \
- const char *_name = (SYMBOL_NAME); \
- while (*_name == '*' || *_name == '@') \
- _name++; \
- (VAR) = _name; \
-} while (0)
-
/* This is how to output a reference to a user-level label named NAME.
`assemble_name' uses this. */
@@ -245,32 +250,39 @@ do { \
fputs (DOUBLE_INT_ASM_OP, (FILE)); \
putc ('.', (FILE)); \
assemble_name ((FILE), (NAME)); \
- putc ('\n', (FILE)); \
- fputs (DOUBLE_INT_ASM_OP, (FILE)); \
- fputs (".TOC.@tocbase, 0\n\t.previous\n", (FILE)); \
- \
- if (TREE_PUBLIC (DECL)) \
+ fputs (",.TOC.@tocbase,0\n\t.previous\n\t.size\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fputs (",24\n\t.type\t.", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fputs (",@function\n", (FILE)); \
+ if (TREE_PUBLIC (DECL) && ! DECL_WEAK (DECL)) \
{ \
- if (DECL_WEAK (DECL)) \
- fputs ("\t.weak\t", (FILE)); \
- else \
- fputs ("\t.globl\t", (FILE)); \
- putc ('.', (FILE)); \
+ fputs ("\t.globl\t.", (FILE)); \
assemble_name ((FILE), (NAME)); \
putc ('\n', (FILE)); \
} \
- fputs (TYPE_ASM_OP, (FILE)); \
- putc ('.', (FILE)); \
- assemble_name ((FILE), (NAME)); \
- putc (',', (FILE)); \
- fprintf ((FILE), TYPE_OPERAND_FMT, "function"); \
- putc ('\n', (FILE)); \
ASM_DECLARE_RESULT ((FILE), DECL_RESULT (DECL)); \
putc ('.', (FILE)); \
ASM_OUTPUT_LABEL ((FILE), (NAME)); \
} \
while (0)
+/* This is how to declare the size of a function. */
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ fputs ("\t.size\t.", (FILE)); \
+ assemble_name ((FILE), (FNAME)); \
+ fputs (",.-.", (FILE)); \
+ assemble_name ((FILE), (FNAME)); \
+ putc ('\n', (FILE)); \
+ } \
+ } \
+ while (0)
+
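For reference, this is what the ASM_DECLARE_FUNCTION_SIZE definition above writes for a 64-bit PowerPC function, whose code symbol is dot-prefixed; the driver below is illustrative, and only the directive text comes from the macro:

#include <stdio.h>

/* Mirrors the macro body: emits "\t.size\t.NAME,.-.NAME", i.e. the
   size of the dot symbol is "here" minus its start. */
static void
declare_function_size (FILE *file, const char *fname)
{
  fputs ("\t.size\t.", file);
  fputs (fname, file);
  fputs (",.-.", file);
  fputs (fname, file);
  putc ('\n', file);
}

int main (void)
{
  declare_function_size (stdout, "foo");  /* .size .foo,.-.foo */
  return 0;
}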
/* Return non-zero if this entry is to be written into the constant
pool in a special way. We do so if this is a SYMBOL_REF, LABEL_REF
or a CONST containing one of them. If -mfp-in-toc (the default),
diff --git a/contrib/gcc/config/rs6000/netbsd.h b/contrib/gcc/config/rs6000/netbsd.h
index 0e58a45..95f6542 100644
--- a/contrib/gcc/config/rs6000/netbsd.h
+++ b/contrib/gcc/config/rs6000/netbsd.h
@@ -64,3 +64,8 @@ Boston, MA 02111-1307, USA. */
structure return convention. */
#undef DRAFT_V4_STRUCT_RET
#define DRAFT_V4_STRUCT_RET 1
+
+/* Use STABS debugging information by default. DWARF2 makes a mess of
+ the 1.5.2 linker. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
diff --git a/contrib/gcc/config/rs6000/ppc-asm.h b/contrib/gcc/config/rs6000/ppc-asm.h
index 3a6fb2a..27f3635 100644
--- a/contrib/gcc/config/rs6000/ppc-asm.h
+++ b/contrib/gcc/config/rs6000/ppc-asm.h
@@ -161,6 +161,7 @@ GLUE(.L,name): \
#elif defined (__powerpc64__)
#define FUNC_NAME(name) GLUE(.,name)
+#define JUMP_TARGET(name) FUNC_NAME(name)
#define FUNC_START(name) \
.section ".opd","aw"; \
name: \
diff --git a/contrib/gcc/config/rs6000/rs6000-protos.h b/contrib/gcc/config/rs6000/rs6000-protos.h
index c40689e..19aeb07 100644
--- a/contrib/gcc/config/rs6000/rs6000-protos.h
+++ b/contrib/gcc/config/rs6000/rs6000-protos.h
@@ -40,6 +40,7 @@ extern int cc_reg_operand PARAMS ((rtx, enum machine_mode));
extern int cc_reg_not_cr0_operand PARAMS ((rtx, enum machine_mode));
extern int reg_or_short_operand PARAMS ((rtx, enum machine_mode));
extern int reg_or_neg_short_operand PARAMS ((rtx, enum machine_mode));
+extern int reg_or_aligned_short_operand PARAMS ((rtx, enum machine_mode));
extern int reg_or_u_short_operand PARAMS ((rtx, enum machine_mode));
extern int reg_or_cint_operand PARAMS ((rtx, enum machine_mode));
extern int reg_or_arith_cint_operand PARAMS ((rtx, enum machine_mode));
@@ -51,6 +52,7 @@ extern int got_no_const_operand PARAMS ((rtx, enum machine_mode));
extern int num_insns_constant PARAMS ((rtx, enum machine_mode));
extern int easy_fp_constant PARAMS ((rtx, enum machine_mode));
extern int zero_fp_constant PARAMS ((rtx, enum machine_mode));
+extern int zero_constant PARAMS ((rtx, enum machine_mode));
extern int volatile_mem_operand PARAMS ((rtx, enum machine_mode));
extern int offsettable_mem_operand PARAMS ((rtx, enum machine_mode));
extern int mem_or_easy_const_operand PARAMS ((rtx, enum machine_mode));
diff --git a/contrib/gcc/config/rs6000/rs6000.c b/contrib/gcc/config/rs6000/rs6000.c
index f8375a4..45d6fdf 100644
--- a/contrib/gcc/config/rs6000/rs6000.c
+++ b/contrib/gcc/config/rs6000/rs6000.c
@@ -163,6 +163,8 @@ static rtx rs6000_expand_builtin PARAMS ((tree, rtx, rtx, enum machine_mode, int
static rtx altivec_expand_builtin PARAMS ((tree, rtx));
static rtx altivec_expand_unop_builtin PARAMS ((enum insn_code, tree, rtx));
static rtx altivec_expand_binop_builtin PARAMS ((enum insn_code, tree, rtx));
+static rtx altivec_expand_abs_builtin PARAMS ((enum insn_code, tree, rtx));
+static rtx altivec_expand_predicate_builtin PARAMS ((enum insn_code, const char *, tree, rtx));
static rtx altivec_expand_ternop_builtin PARAMS ((enum insn_code, tree, rtx));
static rtx altivec_expand_stv_builtin PARAMS ((enum insn_code, tree));
static void rs6000_parse_abi_options PARAMS ((void));
@@ -172,6 +174,7 @@ static void is_altivec_return_reg PARAMS ((rtx, void *));
int vrsave_operation PARAMS ((rtx, enum machine_mode));
static rtx generate_set_vrsave PARAMS ((rtx, rs6000_stack_t *, int));
static void altivec_frame_fixup PARAMS ((rtx, rtx, HOST_WIDE_INT));
+static int easy_vector_constant PARAMS ((rtx));
/* Default register names. */
char rs6000_reg_names[][8] =
@@ -214,7 +217,7 @@ static const char alt_reg_names[][8] =
"%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
"%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
"%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
- "%vrsave"
+ "vrsave"
};
#endif
@@ -480,11 +483,13 @@ rs6000_override_options (default_cpu)
}
}
- if (flag_pic && DEFAULT_ABI == ABI_AIX)
+ if (flag_pic != 0 && DEFAULT_ABI == ABI_AIX)
{
- warning ("-f%s ignored (all code is position independent)",
- (flag_pic > 1) ? "PIC" : "pic");
flag_pic = 0;
+
+ if (extra_warnings)
+ warning ("-f%s ignored (all code is position independent)",
+ (flag_pic > 1) ? "PIC" : "pic");
}
#ifdef XCOFF_DEBUGGING_INFO
@@ -586,6 +591,8 @@ rs6000_parse_abi_options ()
return;
else if (! strcmp (rs6000_abi_string, "altivec"))
rs6000_altivec_abi = 1;
+ else if (! strcmp (rs6000_abi_string, "no-altivec"))
+ rs6000_altivec_abi = 0;
else
error ("unknown ABI specified: '%s'", rs6000_abi_string);
}
@@ -708,6 +715,19 @@ count_register_operand (op, mode)
return 0;
}
+/* Return 1 if OP is an AltiVec register. */
+int
+altivec_register_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return (register_operand (op, mode)
+ && (GET_CODE (op) != REG
+ || REGNO (op) > FIRST_PSEUDO_REGISTER
+ || ALTIVEC_REGNO_P (REGNO (op))));
+}
+
int
xer_operand (op, mode)
rtx op;
@@ -753,7 +773,7 @@ u_short_cint_operand (op, mode)
enum machine_mode mode ATTRIBUTE_UNUSED;
{
return (GET_CODE (op) == CONST_INT
- && CONST_OK_FOR_LETTER_P (INTVAL (op), 'K'));
+ && CONST_OK_FOR_LETTER_P (INTVAL (op) & GET_MODE_MASK (mode), 'K'));
}
/* Return 1 if OP is a CONST_INT that cannot fit in a signed D field. */
@@ -849,6 +869,24 @@ reg_or_neg_short_operand (op, mode)
return gpc_reg_operand (op, mode);
}
+/* Returns 1 if OP is either a constant integer valid for a DS-field or
+ a non-special register. If a register, it must be in the proper
+ mode unless MODE is VOIDmode. */
+
+int
+reg_or_aligned_short_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (gpc_reg_operand (op, mode))
+ return 1;
+ else if (short_cint_operand (op, mode) && !(INTVAL (op) & 3))
+ return 1;
+
+ return 0;
+}
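The constant arm of reg_or_aligned_short_operand reduces to a numeric test; a stand-alone model, assuming short_cint_operand accepts the signed 16-bit 'I'-constraint range:

#include <stdio.h>

/* A DS-field immediate must fit in a signed 16-bit field and have
   its low two bits clear (a multiple of 4). */
static int
ds_field_constant_ok (long value)
{
  return value >= -32768 && value <= 32767 && (value & 3) == 0;
}

int main (void)
{
  printf ("%d %d %d\n",
          ds_field_constant_ok (-144),   /* 1: aligned, in range */
          ds_field_constant_ok (6),      /* 0: low bits set */
          ds_field_constant_ok (40000)); /* 0: out of range */
  return 0;
}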
+
/* Return 1 if the operand is either a register or an integer whose
high-order 16 bits are zero. */
@@ -1046,7 +1084,7 @@ num_insns_constant (op, mode)
REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
REAL_VALUE_TO_TARGET_SINGLE (rv, l);
- return num_insns_constant_wide ((HOST_WIDE_INT)l);
+ return num_insns_constant_wide ((HOST_WIDE_INT) l);
}
else if (GET_CODE (op) == CONST_DOUBLE)
@@ -1076,10 +1114,10 @@ num_insns_constant (op, mode)
else
{
- if (high == 0 && (low & 0x80000000) == 0)
+ if (high == 0 && low >= 0)
return num_insns_constant_wide (low);
- else if (high == -1 && (low & 0x80000000) != 0)
+ else if (high == -1 && low < 0)
return num_insns_constant_wide (low);
else if (mask64_operand (op, mode))
@@ -1161,6 +1199,60 @@ easy_fp_constant (op, mode)
abort ();
}
+/* Return 1 if the operand is a CONST_VECTOR whose value can be put
+   into a register with one instruction. */
+
+static int
+easy_vector_constant (op)
+ rtx op;
+{
+ rtx elt;
+ int units, i;
+
+ if (GET_CODE (op) != CONST_VECTOR)
+ return 0;
+
+ units = CONST_VECTOR_NUNITS (op);
+
+ /* We can generate 0 easily. Look for that. */
+ for (i = 0; i < units; ++i)
+ {
+ elt = CONST_VECTOR_ELT (op, i);
+
+ /* We could probably simplify this by just checking for equality
+ with CONST0_RTX for the current mode, but let's be safe
+ instead. */
+
+ switch (GET_CODE (elt))
+ {
+ case CONST_INT:
+ if (INTVAL (elt) != 0)
+ return 0;
+ break;
+ case CONST_DOUBLE:
+ if (CONST_DOUBLE_LOW (elt) != 0 || CONST_DOUBLE_HIGH (elt) != 0)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ /* We could probably generate a few other constants trivially, but
+ gcc doesn't generate them yet. FIXME later. */
+ return 1;
+}
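Taken together with the rs6000_emit_move change below (constants that are not easy_vector_constant get forced into the constant pool), the zero vector can now stay inline. A source-level sketch; the typedef and initializer syntax are assumptions for this GCC vintage, not taken from the patch:

typedef int v4si __attribute__ ((mode (V4SI)));

v4si
zero_vector (void)
{
  /* An all-zero CONST_VECTOR passes easy_vector_constant, so the
     move expander may keep it out of the constant pool (e.g. via a
     splat/vxor-style idiom; instruction choice is the compiler's). */
  v4si zero = { 0, 0, 0, 0 };
  return zero;
}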
+
+/* Return 1 if the operand is the constant 0. This works for scalars
+ as well as vectors. */
+int
+zero_constant (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return op == CONST0_RTX (mode);
+}
+
/* Return 1 if the operand is 0.0. */
int
zero_fp_constant (op, mode)
@@ -1230,8 +1322,8 @@ add_operand (op, mode)
enum machine_mode mode;
{
if (GET_CODE (op) == CONST_INT)
- return (CONST_OK_FOR_LETTER_P (INTVAL(op), 'I')
- || CONST_OK_FOR_LETTER_P (INTVAL(op), 'L'));
+ return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
return gpc_reg_operand (op, mode);
}
@@ -1244,8 +1336,8 @@ non_add_cint_operand (op, mode)
enum machine_mode mode ATTRIBUTE_UNUSED;
{
return (GET_CODE (op) == CONST_INT
- && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x8000) >= 0x10000
- && ! CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
+ && !CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
+ && !CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
}
/* Return 1 if the operand is a non-special register or a constant that
@@ -1317,6 +1409,11 @@ mask_operand (op, mode)
c = INTVAL (op);
+ /* Fail in 64-bit mode if the mask wraps around because the upper
+     32 bits of the mask will all be 1s, contrary to GCC's internal view. */
+ if (TARGET_POWERPC64 && (c & 0x80000001) == 0x80000001)
+ return 0;
+
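The new wrap rejection is a pure bit test; its boundary cases, modeled stand-alone (not GCC code):

#include <stdio.h>

/* A 32-bit rotate mask with both bit 31 and bit 0 set wraps around;
   sign-extended to 64 bits its upper half would be all 1s, which is
   not what a 32-bit mask means internally. */
static int
mask_wraps (unsigned int c)
{
  return (c & 0x80000001u) == 0x80000001u;
}

int main (void)
{
  printf ("%d %d %d\n",
          mask_wraps (0x80000001u),  /* 1: rejected under -m64 */
          mask_wraps (0xfff0000fu),  /* 1: rejected under -m64 */
          mask_wraps (0x0ffffff0u)); /* 0: ordinary mask */
  return 0;
}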
/* We don't change the number of transitions by inverting,
so make sure we start with the LS bit zero. */
if (c & 1)
@@ -1629,8 +1726,8 @@ constant_pool_expr_1 (op, have_sym, have_toc)
return 0;
case PLUS:
case MINUS:
- return constant_pool_expr_1 (XEXP (op, 0), have_sym, have_toc) &&
- constant_pool_expr_1 (XEXP (op, 1), have_sym, have_toc);
+ return (constant_pool_expr_1 (XEXP (op, 0), have_sym, have_toc)
+ && constant_pool_expr_1 (XEXP (op, 1), have_sym, have_toc));
case CONST:
return constant_pool_expr_1 (XEXP (op, 0), have_sym, have_toc);
case CONST_INT:
@@ -1801,6 +1898,7 @@ rs6000_legitimize_reload_address (x, mode, opnum, type, ind_levels, win)
*win = 1;
return x;
}
+
#if TARGET_MACHO
if (DEFAULT_ABI == ABI_DARWIN && flag_pic
&& GET_CODE (x) == LO_SUM
@@ -1826,7 +1924,8 @@ rs6000_legitimize_reload_address (x, mode, opnum, type, ind_levels, win)
&& GET_CODE (XEXP (x, 0)) == REG
&& REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
&& REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && !ALTIVEC_VECTOR_MODE (mode))
{
HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
@@ -1857,6 +1956,7 @@ rs6000_legitimize_reload_address (x, mode, opnum, type, ind_levels, win)
#if TARGET_MACHO
if (GET_CODE (x) == SYMBOL_REF
&& DEFAULT_ABI == ABI_DARWIN
+ && !ALTIVEC_VECTOR_MODE (mode)
&& flag_pic)
{
/* Darwin load of floating point constant. */
@@ -1875,8 +1975,8 @@ rs6000_legitimize_reload_address (x, mode, opnum, type, ind_levels, win)
}
#endif
if (TARGET_TOC
- && CONSTANT_POOL_EXPR_P (x)
- && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
+ && CONSTANT_POOL_EXPR_P (x)
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
{
(x) = create_TOC_reference (x);
*win = 1;
@@ -2151,9 +2251,10 @@ rs6000_emit_move (dest, source, mode)
/* Handle the case where reload calls us with an invalid address;
and the case of CONSTANT_P_RTX. */
- if (! general_operand (operands[1], mode)
- || ! nonimmediate_operand (operands[0], mode)
- || GET_CODE (operands[1]) == CONSTANT_P_RTX)
+ if (!VECTOR_MODE_P (mode)
+ && (! general_operand (operands[1], mode)
+ || ! nonimmediate_operand (operands[0], mode)
+ || GET_CODE (operands[1]) == CONSTANT_P_RTX))
{
emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
return;
@@ -2183,8 +2284,8 @@ rs6000_emit_move (dest, source, mode)
case V8HImode:
case V4SFmode:
case V4SImode:
- /* fixme: aldyh -- allow vector constants when they are implemented. */
- if (CONSTANT_P (operands[1]))
+ if (CONSTANT_P (operands[1])
+ && !easy_vector_constant (operands[1]))
operands[1] = force_const_mem (mode, operands[1]);
break;
@@ -3013,42 +3114,7 @@ rs6000_va_arg (valist, type)
right-aligned, as if they were in fact integers. This only
matters for structures of size 1 or 2, or 4 when TARGET_64BIT. */
if (DEFAULT_ABI != ABI_V4)
- {
- HOST_WIDE_INT align, rounded_size;
- enum machine_mode mode;
- tree addr_tree;
-
- /* Compute the rounded size of the type. */
- align = PARM_BOUNDARY / BITS_PER_UNIT;
- rounded_size = (((int_size_in_bytes (type) + align - 1) / align)
- * align);
-
- addr_tree = valist;
-
- mode = TYPE_MODE (type);
- if (mode != BLKmode)
- {
- HOST_WIDE_INT adj;
- adj = TREE_INT_CST_LOW (TYPE_SIZE (type)) / BITS_PER_UNIT;
- if (rounded_size > align)
- adj = rounded_size;
-
- addr_tree = build (PLUS_EXPR, TREE_TYPE (addr_tree), addr_tree,
- build_int_2 (rounded_size - adj, 0));
- }
-
- addr_rtx = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL);
- addr_rtx = copy_to_reg (addr_rtx);
-
- /* Compute new value for AP. */
- t = build (MODIFY_EXPR, TREE_TYPE (valist), valist,
- build (PLUS_EXPR, TREE_TYPE (valist), valist,
- build_int_2 (rounded_size, 0)));
- TREE_SIDE_EFFECTS (t) = 1;
- expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
-
- return addr_rtx;
- }
+ return std_expand_builtin_va_arg (valist, type);
f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
f_fpr = TREE_CHAIN (f_gpr);
@@ -3072,7 +3138,8 @@ rs6000_va_arg (valist, type)
n_reg = 1;
sav_ofs = 0;
sav_scale = 4;
- size = rsize = UNITS_PER_WORD;
+ size = UNITS_PER_WORD;
+ rsize = 1;
}
else if (FLOAT_TYPE_P (type) && ! TARGET_SOFT_FLOAT)
{
@@ -3099,50 +3166,62 @@ rs6000_va_arg (valist, type)
lab_over = gen_label_rtx ();
addr_rtx = gen_reg_rtx (Pmode);
- emit_cmp_and_jump_insns (expand_expr (reg, NULL_RTX, QImode, EXPAND_NORMAL),
- GEN_INT (8 - n_reg + 1), GE, const1_rtx, QImode, 1,
- lab_false);
-
- /* Long long is aligned in the registers. */
- if (n_reg > 1)
+ /* Vectors never go in registers. */
+ if (TREE_CODE (type) != VECTOR_TYPE)
{
- u = build (BIT_AND_EXPR, TREE_TYPE (reg), reg,
- build_int_2 (n_reg - 1, 0));
- u = build (PLUS_EXPR, TREE_TYPE (reg), reg, u);
- u = build (MODIFY_EXPR, TREE_TYPE (reg), reg, u);
- TREE_SIDE_EFFECTS (u) = 1;
- expand_expr (u, const0_rtx, VOIDmode, EXPAND_NORMAL);
- }
+ TREE_THIS_VOLATILE (reg) = 1;
+ emit_cmp_and_jump_insns
+ (expand_expr (reg, NULL_RTX, QImode, EXPAND_NORMAL),
+ GEN_INT (8 - n_reg + 1), GE, const1_rtx, QImode, 1,
+ lab_false);
- if (sav_ofs)
- t = build (PLUS_EXPR, ptr_type_node, sav, build_int_2 (sav_ofs, 0));
- else
- t = sav;
+ /* Long long is aligned in the registers. */
+ if (n_reg > 1)
+ {
+ u = build (BIT_AND_EXPR, TREE_TYPE (reg), reg,
+ build_int_2 (n_reg - 1, 0));
+ u = build (PLUS_EXPR, TREE_TYPE (reg), reg, u);
+ u = build (MODIFY_EXPR, TREE_TYPE (reg), reg, u);
+ TREE_SIDE_EFFECTS (u) = 1;
+ expand_expr (u, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
- u = build (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg, build_int_2 (n_reg, 0));
- TREE_SIDE_EFFECTS (u) = 1;
+ if (sav_ofs)
+ t = build (PLUS_EXPR, ptr_type_node, sav, build_int_2 (sav_ofs, 0));
+ else
+ t = sav;
- u = build1 (CONVERT_EXPR, integer_type_node, u);
- TREE_SIDE_EFFECTS (u) = 1;
+ u = build (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg,
+ build_int_2 (n_reg, 0));
+ TREE_SIDE_EFFECTS (u) = 1;
- u = build (MULT_EXPR, integer_type_node, u, build_int_2 (sav_scale, 0));
- TREE_SIDE_EFFECTS (u) = 1;
+ u = build1 (CONVERT_EXPR, integer_type_node, u);
+ TREE_SIDE_EFFECTS (u) = 1;
- t = build (PLUS_EXPR, ptr_type_node, t, u);
- TREE_SIDE_EFFECTS (t) = 1;
+ u = build (MULT_EXPR, integer_type_node, u, build_int_2 (sav_scale, 0));
+ TREE_SIDE_EFFECTS (u) = 1;
- r = expand_expr (t, addr_rtx, Pmode, EXPAND_NORMAL);
- if (r != addr_rtx)
- emit_move_insn (addr_rtx, r);
+ t = build (PLUS_EXPR, ptr_type_node, t, u);
+ TREE_SIDE_EFFECTS (t) = 1;
+
+ r = expand_expr (t, addr_rtx, Pmode, EXPAND_NORMAL);
+ if (r != addr_rtx)
+ emit_move_insn (addr_rtx, r);
+
+ emit_jump_insn (gen_jump (lab_over));
+ emit_barrier ();
+ }
- emit_jump_insn (gen_jump (lab_over));
- emit_barrier ();
emit_label (lab_false);
/* ... otherwise out of the overflow area. */
- /* Make sure we don't find reg 7 for the next int arg. */
- if (n_reg > 1)
+ /* Make sure we don't find reg 7 for the next int arg.
+
+ All AltiVec vectors go in the overflow area. So in the AltiVec
+ case we need to get the vectors from the overflow area, but
+ remember where the GPRs and FPRs are. */
+ if (n_reg > 1 && TREE_CODE (type) != VECTOR_TYPE)
{
t = build (MODIFY_EXPR, TREE_TYPE (reg), reg, build_int_2 (8, 0));
TREE_SIDE_EFFECTS (t) = 1;
@@ -3154,8 +3233,16 @@ rs6000_va_arg (valist, type)
t = ovf;
else
{
- t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf, build_int_2 (7, 0));
- t = build (BIT_AND_EXPR, TREE_TYPE (t), t, build_int_2 (-8, -1));
+ int align;
+
+      /* Vectors are 16-byte aligned. */
+ if (TREE_CODE (type) == VECTOR_TYPE)
+ align = 15;
+ else
+ align = 7;
+
+ t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf, build_int_2 (align, 0));
+ t = build (BIT_AND_EXPR, TREE_TYPE (t), t, build_int_2 (-align-1, -1));
}
t = save_expr (t);
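The tree built above, (ovf + align) & (-align - 1), is the usual power-of-two round-up, since -align-1 equals ~align in two's complement; the same computation on plain integers (illustrative only):

#include <stdio.h>

static unsigned long
align_overflow_area (unsigned long ovf, int is_vector)
{
  unsigned long align = is_vector ? 15 : 7;   /* as in the code above */
  return (ovf + align) & ~align;
}

int main (void)
{
  printf ("%lu %lu\n",
          align_overflow_area (100, 0),  /* 104: 8-byte rounding */
          align_overflow_area (100, 1)); /* 112: 16-byte rounding */
  return 0;
}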
@@ -3352,19 +3439,47 @@ static const struct builtin_description bdesc_2arg[] =
{ MASK_ALTIVEC, CODE_FOR_altivec_vsum2sws, "__builtin_altivec_vsum2sws", ALTIVEC_BUILTIN_VSUM2SWS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vsumsws, "__builtin_altivec_vsumsws", ALTIVEC_BUILTIN_VSUMSWS },
{ MASK_ALTIVEC, CODE_FOR_xorv4si3, "__builtin_altivec_vxor", ALTIVEC_BUILTIN_VXOR },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpbfp_p, "__builtin_altivec_vcmpbfp_p", ALTIVEC_BUILTIN_VCMPBFP_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpeqfp_p, "__builtin_altivec_vcmpeqfp_p", ALTIVEC_BUILTIN_VCMPEQFP_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequb_p, "__builtin_altivec_vcmpequb_p", ALTIVEC_BUILTIN_VCMPEQUB_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequh_p, "__builtin_altivec_vcmpequh_p", ALTIVEC_BUILTIN_VCMPEQUH_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequw_p, "__builtin_altivec_vcmpequw_p", ALTIVEC_BUILTIN_VCMPEQUW_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgefp_p, "__builtin_altivec_vcmpgefp_p", ALTIVEC_BUILTIN_VCMPGEFP_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtfp_p, "__builtin_altivec_vcmpgtfp_p", ALTIVEC_BUILTIN_VCMPGTFP_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsb_p, "__builtin_altivec_vcmpgtsb_p", ALTIVEC_BUILTIN_VCMPGTSB_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsh_p, "__builtin_altivec_vcmpgtsh_p", ALTIVEC_BUILTIN_VCMPGTSH_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsw_p, "__builtin_altivec_vcmpgtsw_p", ALTIVEC_BUILTIN_VCMPGTSW_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtub_p, "__builtin_altivec_vcmpgtub_p", ALTIVEC_BUILTIN_VCMPGTUB_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuh_p, "__builtin_altivec_vcmpgtuh_p", ALTIVEC_BUILTIN_VCMPGTUH_P },
- { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuw_p, "__builtin_altivec_vcmpgtuw_p", ALTIVEC_BUILTIN_VCMPGTUW_P },
+};
+
+/* AltiVec predicates. */
+
+struct builtin_description_predicates
+{
+ const unsigned int mask;
+ const enum insn_code icode;
+ const char *opcode;
+ const char *const name;
+ const enum rs6000_builtins code;
+};
+
+static const struct builtin_description_predicates bdesc_altivec_preds[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpbfp.", "__builtin_altivec_vcmpbfp_p", ALTIVEC_BUILTIN_VCMPBFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpeqfp.", "__builtin_altivec_vcmpeqfp_p", ALTIVEC_BUILTIN_VCMPEQFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgefp.", "__builtin_altivec_vcmpgefp_p", ALTIVEC_BUILTIN_VCMPGEFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgtfp.", "__builtin_altivec_vcmpgtfp_p", ALTIVEC_BUILTIN_VCMPGTFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpequw.", "__builtin_altivec_vcmpequw_p", ALTIVEC_BUILTIN_VCMPEQUW_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpgtsw.", "__builtin_altivec_vcmpgtsw_p", ALTIVEC_BUILTIN_VCMPGTSW_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpgtuw.", "__builtin_altivec_vcmpgtuw_p", ALTIVEC_BUILTIN_VCMPGTUW_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpgtuh.", "__builtin_altivec_vcmpgtuh_p", ALTIVEC_BUILTIN_VCMPGTUH_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpgtsh.", "__builtin_altivec_vcmpgtsh_p", ALTIVEC_BUILTIN_VCMPGTSH_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpequh.", "__builtin_altivec_vcmpequh_p", ALTIVEC_BUILTIN_VCMPEQUH_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpequb.", "__builtin_altivec_vcmpequb_p", ALTIVEC_BUILTIN_VCMPEQUB_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtsb.", "__builtin_altivec_vcmpgtsb_p", ALTIVEC_BUILTIN_VCMPGTSB_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtub.", "__builtin_altivec_vcmpgtub_p", ALTIVEC_BUILTIN_VCMPGTUB_P }
+};
+
+/* ABS* operations. */
+
+static const struct builtin_description bdesc_abs[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_absv4si2, "__builtin_altivec_abs_v4si", ALTIVEC_BUILTIN_ABS_V4SI },
+ { MASK_ALTIVEC, CODE_FOR_absv8hi2, "__builtin_altivec_abs_v8hi", ALTIVEC_BUILTIN_ABS_V8HI },
+ { MASK_ALTIVEC, CODE_FOR_absv4sf2, "__builtin_altivec_abs_v4sf", ALTIVEC_BUILTIN_ABS_V4SF },
+ { MASK_ALTIVEC, CODE_FOR_absv16qi2, "__builtin_altivec_abs_v16qi", ALTIVEC_BUILTIN_ABS_V16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_abss_v4si, "__builtin_altivec_abss_v4si", ALTIVEC_BUILTIN_ABSS_V4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_abss_v8hi, "__builtin_altivec_abss_v8hi", ALTIVEC_BUILTIN_ABSS_V8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_abss_v16qi, "__builtin_altivec_abss_v16qi", ALTIVEC_BUILTIN_ABSS_V16QI }
};
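A hedged usage sketch for the table above: the builtin names come from the table and their vector-to-vector signatures are registered later in this patch, but the typedef style is an assumption, not something the patch shows:

typedef int v4si __attribute__ ((mode (V4SI)));

/* __builtin_altivec_abs_v4si is the plain elementwise abs; the
   abss_* entries are the saturating variants. */
v4si
vec_abs_example (v4si x)
{
  return __builtin_altivec_abs_v4si (x);
}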
/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
@@ -3422,6 +3537,42 @@ altivec_expand_unop_builtin (icode, arglist, target)
return target;
}
+
+static rtx
+altivec_expand_abs_builtin (icode, arglist, target)
+ enum insn_code icode;
+ tree arglist;
+ rtx target;
+{
+ rtx pat, scratch1, scratch2;
+ tree arg0 = TREE_VALUE (arglist);
+ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+
+ /* If we have invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return NULL_RTX;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ scratch1 = gen_reg_rtx (mode0);
+ scratch2 = gen_reg_rtx (mode0);
+
+ pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
static rtx
altivec_expand_binop_builtin (icode, arglist, target)
enum insn_code icode;
@@ -3460,6 +3611,87 @@ altivec_expand_binop_builtin (icode, arglist, target)
}
static rtx
+altivec_expand_predicate_builtin (icode, opcode, arglist, target)
+ enum insn_code icode;
+ const char *opcode;
+ tree arglist;
+ rtx target;
+{
+ rtx pat, scratch;
+ tree cr6_form = TREE_VALUE (arglist);
+ tree arg0 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree arg1 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ enum machine_mode tmode = SImode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+ int cr6_form_int;
+
+ if (TREE_CODE (cr6_form) != INTEGER_CST)
+ {
+ error ("argument 1 of __builtin_altivec_predicate must be a constant");
+ return NULL_RTX;
+ }
+ else
+ cr6_form_int = TREE_INT_CST_LOW (cr6_form);
+
+ if (mode0 != mode1)
+ abort ();
+
+ /* If we have invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return NULL_RTX;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ scratch = gen_reg_rtx (mode0);
+
+ pat = GEN_FCN (icode) (scratch, op0, op1,
+ gen_rtx (SYMBOL_REF, Pmode, opcode));
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ /* The vec_any* and vec_all* predicates use the same opcodes for two
+ different operations, but the bits in CR6 will be different
+ depending on what information we want. So we have to play tricks
+ with CR6 to get the right bits out.
+
+ If you think this is disgusting, look at the specs for the
+ AltiVec predicates. */
+
+ switch (cr6_form_int)
+ {
+ case 0:
+ emit_insn (gen_cr6_test_for_zero (target));
+ break;
+ case 1:
+ emit_insn (gen_cr6_test_for_zero_reverse (target));
+ break;
+ case 2:
+ emit_insn (gen_cr6_test_for_lt (target));
+ break;
+ case 3:
+ emit_insn (gen_cr6_test_for_lt_reverse (target));
+ break;
+ default:
+ error ("argument 1 of __builtin_altivec_predicate is out of range");
+ break;
+ }
+
+ return target;
+}
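A usage sketch for the predicate path: per the int_ftype_int_v4si_v4si type registered later in this patch, the builtin takes the CR6 selector as a constant first argument (0..3, dispatched by the switch above). Which selector backs which vec_all_*/vec_any_* wrapper is not shown in this hunk, so 2 below is purely an example value; the typedef style is likewise an assumption:

typedef int v4si __attribute__ ((mode (V4SI)));

int
compare_words (v4si a, v4si b)
{
  /* The first argument must be a compile-time constant, or the
     expander above reports "argument 1 ... must be a constant". */
  return __builtin_altivec_vcmpequw_p (2, a, b);
}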
+
+static rtx
altivec_expand_stv_builtin (icode, arglist)
enum insn_code icode;
tree arglist;
@@ -3543,6 +3775,7 @@ altivec_expand_builtin (exp, target)
rtx target;
{
struct builtin_description *d;
+ struct builtin_description_predicates *dp;
size_t i;
enum insn_code icode;
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
@@ -3818,6 +4051,12 @@ altivec_expand_builtin (exp, target)
return NULL_RTX;
}
+ /* Expand abs* operations. */
+ d = (struct builtin_description *) bdesc_abs;
+ for (i = 0; i < sizeof (bdesc_abs) / sizeof *d; i++, d++)
+ if (d->code == fcode)
+ return altivec_expand_abs_builtin (d->icode, arglist, target);
+
/* Handle simple unary operations. */
d = (struct builtin_description *) bdesc_1arg;
for (i = 0; i < sizeof (bdesc_1arg) / sizeof *d; i++, d++)
@@ -3830,6 +4069,12 @@ altivec_expand_builtin (exp, target)
if (d->code == fcode)
return altivec_expand_binop_builtin (d->icode, arglist, target);
+ /* Expand the AltiVec predicates. */
+ dp = (struct builtin_description_predicates *) bdesc_altivec_preds;
+ for (i = 0; i < sizeof (bdesc_altivec_preds) / sizeof *dp; i++, dp++)
+ if (dp->code == fcode)
+ return altivec_expand_predicate_builtin (dp->icode, dp->opcode, arglist, target);
+
/* LV* are funky. We initialized them differently. */
switch (fcode)
{
@@ -3899,7 +4144,8 @@ rs6000_init_builtins ()
static void
altivec_init_builtins (void)
{
- struct builtin_description * d;
+ struct builtin_description *d;
+ struct builtin_description_predicates *dp;
size_t i;
tree endlink = void_list_node;
@@ -4055,15 +4301,11 @@ altivec_init_builtins (void)
/* void foo (void). */
tree void_ftype_void
- = build_function_type (void_type_node,
- tree_cons (NULL_TREE, void_type_node,
- endlink));
+ = build_function_type (void_type_node, void_list_node);
/* vshort foo (void). */
tree v8hi_ftype_void
- = build_function_type (V8HI_type_node,
- tree_cons (NULL_TREE, void_type_node,
- endlink));
+ = build_function_type (V8HI_type_node, void_list_node);
tree v4si_ftype_v4si_v4si
= build_function_type (V4SI_type_node,
@@ -4201,6 +4443,18 @@ altivec_init_builtins (void)
tree_cons (NULL_TREE, V4SF_type_node,
endlink)));
+ tree v4si_ftype_v4si
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node, endlink));
+
+ tree v8hi_ftype_v8hi
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node, endlink));
+
+ tree v16qi_ftype_v16qi
+ = build_function_type (V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node, endlink));
+
tree v8hi_ftype_v16qi_v16qi
= build_function_type (V8HI_type_node,
tree_cons (NULL_TREE, V16QI_type_node,
@@ -4265,6 +4519,38 @@ altivec_init_builtins (void)
tree_cons (NULL_TREE, V16QI_type_node,
endlink)));
+ tree int_ftype_int_v4si_v4si
+ = build_function_type
+ (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ endlink))));
+
+ tree int_ftype_int_v4sf_v4sf
+ = build_function_type
+ (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ endlink))));
+
+ tree int_ftype_int_v8hi_v8hi
+ = build_function_type
+ (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ endlink))));
+
+ tree int_ftype_int_v16qi_v16qi
+ = build_function_type
+ (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ endlink))));
+
tree v16qi_ftype_int_pvoid
= build_function_type (V16QI_type_node,
tree_cons (NULL_TREE, integer_type_node,
@@ -4313,7 +4599,7 @@ altivec_init_builtins (void)
def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvehx", void_ftype_v8hi_int_pvoid, ALTIVEC_BUILTIN_STVEHX);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvewx", void_ftype_v4si_int_pvoid, ALTIVEC_BUILTIN_STVEWX);
def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvxl", void_ftype_v4si_int_pvoid, ALTIVEC_BUILTIN_STVXL);
-
+
/* Add the simple ternary operators. */
d = (struct builtin_description *) bdesc_3arg;
for (i = 0; i < sizeof (bdesc_3arg) / sizeof *d; i++, d++)
@@ -4412,6 +4698,36 @@ altivec_init_builtins (void)
for (i = 0; i < sizeof (bdesc_dst) / sizeof *d; i++, d++)
def_builtin (d->mask, d->name, void_ftype_pvoid_int_char, d->code);
+ /* Initialize the predicates. */
+ dp = (struct builtin_description_predicates *) bdesc_altivec_preds;
+ for (i = 0; i < sizeof (bdesc_altivec_preds) / sizeof *dp; i++, dp++)
+ {
+ enum machine_mode mode1;
+ tree type;
+
+ mode1 = insn_data[dp->icode].operand[1].mode;
+
+ switch (mode1)
+ {
+ case V4SImode:
+ type = int_ftype_int_v4si_v4si;
+ break;
+ case V8HImode:
+ type = int_ftype_int_v8hi_v8hi;
+ break;
+ case V16QImode:
+ type = int_ftype_int_v16qi_v16qi;
+ break;
+ case V4SFmode:
+ type = int_ftype_int_v4sf_v4sf;
+ break;
+ default:
+ abort ();
+ }
+
+ def_builtin (dp->mask, dp->name, type, dp->code);
+ }
+
/* Add the simple binary operators. */
d = (struct builtin_description *) bdesc_2arg;
for (i = 0; i < sizeof (bdesc_2arg) / sizeof *d; i++, d++)
@@ -4530,6 +4846,36 @@ altivec_init_builtins (void)
def_builtin (d->mask, d->name, type, d->code);
}
+ /* Initialize the abs* operators. */
+ d = (struct builtin_description *) bdesc_abs;
+ for (i = 0; i < sizeof (bdesc_abs) / sizeof *d; i++, d++)
+ {
+ enum machine_mode mode0;
+ tree type;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+
+ switch (mode0)
+ {
+ case V4SImode:
+ type = v4si_ftype_v4si;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi;
+ break;
+ case V16QImode:
+ type = v16qi_ftype_v16qi;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf;
+ break;
+ default:
+ abort ();
+ }
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+
/* Add the simple unary operators. */
d = (struct builtin_description *) bdesc_1arg;
for (i = 0; i < sizeof (bdesc_1arg) / sizeof *d; i++, d++)
@@ -5358,7 +5704,7 @@ min_max_operator (op, mode)
/* Return 1 if ANDOP is a mask with no bits set outside the
mask required to convert the result of a rotate insn into a shift
- left insn of SHIFTOP bits. Both are known to be CONST_INT. */
+ left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
int
includes_lshift_p (shiftop, andop)
@@ -5369,7 +5715,7 @@ includes_lshift_p (shiftop, andop)
shift_mask <<= INTVAL (shiftop);
- return (INTVAL (andop) & ~shift_mask) == 0;
+ return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}
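Why the extra & 0xffffffff matters: on a 64-bit host a negative SImode CONST_INT is stored sign-extended, and without the mask those high bits leak into the comparison. A self-contained model (the shift_mask initialization is assumed; only the changed return line is visible in this hunk):

#include <stdio.h>

static int
model_includes_lshift_p (int shiftop, long long andop)
{
  unsigned long long shift_mask = 0xffffffffULL;
  shift_mask <<= shiftop;
  return (andop & 0xffffffffULL & ~shift_mask) == 0;
}

int main (void)
{
  /* 0x80000000 arrives sign-extended; rotate-left-31 plus this AND
     mask is exactly shift-left-31, and with the 32-bit mask the test
     accepts it (prints 1). Without the mask it would be rejected. */
  printf ("%d\n",
          model_includes_lshift_p (31, (long long) (int) 0x80000000));
  return 0;
}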
/* Similar, but for right shift. */
@@ -5383,7 +5729,7 @@ includes_rshift_p (shiftop, andop)
shift_mask >>= INTVAL (shiftop);
- return (INTVAL (andop) & ~shift_mask) == 0;
+ return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}
/* Return 1 if ANDOP is a mask suitable for use with an rldic insn
@@ -5769,8 +6115,8 @@ rs6000_got_register (value)
/* The second flow pass currently (June 1999) can't update
regs_ever_live without disturbing other parts of the compiler, so
update it here to make the prolog/epilogue code happy. */
- if (no_new_pseudos && ! regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
- regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ if (no_new_pseudos && ! regs_ever_live[RS6000_PIC_OFFSET_TABLE_REGNUM])
+ regs_ever_live[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
current_function_uses_pic_offset_table = 1;
@@ -6033,7 +6379,7 @@ print_operand (file, x, code)
case 'm':
/* MB value for a mask operand. */
- if (! mask_operand (x, VOIDmode))
+ if (! mask_operand (x, SImode))
output_operand_lossage ("invalid %%m value");
val = INT_LOWPART (x);
@@ -6068,7 +6414,7 @@ print_operand (file, x, code)
case 'M':
/* ME value for a mask operand. */
- if (! mask_operand (x, VOIDmode))
+ if (! mask_operand (x, SImode))
output_operand_lossage ("invalid %%M value");
val = INT_LOWPART (x);
@@ -6198,7 +6544,7 @@ print_operand (file, x, code)
/* PowerPC64 mask position. All 0's and all 1's are excluded.
CONST_INT 32-bit mask is considered sign-extended so any
transition must occur within the CONST_INT, not on the boundary. */
- if (! mask64_operand (x, VOIDmode))
+ if (! mask64_operand (x, DImode))
output_operand_lossage ("invalid %%S value");
val = INT_LOWPART (x);
@@ -6898,9 +7244,18 @@ rs6000_emit_cmove (dest, op, true_cond, false_cond)
rtx op0 = rs6000_compare_op0;
rtx op1 = rs6000_compare_op1;
REAL_VALUE_TYPE c1;
- enum machine_mode mode = GET_MODE (op0);
+ enum machine_mode compare_mode = GET_MODE (op0);
+ enum machine_mode result_mode = GET_MODE (dest);
rtx temp;
+ /* These modes should always match. */
+ if (GET_MODE (op1) != compare_mode)
+ return 0;
+ if (GET_MODE (true_cond) != result_mode)
+ return 0;
+ if (GET_MODE (false_cond) != result_mode)
+ return 0;
+
/* First, work out if the hardware can do this at all, or
if it's too slow... */
/* If the comparison is an integer one, since we only have fsel
@@ -6943,11 +7298,11 @@ rs6000_emit_cmove (dest, op, true_cond, false_cond)
/* At this point we know we can use fsel. */
/* Reduce the comparison to a comparison against zero. */
- temp = gen_reg_rtx (mode);
+ temp = gen_reg_rtx (compare_mode);
emit_insn (gen_rtx_SET (VOIDmode, temp,
- gen_rtx_MINUS (mode, op0, op1)));
+ gen_rtx_MINUS (compare_mode, op0, op1)));
op0 = temp;
- op1 = CONST0_RTX (mode);
+ op1 = CONST0_RTX (compare_mode);
/* If we don't care about NaNs we can reduce some of the comparisons
down to faster ones. */
@@ -6977,52 +7332,52 @@ rs6000_emit_cmove (dest, op, true_cond, false_cond)
break;
case LE:
- temp = gen_reg_rtx (mode);
- emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (mode, op0)));
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
op0 = temp;
break;
case ORDERED:
- temp = gen_reg_rtx (mode);
- emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (mode, op0)));
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
op0 = temp;
break;
case EQ:
- temp = gen_reg_rtx (mode);
+ temp = gen_reg_rtx (compare_mode);
emit_insn (gen_rtx_SET (VOIDmode, temp,
- gen_rtx_NEG (mode,
- gen_rtx_ABS (mode, op0))));
+ gen_rtx_NEG (compare_mode,
+ gen_rtx_ABS (compare_mode, op0))));
op0 = temp;
break;
case UNGE:
- temp = gen_reg_rtx (mode);
+ temp = gen_reg_rtx (result_mode);
emit_insn (gen_rtx_SET (VOIDmode, temp,
- gen_rtx_IF_THEN_ELSE (mode,
+ gen_rtx_IF_THEN_ELSE (result_mode,
gen_rtx_GE (VOIDmode,
op0, op1),
true_cond, false_cond)));
false_cond = temp;
true_cond = false_cond;
- temp = gen_reg_rtx (mode);
- emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (mode, op0)));
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
op0 = temp;
break;
case GT:
- temp = gen_reg_rtx (mode);
+ temp = gen_reg_rtx (result_mode);
emit_insn (gen_rtx_SET (VOIDmode, temp,
- gen_rtx_IF_THEN_ELSE (mode,
+ gen_rtx_IF_THEN_ELSE (result_mode,
gen_rtx_GE (VOIDmode,
op0, op1),
true_cond, false_cond)));
true_cond = temp;
false_cond = true_cond;
- temp = gen_reg_rtx (mode);
- emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (mode, op0)));
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
op0 = temp;
break;
@@ -7031,7 +7386,7 @@ rs6000_emit_cmove (dest, op, true_cond, false_cond)
}
emit_insn (gen_rtx_SET (VOIDmode, dest,
- gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
+ gen_rtx_IF_THEN_ELSE (result_mode,
gen_rtx_GE (VOIDmode,
op0, op1),
true_cond, false_cond)));
@@ -7074,7 +7429,7 @@ first_reg_to_save ()
for (first_reg = 13; first_reg <= 31; first_reg++)
if (regs_ever_live[first_reg]
&& (! call_used_regs[first_reg]
- || (first_reg == PIC_OFFSET_TABLE_REGNUM
+ || (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic == 1)
|| (DEFAULT_ABI == ABI_DARWIN && flag_pic)))))
break;
@@ -7128,8 +7483,8 @@ first_reg_to_save ()
#if TARGET_MACHO
if (flag_pic && current_function_uses_pic_offset_table &&
- (first_reg > PIC_OFFSET_TABLE_REGNUM))
- return PIC_OFFSET_TABLE_REGNUM;
+ (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM))
+ return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif
return first_reg;
@@ -7341,13 +7696,13 @@ rs6000_stack_info ()
/* Calculate which registers need to be saved & save area size. */
info_ptr->first_gp_reg_save = first_reg_to_save ();
- /* Assume that we will have to save PIC_OFFSET_TABLE_REGNUM,
+ /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
even if it currently looks like we won't. */
if (((TARGET_TOC && TARGET_MINIMAL_TOC)
|| (flag_pic == 1 && abi == ABI_V4)
|| (flag_pic && abi == ABI_DARWIN))
- && info_ptr->first_gp_reg_save > PIC_OFFSET_TABLE_REGNUM)
- info_ptr->gp_size = reg_size * (32 - PIC_OFFSET_TABLE_REGNUM);
+ && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
+ info_ptr->gp_size = reg_size * (32 - RS6000_PIC_OFFSET_TABLE_REGNUM);
else
info_ptr->gp_size = reg_size * (32 - info_ptr->first_gp_reg_save);
@@ -7533,8 +7888,7 @@ rs6000_stack_info ()
info_ptr->push_p = 1;
else if (abi == ABI_V4)
- info_ptr->push_p = (total_raw_size > info_ptr->fixed_size
- || info_ptr->calls_p);
+ info_ptr->push_p = total_raw_size > info_ptr->fixed_size;
else
info_ptr->push_p = (frame_pointer_needed
@@ -7768,7 +8122,7 @@ rs6000_emit_load_toc_table (fromprolog)
int fromprolog;
{
rtx dest;
- dest = gen_rtx_REG (Pmode, PIC_OFFSET_TABLE_REGNUM);
+ dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
if (TARGET_ELF && DEFAULT_ABI != ABI_AIX)
{
@@ -8390,7 +8744,10 @@ rs6000_emit_prologue ()
/* Get VRSAVE onto a GPR. */
reg = gen_rtx_REG (SImode, 12);
vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
- emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
+ if (TARGET_MACHO)
+ emit_insn (gen_get_vrsave_internal (reg));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
/* Save VRSAVE. */
offset = info->vrsave_save_offset + sp_offset;
@@ -8506,7 +8863,7 @@ rs6000_emit_prologue ()
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
if ((regs_ever_live[info->first_gp_reg_save+i]
&& ! call_used_regs[info->first_gp_reg_save+i])
- || (i+info->first_gp_reg_save == PIC_OFFSET_TABLE_REGNUM
+ || (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic == 1)
|| (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
{
@@ -8608,10 +8965,10 @@ rs6000_emit_prologue ()
RTX_FRAME_RELATED_P (insn) = 1;
}
- /* If we are using PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
+ /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
if ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
|| (DEFAULT_ABI == ABI_V4 && flag_pic == 1
- && regs_ever_live[PIC_OFFSET_TABLE_REGNUM]))
+ && regs_ever_live[RS6000_PIC_OFFSET_TABLE_REGNUM]))
{
/* If emit_load_toc_table will use the link register, we need to save
it. We use R11 for this purpose because emit_load_toc_table
@@ -8639,7 +8996,7 @@ rs6000_emit_prologue ()
rs6000_maybe_dead (emit_insn (gen_load_macho_picbase (dest)));
rs6000_maybe_dead (
- emit_move_insn (gen_rtx_REG (Pmode, PIC_OFFSET_TABLE_REGNUM),
+ emit_move_insn (gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM),
gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)));
}
}
@@ -8680,13 +9037,24 @@ rs6000_output_function_prologue (file, size)
if (! HAVE_prologue)
{
start_sequence ();
-
+
/* A NOTE_INSN_DELETED is supposed to be at the start and end of
the "toplevel" insn chain. */
emit_note (0, NOTE_INSN_DELETED);
rs6000_emit_prologue ();
emit_note (0, NOTE_INSN_DELETED);
-
+
+ /* Expand INSN_ADDRESSES so final() doesn't crash. */
+ {
+ rtx insn;
+ unsigned addr = 0;
+ for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
+ {
+ INSN_ADDRESSES_NEW (insn, addr);
+ addr += 4;
+ }
+ }
+
if (TARGET_DEBUG_STACK)
debug_rtx_list (get_insns (), 100);
final (get_insns (), file, FALSE, FALSE);
@@ -8881,7 +9249,7 @@ rs6000_emit_epilogue (sibcall)
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
if ((regs_ever_live[info->first_gp_reg_save+i]
&& ! call_used_regs[info->first_gp_reg_save+i])
- || (i+info->first_gp_reg_save == PIC_OFFSET_TABLE_REGNUM
+ || (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic == 1)
|| (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
{
@@ -9074,6 +9442,17 @@ rs6000_output_function_epilogue (file, size)
rs6000_emit_epilogue (FALSE);
emit_note (0, NOTE_INSN_DELETED);
+ /* Expand INSN_ADDRESSES so final() doesn't crash. */
+ {
+ rtx insn;
+ unsigned addr = 0;
+ for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
+ {
+ INSN_ADDRESSES_NEW (insn, addr);
+ addr += 4;
+ }
+ }
+
if (TARGET_DEBUG_STACK)
debug_rtx_list (get_insns (), 100);
final (get_insns (), file, FALSE, FALSE);
@@ -9291,6 +9670,8 @@ rs6000_output_function_epilogue (file, size)
Only emit this if the alloca bit was set above. */
if (frame_pointer_needed)
fputs ("\t.byte 31\n", file);
+
+ fputs ("\t.align 2\n", file);
}
return;
}
@@ -9863,7 +10244,7 @@ output_toc (file, x, labelno, mode)
if (TARGET_MINIMAL_TOC)
fputs (DOUBLE_INT_ASM_OP, file);
else
- fprintf (file, "\t.tc ID_%lx_%lx[TC],", (long)high, (long)low);
+ fprintf (file, "\t.tc ID_%lx_%lx[TC],", (long) high, (long) low);
fprintf (file, "0x%lx%08lx\n", (long) high, (long) low);
return;
}
@@ -9875,7 +10256,7 @@ output_toc (file, x, labelno, mode)
fputs ("\t.long ", file);
else
fprintf (file, "\t.tc ID_%lx_%lx[TC],",
- (long)high, (long)low);
+ (long) high, (long) low);
fprintf (file, "0x%lx,0x%lx\n", (long) high, (long) low);
}
else
@@ -10079,8 +10460,6 @@ output_profile_hook (labelno)
const char *label_name;
rtx fun;
- labelno += 1;
-
ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
STRIP_NAME_ENCODING (label_name, ggc_strdup (buf));
fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
@@ -10463,6 +10842,11 @@ rs6000_select_rtx_section (mode, x)
{
if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
toc_section ();
+ else if (flag_pic
+ && (GET_CODE (x) == SYMBOL_REF
+ || GET_CODE (x) == LABEL_REF
+ || GET_CODE (x) == CONST))
+ data_section ();
else
const_section ();
}
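/* Note on the new PIC case above: a constant-pool entry that mentions
   a SYMBOL_REF, LABEL_REF, or CONST needs a load-time relocation under
   -fpic, and the dynamic linker cannot patch a read-only constant
   section without text relocations, so such entries are routed to the
   writable data section; purely numeric constants still go to the TOC
   or const_section as before.  */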
diff --git a/contrib/gcc/config/rs6000/rs6000.h b/contrib/gcc/config/rs6000/rs6000.h
index 2deaf75..3e4c5f2 100644
--- a/contrib/gcc/config/rs6000/rs6000.h
+++ b/contrib/gcc/config/rs6000/rs6000.h
@@ -191,7 +191,7 @@ extern int target_flags;
function, and one less allocable register. */
#define MASK_MINIMAL_TOC 0x00000200
-/* Nonzero for the 64bit model: ints, longs, and pointers are 64 bits. */
+/* Nonzero for the 64bit model: longs and pointers are 64 bits. */
#define MASK_64BIT 0x00000400
/* Disable use of FPRs. */
@@ -604,6 +604,9 @@ extern int rs6000_altivec_abi;
#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
#endif
+/* Work around rs6000_long_double_type_size dependency in ada/targtyps.c. */
+#define WIDEST_HARDWARE_FP_SIZE 64
+
/* Width in bits of a pointer.
See also the macro `Pmode' defined below. */
#define POINTER_SIZE (TARGET_32BIT ? 32 : 64)
@@ -626,9 +629,6 @@ extern int rs6000_altivec_abi;
#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
((TARGET_ALTIVEC && TREE_CODE (TYPE) == VECTOR_TYPE) ? 128 : ALIGN)
-/* Handle #pragma pack. */
-#define HANDLE_PRAGMA_PACK 1
-
/* Alignment of field after `int : 0' in a structure. */
#define EMPTY_FIELD_BOUNDARY 32
@@ -638,10 +638,13 @@ extern int rs6000_altivec_abi;
/* A bitfield declared as `int' forces `int' alignment for the struct. */
#define PCC_BITFIELD_TYPE_MATTERS 1
-/* Make strings word-aligned so strcpy from constants will be faster. */
-#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
- (TREE_CODE (EXP) == STRING_CST \
- && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+/* Make strings word-aligned so strcpy from constants will be faster.
+ Make vector constants quadword aligned. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD \
+ ? BITS_PER_WORD \
+ : (ALIGN))
/* Make arrays of chars word-aligned for the same reasons.
Align vectors to 128 bits. */
@@ -759,7 +762,7 @@ extern int rs6000_altivec_abi;
#define XER_REGNO 76
#define FIRST_ALTIVEC_REGNO 77
#define LAST_ALTIVEC_REGNO 108
-#define TOTAL_ALTIVEC_REGS (LAST_ALTIVEC_REGNO - FIRST_ALTIVEC_REGNO)
+#define TOTAL_ALTIVEC_REGS (LAST_ALTIVEC_REGNO - FIRST_ALTIVEC_REGNO + 1)
#define VRSAVE_REGNO 109
/* List the order in which to allocate registers. Each register must be
@@ -957,18 +960,24 @@ extern int rs6000_altivec_abi;
for (i = 32; i < 64; i++) \
fixed_regs[i] = call_used_regs[i] \
= call_really_used_regs[i] = 1; \
- if (DEFAULT_ABI == ABI_V4 && flag_pic == 1) \
- fixed_regs[PIC_OFFSET_TABLE_REGNUM] \
- = call_used_regs[PIC_OFFSET_TABLE_REGNUM] \
- = call_really_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
- if (DEFAULT_ABI == ABI_DARWIN && flag_pic) \
- global_regs[PIC_OFFSET_TABLE_REGNUM] \
- = fixed_regs[PIC_OFFSET_TABLE_REGNUM] \
- = call_used_regs[PIC_OFFSET_TABLE_REGNUM] \
- = call_really_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ if (DEFAULT_ABI == ABI_V4 \
+ && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM \
+ && flag_pic == 1) \
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
+ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
+ = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1; \
+ if (DEFAULT_ABI == ABI_DARWIN \
+ && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) \
+ global_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
+ = fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
+ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
+ = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1; \
if (! TARGET_ALTIVEC) \
- for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i) \
- fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1; \
+ { \
+ for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i) \
+ fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1; \
+ call_really_used_regs[VRSAVE_REGNO] = 1; \
+ } \
if (TARGET_ALTIVEC_ABI) \
for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i) \
call_used_regs[i] = call_really_used_regs[i] = 1; \
@@ -1199,14 +1208,14 @@ enum reg_class
'Q' means that is a memory operand that is just an offset from a reg.
'R' is for AIX TOC entries.
'S' is a constant that can be placed into a 64-bit mask operand
- 'T' is a consatnt that can be placed into a 32-bit mask operand
+ 'T' is a constant that can be placed into a 32-bit mask operand
'U' is for V.4 small data references. */
#define EXTRA_CONSTRAINT(OP, C) \
((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
: (C) == 'R' ? LEGITIMATE_CONSTANT_POOL_ADDRESS_P (OP) \
- : (C) == 'S' ? mask64_operand (OP, VOIDmode) \
- : (C) == 'T' ? mask_operand (OP, VOIDmode) \
+ : (C) == 'S' ? mask64_operand (OP, DImode) \
+ : (C) == 'T' ? mask_operand (OP, SImode) \
: (C) == 'U' ? (DEFAULT_ABI == ABI_V4 \
&& small_data_operand (OP, GET_MODE (OP))) \
: 0)
@@ -1539,7 +1548,7 @@ typedef struct rs6000_stack {
On RS/6000, these are r3-r10 and fp1-fp13.
On AltiVec, v2 - v13 are used for passing vectors. */
#define FUNCTION_ARG_REGNO_P(N) \
- ((unsigned)(((N) - GP_ARG_MIN_REG) < (unsigned)(GP_ARG_NUM_REG)) \
+ (((unsigned)((N) - GP_ARG_MIN_REG) < (unsigned)(GP_ARG_NUM_REG)) \
|| (TARGET_ALTIVEC && \
(unsigned)((N) - ALTIVEC_ARG_MIN_REG) < (unsigned)(ALTIVEC_ARG_NUM_REG)) \
|| ((unsigned)((N) - FP_ARG_MIN_REG) < (unsigned)(FP_ARG_NUM_REG)))
@@ -1591,8 +1600,7 @@ typedef struct rs6000_args
#define RS6000_ARG_SIZE(MODE, TYPE) \
((MODE) != BLKmode \
? (GET_MODE_SIZE (MODE) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD \
- : ((unsigned HOST_WIDE_INT) int_size_in_bytes (TYPE) \
- + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+ : (int_size_in_bytes (TYPE) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
/* Initialize a variable CUM of type CUMULATIVE_ARGS
for a call to a function whose data type is FNTYPE.
@@ -1711,6 +1719,14 @@ typedef struct rs6000_args
#define EXPAND_BUILTIN_VA_ARG(valist, type) \
rs6000_va_arg (valist, type)
+/* For AIX, the rule is that structures are passed left-aligned in
+ their stack slot. However, GCC does not presently do this:
+ structures which are the same size as integer types are passed
+ right-aligned, as if they were in fact integers. This only
+ matters for structures of size 1 or 2, or 4 when TARGET_64BIT.
+ ABI_V4 does not use std_expand_builtin_va_arg. */
+#define PAD_VARARGS_DOWN (TYPE_MODE (type) != BLKmode)
+
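To make the right-alignment concrete, a hedged sketch (big-endian, 32-bit, names illustrative) of the layout va_arg must account for with a 2-byte struct whose mode is HImode rather than BLKmode:

struct s2 { short v; };        /* TYPE_MODE == HImode, so PAD_VARARGS_DOWN */
/* 4-byte argument slot on big-endian:
     byte 0   byte 1   byte 2   byte 3
     [ pad ]  [ pad ]  [ v:hi ] [ v:lo ]
   so std_expand_builtin_va_arg reads at slot + (4 - sizeof (struct s2)).  */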
/* Define this macro to be a nonzero value if the location where a function
argument is passed depends on whether or not it is a named argument. */
#define STRICT_ARGUMENT_NAMING 1
@@ -1736,7 +1752,7 @@ typedef struct rs6000_args
#define EPILOGUE_USES(REGNO) \
((reload_completed && (REGNO) == LINK_REGISTER_REGNUM) \
- || (REGNO) == VRSAVE_REGNO \
+ || (TARGET_ALTIVEC && (REGNO) == VRSAVE_REGNO) \
|| (current_function_calls_eh_return \
&& TARGET_AIX \
&& (REGNO) == TOC_REGISTER))
@@ -1968,7 +1984,8 @@ typedef struct rs6000_args
&& GET_CODE (XEXP (X, 0)) == REG \
&& INT_REG_OK_FOR_BASE_P (XEXP (X, 0), (STRICT)) \
&& LEGITIMATE_ADDRESS_INTEGER_P (XEXP (X, 1), 0) \
- && (! ALTIVEC_VECTOR_MODE (MODE) || INTVAL (X) == 0) \
+ && (! ALTIVEC_VECTOR_MODE (MODE) \
+ || (GET_CODE (XEXP (X,1)) == CONST_INT && INTVAL (XEXP (X,1)) == 0)) \
&& (((MODE) != DFmode && (MODE) != DImode) \
|| (TARGET_32BIT \
? LEGITIMATE_ADDRESS_INTEGER_P (XEXP (X, 1), 4) \
@@ -2086,7 +2103,8 @@ do { \
this macro is not defined, it is up to the machine-dependent files
to allocate such a register (if necessary). */
-#define PIC_OFFSET_TABLE_REGNUM 30
+#define RS6000_PIC_OFFSET_TABLE_REGNUM 30
+#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? RS6000_PIC_OFFSET_TABLE_REGNUM : INVALID_REGNUM)
#define TOC_REGISTER (TARGET_MINIMAL_TOC ? 30 : 2)
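/* Note on the pair of defines above: target-independent code tests
   PIC_OFFSET_TABLE_REGNUM against INVALID_REGNUM, so making it
   conditional on flag_pic keeps r30 an ordinary allocatable register
   in non-PIC code; backend code that always means r30 (prologue,
   epilogue, stack layout) now spells it
   RS6000_PIC_OFFSET_TABLE_REGNUM.  A sketch of the generic-code idiom
   this enables:

     if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
       SET_HARD_REG_BIT (live, PIC_OFFSET_TABLE_REGNUM);
*/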
@@ -2415,43 +2433,71 @@ extern int toc_initialized;
#define RS6000_WEAK 0
#endif
-/* This implementes the `alias' attribute. */
-#define ASM_OUTPUT_DEF_FROM_DECLS(FILE,decl,target) \
-do { \
- const char * alias = XSTR (XEXP (DECL_RTL (decl), 0), 0); \
- char * name = IDENTIFIER_POINTER (target); \
- if (TREE_CODE (decl) == FUNCTION_DECL \
- && DEFAULT_ABI == ABI_AIX) \
- { \
- if (TREE_PUBLIC (decl)) \
- { \
- if (RS6000_WEAK && DECL_WEAK (decl)) \
- { \
- fputs ("\t.weak .", FILE); \
- assemble_name (FILE, alias); \
- putc ('\n', FILE); \
- } \
- else \
- { \
- fputs ("\t.globl .", FILE); \
- assemble_name (FILE, alias); \
- putc ('\n', FILE); \
- } \
- } \
- else \
- { \
- fputs ("\t.lglobl .", FILE); \
- assemble_name (FILE, alias); \
- putc ('\n', FILE); \
- } \
- fputs ("\t.set .", FILE); \
- assemble_name (FILE, alias); \
- fputs (",.", FILE); \
- assemble_name (FILE, name); \
- fputc ('\n', FILE); \
- } \
- ASM_OUTPUT_DEF (FILE, alias, name); \
-} while (0)
+#if RS6000_WEAK
+/* Used in lieu of ASM_WEAKEN_LABEL. */
+#define ASM_WEAKEN_DECL(FILE, DECL, NAME, VAL) \
+ do \
+ { \
+ fputs ("\t.weak\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX) \
+ { \
+ fputs ("\n\t.weak\t.", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ } \
+ fputc ('\n', (FILE)); \
+ if (VAL) \
+ { \
+ ASM_OUTPUT_DEF ((FILE), (NAME), (VAL)); \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX) \
+ { \
+ fputs ("\t.set\t.", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fputs (",.", (FILE)); \
+ assemble_name ((FILE), (VAL)); \
+ fputc ('\n', (FILE)); \
+ } \
+ } \
+ } \
+ while (0)
+#endif
+
+/* This implements the `alias' attribute. */
+#undef ASM_OUTPUT_DEF_FROM_DECLS
+#define ASM_OUTPUT_DEF_FROM_DECLS(FILE, DECL, TARGET) \
+ do \
+ { \
+ const char *alias = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ const char *name = IDENTIFIER_POINTER (TARGET); \
+ if (TREE_CODE (DECL) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX) \
+ { \
+ if (TREE_PUBLIC (DECL)) \
+ { \
+ if (!RS6000_WEAK || !DECL_WEAK (DECL)) \
+ { \
+ fputs ("\t.globl\t.", FILE); \
+ assemble_name (FILE, alias); \
+ putc ('\n', FILE); \
+ } \
+ } \
+ else if (TARGET_XCOFF) \
+ { \
+ fputs ("\t.lglobl\t.", FILE); \
+ assemble_name (FILE, alias); \
+ putc ('\n', FILE); \
+ } \
+ fputs ("\t.set\t.", FILE); \
+ assemble_name (FILE, alias); \
+ fputs (",.", FILE); \
+ assemble_name (FILE, name); \
+ fputc ('\n', FILE); \
+ } \
+ ASM_OUTPUT_DEF (FILE, alias, name); \
+ } \
+ while (0)
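/* Roughly, for `void alias (void) __attribute__ ((alias ("name")))'
   on AIX, where a function has both a descriptor labelled `alias' and
   entry code labelled `.alias', the macro above emits (illustrative
   output):

	.globl	.alias
	.set	.alias,.name	# entry-point alias
	.set	alias,name	# descriptor alias, via ASM_OUTPUT_DEF

   with `.weak' substituted through ASM_WEAKEN_DECL when the decl is
   weak.  */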
/* Output to assembler file text saying following lines
may contain character constants, extra white space, comments, etc. */
@@ -2706,6 +2752,10 @@ extern char rs6000_reg_names[][8]; /* register names (0 vs. %r0). */
/* Define the codes that are matched by predicates in rs6000.c. */
#define PREDICATE_CODES \
+ {"any_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF, \
+ LABEL_REF, SUBREG, REG, MEM, PARALLEL}}, \
+ {"zero_constant", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF, \
+ LABEL_REF, SUBREG, REG, MEM}}, \
{"short_cint_operand", {CONST_INT}}, \
{"u_short_cint_operand", {CONST_INT}}, \
{"non_short_cint_operand", {CONST_INT}}, \
@@ -2715,6 +2765,7 @@ extern char rs6000_reg_names[][8]; /* register names (0 vs. %r0). */
{"cc_reg_not_cr0_operand", {SUBREG, REG}}, \
{"reg_or_short_operand", {SUBREG, REG, CONST_INT}}, \
{"reg_or_neg_short_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_aligned_short_operand", {SUBREG, REG, CONST_INT}}, \
{"reg_or_u_short_operand", {SUBREG, REG, CONST_INT}}, \
{"reg_or_cint_operand", {SUBREG, REG, CONST_INT}}, \
{"reg_or_arith_cint_operand", {SUBREG, REG, CONST_INT}}, \
@@ -2761,6 +2812,7 @@ extern char rs6000_reg_names[][8]; /* register names (0 vs. %r0). */
GT, LEU, LTU, GEU, GTU}}, \
{"boolean_operator", {AND, IOR, XOR}}, \
{"boolean_or_operator", {IOR, XOR}}, \
+ {"altivec_register_operand", {REG}}, \
{"min_max_operator", {SMIN, SMAX, UMIN, UMAX}},
/* uncomment for disabling the corresponding default options */
@@ -2938,19 +2990,6 @@ enum rs6000_builtins
ALTIVEC_BUILTIN_VUPKLSB,
ALTIVEC_BUILTIN_VUPKLPX,
ALTIVEC_BUILTIN_VUPKLSH,
- ALTIVEC_BUILTIN_VCMPBFP_P,
- ALTIVEC_BUILTIN_VCMPEQFP_P,
- ALTIVEC_BUILTIN_VCMPEQUB_P,
- ALTIVEC_BUILTIN_VCMPEQUH_P,
- ALTIVEC_BUILTIN_VCMPEQUW_P,
- ALTIVEC_BUILTIN_VCMPGEFP_P,
- ALTIVEC_BUILTIN_VCMPGTFP_P,
- ALTIVEC_BUILTIN_VCMPGTSB_P,
- ALTIVEC_BUILTIN_VCMPGTSH_P,
- ALTIVEC_BUILTIN_VCMPGTSW_P,
- ALTIVEC_BUILTIN_VCMPGTUB_P,
- ALTIVEC_BUILTIN_VCMPGTUH_P,
- ALTIVEC_BUILTIN_VCMPGTUW_P,
ALTIVEC_BUILTIN_MTVSCR,
ALTIVEC_BUILTIN_MFVSCR,
ALTIVEC_BUILTIN_DSSALL,
@@ -2970,5 +3009,25 @@ enum rs6000_builtins
ALTIVEC_BUILTIN_STVEBX,
ALTIVEC_BUILTIN_STVEHX,
ALTIVEC_BUILTIN_STVEWX,
- ALTIVEC_BUILTIN_STVXL
+ ALTIVEC_BUILTIN_STVXL,
+ ALTIVEC_BUILTIN_VCMPBFP_P,
+ ALTIVEC_BUILTIN_VCMPEQFP_P,
+ ALTIVEC_BUILTIN_VCMPEQUB_P,
+ ALTIVEC_BUILTIN_VCMPEQUH_P,
+ ALTIVEC_BUILTIN_VCMPEQUW_P,
+ ALTIVEC_BUILTIN_VCMPGEFP_P,
+ ALTIVEC_BUILTIN_VCMPGTFP_P,
+ ALTIVEC_BUILTIN_VCMPGTSB_P,
+ ALTIVEC_BUILTIN_VCMPGTSH_P,
+ ALTIVEC_BUILTIN_VCMPGTSW_P,
+ ALTIVEC_BUILTIN_VCMPGTUB_P,
+ ALTIVEC_BUILTIN_VCMPGTUH_P,
+ ALTIVEC_BUILTIN_VCMPGTUW_P,
+ ALTIVEC_BUILTIN_ABSS_V4SI,
+ ALTIVEC_BUILTIN_ABSS_V8HI,
+ ALTIVEC_BUILTIN_ABSS_V16QI,
+ ALTIVEC_BUILTIN_ABS_V4SI,
+ ALTIVEC_BUILTIN_ABS_V4SF,
+ ALTIVEC_BUILTIN_ABS_V8HI,
+ ALTIVEC_BUILTIN_ABS_V16QI
};
diff --git a/contrib/gcc/config/rs6000/rs6000.md b/contrib/gcc/config/rs6000/rs6000.md
index 8fb45ff..93c6fc3 100644
--- a/contrib/gcc/config/rs6000/rs6000.md
+++ b/contrib/gcc/config/rs6000/rs6000.md
@@ -2982,7 +2982,7 @@
(set_attr "length" "4,8")])
(define_split
- [(set (match_operand:CC 3 "cc_reg_operand" "")
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
(compare:CC (match_operator:SI 4 "boolean_operator"
[(match_operand:SI 1 "gpc_reg_operand" "")
(match_operand:SI 2 "gpc_reg_operand" "")])
@@ -3070,7 +3070,7 @@
(set_attr "length" "4,8")])
(define_split
- [(set (match_operand:CC 3 "cc_reg_operand" "")
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
(compare:CC (match_operator:SI 4 "boolean_operator"
[(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
(match_operand:SI 2 "gpc_reg_operand" "")])
@@ -3136,7 +3136,7 @@
(set_attr "length" "4,8")])
(define_split
- [(set (match_operand:CC 3 "cc_reg_operand" "")
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
(compare:CC (match_operator:SI 4 "boolean_operator"
[(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
(not:SI (match_operand:SI 2 "gpc_reg_operand" ""))])
@@ -7295,7 +7295,7 @@
(set_attr "length" "4,8")])
(define_split
- [(set (match_operand:CC 3 "cc_reg_operand" "")
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
(compare:CC (match_operator:DI 4 "boolean_operator"
[(match_operand:DI 1 "gpc_reg_operand" "")
(match_operand:DI 2 "gpc_reg_operand" "")])
@@ -7395,7 +7395,7 @@
(set_attr "length" "4,8")])
(define_split
- [(set (match_operand:CC 3 "cc_reg_operand" "")
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
(compare:CC (match_operator:DI 4 "boolean_operator"
[(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
(match_operand:DI 2 "gpc_reg_operand" "")])
@@ -7461,7 +7461,7 @@
(set_attr "length" "4,8")])
(define_split
- [(set (match_operand:CC 3 "cc_reg_operand" "")
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
(compare:CC (match_operator:DI 4 "boolean_operator"
[(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
(not:DI (match_operand:DI 2 "gpc_reg_operand" ""))])
@@ -7606,7 +7606,7 @@
operands2[0] = operands[0];
operands2[1] = operands[1];
operands2[2] = operands[2];
- operands2[3] = gen_rtx_REG (SImode, PIC_OFFSET_TABLE_REGNUM);
+ operands2[3] = gen_rtx_REG (SImode, RS6000_PIC_OFFSET_TABLE_REGNUM);
output_asm_insn (\"{l|lwz} %0,lo16(%2)(%1)\", operands);
/* We cannot rely on ha16(low half)==ha16(high half), alas,
although in practice it almost always is. */
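/* A worked example of the ha16 hazard: suppose the constant sits at X
   with lo16(X) == 0x7ffc, say X == 0x00017ffc.  The high word is at X
   and the low word at X + 4 == 0x00018000, and since lo16 is a signed
   16-bit displacement:
     ha16(X)     == 0x0001,  lo16(X)     ==  0x7ffc
     ha16(X + 4) == 0x0002,  lo16(X + 4) == -0x8000
   so the two halves need different ha16 values exactly when the +4
   step carries lo16 across 0x8000.  */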
@@ -9173,7 +9173,7 @@
(define_insn "*movdi_update1"
[(set (match_operand:DI 3 "gpc_reg_operand" "=r,r")
(mem:DI (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0,0")
- (match_operand:DI 2 "reg_or_short_operand" "r,I"))))
+ (match_operand:DI 2 "reg_or_aligned_short_operand" "r,I"))))
(set (match_operand:DI 0 "gpc_reg_operand" "=b,b")
(plus:DI (match_dup 1) (match_dup 2)))]
"TARGET_POWERPC64 && TARGET_UPDATE"
@@ -9195,7 +9195,7 @@
(define_insn "movdi_update"
[(set (mem:DI (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0,0")
- (match_operand:DI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:DI 2 "reg_or_aligned_short_operand" "r,I")))
(match_operand:DI 3 "gpc_reg_operand" "r,r"))
(set (match_operand:DI 0 "gpc_reg_operand" "=b,b")
(plus:DI (match_dup 1) (match_dup 2)))]
@@ -9544,7 +9544,7 @@
;; Code to initialize the TOC register...
(define_insn "load_toc_aix_si"
- [(parallel [(set (match_operand:SI 0 "register_operand" "=r")
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(unspec:SI [(const_int 0)] 7))
(use (reg:SI 2))])]
"DEFAULT_ABI == ABI_AIX && TARGET_32BIT"
@@ -9559,14 +9559,19 @@
[(set_attr "type" "load")])
(define_insn "load_toc_aix_di"
- [(parallel [(set (match_operand:DI 0 "register_operand" "=r")
+ [(parallel [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(unspec:DI [(const_int 0)] 7))
(use (reg:DI 2))])]
"DEFAULT_ABI == ABI_AIX && TARGET_64BIT"
"*
{
char buf[30];
+#ifdef TARGET_RELOCATABLE
+ ASM_GENERATE_INTERNAL_LABEL (buf, \"LCTOC\",
+ !TARGET_MINIMAL_TOC || TARGET_RELOCATABLE);
+#else
ASM_GENERATE_INTERNAL_LABEL (buf, \"LCTOC\", 1);
+#endif
if (TARGET_ELF)
strcat (buf, \"@toc\");
operands[1] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
@@ -10841,18 +10846,17 @@
(match_dup 4)))])
(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r,&r")
(plus:SI (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r")
(match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I"))
- (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r")))
- (clobber (match_scratch:SI 4 "=&r,&r,&r,&r,&r"))]
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r")))]
"! TARGET_POWERPC64"
"@
- xor %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze|addze} %0,%3
- {sfi|subfic} %4,%1,0\;{aze|addze} %0,%3
- {xoril|xori} %4,%1,%b2\;{sfi|subfic} %4,%4,0\;{aze|addze} %0,%3
- {xoriu|xoris} %4,%1,%u2\;{sfi|subfic} %4,%4,0\;{aze|addze} %0,%3
- {sfi|subfic} %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze|addze} %0,%3"
+ xor %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
+ {sfi|subfic} %0,%1,0\;{aze|addze} %0,%3
+ {xoril|xori} %0,%1,%b2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
+ {xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
+ {sfi|subfic} %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3"
[(set_attr "length" "12,8,12,12,12")])
(define_insn ""
@@ -10899,23 +10903,22 @@
"")
(define_insn ""
- [(set (match_operand:CC 5 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
(compare:CC
(plus:SI
(eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
(match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I,r,O,K,L,I"))
(match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r,r,r,r,r,r"))
(const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r,r,r")
- (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
- (clobber (match_scratch:SI 4 "=&r,&r,&r,&r,&r,&r,&r,&r,&r,&r"))]
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r,&r,&r,&r,&r,&r,&r")
+ (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
"! TARGET_POWERPC64"
"@
- xor %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %0,%3
- {sfi|subfic} %4,%1,0\;{aze.|addze.} %0,%3
- {xoril|xori} %4,%1,%b2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %0,%3
- {xoriu|xoris} %4,%1,%u2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %0,%3
- {sfi|subfic} %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %0,%3
+ xor %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ {sfi|subfic} %0,%1,0\;{aze.|addze.} %0,%3
+ {xoril|xori} %0,%1,%b2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ {xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ {sfi|subfic} %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
#
#
#
@@ -10925,7 +10928,7 @@
(set_attr "length" "12,8,12,12,12,16,12,16,16,16")])
(define_split
- [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
(compare:CC
(plus:SI
(eq:SI (match_operand:SI 1 "gpc_reg_operand" "")
@@ -10933,13 +10936,11 @@
(match_operand:SI 3 "gpc_reg_operand" ""))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "")
- (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
- (clobber (match_scratch:SI 4 ""))]
+ (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
"! TARGET_POWERPC64 && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 0)
(plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
- (clobber (match_dup 4))])
- (set (match_dup 5)
+ (set (match_dup 4)
(compare:CC (match_dup 0)
(const_int 0)))]
"")
@@ -11008,7 +11009,8 @@
(const_int 31))
(match_operand:SI 2 "gpc_reg_operand" "r,r"))
(const_int 0)))
- (clobber (match_scratch:SI 3 "=&r,&r"))]
+ (clobber (match_scratch:SI 3 "=&r,&r"))
+ (clobber (match_scratch:SI 4 "=X,&r"))]
"! TARGET_POWERPC64"
"@
{ai|addic} %3,%1,-1\;{aze.|addze.} %3,%2
@@ -11024,12 +11026,14 @@
(const_int 31))
(match_operand:SI 2 "gpc_reg_operand" ""))
(const_int 0)))
- (clobber (match_scratch:SI 3 ""))]
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
"! TARGET_POWERPC64 && reload_completed"
- [(set (match_dup 3)
- (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1)))
- (const_int 31))
- (match_dup 2)))
+ [(parallel [(set (match_dup 3)
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1)))
+ (const_int 31))
+ (match_dup 2)))
+ (clobber (match_dup 4))])
(set (match_dup 0)
(compare:CC (match_dup 3)
(const_int 0)))]
@@ -11387,13 +11391,12 @@
(set_attr "length" "12,16")])
(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
(plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_short_operand" "rI"))
- (match_operand:SI 3 "gpc_reg_operand" "r")))
- (clobber (match_scratch:SI 4 "=&r"))]
+ (match_operand:SI 3 "gpc_reg_operand" "r")))]
"! TARGET_POWERPC64"
- "{sf%I2|subf%I2c} %4,%1,%2\;{aze|addze} %0,%3"
+ "{sf%I2|subf%I2c} %0,%1,%2\;{aze|addze} %0,%3"
[(set_attr "length" "8")])
(define_insn ""
@@ -11725,15 +11728,14 @@
"")
(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
(plus:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_neg_short_operand" "r,P"))
- (match_operand:SI 3 "reg_or_short_operand" "rI,rI")))
- (clobber (match_scratch:SI 4 "=&r,&r"))]
+ (match_operand:SI 3 "reg_or_short_operand" "rI,rI")))]
"! TARGET_POWERPC64"
"@
- {sf|subfc} %4,%2,%1\;{sfe|subfe} %4,%4,%4\;{sf%I3|subf%I3c} %0,%4,%3
- {ai|addic} %4,%1,%n2\;{sfe|subfe} %4,%4,%4\;{sf%I3|subf%I3c} %0,%4,%3"
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;{sf%I3|subf%I3c} %0,%0,%3
+ {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0\;{sf%I3|subf%I3c} %0,%0,%3"
[(set_attr "length" "12")])
(define_insn ""
@@ -12034,15 +12036,14 @@
"")
(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
(plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_neg_short_operand" "r,P"))
- (match_operand:SI 3 "gpc_reg_operand" "r,r")))
- (clobber (match_scratch:SI 4 "=&r,&r"))]
+ (match_operand:SI 3 "gpc_reg_operand" "r,r")))]
"! TARGET_POWERPC64"
"@
- {sf|subfc} %4,%2,%1\;{aze|addze} %0,%3
- {ai|addic} %4,%1,%n2\;{aze|addze} %0,%3"
+ {sf|subfc} %0,%2,%1\;{aze|addze} %0,%3
+ {ai|addic} %0,%1,%n2\;{aze|addze} %0,%3"
[(set_attr "length" "8")])
(define_insn ""
@@ -12335,13 +12336,12 @@
"")
(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
(plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
(const_int 0))
- (match_operand:SI 2 "gpc_reg_operand" "r")))
- (clobber (match_scratch:SI 3 "=&r"))]
+ (match_operand:SI 2 "gpc_reg_operand" "r")))]
"! TARGET_POWERPC64"
- "{a|addc} %3,%1,%1\;{sfe|subfe} %3,%1,%3\;{aze|addze} %0,%2"
+ "{a|addc} %0,%1,%1\;{sfe|subfe} %0,%1,%0\;{aze|addze} %0,%2"
[(set_attr "length" "12")])
(define_insn ""
@@ -12671,15 +12671,14 @@
"")
(define_insn ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
(plus:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_short_operand" "I,rI"))
- (match_operand:SI 3 "reg_or_short_operand" "r,rI")))
- (clobber (match_scratch:SI 4 "=&r,&r"))]
+ (match_operand:SI 3 "reg_or_short_operand" "r,rI")))]
"! TARGET_POWERPC64"
"@
- {ai|addic} %4,%1,%k2\;{aze|addze} %0,%3
- {sf%I2|subf%I2c} %4,%1,%2\;{sfe|subfe} %4,%4,%4\;{sf%I3|subf%I3c} %0,%4,%3"
+ {ai|addic} %0,%1,%k2\;{aze|addze} %0,%3
+ {sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;{sf%I3|subf%I3c} %0,%0,%3"
[(set_attr "length" "8,12")])
(define_insn ""
@@ -13256,15 +13255,15 @@
(define_insn "*ctrdi_internal1"
[(set (pc)
- (if_then_else (ne (match_operand:DI 1 "register_operand" "c,*r,*r")
+ (if_then_else (ne (match_operand:DI 1 "register_operand" "c,*r,*r,!*f")
(const_int 1))
(label_ref (match_operand 0 "" ""))
(pc)))
- (set (match_operand:DI 2 "register_operand" "=1,*r,m*q*c*l")
+ (set (match_operand:DI 2 "register_operand" "=1,*r,m*c*l,!*f")
(plus:DI (match_dup 1)
(const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&x"))
- (clobber (match_scratch:DI 4 "=X,X,r"))]
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:DI 4 "=X,X,r,r"))]
"TARGET_POWERPC64"
"*
{
@@ -13276,19 +13275,19 @@
return \"bdz $+8\;b %l0\";
}"
[(set_attr "type" "branch")
- (set_attr "length" "*,12,16")])
+ (set_attr "length" "*,12,16,24")])
(define_insn "*ctrdi_internal2"
[(set (pc)
- (if_then_else (ne (match_operand:DI 1 "register_operand" "c,*r,*r")
+ (if_then_else (ne (match_operand:DI 1 "register_operand" "c,*r,*r,!*f")
(const_int 1))
(pc)
(label_ref (match_operand 0 "" ""))))
- (set (match_operand:DI 2 "register_operand" "=1,*r,m*q*c*l")
+ (set (match_operand:DI 2 "register_operand" "=1,*r,m*c*l,!*f")
(plus:DI (match_dup 1)
(const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&x"))
- (clobber (match_scratch:DI 4 "=X,X,r"))]
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:DI 4 "=X,X,r,r"))]
"TARGET_POWERPC64"
"*
{
@@ -13300,7 +13299,7 @@
return \"{bdn|bdnz} $+8\;b %l0\";
}"
[(set_attr "type" "branch")
- (set_attr "length" "*,12,16")])
+ (set_attr "length" "*,12,16,24")])
;; Similar, but we can use GE since we have a REG_NONNEG.
@@ -13354,15 +13353,15 @@
(define_insn "*ctrdi_internal3"
[(set (pc)
- (if_then_else (ge (match_operand:DI 1 "register_operand" "c,*r,*r")
+ (if_then_else (ge (match_operand:DI 1 "register_operand" "c,*r,*r,!*f")
(const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))
- (set (match_operand:DI 2 "register_operand" "=1,*r,m*q*c*l")
+ (set (match_operand:DI 2 "register_operand" "=1,*r,m*c*l,!*f")
(plus:DI (match_dup 1)
(const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&X"))
- (clobber (match_scratch:DI 4 "=X,X,r"))]
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:DI 4 "=X,X,r,r"))]
"TARGET_POWERPC64 && find_reg_note (insn, REG_NONNEG, 0)"
"*
{
@@ -13374,19 +13373,19 @@
return \"bdz $+8\;b %l0\";
}"
[(set_attr "type" "branch")
- (set_attr "length" "*,12,16")])
+ (set_attr "length" "*,12,16,24")])
(define_insn "*ctrdi_internal4"
[(set (pc)
- (if_then_else (ge (match_operand:DI 1 "register_operand" "c,*r,*r")
+ (if_then_else (ge (match_operand:DI 1 "register_operand" "c,*r,*r,!*f")
(const_int 0))
(pc)
(label_ref (match_operand 0 "" ""))))
- (set (match_operand:DI 2 "register_operand" "=1,*r,m*q*c*l")
+ (set (match_operand:DI 2 "register_operand" "=1,*r,m*c*l,!*f")
(plus:DI (match_dup 1)
(const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&X"))
- (clobber (match_scratch:DI 4 "=X,X,r"))]
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:DI 4 "=X,X,r,r"))]
"TARGET_POWERPC64 && find_reg_note (insn, REG_NONNEG, 0)"
"*
{
@@ -13398,7 +13397,7 @@
return \"{bdn|bdnz} $+8\;b %l0\";
}"
[(set_attr "type" "branch")
- (set_attr "length" "*,12,16")])
+ (set_attr "length" "*,12,16,24")])
;; Similar but use EQ
@@ -13452,15 +13451,15 @@
(define_insn "*ctrdi_internal5"
[(set (pc)
- (if_then_else (eq (match_operand:DI 1 "register_operand" "c,*r,*r")
+ (if_then_else (eq (match_operand:DI 1 "register_operand" "c,*r,*r,!*f")
(const_int 1))
(label_ref (match_operand 0 "" ""))
(pc)))
- (set (match_operand:DI 2 "register_operand" "=1,*r,m*q*c*l")
+ (set (match_operand:DI 2 "register_operand" "=1,*r,m*c*l,!*f")
(plus:DI (match_dup 1)
(const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&x"))
- (clobber (match_scratch:DI 4 "=X,X,r"))]
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:DI 4 "=X,X,r,r"))]
"TARGET_POWERPC64"
"*
{
@@ -13472,19 +13471,19 @@
return \"{bdn|bdnz} $+8\;b %l0\";
}"
[(set_attr "type" "branch")
- (set_attr "length" "*,12,16")])
+ (set_attr "length" "*,12,16,24")])
(define_insn "*ctrdi_internal6"
[(set (pc)
- (if_then_else (eq (match_operand:DI 1 "register_operand" "c,*r,*r")
+ (if_then_else (eq (match_operand:DI 1 "register_operand" "c,*r,*r,!*f")
(const_int 1))
(pc)
(label_ref (match_operand 0 "" ""))))
- (set (match_operand:DI 2 "register_operand" "=1,*r,m*q*c*l")
+ (set (match_operand:DI 2 "register_operand" "=1,*r,m*c*l,!*f")
(plus:DI (match_dup 1)
(const_int -1)))
- (clobber (match_scratch:CC 3 "=X,&x,&x"))
- (clobber (match_scratch:DI 4 "=X,X,r"))]
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:DI 4 "=X,X,r,r"))]
"TARGET_POWERPC64"
"*
{
@@ -13496,7 +13495,7 @@
return \"bdz $+8\;b %l0\";
}"
[(set_attr "type" "branch")
- (set_attr "length" "*,12,16")])
+ (set_attr "length" "*,12,16,24")])
;; Now the splitters if we could not allocate the CTR register
@@ -13567,7 +13566,7 @@
(const_int -1)))
(clobber (match_scratch:CC 3 ""))
(clobber (match_scratch:DI 4 ""))]
- "TARGET_POWERPC64 && reload_completed"
+ "TARGET_POWERPC64 && reload_completed && INT_REGNO_P (REGNO (operands[0]))"
[(parallel [(set (match_dup 3)
(compare:CC (plus:DI (match_dup 1)
(const_int -1))
@@ -13610,6 +13609,45 @@
"
{ operands[7] = gen_rtx (GET_CODE (operands[2]), VOIDmode, operands[3],
const0_rtx); }")
+
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "")
+ (const_int 1)])
+ (match_operand 5 "" "")
+ (match_operand 6 "" "")))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (plus:DI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 ""))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed && FP_REGNO_P (REGNO (operands[0]))"
+ [(set (match_dup 8)
+ (match_dup 1))
+ (set (match_dup 4)
+ (match_dup 8))
+ (parallel [(set (match_dup 3)
+ (compare:CC (plus:DI (match_dup 4)
+ (const_int -1))
+ (const_int 0)))
+ (set (match_dup 4)
+ (plus:DI (match_dup 4)
+ (const_int -1)))])
+ (set (match_dup 8)
+ (match_dup 4))
+ (set (match_dup 0)
+ (match_dup 8))
+ (set (pc) (if_then_else (match_dup 7)
+ (match_dup 5)
+ (match_dup 6)))]
+ "
+{
+ operands[7] = gen_rtx (GET_CODE (operands[2]), VOIDmode, operands[3],
+ const0_rtx);
+ operands[8] = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+}")
+
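;; A note on the new FP-register split above: PowerPC has no direct
;; FPR<->GPR move, so when the decrement-and-branch counter was
;; allocated to a floating-point register the split bounces it through
;; a DImode stack slot (operands[8], created by assign_stack_temp):
;; store the FPR, reload into the GPR scratch, decrement and compare
;; there, then bounce the new value back the same way.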
(define_insn "trap"
[(trap_if (const_int 1) (const_int 0))]
@@ -13871,28 +13909,28 @@
;; Generic LVX load instruction.
(define_insn "altivec_lvx_4si"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
+ [(set (match_operand:V4SI 0 "altivec_register_operand" "=v")
(match_operand:V4SI 1 "memory_operand" "m"))]
"TARGET_ALTIVEC"
"lvx %0,%y1"
[(set_attr "type" "vecload")])
(define_insn "altivec_lvx_8hi"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
+ [(set (match_operand:V8HI 0 "altivec_register_operand" "=v")
(match_operand:V8HI 1 "memory_operand" "m"))]
"TARGET_ALTIVEC"
"lvx %0,%y1"
[(set_attr "type" "vecload")])
(define_insn "altivec_lvx_16qi"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
+ [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
(match_operand:V16QI 1 "memory_operand" "m"))]
"TARGET_ALTIVEC"
"lvx %0,%y1"
[(set_attr "type" "vecload")])
(define_insn "altivec_lvx_4sf"
- [(set (match_operand:V4SF 0 "register_operand" "=v")
+ [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
(match_operand:V4SF 1 "memory_operand" "m"))]
"TARGET_ALTIVEC"
"lvx %0,%y1"
@@ -13901,28 +13939,28 @@
;; Generic STVX store instruction.
(define_insn "altivec_stvx_4si"
[(set (match_operand:V4SI 0 "memory_operand" "=m")
- (match_operand:V4SI 1 "register_operand" "v"))]
+ (match_operand:V4SI 1 "altivec_register_operand" "v"))]
"TARGET_ALTIVEC"
"stvx %1,%y0"
[(set_attr "type" "vecstore")])
(define_insn "altivec_stvx_8hi"
[(set (match_operand:V8HI 0 "memory_operand" "=m")
- (match_operand:V8HI 1 "register_operand" "v"))]
+ (match_operand:V8HI 1 "altivec_register_operand" "v"))]
"TARGET_ALTIVEC"
"stvx %1,%y0"
[(set_attr "type" "vecstore")])
(define_insn "altivec_stvx_16qi"
[(set (match_operand:V16QI 0 "memory_operand" "=m")
- (match_operand:V16QI 1 "register_operand" "v"))]
+ (match_operand:V16QI 1 "altivec_register_operand" "v"))]
"TARGET_ALTIVEC"
"stvx %1,%y0"
[(set_attr "type" "vecstore")])
(define_insn "altivec_stvx_4sf"
[(set (match_operand:V4SF 0 "memory_operand" "=m")
- (match_operand:V4SF 1 "register_operand" "v"))]
+ (match_operand:V4SF 1 "altivec_register_operand" "v"))]
"TARGET_ALTIVEC"
"stvx %1,%y0"
[(set_attr "type" "vecstore")])
@@ -13935,14 +13973,18 @@
"{ rs6000_emit_move (operands[0], operands[1], V4SImode); DONE; }")
(define_insn "*movv4si_internal"
- [(set (match_operand:V4SI 0 "nonimmediate_operand" "=m,v,v")
- (match_operand:V4SI 1 "input_operand" "v,m,v"))]
+ [(set (match_operand:V4SI 0 "nonimmediate_operand" "=m,v,v,o,r,r")
+ (match_operand:V4SI 1 "input_operand" "v,m,v,r,o,r"))]
"TARGET_ALTIVEC"
"@
stvx %1,%y0
lvx %0,%y1
- vor %0,%1,%1"
- [(set_attr "type" "altivec")])
+ vor %0,%1,%1
+ stw%U0 %1,%0\;stw %L1,%L0\;stw %Y1,%Y0\;stw %Z1,%Z0
+ lwz%U1 %0,%1\;lwz %L0,%L1\;lwz %Y0,%Y1\;lwz %Z0,%Z1
+ mr %0,%1\;mr %L0,%L1\;mr %Y0,%Y1\;mr %Z0,%Z1"
+ [(set_attr "type" "altivec")
+ (set_attr "length" "*,*,*,16,16,16")])
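;; The three new alternatives above cover vectors living in GPRs: a
;; 16-byte V4SI spans four consecutive 32-bit registers (or words of
;; memory), and %L, %Y and %Z print the second, third and fourth words
;; of the operand, so e.g. copying a vector held in r5..r8 into
;; r9..r12 emits
;;	mr r9,r5 ; mr r10,r6 ; mr r11,r7 ; mr r12,r8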
(define_expand "movv8hi"
[(set (match_operand:V8HI 0 "nonimmediate_operand" "")
@@ -13951,14 +13993,18 @@
"{ rs6000_emit_move (operands[0], operands[1], V8HImode); DONE; }")
(define_insn "*movv8hi_internal1"
- [(set (match_operand:V8HI 0 "nonimmediate_operand" "=m,v,v")
- (match_operand:V8HI 1 "input_operand" "v,m,v"))]
+ [(set (match_operand:V8HI 0 "nonimmediate_operand" "=m,v,v,o,r,r")
+ (match_operand:V8HI 1 "input_operand" "v,m,v,r,o,r"))]
"TARGET_ALTIVEC"
"@
stvx %1,%y0
lvx %0,%y1
- vor %0,%1,%1"
- [(set_attr "type" "altivec")])
+ vor %0,%1,%1
+ stw%U0 %1,%0\;stw %L1,%L0\;stw %Y1,%Y0\;stw %Z1,%Z0
+ lwz%U1 %0,%1\;lwz %L0,%L1\;lwz %Y0,%Y1\;lwz %Z0,%Z1
+ mr %0,%1\;mr %L0,%L1\;mr %Y0,%Y1\;mr %Z0,%Z1"
+ [(set_attr "type" "altivec")
+ (set_attr "length" "*,*,*,16,16,16")])
(define_expand "movv16qi"
[(set (match_operand:V16QI 0 "nonimmediate_operand" "")
@@ -13967,14 +14013,18 @@
"{ rs6000_emit_move (operands[0], operands[1], V16QImode); DONE; }")
(define_insn "*movv16qi_internal1"
- [(set (match_operand:V16QI 0 "nonimmediate_operand" "=m,v,v")
- (match_operand:V16QI 1 "input_operand" "v,m,v"))]
+ [(set (match_operand:V16QI 0 "nonimmediate_operand" "=m,v,v,o,r,r")
+ (match_operand:V16QI 1 "input_operand" "v,m,v,r,o,r"))]
"TARGET_ALTIVEC"
"@
stvx %1,%y0
lvx %0,%y1
- vor %0,%1,%1"
- [(set_attr "type" "altivec")])
+ vor %0,%1,%1
+ stw%U0 %1,%0\;stw %L1,%L0\;stw %Y1,%Y0\;stw %Z1,%Z0
+ lwz%U1 %0,%1\;lwz %L0,%L1\;lwz %Y0,%Y1\;lwz %Z0,%Z1
+ mr %0,%1\;mr %L0,%L1\;mr %Y0,%Y1\;mr %Z0,%Z1"
+ [(set_attr "type" "altivec")
+ (set_attr "length" "*,*,*,16,16,16")])
(define_expand "movv4sf"
[(set (match_operand:V4SF 0 "nonimmediate_operand" "")
@@ -13983,13 +14033,30 @@
"{ rs6000_emit_move (operands[0], operands[1], V4SFmode); DONE; }")
(define_insn "*movv4sf_internal1"
- [(set (match_operand:V4SF 0 "nonimmediate_operand" "=m,v,v")
- (match_operand:V4SF 1 "input_operand" "v,m,v"))]
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "=m,v,v,o,r,r")
+ (match_operand:V4SF 1 "input_operand" "v,m,v,r,o,r"))]
"TARGET_ALTIVEC"
"@
stvx %1,%y0
lvx %0,%y1
- vor %0,%1,%1"
+ vor %0,%1,%1
+ stw%U0 %1,%0\;stw %L1,%L0\;stw %Y1,%Y0\;stw %Z1,%Z0
+ lwz%U1 %0,%1\;lwz %L0,%L1\;lwz %Y0,%Y1\;lwz %Z0,%Z1
+ mr %0,%1\;mr %L0,%L1\;mr %Y0,%Y1\;mr %Z0,%Z1"
+ [(set_attr "type" "altivec")
+ (set_attr "length" "*,*,*,16,16,16")])
+
+(define_insn "get_vrsave_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(reg:SI 109)] 214))]
+ "TARGET_ALTIVEC"
+ "*
+{
+ if (TARGET_MACHO)
+ return \"mfspr %0,256\";
+ else
+ return \"mfvrsave %0\";
+}"
[(set_attr "type" "altivec")])
(define_insn "*set_vrsave_internal"
@@ -13998,9 +14065,45 @@
(unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
(reg:SI 109)] 30))])]
"TARGET_ALTIVEC"
- "mtvrsave %1"
+ "*
+{
+ if (TARGET_MACHO)
+ return \"mfspr %1,256\";
+ else
+ return \"mtvrsave %1\";
+}"
[(set_attr "type" "altivec")])
+;; Vector clears
+(define_insn "*movv4si_const0"
+ [(set (match_operand:V4SI 0 "altivec_register_operand" "=v")
+ (match_operand:V4SI 1 "zero_constant" ""))]
+ "TARGET_ALTIVEC"
+ "vxor %0,%0,%0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "*movv4sf_const0"
+ [(set (match_operand:V4SF 0 "altivec_register_operand" "=v")
+ (match_operand:V4SF 1 "zero_constant" ""))]
+ "TARGET_ALTIVEC"
+ "vxor %0,%0,%0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "*movv8hi_const0"
+ [(set (match_operand:V8HI 0 "altivec_register_operand" "=v")
+ (match_operand:V8HI 1 "zero_constant" ""))]
+ "TARGET_ALTIVEC"
+ "vxor %0,%0,%0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "*movv16qi_const0"
+ [(set (match_operand:V16QI 0 "altivec_register_operand" "=v")
+ (match_operand:V16QI 1 "zero_constant" ""))]
+ "TARGET_ALTIVEC"
+ "vxor %0,%0,%0"
+ [(set_attr "type" "vecsimple")])
+
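;; The four patterns above rely on the usual vector-zero idiom: vxor
;; of a register with itself yields all-zero bits whatever the old
;; contents, and all-zeros encodes zero at every element width
;; (including +0.0f for V4SF), so a single opcode serves all modes.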
;; Simple binary operations.
(define_insn "addv16qi3"
@@ -15033,6 +15136,7 @@
"TARGET_ALTIVEC"
"vspltb %0,%1,%2"
[(set_attr "type" "vecperm")])
+
(define_insn "altivec_vsplth"
[(set (match_operand:V8HI 0 "register_operand" "=v")
(unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
@@ -15318,110 +15422,79 @@
;; AltiVec predicates.
-(define_insn "altivec_vcmpequb_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 173))]
- "TARGET_ALTIVEC"
- "vcmpequb. %0,%1,%2"
-[(set_attr "type" "veccmp")])
-
-(define_insn "altivec_vcmpequh_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 174))]
+(define_expand "cr6_test_for_zero"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (eq:SI (reg:CC 74)
+ (const_int 0)))]
"TARGET_ALTIVEC"
- "vcmpequh. %0,%1,%2"
-[(set_attr "type" "veccmp")])
+ "")
-(define_insn "altivec_vcmpequw_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 175))]
+(define_expand "cr6_test_for_zero_reverse"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (eq:SI (reg:CC 74)
+ (const_int 0)))
+ (set (match_dup 0) (minus:SI (const_int 1) (match_dup 0)))]
"TARGET_ALTIVEC"
- "vcmpequw. %0,%1,%2"
-[(set_attr "type" "veccmp")])
+ "")
-(define_insn "altivec_vcmpeqfp_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")] 176))]
+(define_expand "cr6_test_for_lt"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lt:SI (reg:CC 74)
+ (const_int 0)))]
"TARGET_ALTIVEC"
- "vcmpeqfp. %0,%1,%2"
-[(set_attr "type" "veccmp")])
+ "")
-(define_insn "altivec_vcmpgtub_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 177))]
+(define_expand "cr6_test_for_lt_reverse"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lt:SI (reg:CC 74)
+ (const_int 0)))
+ (set (match_dup 0) (minus:SI (const_int 1) (match_dup 0)))]
"TARGET_ALTIVEC"
- "vcmpgtub. %0,%1,%2"
-[(set_attr "type" "veccmp")])
+ "")
-(define_insn "altivec_vcmpgtsb_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
- (match_operand:V16QI 2 "register_operand" "v")] 178))]
- "TARGET_ALTIVEC"
- "vcmpgtsb. %0,%1,%2"
-[(set_attr "type" "veccmp")])
+;; We can get away with generating the opcode on the fly (%3 below)
+;; because all the predicates have the same scheduling parameters.
-(define_insn "altivec_vcmpgtuw_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 179))]
+(define_insn "altivec_predicate_v4si"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] 173))
+ (clobber (match_scratch:V4SI 0 "=v"))]
"TARGET_ALTIVEC"
- "vcmpgtuw. %0,%1,%2"
+ "%3 %0,%1,%2"
[(set_attr "type" "veccmp")])
-(define_insn "altivec_vcmpgtsw_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
- (match_operand:V4SI 2 "register_operand" "v")] 180))]
+(define_insn "altivec_predicate_v4sf"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] 174))
+ (clobber (match_scratch:V4SF 0 "=v"))]
"TARGET_ALTIVEC"
- "vcmpgtsw. %0,%1,%2"
+ "%3 %0,%1,%2"
[(set_attr "type" "veccmp")])
-(define_insn "altivec_vcmpgefp_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")] 181))]
+(define_insn "altivec_predicate_v8hi"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] 175))
+ (clobber (match_scratch:V8HI 0 "=v"))]
"TARGET_ALTIVEC"
- "vcmpgefp. %0,%1,%2"
+ "%3 %0,%1,%2"
[(set_attr "type" "veccmp")])
-(define_insn "altivec_vcmpgtfp_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")] 182))]
+(define_insn "altivec_predicate_v16qi"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] 175))
+ (clobber (match_scratch:V16QI 0 "=v"))]
"TARGET_ALTIVEC"
- "vcmpgtfp. %0,%1,%2"
+ "%3 %0,%1,%2"
[(set_attr "type" "veccmp")])
-(define_insn "altivec_vcmpbfp_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
- (match_operand:V4SF 2 "register_operand" "v")] 183))]
- "TARGET_ALTIVEC"
- "vcmpbfp. %0,%1,%2"
-[(set_attr "type" "veccmp")])
-
-(define_insn "altivec_vcmpgtuh_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 184))]
- "TARGET_ALTIVEC"
- "vcmpgtuh. %0,%1,%2"
-[(set_attr "type" "veccmp")])
-
-(define_insn "altivec_vcmpgtsh_p"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
- (match_operand:V8HI 2 "register_operand" "v")] 185))]
- "TARGET_ALTIVEC"
- "vcmpgtsh. %0,%1,%2"
- [(set_attr "type" "veccmp")])
-
(define_insn "altivec_mtvscr"
[(unspec [(match_operand:V4SI 0 "register_operand" "v")] 186)]
"TARGET_ALTIVEC"
@@ -15495,49 +15568,61 @@
"lvsr %0,%1,%2"
[(set_attr "type" "vecload")])
+;; Parallel some of the LVE* and STV*'s with unspecs because some have
+;; identical rtl but different instructions-- and gcc gets confused.
+
(define_insn "altivec_lvebx"
- [(set (match_operand:V16QI 0 "register_operand" "=v")
- (unspec:V16QI [(match_operand:SI 1 "register_operand" "b")
- (match_operand:SI 2 "register_operand" "r")] 196))]
+ [(parallel
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (mem:V16QI (plus:SI (match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "register_operand" "r"))))
+ (unspec [(const_int 0)] 196)])]
"TARGET_ALTIVEC"
"lvebx %0,%1,%2"
[(set_attr "type" "vecload")])
(define_insn "altivec_lvehx"
- [(set (match_operand:V8HI 0 "register_operand" "=v")
- (unspec:V8HI [(match_operand:SI 1 "register_operand" "b")
- (match_operand:SI 2 "register_operand" "r")] 197))]
+ [(parallel
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (mem:V8HI
+ (and:SI (plus:SI (match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "register_operand" "r"))
+ (const_int -2))))
+ (unspec [(const_int 0)] 197)])]
"TARGET_ALTIVEC"
"lvehx %0,%1,%2"
[(set_attr "type" "vecload")])
(define_insn "altivec_lvewx"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:SI 1 "register_operand" "b")
- (match_operand:SI 2 "register_operand" "r")] 198))]
+ [(parallel
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (mem:V4SI
+ (and:SI (plus:SI (match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "register_operand" "r"))
+ (const_int -4))))
+ (unspec [(const_int 0)] 198)])]
"TARGET_ALTIVEC"
"lvewx %0,%1,%2"
[(set_attr "type" "vecload")])
(define_insn "altivec_lvxl"
- [(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:SI 1 "register_operand" "b")
- (match_operand:SI 2 "register_operand" "r")] 199))]
+ [(parallel
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (mem:V4SI (plus:SI (match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "register_operand" "r"))))
+ (unspec [(const_int 0)] 213)])]
"TARGET_ALTIVEC"
"lvxl %0,%1,%2"
[(set_attr "type" "vecload")])
(define_insn "altivec_lvx"
[(set (match_operand:V4SI 0 "register_operand" "=v")
- (unspec:V4SI [(match_operand:SI 1 "register_operand" "b")
- (match_operand:SI 2 "register_operand" "r")] 200))]
+ (mem:V4SI (plus:SI (match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "register_operand" "r"))))]
"TARGET_ALTIVEC"
"lvx %0,%1,%2"
[(set_attr "type" "vecload")])
-;; Parallel the STV*'s with unspecs because some of them have
-;; identical rtl but are different instructions-- and gcc gets confused.
-
(define_insn "altivec_stvx"
[(parallel
[(set (mem:V4SI
@@ -15596,3 +15681,73 @@
"TARGET_ALTIVEC"
"stvewx %2,%0,%1"
[(set_attr "type" "vecstore")])
+
+(define_insn "absv16qi2"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (abs:V16QI (match_operand:V16QI 1 "register_operand" "v")))
+ (clobber (match_scratch:V16QI 2 "=v"))
+ (clobber (match_scratch:V16QI 3 "=v"))]
+ "TARGET_ALTIVEC"
+ "vspltisb %2,0\;vsububm %3,%2,%1\;vmaxsb %0,%1,%3"
+ [(set_attr "type" "altivec")
+ (set_attr "length" "12")])
+
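;; The abs patterns use the identity abs(x) == max(x, 0 - x): splat an
;; all-zero vector, subtract elementwise, take the signed max.  With
;; the modular subtract (vsububm and friends) the most negative
;; element maps to itself, abs(-128) == -128 per byte lane; the
;; altivec_abss_* patterns further down use the saturating subtract
;; (vsubsbs etc.), so the same corner case yields 127 instead.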
+(define_insn "absv8hi2"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (abs:V8HI (match_operand:V8HI 1 "register_operand" "v")))
+ (clobber (match_scratch:V8HI 2 "=v"))
+ (clobber (match_scratch:V8HI 3 "=v"))]
+ "TARGET_ALTIVEC"
+ "vspltisb %2,0\;vsubuhm %3,%2,%1\;vmaxsh %0,%1,%3"
+ [(set_attr "type" "altivec")
+ (set_attr "length" "12")])
+
+(define_insn "absv4si2"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (abs:V4SI (match_operand:V4SI 1 "register_operand" "v")))
+ (clobber (match_scratch:V4SI 2 "=v"))
+ (clobber (match_scratch:V4SI 3 "=v"))]
+ "TARGET_ALTIVEC"
+ "vspltisb %2,0\;vsubuwm %3,%2,%1\;vmaxsw %0,%1,%3"
+ [(set_attr "type" "altivec")
+ (set_attr "length" "12")])
+
+(define_insn "absv4sf2"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (abs:V4SF (match_operand:V4SF 1 "register_operand" "v")))
+ (clobber (match_scratch:V4SF 2 "=v"))
+ (clobber (match_scratch:V4SF 3 "=v"))]
+ "TARGET_ALTIVEC"
+ "vspltisw %2, -1\;vslw %3,%2,%2\;vandc %0,%1,%3"
+ [(set_attr "type" "altivec")
+ (set_attr "length" "12")])
+
+(define_insn "altivec_abss_v16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")] 210))
+ (clobber (match_scratch:V16QI 2 "=v"))
+ (clobber (match_scratch:V16QI 3 "=v"))]
+ "TARGET_ALTIVEC"
+ "vspltisb %2,0\;vsubsbs %3,%2,%1\;vmaxsb %0,%1,%3"
+ [(set_attr "type" "altivec")
+ (set_attr "length" "12")])
+
+(define_insn "altivec_abss_v8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")] 211))
+ (clobber (match_scratch:V8HI 2 "=v"))
+ (clobber (match_scratch:V8HI 3 "=v"))]
+ "TARGET_ALTIVEC"
+ "vspltisb %2,0\;vsubshs %3,%2,%1\;vmaxsh %0,%1,%3"
+ [(set_attr "type" "altivec")
+ (set_attr "length" "12")])
+
+(define_insn "altivec_abss_v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")] 212))
+ (clobber (match_scratch:V4SI 2 "=v"))
+ (clobber (match_scratch:V4SI 3 "=v"))]
+ "TARGET_ALTIVEC"
+ "vspltisb %2,0\;vsubsws %3,%2,%1\;vmaxsw %0,%1,%3"
+ [(set_attr "type" "altivec")
+ (set_attr "length" "12")])
diff --git a/contrib/gcc/config/rs6000/rtems.h b/contrib/gcc/config/rs6000/rtems.h
index 6810d38..aa68130 100644
--- a/contrib/gcc/config/rs6000/rtems.h
+++ b/contrib/gcc/config/rs6000/rtems.h
@@ -1,5 +1,5 @@
/* Definitions for rtems targeting a PowerPC using elf.
- Copyright (C) 1996, 1997, 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 2000, 2001, 2002 Free Software Foundation, Inc.
Contributed by Joel Sherrill (joel@OARcorp.com).
This file is part of GNU CC.
@@ -22,13 +22,5 @@ Boston, MA 02111-1307, USA. */
/* Specify predefined symbols in preprocessor. */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-DPPC -Drtems -D__rtems__ \
+#define CPP_PREDEFINES "-DPPC -D__rtems__ \
-Asystem=rtems -Acpu=powerpc -Amachine=powerpc"
-
-/* Generate calls to memcpy, memcmp and memset. */
-#ifndef TARGET_MEM_FUNCTIONS
-#define TARGET_MEM_FUNCTIONS
-#endif
-
-#undef STARTFILE_DEFAULT_SPEC
-#define STARTFILE_DEFAULT_SPEC "crt0.o%s"
diff --git a/contrib/gcc/config/rs6000/sysv4.h b/contrib/gcc/config/rs6000/sysv4.h
index c20eaa0..b807bb7 100644
--- a/contrib/gcc/config/rs6000/sysv4.h
+++ b/contrib/gcc/config/rs6000/sysv4.h
@@ -197,6 +197,8 @@ do { \
rs6000_current_abi = ABI_V4; \
else if (!strcmp (rs6000_abi_name, "linux")) \
rs6000_current_abi = ABI_V4; \
+ else if (!strcmp (rs6000_abi_name, "gnu")) \
+ rs6000_current_abi = ABI_V4; \
else if (!strcmp (rs6000_abi_name, "netbsd")) \
rs6000_current_abi = ABI_V4; \
else if (!strcmp (rs6000_abi_name, "i960-old")) \
@@ -383,7 +385,7 @@ do { \
/* No data type wants to be aligned rounder than this. */
#undef BIGGEST_ALIGNMENT
-#define BIGGEST_ALIGNMENT (TARGET_EABI ? 64 : 128)
+#define BIGGEST_ALIGNMENT ((TARGET_EABI && !TARGET_ALTIVEC) ? 64 : 128)
/* An expression for the alignment of a structure field FIELD if the
alignment computed in the usual way is COMPUTED. */
@@ -397,7 +399,8 @@ do { \
SPECIFIED. */
#define ROUND_TYPE_ALIGN(TYPE, COMPUTED, SPECIFIED) \
((TARGET_ALTIVEC && TREE_CODE (TYPE) == VECTOR_TYPE) \
- ? 128 : MAX (COMPUTED, SPECIFIED))
+ ? MAX (MAX ((COMPUTED), (SPECIFIED)), 128) \
+ : MAX (COMPUTED, SPECIFIED))
#undef BIGGEST_FIELD_ALIGNMENT
#undef ADJUST_FIELD_ALIGN
@@ -877,6 +880,7 @@ do { \
%{mcall-freebsd: -mbig} \
%{mcall-i960-old: -mlittle} \
%{mcall-linux: -mbig} \
+ %{mcall-gnu: -mbig} \
%{mcall-netbsd: -mbig} \
}}}}"
@@ -889,7 +893,7 @@ do { \
} \
}}"
-#define CC1_ENDIAN_DEFAULT_SPEC "%(cc1_endian_big_spec)"
+#define CC1_ENDIAN_DEFAULT_SPEC "%(cc1_endian_big)"
/* Pass -G xxx to the compiler and set correct endian mode. */
#define CC1_SPEC "%{G*} \
@@ -900,10 +904,11 @@ do { \
%{mcall-freebsd: -mbig %(cc1_endian_big) } \
%{mcall-i960-old: -mlittle %(cc1_endian_little) } \
%{mcall-linux: -mbig %(cc1_endian_big) } \
+ %{mcall-gnu: -mbig %(cc1_endian_big) } \
%{mcall-netbsd: -mbig %(cc1_endian_big) } \
- %{!mcall-aixdesc: %{!mcall-freebsd: %{!mcall-i960-old: %{!mcall-linux: %{!mcall-netbsd: \
+ %{!mcall-aixdesc: %{!mcall-freebsd: %{!mcall-i960-old: %{!mcall-linux: %{!mcall-gnu: %{!mcall-netbsd: \
%(cc1_endian_default) \
- }}}}} \
+ }}}}}} \
}}}} \
%{mno-sdata: -msdata=none } \
%{meabi: %{!mcall-*: -mcall-sysv }} \
@@ -912,6 +917,7 @@ do { \
%{mcall-freebsd: -mno-eabi } \
%{mcall-i960-old: -meabi } \
%{mcall-linux: -mno-eabi } \
+ %{mcall-gnu: -mno-eabi } \
%{mcall-netbsd: -mno-eabi }}} \
%{msdata: -msdata=default} \
%{mno-sdata: -msdata=none} \
@@ -942,9 +948,10 @@ do { \
%{msim: %(link_start_sim) } \
%{mcall-freebsd: %(link_start_freebsd) } \
%{mcall-linux: %(link_start_linux) } \
+%{mcall-gnu: %(link_start_gnu) } \
%{mcall-netbsd: %(link_start_netbsd) } \
-%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-linux: \
- %{!mcall-netbsd: %{!mcall-freebsd: %(link_start_default) }}}}}}}"
+%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-linux: %{!mcall-gnu: \
+ %{!mcall-netbsd: %{!mcall-freebsd: %(link_start_default) }}}}}}}}"
#define LINK_START_DEFAULT_SPEC ""
@@ -998,8 +1005,9 @@ do { \
%{msim: %(link_os_sim) } \
%{mcall-freebsd: %(link_os_freebsd) } \
%{mcall-linux: %(link_os_linux) } \
+%{mcall-gnu: %(link_os_gnu) } \
%{mcall-netbsd: %(link_os_netbsd) } \
-%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-netbsd: %(link_os_default) }}}}}}}"
+%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-gnu: %{!mcall-netbsd: %(link_os_default) }}}}}}}}"
#define LINK_OS_DEFAULT_SPEC ""
@@ -1044,10 +1052,11 @@ do { \
%{!mlittle: %{!mlittle-endian: %{!mbig: %{!mbig-endian: \
%{mcall-freebsd: %(cpp_endian_big) } \
%{mcall-linux: %(cpp_endian_big) } \
+ %{mcall-gnu: %(cpp_endian_big) } \
%{mcall-netbsd: %(cpp_endian_big) } \
%{mcall-i960-old: %(cpp_endian_little) } \
%{mcall-aixdesc: %(cpp_endian_big) } \
- %{!mcall-linux: %{!mcall-freebsd: %{!mcall-netbsd: %{!mcall-aixdesc: %(cpp_endian_default) }}}}}}}}"
+ %{!mcall-linux: %{!mcall-gnu: %{!mcall-freebsd: %{!mcall-netbsd: %{!mcall-aixdesc: %(cpp_endian_default) }}}}}}}}}"
#define CPP_ENDIAN_DEFAULT_SPEC "%(cpp_endian_big)"
@@ -1060,8 +1069,9 @@ do { \
%{msim: %(cpp_os_sim) } \
%{mcall-freebsd: %(cpp_os_freebsd) } \
%{mcall-linux: %(cpp_os_linux) } \
+%{mcall-gnu: %(cpp_os_gnu) } \
%{mcall-netbsd: %(cpp_os_netbsd) } \
-%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-netbsd: %(cpp_os_default) }}}}}}}"
+%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-gnu: %{!mcall-netbsd: %(cpp_os_default) }}}}}}}}"
#define CPP_OS_DEFAULT_SPEC ""
@@ -1074,8 +1084,9 @@ do { \
%{msim: %(startfile_sim) } \
%{mcall-freebsd: %(startfile_freebsd) } \
%{mcall-linux: %(startfile_linux) } \
+%{mcall-gnu: %(startfile_gnu) } \
%{mcall-netbsd: %(startfile_netbsd) } \
-%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-netbsd: %(startfile_default) }}}}}}}"
+%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-gnu: %{!mcall-netbsd: %(startfile_default) }}}}}}}}"
#define STARTFILE_DEFAULT_SPEC ""
@@ -1088,23 +1099,25 @@ do { \
%{msim: %(lib_sim) } \
%{mcall-freebsd: %(lib_freebsd) } \
%{mcall-linux: %(lib_linux) } \
+%{mcall-gnu: %(lib_gnu) } \
%{mcall-netbsd: %(lib_netbsd) } \
-%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-netbsd: %(lib_default) }}}}}}}"
+%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-gnu: %{!mcall-netbsd: %(lib_default) }}}}}}}}"
#define LIB_DEFAULT_SPEC ""
/* Override svr4.h definition. */
#undef ENDFILE_SPEC
-#define ENDFILE_SPEC "\
+#define ENDFILE_SPEC "crtsavres.o%s \
%{mads: %(endfile_ads)} \
%{myellowknife: %(endfile_yellowknife)} \
%{mmvme: %(endfile_mvme)} \
%{msim: %(endfile_sim)} \
%{mcall-freebsd: %(endfile_freebsd) } \
%{mcall-linux: %(endfile_linux) } \
+%{mcall-gnu: %(endfile_gnu) } \
%{mcall-netbsd: %(endfile_netbsd) } \
%{mvxworks: %(endfile_vxworks) } \
-%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-netbsd: %{!mvxworks: %(endfile_default) }}}}}}}}"
+%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-gnu: %{!mcall-netbsd: %{!mvxworks: %(endfile_default) }}}}}}}}}"
#define ENDFILE_DEFAULT_SPEC ""
@@ -1184,10 +1197,18 @@ do { \
%{profile:-lc_p} %{!profile:-lc}}}"
#endif
+#ifdef USE_GNULIBC_1
#define STARTFILE_LINUX_SPEC "\
%{!shared: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}} \
%{mnewlib: ecrti.o%s} %{!mnewlib: crti.o%s} \
%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+#else
+#define STARTFILE_LINUX_SPEC "\
+%{!shared: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}} \
+%{mnewlib: ecrti.o%s} %{!mnewlib: crti.o%s} \
+%{static:crtbeginT.o%s} \
+%{!static:%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}}"
+#endif
#define ENDFILE_LINUX_SPEC "%{!shared:crtend.o%s} %{shared:crtendS.o%s} \
%{mnewlib: ecrtn.o%s} %{!mnewlib: crtn.o%s}"
@@ -1198,22 +1219,51 @@ do { \
%{rdynamic:-export-dynamic} \
%{!dynamic-linker:-dynamic-linker /lib/ld.so.1}}}"
+#if !defined(USE_GNULIBC_1) && defined(HAVE_LD_EH_FRAME_HDR)
+# define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
+#endif
+
#ifdef USE_GNULIBC_1
-#define CPP_OS_LINUX_SPEC "-D__unix__ -D__linux__ \
-%{!undef: \
- %{!ansi: \
- %{!std=*:-Dunix -D__unix -Dlinux -D__linux} \
- %{std=gnu*:-Dunix -D__unix -Dlinux -D__linux}}} \
+#define CPP_OS_LINUX_SPEC "-D__unix__ -D__gnu_linux__ -D__linux__ \
+%{!undef: \
+ %{!ansi: \
+ %{!std=*:-Dunix -D__unix -Dlinux -D__linux} \
+ %{std=gnu*:-Dunix -D__unix -Dlinux -D__linux}}} \
-Asystem=unix -Asystem=posix"
#else
-#define CPP_OS_LINUX_SPEC "-D__unix__ -D__linux__ \
-%{!undef: \
- %{!ansi: \
- %{!std=*:-Dunix -D__unix -Dlinux -D__linux} \
- %{std=gnu*:-Dunix -D__unix -Dlinux -D__linux}}} \
+#define CPP_OS_LINUX_SPEC "-D__unix__ -D__gnu_linux__ -D__linux__ \
+%{!undef: \
+ %{!ansi: \
+ %{!std=*:-Dunix -D__unix -Dlinux -D__linux} \
+ %{std=gnu*:-Dunix -D__unix -Dlinux -D__linux}}} \
-Asystem=unix -Asystem=posix %{pthread:-D_REENTRANT}"
#endif
+/* GNU/Hurd support. */
+#define LIB_GNU_SPEC "%{mnewlib: --start-group -lgnu -lc --end-group } \
+%{!mnewlib: %{shared:-lc} %{!shared: %{pthread:-lpthread } \
+%{profile:-lc_p} %{!profile:-lc}}}"
+
+#define STARTFILE_GNU_SPEC "\
+%{!shared: %{!static: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}} \
+%{static: %{pg:gcrt0.o%s} %{!pg:%{p:gcrt0.o%s} %{!p:crt0.o%s}}} \
+%{mnewlib: ecrti.o%s} %{!mnewlib: crti.o%s} \
+%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+#define ENDFILE_GNU_SPEC "%{!shared:crtend.o%s} %{shared:crtendS.o%s} \
+%{mnewlib: ecrtn.o%s} %{!mnewlib: crtn.o%s}"
+
+#define LINK_START_GNU_SPEC ""
+
+#define LINK_OS_GNU_SPEC "-m elf32ppclinux %{!shared: %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld.so.1}}}"
+
+#define CPP_OS_GNU_SPEC "-D__unix__ -D__gnu_hurd__ -D__GNU__ \
+%{!undef: \
+ %{!ansi: -Dunix -D__unix}} \
+-Asystem=gnu -Asystem=unix -Asystem=posix %{pthread:-D_REENTRANT}"
+
/* NetBSD support. */
#define LIB_NETBSD_SPEC "\
%{profile:-lgmon -lc_p} %{!profile:-lc}"
@@ -1292,6 +1342,7 @@ ncrtn.o%s"
{ "lib_mvme", LIB_MVME_SPEC }, \
{ "lib_sim", LIB_SIM_SPEC }, \
{ "lib_freebsd", LIB_FREEBSD_SPEC }, \
+ { "lib_gnu", LIB_GNU_SPEC }, \
{ "lib_linux", LIB_LINUX_SPEC }, \
{ "lib_netbsd", LIB_NETBSD_SPEC }, \
{ "lib_vxworks", LIB_VXWORKS_SPEC }, \
@@ -1301,6 +1352,7 @@ ncrtn.o%s"
{ "startfile_mvme", STARTFILE_MVME_SPEC }, \
{ "startfile_sim", STARTFILE_SIM_SPEC }, \
{ "startfile_freebsd", STARTFILE_FREEBSD_SPEC }, \
+ { "startfile_gnu", STARTFILE_GNU_SPEC }, \
{ "startfile_linux", STARTFILE_LINUX_SPEC }, \
{ "startfile_netbsd", STARTFILE_NETBSD_SPEC }, \
{ "startfile_vxworks", STARTFILE_VXWORKS_SPEC }, \
@@ -1310,6 +1362,7 @@ ncrtn.o%s"
{ "endfile_mvme", ENDFILE_MVME_SPEC }, \
{ "endfile_sim", ENDFILE_SIM_SPEC }, \
{ "endfile_freebsd", ENDFILE_FREEBSD_SPEC }, \
+ { "endfile_gnu", ENDFILE_GNU_SPEC }, \
{ "endfile_linux", ENDFILE_LINUX_SPEC }, \
{ "endfile_netbsd", ENDFILE_NETBSD_SPEC }, \
{ "endfile_vxworks", ENDFILE_VXWORKS_SPEC }, \
@@ -1323,6 +1376,7 @@ ncrtn.o%s"
{ "link_start_mvme", LINK_START_MVME_SPEC }, \
{ "link_start_sim", LINK_START_SIM_SPEC }, \
{ "link_start_freebsd", LINK_START_FREEBSD_SPEC }, \
+ { "link_start_gnu", LINK_START_GNU_SPEC }, \
{ "link_start_linux", LINK_START_LINUX_SPEC }, \
{ "link_start_netbsd", LINK_START_NETBSD_SPEC }, \
{ "link_start_vxworks", LINK_START_VXWORKS_SPEC }, \
@@ -1334,6 +1388,7 @@ ncrtn.o%s"
{ "link_os_sim", LINK_OS_SIM_SPEC }, \
{ "link_os_freebsd", LINK_OS_FREEBSD_SPEC }, \
{ "link_os_linux", LINK_OS_LINUX_SPEC }, \
+ { "link_os_gnu", LINK_OS_GNU_SPEC }, \
{ "link_os_netbsd", LINK_OS_NETBSD_SPEC }, \
{ "link_os_vxworks", LINK_OS_VXWORKS_SPEC }, \
{ "link_os_default", LINK_OS_DEFAULT_SPEC }, \
@@ -1349,6 +1404,7 @@ ncrtn.o%s"
{ "cpp_os_mvme", CPP_OS_MVME_SPEC }, \
{ "cpp_os_sim", CPP_OS_SIM_SPEC }, \
{ "cpp_os_freebsd", CPP_OS_FREEBSD_SPEC }, \
+ { "cpp_os_gnu", CPP_OS_GNU_SPEC }, \
{ "cpp_os_linux", CPP_OS_LINUX_SPEC }, \
{ "cpp_os_netbsd", CPP_OS_NETBSD_SPEC }, \
{ "cpp_os_vxworks", CPP_OS_VXWORKS_SPEC }, \
diff --git a/contrib/gcc/config/rs6000/t-aix43 b/contrib/gcc/config/rs6000/t-aix43
index 209a763..7be8ebc 100644
--- a/contrib/gcc/config/rs6000/t-aix43
+++ b/contrib/gcc/config/rs6000/t-aix43
@@ -62,7 +62,7 @@ SHLIB_INSTALL = $(INSTALL_DATA) @shlib_base_name@.a $$(slibdir)/
SHLIB_LIBS = -lc `case @shlib_base_name@ in *pthread*) echo -lpthread ;; esac`
SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver
-SHLIB_NM_FLAGS = -Bpg
+SHLIB_NM_FLAGS = -Bpg -X32_64
# Both 32-bit and 64-bit objects in archives.
AR_FLAGS_FOR_TARGET = -X32_64
diff --git a/contrib/gcc/config/rs6000/t-linux64 b/contrib/gcc/config/rs6000/t-linux64
new file mode 100644
index 0000000..48b6150
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-linux64
@@ -0,0 +1,16 @@
+# Override t-linux. We don't want -fPIC.
+CRTSTUFF_T_CFLAGS_S =
+TARGET_LIBGCC2_CFLAGS =
+
+EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o \
+ crtsavres.o
+
+# ld provides these functions as needed.
+crtsavres.S:
+ echo >crtsavres.S
+
+$(T)crtsavres.o: crtsavres.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtsavres.S -o $(T)crtsavres.o
+
+# Modify the shared lib version file
+SHLIB_MKMAP_OPTS = -v dotsyms=1
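For context: crtsavres.S is generated empty here because on 64-bit PowerPC Linux the linker can synthesize the out-of-line register save/restore helpers on demand (per the ppc64 ELF ABI these are the _savegpr0_N/_restgpr0_N family, an assumption stated here rather than in the patch), as the "ld provides these functions as needed" comment says; the 32-bit SysV targets instead build the real routines from crtsavres.asm via t-ppccomm below.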
diff --git a/contrib/gcc/config/rs6000/t-ppccomm b/contrib/gcc/config/rs6000/t-ppccomm
index e264586..7570af4 100644
--- a/contrib/gcc/config/rs6000/t-ppccomm
+++ b/contrib/gcc/config/rs6000/t-ppccomm
@@ -38,9 +38,10 @@ MULTILIB_MATCHES_SYSV = mcall-sysv=mcall-sysv-eabi mcall-sysv=mcall-sysv-noeabi
LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
EXTRA_MULTILIB_PARTS = crtbegin$(objext) crtend$(objext) \
- crtbeginS$(objext) crtendS$(objext) \
+ crtbeginS$(objext) crtendS$(objext) crtbeginT$(objext) \
ecrti$(objext) ecrtn$(objext) \
- ncrti$(objext) ncrtn$(objext)
+ ncrti$(objext) ncrtn$(objext) \
+ crtsavres$(objext)
# We build {e,n}crti.o and {e,n}crtn.o, which serve to add begin and
# end labels to all of the special sections used when we link using gcc.
@@ -58,6 +59,9 @@ ncrti.S: $(srcdir)/config/rs6000/sol-ci.asm
ncrtn.S: $(srcdir)/config/rs6000/sol-cn.asm
cat $(srcdir)/config/rs6000/sol-cn.asm >ncrtn.S
+crtsavres.S: $(srcdir)/config/rs6000/crtsavres.asm
+ cat $(srcdir)/config/rs6000/crtsavres.asm >crtsavres.S
+
# Build multiple copies of ?crt{i,n}.o, one for each target switch.
$(T)ecrti$(objext): ecrti.S
$(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ecrti.S -o $(T)ecrti$(objext)
@@ -71,6 +75,9 @@ $(T)ncrti$(objext): ncrti.S
$(T)ncrtn$(objext): ncrtn.S
$(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ncrtn.S -o $(T)ncrtn$(objext)
+$(T)crtsavres$(objext): crtsavres.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtsavres.S -o $(T)crtsavres$(objext)
+
# It is important that crtbegin.o, etc., aren't surprised by stuff in .sdata.
CRTSTUFF_T_CFLAGS = -msdata=none
# Make sure crt*.o are built with -fPIC even if configured with
diff --git a/contrib/gcc/config/rs6000/xcoff.h b/contrib/gcc/config/rs6000/xcoff.h
index e60f3fe..4a51b04 100644
--- a/contrib/gcc/config/rs6000/xcoff.h
+++ b/contrib/gcc/config/rs6000/xcoff.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for some generic XCOFF file format
- Copyright (C) 2001 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -22,10 +22,6 @@ Boston, MA 02111-1307, USA. */
#define TARGET_OBJECT_FORMAT OBJECT_XCOFF
-/* The AIX linker will discard static constructors in object files before
- collect has a chance to see them, so scan the object files directly. */
-#define COLLECT_EXPORT_LIST
-
/* The RS/6000 uses the XCOFF format. */
#define XCOFF_DEBUGGING_INFO
@@ -345,13 +341,7 @@ toc_section () \
SYMBOL_REF_FLAG (sym_ref) = 1; \
if (TREE_PUBLIC (DECL)) \
{ \
- if (RS6000_WEAK && DECL_WEAK (decl)) \
- { \
- fputs ("\t.weak .", FILE); \
- RS6000_OUTPUT_BASENAME (FILE, NAME); \
- putc ('\n', FILE); \
- } \
- else \
+ if (!RS6000_WEAK || !DECL_WEAK (decl)) \
{ \
fputs ("\t.globl .", FILE); \
RS6000_OUTPUT_BASENAME (FILE, NAME); \
@@ -464,20 +454,6 @@ toc_section () \
xcoff_bss_section_name); \
} while (0)
-/* Output a weak symbol, if weak support present. */
-#ifdef HAVE_GAS_WEAK
-#define HANDLE_PRAGMA_WEAK 1
-
-#define ASM_WEAKEN_LABEL(FILE, NAME) \
- do \
- { \
- fputs ("\t.weak ", (FILE)); \
- assemble_name ((FILE), (NAME)); \
- fputc ('\n', (FILE)); \
- } \
- while (0)
-#endif /* HAVE_GAS_WEAK */
-
/* This is how we tell the assembler that two symbols have the same value. */
#define SET_ASM_OP "\t.set "
diff --git a/contrib/gcc/config/rtems.h b/contrib/gcc/config/rtems.h
index a5ac8a7..a3f9ba3 100644
--- a/contrib/gcc/config/rtems.h
+++ b/contrib/gcc/config/rtems.h
@@ -1,5 +1,5 @@
/* Configuration common to all targets running RTEMS.
- Copyright (C) 2000 Free Software Foundation, Inc.
+ Copyright (C) 2000, 2002 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -20,3 +20,18 @@ Boston, MA 02111-1307, USA. */
/* The system headers under RTEMS are C++-aware. */
#define NO_IMPLICIT_EXTERN_C
+
+/* Generate calls to memcpy, memcmp and memset. */
+#ifndef TARGET_MEM_FUNCTIONS
+#define TARGET_MEM_FUNCTIONS
+#endif
+
+/*
+ * Dummy start/end specification to let linker work as
+ * needed by autoconf scripts using this compiler.
+ */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crt0.o%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC ""
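For context: autoconf-generated configure scripts link tiny test programs (essentially int main () { return 0; }) before any RTEMS BSP supplies real startup files, so the common RTEMS configuration points STARTFILE_SPEC at a plain crt0.o%s and empties ENDFILE_SPEC purely so those probe links can succeed.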
diff --git a/contrib/gcc/config/sparc/aout.h b/contrib/gcc/config/sparc/aout.h
index fd84d75..3a2273f 100644
--- a/contrib/gcc/config/sparc/aout.h
+++ b/contrib/gcc/config/sparc/aout.h
@@ -88,7 +88,7 @@ do { \
pointer is really %i7. */
#define DBX_REGISTER_NUMBER(REGNO) \
- (TARGET_FLAT && REGNO == FRAME_POINTER_REGNUM ? 31 : REGNO)
+ (TARGET_FLAT && (REGNO) == HARD_FRAME_POINTER_REGNUM ? 31 : REGNO)
/* This is how to output a note to DBX telling it the line number
to which the following sequence of instructions corresponds.
diff --git a/contrib/gcc/config/sparc/crtfastmath.c b/contrib/gcc/config/sparc/crtfastmath.c
new file mode 100644
index 0000000..df3f907
--- /dev/null
+++ b/contrib/gcc/config/sparc/crtfastmath.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2001 Free Software Foundation, Inc.
+ * Contributed by David S. Miller (davem@redhat.com)
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause
+ * the resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why
+ * the executable file might be covered by the GNU General Public License.
+ */
+
+#define FPRS_NS (1 << 22) /* Non-Standard fpu results */
+
+static void __attribute__((constructor))
+set_fast_math (void)
+{
+ unsigned int fsr;
+
+  /* This works for the 64-bit case because, even if a 32-bit ld/st of
+     the fsr register modified the upper 32 bits, the only things up
+     there are the 3 other condition codes, which are "don't care" at
+     the time this runs.  */
+
+ __asm__("st %%fsr, %0"
+ : "=m" (fsr));
+
+ fsr |= FPRS_NS;
+
+ __asm__("ld %0, %%fsr"
+ : : "m" (fsr));
+}
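For context: the constructor above runs before main whenever crtfastmath.o is linked in (the ENDFILE_SPEC hunks below add it under -ffast-math or -funsafe-math-optimizations), switching the FPU into nonstandard mode so subnormal results flush to zero. A quick way to observe the effect, assuming hardware that honors the bit (illustrative, not part of the patch):

    #include <float.h>
    #include <stdio.h>

    int
    main (void)
    {
      volatile double d = DBL_MIN;      /* smallest normal double */
      /* d / 2 is subnormal: about 1.11e-308 in IEEE mode, but it is
         flushed to 0 when the nonstandard-results bit is set.  */
      printf ("%g\n", d / 2);
      return 0;
    }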
diff --git a/contrib/gcc/config/sparc/elf.h b/contrib/gcc/config/sparc/elf.h
index 20bea82..d2d3a0b 100644
--- a/contrib/gcc/config/sparc/elf.h
+++ b/contrib/gcc/config/sparc/elf.h
@@ -26,7 +26,9 @@ Boston, MA 02111-1307, USA. */
#define STARTFILE_SPEC "crt0.o%s crti.o%s crtbegin.o%s"
#undef ENDFILE_SPEC
-#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+#define ENDFILE_SPEC \
+ "%{ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
+ crtend.o%s crtn.o%s"
/* Use the default. */
#undef LINK_SPEC
diff --git a/contrib/gcc/config/sparc/freebsd.h b/contrib/gcc/config/sparc/freebsd.h
index 314b07b..54b5f0a 100644
--- a/contrib/gcc/config/sparc/freebsd.h
+++ b/contrib/gcc/config/sparc/freebsd.h
@@ -1,5 +1,5 @@
/* Definitions for Sun Sparc64 running FreeBSD using the ELF format
- Copyright (C) 2001 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002 Free Software Foundation, Inc.
Contributed by David E. O'Brien <obrien@FreeBSD.org> and BSDi.
This file is part of GNU CC.
@@ -26,8 +26,8 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
#undef CPP_PREDEFINES
#define CPP_PREDEFINES FBSD_CPP_PREDEFINES
-#define LINK_SPEC "-m elf64_sparc %(link_arch) \
- %{!mno-relax:%{!r:-relax} \
+#define LINK_SPEC "%(link_arch) \
+ %{!mno-relax:%{!r:-relax}} \
%{p:%e`-p' not supported; use `-pg' and gprof(1)} \
%{Wl,*:%*} \
%{assert*} %{R*} %{rpath*} %{defsym*} \
@@ -91,22 +91,19 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
#undef TARGET_DEFAULT
#define TARGET_DEFAULT \
- (MASK_V9 + MASK_64BIT + MASK_PTR64 + MASK_VIS + MASK_FASTER_STRUCTS \
- + MASK_STACK_BIAS + MASK_APP_REGS /* + MASK_EPILOGUE */ + MASK_FPU \
+ (MASK_V9 + MASK_64BIT + MASK_PTR64 /* + MASK_FASTER_STRUCTS */ \
+ + MASK_STACK_BIAS + MASK_APP_REGS + MASK_FPU \
+ MASK_LONG_DOUBLE_128 /* + MASK_HARD_QUAD */)
/* The default code model. */
#undef SPARC_DEFAULT_CMODEL
-#define SPARC_DEFAULT_CMODEL CM_MEDMID
+#define SPARC_DEFAULT_CMODEL CM_MEDLOW
/************************[ Assembler stuff ]********************************/
-/* XXX */
-#if 0
-#undef ASM_CPU_DEFAULT_SPEC
-#define ASM_CPU_DEFAULT_SPEC "-Av9a"
-#endif
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
/* XXX2 */
/* This is how to output a definition of an internal numbered label where
@@ -151,3 +148,12 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
RELATIVE relocations. */
/* #define DWARF_OFFSET_SIZE PTR_SIZE */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{ffast-math|funsafe-math-optimizations:crtfastmath.o%s}" \
+ FBSD_ENDFILE_SPEC
+
+/* We use GNU ld so undefine this so that attribute((init_priority)) works. */
+#undef CTORS_SECTION_ASM_OP
+#undef DTORS_SECTION_ASM_OP
diff --git a/contrib/gcc/config/sparc/gmon-sol2.c b/contrib/gcc/config/sparc/gmon-sol2.c
index 6d66966..bcb0c06 100644
--- a/contrib/gcc/config/sparc/gmon-sol2.c
+++ b/contrib/gcc/config/sparc/gmon-sol2.c
@@ -291,8 +291,10 @@ static void internal_mcount(selfpc, frompcindex)
if(!already_setup) {
extern char etext[];
+ extern char _start[];
+ extern char _init[];
already_setup = 1;
- monstartup(0, (char *)etext);
+ monstartup(_start < _init ? _start : _init, etext);
#ifdef USE_ONEXIT
on_exit(_mcleanup, 0);
#else
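For context: the old call sampled from address 0, so most of the profiling histogram covered addresses below the program's text that can never execute; starting the range at whichever of _start or _init is lower keeps the pc-sample buckets inside the actual text segment.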
diff --git a/contrib/gcc/config/sparc/libgcc-sparc-glibc.ver b/contrib/gcc/config/sparc/libgcc-sparc-glibc.ver
new file mode 100644
index 0000000..e3ba0bb
--- /dev/null
+++ b/contrib/gcc/config/sparc/libgcc-sparc-glibc.ver
@@ -0,0 +1,28 @@
+# In order to work around the very problems that force us to now generally
+# create a libgcc.so, glibc reexported a number of routines from libgcc.a.
+# By now choosing the same version tags for these specific routines, we
+# maintain enough binary compatibility to allow future versions of glibc
+# to defer implementation of these routines to libgcc.so via DT_AUXILIARY.
+
+%ifdef __arch64__
+%define GLIBC_VER GLIBC_2.2
+%else
+%define GLIBC_VER GLIBC_2.0
+%endif
+%inherit GCC_3.0 GLIBC_VER
+GLIBC_VER {
+ # Sampling of DImode arithmetic used by (at least) i386 and m68k.
+ __divdi3
+ __moddi3
+ __udivdi3
+ __umoddi3
+
+ # Exception handling support functions used by most everyone.
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
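For context: the DImode helpers pinned above are exactly what GCC emits for 64-bit integer arithmetic on 32-bit targets, so a binary resolved against glibc's re-exports keeps working when libgcc_s.so later provides them. A minimal illustration (not part of the patch):

    /* On 32-bit SPARC this division compiles to a call to __divdi3,
       one of the entry points tagged GLIBC_2.0 above.  */
    long long
    div64 (long long a, long long b)
    {
      return a / b;
    }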
diff --git a/contrib/gcc/config/sparc/linux-aout.h b/contrib/gcc/config/sparc/linux-aout.h
index 55c833f..41d3877 100644
--- a/contrib/gcc/config/sparc/linux-aout.h
+++ b/contrib/gcc/config/sparc/linux-aout.h
@@ -1,5 +1,5 @@
/* Definitions for SPARC running Linux-based GNU systems with a.out.
- Copyright (C) 1996, 1997, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1999, 2002 Free Software Foundation, Inc.
Contributed by Eddie C. Dost (ecd@skynet.be)
This file is part of GNU CC.
@@ -59,10 +59,8 @@ Boston, MA 02111-1307, USA. */
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE 32
-#undef MAX_WCHAR_TYPE_SIZE
-
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Dunix -Dsparc -Dlinux -Asystem=unix -Asystem=posix"
+#define CPP_PREDEFINES "-Dunix -Dsparc -D__gnu_linux__ -Dlinux -Asystem=unix -Asystem=posix"
#undef CPP_SUBTARGET_SPEC
#define CPP_SUBTARGET_SPEC \
diff --git a/contrib/gcc/config/sparc/linux.h b/contrib/gcc/config/sparc/linux.h
index d464133..b93b46c 100644
--- a/contrib/gcc/config/sparc/linux.h
+++ b/contrib/gcc/config/sparc/linux.h
@@ -1,5 +1,5 @@
/* Definitions for SPARC running Linux-based GNU systems with ELF.
- Copyright (C) 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1998, 1999, 2000, 2002 Free Software Foundation, Inc.
Contributed by Eddie C. Dost (ecd@skynet.be)
This file is part of GNU CC.
@@ -62,7 +62,8 @@ Boston, MA 02111-1307, USA. */
#undef ENDFILE_SPEC
#define ENDFILE_SPEC \
- "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
+ "%{ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
+ %{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
/* This is for -profile to use -lc_p instead of -lc. */
#undef CC1_SPEC
@@ -97,10 +98,8 @@ Boston, MA 02111-1307, USA. */
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE 32
-#undef MAX_WCHAR_TYPE_SIZE
-
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-D__ELF__ -Dunix -D__sparc__ -Dlinux -Asystem=unix -Asystem=posix"
+#define CPP_PREDEFINES "-D__ELF__ -Dunix -D__sparc__ -D__gnu_linux__ -Dlinux -Asystem=unix -Asystem=posix"
#undef CPP_SUBTARGET_SPEC
#ifdef USE_GNULIBC_1
@@ -186,7 +185,7 @@ Boston, MA 02111-1307, USA. */
#undef ASM_SPEC
#define ASM_SPEC \
"%{V} %{v:%{!V:-V}} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s %{fpic:-K PIC} \
- %{fPIC:-K PIC} %(asm_relax)"
+ %{fPIC:-K PIC} %(asm_cpu) %(asm_relax)"
/* Same as sparc.h */
#undef DBX_REGISTER_NUMBER
@@ -204,6 +203,9 @@ do { \
#undef COMMON_ASM_OP
#define COMMON_ASM_OP "\t.common\t"
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
/* This is how to output a definition of an internal numbered label where
PREFIX is the class of label and NUM is the number within the class. */
@@ -247,3 +249,73 @@ do { \
#define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
#endif
+/* Don't be different from other Linux platforms in this regard. */
+#define HANDLE_PRAGMA_PACK_PUSH_POP
+
+/* We use GNU ld so undefine this so that attribute((init_priority)) works. */
+#undef CTORS_SECTION_ASM_OP
+#undef DTORS_SECTION_ASM_OP
+
+/* Do code reading to identify a signal frame, and set the frame
+ state data appropriately. See unwind-dw2.c for the structs. */
+
+#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS, SUCCESS) \
+ do { \
+ unsigned int *pc_ = (CONTEXT)->ra; \
+ int new_cfa_, i_, oldstyle_; \
+ int regs_off_, fpu_save_off_; \
+ int fpu_save_, this_cfa_; \
+ \
+ if (pc_[1] != 0x91d02010) /* ta 0x10 */ \
+ break; \
+ if (pc_[0] == 0x821020d8) /* mov NR_sigreturn, %g1 */ \
+ oldstyle_ = 1; \
+ else if (pc_[0] == 0x82102065) /* mov NR_rt_sigreturn, %g1 */ \
+ oldstyle_ = 0; \
+ else \
+ break; \
+ if (oldstyle_) \
+ { \
+ regs_off_ = 96; \
+ fpu_save_off_ = regs_off_ + (4 * 4) + (16 * 4); \
+ } \
+ else \
+ { \
+ regs_off_ = 96 + 128; \
+ fpu_save_off_ = regs_off_ + (4 * 4) + (16 * 4) + (2 * 4); \
+ } \
+ this_cfa_ = (int) (CONTEXT)->cfa; \
+ new_cfa_ = *(int *)(((CONTEXT)->cfa) + (regs_off_+(4*4)+(14 * 4))); \
+ fpu_save_ = *(int *)((this_cfa_) + (fpu_save_off_)); \
+ (FS)->cfa_how = CFA_REG_OFFSET; \
+ (FS)->cfa_reg = 14; \
+ (FS)->cfa_offset = new_cfa_ - (int) (CONTEXT)->cfa; \
+ for (i_ = 1; i_ < 16; ++i_) \
+ { \
+ if (i_ == 14) \
+ continue; \
+ (FS)->regs.reg[i_].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i_].loc.offset = \
+ this_cfa_ + (regs_off_+(4 * 4)+(i_ * 4)) - new_cfa_; \
+ } \
+ for (i_ = 0; i_ < 16; ++i_) \
+ { \
+ (FS)->regs.reg[i_ + 16].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i_ + 16].loc.offset = \
+ this_cfa_ + (i_ * 4) - new_cfa_; \
+ } \
+ if (fpu_save_) \
+ { \
+ for (i_ = 0; i_ < 32; ++i_) \
+ { \
+ (FS)->regs.reg[i_ + 32].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i_ + 32].loc.offset = \
+ (fpu_save_ + (i_ * 4)) - new_cfa_; \
+ } \
+ } \
+ /* Stick return address into %g0, same trick Alpha uses. */ \
+ (FS)->regs.reg[0].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[0].loc.offset = this_cfa_+(regs_off_+4)-new_cfa_; \
+ (FS)->retaddr_column = 0; \
+ goto SUCCESS; \
+ } while (0)
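For context: the fallback above fires only when the return address points at the kernel's two-instruction signal-return trampoline; the rest of the macro just locates the saved registers inside the old- or new-style sigframe. The pattern match, pulled out as a standalone sketch (illustrative only, not part of the patch):

    /* The magic constants are the SPARC instruction encodings named in
       the comments above.  */
    static int
    is_sigreturn_stub (const unsigned int *pc, int *oldstyle)
    {
      if (pc[1] != 0x91d02010)          /* ta 0x10 */
        return 0;
      if (pc[0] == 0x821020d8)          /* mov NR_sigreturn, %g1 */
        {
          *oldstyle = 1;
          return 1;
        }
      if (pc[0] == 0x82102065)          /* mov NR_rt_sigreturn, %g1 */
        {
          *oldstyle = 0;
          return 1;
        }
      return 0;
    }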
diff --git a/contrib/gcc/config/sparc/linux64.h b/contrib/gcc/config/sparc/linux64.h
index 31d8204..33c3f54 100644
--- a/contrib/gcc/config/sparc/linux64.h
+++ b/contrib/gcc/config/sparc/linux64.h
@@ -34,7 +34,7 @@ Boston, MA 02111-1307, USA. */
#undef TARGET_DEFAULT
#define TARGET_DEFAULT \
(MASK_V9 + MASK_PTR64 + MASK_64BIT /* + MASK_HARD_QUAD */ \
- + MASK_STACK_BIAS + MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU + MASK_LONG_DOUBLE_128)
+ + MASK_STACK_BIAS + MASK_APP_REGS + MASK_FPU + MASK_LONG_DOUBLE_128)
#endif
#undef ASM_CPU_DEFAULT_SPEC
@@ -58,8 +58,8 @@ Boston, MA 02111-1307, USA. */
#define STARTFILE_SPEC32 \
"%{!shared: \
- %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}\
- crti.o%s %{static:crtbeginT.o%s}\
+     %{pg:/usr/lib/gcrt1.o%s} %{!pg:%{p:/usr/lib/gcrt1.o%s} %{!p:/usr/lib/crt1.o%s}}}\
+ /usr/lib/crti.o%s %{static:crtbeginT.o%s}\
%{!static:%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}}"
#define STARTFILE_SPEC64 \
@@ -97,28 +97,33 @@ Boston, MA 02111-1307, USA. */
#undef ENDFILE_SPEC
#define ENDFILE_SPEC32 \
- "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
+ "%{!shared:crtend.o%s} %{shared:crtendS.o%s} /usr/lib/crtn.o%s"
#define ENDFILE_SPEC64 \
"%{!shared:crtend.o%s} %{shared:crtendS.o%s} /usr/lib64/crtn.o%s"
+#define ENDFILE_SPEC_COMMON \
+ "%{ffast-math|funsafe-math-optimizations:crtfastmath.o%s}"
+
#ifdef SPARC_BI_ARCH
#if DEFAULT_ARCH32_P
#define ENDFILE_SPEC "\
%{m32:" ENDFILE_SPEC32 "} \
%{m64:" ENDFILE_SPEC64 "} \
-%{!m32:%{!m64:" ENDFILE_SPEC32 "}}"
+%{!m32:%{!m64:" ENDFILE_SPEC32 "}} " \
+ENDFILE_SPEC_COMMON
#else
#define ENDFILE_SPEC "\
%{m32:" ENDFILE_SPEC32 "} \
%{m64:" ENDFILE_SPEC64 "} \
-%{!m32:%{!m64:" ENDFILE_SPEC64 "}}"
+%{!m32:%{!m64:" ENDFILE_SPEC64 "}} " \
+ENDFILE_SPEC_COMMON
#endif
#else
-#define ENDFILE_SPEC ENDFILE_SPEC64
+#define ENDFILE_SPEC ENDFILE_SPEC64 " " ENDFILE_SPEC_COMMON
#endif
@@ -144,8 +149,6 @@ Boston, MA 02111-1307, USA. */
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE 32
-#undef MAX_WCHAR_TYPE_SIZE
-
/* Define for support of TFmode long double and REAL_ARITHMETIC.
Sparc ABI says that long double is 4 words. */
#undef LONG_DOUBLE_TYPE_SIZE
@@ -164,7 +167,7 @@ Boston, MA 02111-1307, USA. */
#endif
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-D__ELF__ -Dunix -D_LONGLONG -D__sparc__ -Dlinux -Asystem=unix -Asystem=posix"
+#define CPP_PREDEFINES "-D__ELF__ -Dunix -D_LONGLONG -D__sparc__ -D__gnu_linux__ -Dlinux -Asystem=unix -Asystem=posix"
#undef CPP_SUBTARGET_SPEC
#define CPP_SUBTARGET_SPEC "\
@@ -246,6 +249,7 @@ Boston, MA 02111-1307, USA. */
%{mcypress:-mcpu=cypress} \
%{msparclite:-mcpu=sparclite} %{mf930:-mcpu=f930} %{mf934:-mcpu=f934} \
%{mv8:-mcpu=v8} %{msupersparc:-mcpu=supersparc} \
+%{m32:%{m64:%emay not use both -m32 and -m64}} \
%{m64:-mptr64 -mstack-bias -mlong-double-128 \
%{!mcpu*:%{!mcypress:%{!msparclite:%{!mf930:%{!mf934:%{!mv8:%{!msupersparc:-mcpu=ultrasparc}}}}}}} \
%{!mno-vis:%{!mcpu=v9:-mvis}}} \
@@ -256,6 +260,7 @@ Boston, MA 02111-1307, USA. */
%{mcypress:-mcpu=cypress} \
%{msparclite:-mcpu=sparclite} %{mf930:-mcpu=f930} %{mf934:-mcpu=f934} \
%{mv8:-mcpu=v8} %{msupersparc:-mcpu=supersparc} \
+%{m32:%{m64:%emay not use both -m32 and -m64}} \
%{m32:-mptr32 -mno-stack-bias %{!mlong-double-128:-mlong-double-64} \
%{!mcpu*:%{!mcypress:%{!msparclite:%{!mf930:%{!mf934:%{!mv8:%{!msupersparc:-mcpu=cypress}}}}}}}} \
%{!m32:%{!mcpu*:-mcpu=ultrasparc}} \
@@ -325,6 +330,9 @@ do { \
#undef COMMON_ASM_OP
#define COMMON_ASM_OP "\t.common\t"
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
/* This is how to output a definition of an internal numbered label where
PREFIX is the class of label and NUM is the number within the class. */
@@ -361,3 +369,128 @@ do { \
#define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
#endif
+/* Don't be different from other Linux platforms in this regard. */
+#define HANDLE_PRAGMA_PACK_PUSH_POP
+
+/* We use GNU ld so undefine this so that attribute((init_priority)) works. */
+#undef CTORS_SECTION_ASM_OP
+#undef DTORS_SECTION_ASM_OP
+
+/* Do code reading to identify a signal frame, and set the frame
+ state data appropriately. See unwind-dw2.c for the structs. */
+
+/* Handle multilib correctly. */
+#if defined(__arch64__)
+/* 64-bit Sparc version */
+#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS, SUCCESS) \
+ do { \
+ unsigned int *pc_ = (CONTEXT)->ra; \
+ long new_cfa_, i_; \
+ long regs_off_, fpu_save_off_; \
+ long this_cfa_, fpu_save_; \
+ \
+ if (pc_[0] != 0x82102065 /* mov NR_rt_sigreturn, %g1 */ \
+ || pc_[1] != 0x91d0206d) /* ta 0x6d */ \
+ break; \
+ regs_off_ = 192 + 128; \
+ fpu_save_off_ = regs_off_ + (16 * 8) + (3 * 8) + (2 * 4); \
+ this_cfa_ = (long) (CONTEXT)->cfa; \
+ new_cfa_ = *(long *)(((CONTEXT)->cfa) + (regs_off_ + (14 * 8))); \
+ new_cfa_ += 2047; /* Stack bias */ \
+ fpu_save_ = *(long *)((this_cfa_) + (fpu_save_off_)); \
+ (FS)->cfa_how = CFA_REG_OFFSET; \
+ (FS)->cfa_reg = 14; \
+ (FS)->cfa_offset = new_cfa_ - (long) (CONTEXT)->cfa; \
+ for (i_ = 1; i_ < 16; ++i_) \
+ { \
+ (FS)->regs.reg[i_].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i_].loc.offset = \
+ this_cfa_ + (regs_off_ + (i_ * 8)) - new_cfa_; \
+ } \
+ for (i_ = 0; i_ < 16; ++i_) \
+ { \
+ (FS)->regs.reg[i_ + 16].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i_ + 16].loc.offset = \
+ this_cfa_ + (i_ * 8) - new_cfa_; \
+ } \
+ if (fpu_save_) \
+ { \
+ for (i_ = 0; i_ < 64; ++i_) \
+ { \
+ if (i_ > 32 && (i_ & 0x1)) \
+ continue; \
+ (FS)->regs.reg[i_ + 32].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i_ + 32].loc.offset = \
+ (fpu_save_ + (i_ * 4)) - new_cfa_; \
+ } \
+ } \
+ /* Stick return address into %g0, same trick Alpha uses. */ \
+ (FS)->regs.reg[0].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[0].loc.offset = \
+ this_cfa_ + (regs_off_ + (16 * 8) + 8) - new_cfa_; \
+ (FS)->retaddr_column = 0; \
+ goto SUCCESS; \
+ } while (0)
+#else
+/* 32-bit Sparc version */
+#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS, SUCCESS) \
+ do { \
+ unsigned int *pc_ = (CONTEXT)->ra; \
+ int new_cfa_, i_, oldstyle_; \
+ int regs_off_, fpu_save_off_; \
+ int fpu_save_, this_cfa_; \
+ \
+ if (pc_[1] != 0x91d02010) /* ta 0x10 */ \
+ break; \
+ if (pc_[0] == 0x821020d8) /* mov NR_sigreturn, %g1 */ \
+ oldstyle_ = 1; \
+ else if (pc_[0] == 0x82102065) /* mov NR_rt_sigreturn, %g1 */ \
+ oldstyle_ = 0; \
+ else \
+ break; \
+ if (oldstyle_) \
+ { \
+ regs_off_ = 96; \
+ fpu_save_off_ = regs_off_ + (4 * 4) + (16 * 4); \
+ } \
+ else \
+ { \
+ regs_off_ = 96 + 128; \
+ fpu_save_off_ = regs_off_ + (4 * 4) + (16 * 4) + (2 * 4); \
+ } \
+ this_cfa_ = (int) (CONTEXT)->cfa; \
+ new_cfa_ = *(int *)(((CONTEXT)->cfa) + (regs_off_+(4*4)+(14 * 4))); \
+ fpu_save_ = *(int *)((this_cfa_) + (fpu_save_off_)); \
+ (FS)->cfa_how = CFA_REG_OFFSET; \
+ (FS)->cfa_reg = 14; \
+ (FS)->cfa_offset = new_cfa_ - (int) (CONTEXT)->cfa; \
+ for (i_ = 1; i_ < 16; ++i_) \
+ { \
+ if (i_ == 14) \
+ continue; \
+ (FS)->regs.reg[i_].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i_].loc.offset = \
+ this_cfa_ + (regs_off_+(4 * 4)+(i_ * 4)) - new_cfa_; \
+ } \
+ for (i_ = 0; i_ < 16; ++i_) \
+ { \
+ (FS)->regs.reg[i_ + 16].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i_ + 16].loc.offset = \
+ this_cfa_ + (i_ * 4) - new_cfa_; \
+ } \
+ if (fpu_save_) \
+ { \
+ for (i_ = 0; i_ < 32; ++i_) \
+ { \
+ (FS)->regs.reg[i_ + 32].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i_ + 32].loc.offset = \
+ (fpu_save_ + (i_ * 4)) - new_cfa_; \
+ } \
+ } \
+ /* Stick return address into %g0, same trick Alpha uses. */ \
+ (FS)->regs.reg[0].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[0].loc.offset = this_cfa_+(regs_off_+4)-new_cfa_; \
+ (FS)->retaddr_column = 0; \
+ goto SUCCESS; \
+ } while (0)
+#endif
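For context: the new %{m32:%{m64:%e...}} arms in both CC1_SPEC branches use the spec language's %e directive, which makes the driver report a hard error, so "gcc -m32 -m64 foo.c" is now rejected up front instead of expanding two contradictory sets of sub-options.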
diff --git a/contrib/gcc/config/sparc/lite.h b/contrib/gcc/config/sparc/lite.h
index f740443..95da781 100644
--- a/contrib/gcc/config/sparc/lite.h
+++ b/contrib/gcc/config/sparc/lite.h
@@ -28,7 +28,7 @@ Boston, MA 02111-1307, USA. */
/* Enable app-regs and epilogue options. Do not enable the fpu. */
#undef TARGET_DEFAULT
-#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE)
+#define TARGET_DEFAULT MASK_APP_REGS
/* US Software GOFAST library support. */
#undef INIT_SUBTARGET_OPTABS
diff --git a/contrib/gcc/config/sparc/litecoff.h b/contrib/gcc/config/sparc/litecoff.h
index dede792..ad0e1225 100644
--- a/contrib/gcc/config/sparc/litecoff.h
+++ b/contrib/gcc/config/sparc/litecoff.h
@@ -73,4 +73,4 @@ do { \
pointer is really %i7. */
#define DBX_REGISTER_NUMBER(REGNO) \
- (TARGET_FLAT && REGNO == FRAME_POINTER_REGNUM ? 31 : REGNO)
+ (TARGET_FLAT && (REGNO) == HARD_FRAME_POINTER_REGNUM ? 31 : REGNO)
diff --git a/contrib/gcc/config/sparc/liteelf.h b/contrib/gcc/config/sparc/liteelf.h
index d7c88b5..5c93c2d 100644
--- a/contrib/gcc/config/sparc/liteelf.h
+++ b/contrib/gcc/config/sparc/liteelf.h
@@ -38,7 +38,7 @@ Boston, MA 02111-1307, USA. */
/* Enable app-regs and epilogue options. Do not enable the fpu. */
#undef TARGET_DEFAULT
-#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE)
+#define TARGET_DEFAULT MASK_APP_REGS
/* US Software GOFAST library support. */
#undef INIT_SUBTARGET_OPTABS
diff --git a/contrib/gcc/config/sparc/netbsd-elf.h b/contrib/gcc/config/sparc/netbsd-elf.h
new file mode 100644
index 0000000..b4bbf71
--- /dev/null
+++ b/contrib/gcc/config/sparc/netbsd-elf.h
@@ -0,0 +1,344 @@
+/* Definitions of target machine for GNU compiler, for ELF on NetBSD/sparc
+ and NetBSD/sparc64.
+ Copyright (C) 2002 Free Software Foundation, Inc.
+ Contributed by Matthew Green (mrg@eterna.com.au).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Make sure these are undefined. */
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D__sparc__ -D__NetBSD__ -D__ELF__ \
+-Asystem=unix -Asystem=NetBSD"
+
+/* CPP defines used for 64 bit code. */
+#undef CPP_SUBTARGET_SPEC64
+#define CPP_SUBTARGET_SPEC64 \
+ "-D__sparc64__ -D__arch64__ -D__sparc_v9__ %{posix:-D_POSIX_SOURCE}"
+
+/* CPP defines used for 32 bit code. */
+#undef CPP_SUBTARGET_SPEC32
+#define CPP_SUBTARGET_SPEC32 "-D__sparc %{posix:-D_POSIX_SOURCE}"
+
+/* SIZE_TYPE and PTRDIFF_TYPE are wrong from sparc/sparc.h. */
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* This is the char to use for continuation (in case we need to turn
+ continuation back on). */
+#undef DBX_CONTIN_CHAR
+#define DBX_CONTIN_CHAR '?'
+
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(REGNO) \
+ (TARGET_FLAT && REGNO == HARD_FRAME_POINTER_REGNUM ? 31 : REGNO)
+
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+/* This is how to output a definition of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d:\n", PREFIX, NUM)
+
+/* This is how to output a reference to an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABELREF
+#define ASM_OUTPUT_INTERNAL_LABELREF(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf ((LABEL), "*.L%s%ld", (PREFIX), (long)(NUM))
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{fpic:-K PIC} %{fPIC:-K PIC} %{V} %{v:%{!V:-V}} \
+%{mlittle-endian:-EL} \
+%(asm_cpu) %(asm_arch) %(asm_relax)"
+
+#undef STDC_0_IN_SYSTEM_HEADERS
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (%s)", TARGET_NAME);
+
+/*
+ * Clean up after the generic SPARC ELF configuration.
+ */
+
+#undef TRANSFER_FROM_TRAMPOLINE
+#define TRANSFER_FROM_TRAMPOLINE
+
+/* FIXME: Aren't these supposed to be available for SPARC ELF? */
+#undef MULDI3_LIBCALL
+#undef DIVDI3_LIBCALL
+#undef UDIVDI3_LIBCALL
+#undef MODDI3_LIBCALL
+#undef UMODDI3_LIBCALL
+#undef INIT_SUBTARGET_OPTABS
+#define INIT_SUBTARGET_OPTABS
+
+/* Below here exists the merged NetBSD/sparc & NetBSD/sparc64 compiler
+ description, allowing one to build 32 bit or 64 bit applications
+ on either. We define the sparc & sparc64 versions of things,
+ occasionally a neutral version (should be the same as "netbsd-elf.h")
+ and then based on SPARC_BI_ARCH, DEFAULT_ARCH32_P, and TARGET_CPU_DEFAULT,
+ we choose the correct version. */
+
+/* We use the default NetBSD ELF STARTFILE_SPEC and ENDFILE_SPEC
+ definitions, even for the SPARC_BI_ARCH compiler, because NetBSD does
+   not have a default place to find these libraries. */
+
+/* Name the port(s). */
+#define TARGET_NAME64 "NetBSD/sparc64 ELF"
+#define TARGET_NAME32 "NetBSD/sparc ELF"
+
+/* TARGET_CPU_DEFAULT is set in Makefile.in. We test for 64-bit default
+ platform here. */
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_v9 \
+ || TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc
+/* A 64 bit v9 compiler with stack-bias,
+ in a Medium/Low code model environment. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+ (MASK_V9 + MASK_PTR64 + MASK_64BIT /* + MASK_HARD_QUAD */ \
+ + MASK_STACK_BIAS + MASK_APP_REGS + MASK_FPU + MASK_LONG_DOUBLE_128)
+
+#undef SPARC_DEFAULT_CMODEL
+#define SPARC_DEFAULT_CMODEL CM_MEDANY
+
+#endif
+
+/* CC1_SPEC for NetBSD/sparc. */
+#define CC1_SPEC32 \
+ "%{sun4:} %{target:} \
+ %{mcypress:-mcpu=cypress} \
+ %{msparclite:-mcpu=sparclite} %{mf930:-mcpu=f930} %{mf934:-mcpu=f934} \
+ %{mv8:-mcpu=v8} %{msupersparc:-mcpu=supersparc} \
+ %{m32:%{m64:%emay not use both -m32 and -m64}} \
+ %{m64: \
+ -mptr64 -mstack-bias -mno-v8plus -mlong-double-128 \
+ %{!mcpu*: \
+ %{!mcypress: \
+ %{!msparclite: \
+ %{!mf930: \
+ %{!mf934: \
+ %{!mv8*: \
+ %{!msupersparc:-mcpu=ultrasparc}}}}}}} \
+ %{!mno-vis:%{!mcpu=v9:-mvis}} \
+ %{p:-mcmodel=medlow} \
+ %{pg:-mcmodel=medlow}}"
+
+#define CC1_SPEC64 \
+ "%{sun4:} %{target:} \
+ %{mcypress:-mcpu=cypress} \
+ %{msparclite:-mcpu=sparclite} %{mf930:-mcpu=f930} %{mf934:-mcpu=f934} \
+ %{mv8:-mcpu=v8} %{msupersparc:-mcpu=supersparc} \
+ %{m32:%{m64:%emay not use both -m32 and -m64}} \
+ %{m32: \
+ -mptr32 -mno-stack-bias \
+ %{!mlong-double-128:-mlong-double-64} \
+ %{!mcpu*: \
+ %{!mcypress: \
+ %{!msparclite: \
+ %{!mf930: \
+ %{!mf934: \
+ %{!mv8*: \
+ %{!msupersparc:-mcpu=cypress}}}}}}}} \
+ %{!m32: \
+ %{p:-mcmodel=medlow} \
+ %{pg:-mcmodel=medlow}}"
+
+/* Make sure we use the right output format. Pick a default and then
+ make sure -m32/-m64 switch to the right one. */
+
+#define LINK_ARCH32_SPEC \
+ "%-m elf32_sparc \
+ %{assert*} %{R*} %{V} %{v:%{!V:-V}} \
+ %{shared:-shared} \
+ %{!shared: \
+ -dp \
+ %{!nostdlib:%{!r*:%{!e*:-e __start}}} \
+ %{!static: \
+ -dy %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /usr/libexec/ld.elf_so}} \
+ %{static:-static}}"
+
+#define LINK_ARCH64_SPEC \
+ "%-m elf64_sparc \
+ %{assert*} %{R*} %{V} %{v:%{!V:-V}} \
+ %{shared:-shared} \
+ %{!shared: \
+ -dp \
+ %{!nostdlib:%{!r*:%{!e*:-e __start}}} \
+ %{!static: \
+ -dy %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /usr/libexec/ld.elf_so}} \
+ %{static:-static}}"
+
+#define LINK_ARCH_SPEC "\
+%{m32:%(link_arch32)} \
+%{m64:%(link_arch64)} \
+%{!m32:%{!m64:%(link_arch_default)}} \
+"
+
+#if DEFAULT_ARCH32_P
+#define LINK_ARCH_DEFAULT_SPEC LINK_ARCH32_SPEC
+#else
+#define LINK_ARCH_DEFAULT_SPEC LINK_ARCH64_SPEC
+#endif
+
+/* What extra spec entries do we need? */
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "link_arch32", LINK_ARCH32_SPEC }, \
+ { "link_arch64", LINK_ARCH64_SPEC }, \
+ { "link_arch_default", LINK_ARCH_DEFAULT_SPEC }, \
+ { "link_arch", LINK_ARCH_SPEC }, \
+ { "cpp_subtarget_spec32", CPP_SUBTARGET_SPEC32 }, \
+ { "cpp_subtarget_spec64", CPP_SUBTARGET_SPEC64 },
+
+
+/* What extra switches do we need? */
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+ {"long-double-64", -MASK_LONG_DOUBLE_128, N_("Use 64 bit long doubles") }, \
+ {"long-double-128", MASK_LONG_DOUBLE_128, N_("Use 128 bit long doubles") },
+
+
+/* Build a compiler that supports -m32 and -m64? */
+
+#ifdef SPARC_BI_ARCH
+
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (TARGET_LONG_DOUBLE_128 ? 128 : 64)
+
+#undef MAX_LONG_DOUBLE_TYPE_SIZE
+#define MAX_LONG_DOUBLE_TYPE_SIZE 128
+
+#if defined(__arch64__) || defined(__LONG_DOUBLE_128__)
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 128
+#else
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+#endif
+
+#undef CC1_SPEC
+#if DEFAULT_ARCH32_P
+#define CC1_SPEC CC1_SPEC32
+#else
+#define CC1_SPEC CC1_SPEC64
+#endif
+
+#if DEFAULT_ARCH32_P
+#define MULTILIB_DEFAULTS { "m32" }
+#else
+#define MULTILIB_DEFAULTS { "m64" }
+#endif
+
+#undef CPP_SUBTARGET_SPEC
+#if DEFAULT_ARCH32_P
+#define CPP_SUBTARGET_SPEC \
+ "%{m64:%(cpp_subtarget_spec64)}%{!m64:%(cpp_subtarget_spec32)}"
+#else
+#define CPP_SUBTARGET_SPEC \
+ "%{!m32:%(cpp_subtarget_spec64)}%{m32:%(cpp_subtarget_spec32)}"
+#endif
+
+/* Restore this from sparc/sparc.h, netbsd.h changes it. */
+#undef CPP_SPEC
+#define CPP_SPEC "%(cpp_cpu) %(cpp_arch) %(cpp_endian) %(cpp_subtarget)"
+
+/* Name the port. */
+#undef TARGET_NAME
+#define TARGET_NAME (DEFAULT_ARCH32_P ? TARGET_NAME32 : TARGET_NAME64)
+
+#else /* SPARC_BI_ARCH */
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_v9 \
+ || TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc
+
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 128
+
+#undef MAX_LONG_DOUBLE_TYPE_SIZE
+#define MAX_LONG_DOUBLE_TYPE_SIZE 128
+
+#undef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 128
+
+#undef CC1_SPEC
+#define CC1_SPEC CC1_SPEC64
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC CPP_SUBTARGET_SPEC64
+
+#undef TARGET_NAME
+#define TARGET_NAME TARGET_NAME64
+
+#else /* TARGET_CPU_DEFAULT == TARGET_CPU_v9 \
+ || TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc */
+
+/* A 32-bit only compiler.  NetBSD doesn't support 128-bit `long double'
+   for 32-bit code, unlike Solaris. */
+
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+#undef MAX_LONG_DOUBLE_TYPE_SIZE
+#define MAX_LONG_DOUBLE_TYPE_SIZE 64
+
+#undef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC CPP_SUBTARGET_SPEC32
+
+#undef CC1_SPEC
+#define CC1_SPEC CC1_SPEC32
+
+#undef TARGET_NAME
+#define TARGET_NAME TARGET_NAME32
+
+#endif /* TARGET_CPU_DEFAULT == TARGET_CPU_v9 \
+ || TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc */
+
+#endif /* SPARC_BI_ARCH */
+
+/* We use GNU ld so undefine this so that attribute((init_priority)) works. */
+#undef CTORS_SECTION_ASM_OP
+#undef DTORS_SECTION_ASM_OP
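For context: under SPARC_BI_ARCH the compiler proper sizes long double per invocation (TARGET_LONG_DOUBLE_128 ? 128 : 64), but libgcc2 is built once per multilib, so its copy of the size must be fixed from the preprocessor's view of the target; that is why LIBGCC2_LONG_DOUBLE_TYPE_SIZE above is chosen with #if on __arch64__ and __LONG_DOUBLE_128__ rather than on a runtime flag.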
diff --git a/contrib/gcc/config/sparc/netbsd.h b/contrib/gcc/config/sparc/netbsd.h
index 4be5b6b..b7b4440 100644
--- a/contrib/gcc/config/sparc/netbsd.h
+++ b/contrib/gcc/config/sparc/netbsd.h
@@ -11,15 +11,6 @@
#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE "int"
-#undef WCHAR_TYPE
-#define WCHAR_TYPE "int"
-
-#undef WCHAR_UNSIGNED
-#define WCHAR_UNSIGNED 0
-
-#undef WCHAR_TYPE_SIZE
-#define WCHAR_TYPE_SIZE 32
-
/* This is BSD, so it wants DBX format. */
#define DBX_DEBUGGING_INFO
diff --git a/contrib/gcc/config/sparc/pbd.h b/contrib/gcc/config/sparc/pbd.h
index 27dfa96..9d267a5 100644
--- a/contrib/gcc/config/sparc/pbd.h
+++ b/contrib/gcc/config/sparc/pbd.h
@@ -111,6 +111,9 @@ Boston, MA 02111-1307, USA. */
* Internal labels are prefixed with a period.
*/
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
/* This is how to store into the string LABEL
the symbol_ref name of an internal numbered label where
PREFIX is the class of label and NUM is the number within the class.
diff --git a/contrib/gcc/config/sparc/rtems.h b/contrib/gcc/config/sparc/rtems.h
index 9db8049..e537f1c 100644
--- a/contrib/gcc/config/sparc/rtems.h
+++ b/contrib/gcc/config/sparc/rtems.h
@@ -1,5 +1,5 @@
/* Definitions for rtems targeting a SPARC using a.out.
- Copyright (C) 1996, 1997, 2000 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 2000, 2002 Free Software Foundation, Inc.
Contributed by Joel Sherrill (joel@OARcorp.com).
This file is part of GNU CC.
@@ -23,10 +23,5 @@ Boston, MA 02111-1307, USA. */
/* Specify predefined symbols in preprocessor. */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Dsparc -D__GCC_NEW_VARARGS__ -Drtems -D__rtems__ \
+#define CPP_PREDEFINES "-Dsparc -D__GCC_NEW_VARARGS__ -D__rtems__ \
-Asystem=rtems"
-
-/* Generate calls to memcpy, memcmp and memset. */
-#ifndef TARGET_MEM_FUNCTIONS
-#define TARGET_MEM_FUNCTIONS
-#endif
diff --git a/contrib/gcc/config/sparc/rtemself.h b/contrib/gcc/config/sparc/rtemself.h
index 108ff33..4f2cc66 100644
--- a/contrib/gcc/config/sparc/rtemself.h
+++ b/contrib/gcc/config/sparc/rtemself.h
@@ -1,5 +1,5 @@
/* Definitions for rtems targeting a SPARC using ELF.
- Copyright (C) 1996, 1997, 2000 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 2000, 2002 Free Software Foundation, Inc.
Contributed by Joel Sherrill (joel@OARcorp.com).
This file is part of GNU CC.
@@ -22,10 +22,5 @@ Boston, MA 02111-1307, USA. */
/* Specify predefined symbols in preprocessor. */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Dsparc -D__GCC_NEW_VARARGS__ -Drtems -D__rtems__ \
- -Asystem=rtems"
-
-/* Generate calls to memcpy, memcmp and memset. */
-#ifndef TARGET_MEM_FUNCTIONS
-#define TARGET_MEM_FUNCTIONS
-#endif
+#define CPP_PREDEFINES "-Dsparc -D__GCC_NEW_VARARGS__ -D__rtems__ \
+ -D__USE_INIT_FINI__ -Asystem=rtems"
diff --git a/contrib/gcc/config/sparc/sol2-64.h b/contrib/gcc/config/sparc/sol2-64.h
index 40db497..ccdc8f3 100644
--- a/contrib/gcc/config/sparc/sol2-64.h
+++ b/contrib/gcc/config/sparc/sol2-64.h
@@ -1,20 +1,7 @@
-/* Definitions of target machine for GNU compiler, for 64-bit SPARC
- running Solaris 2 using the GNU linker. */
+/* Definitions of target machine for GNU compiler, for bi-arch SPARC
+ running Solaris 2, defaulting to 64-bit code generation. */
-#ifdef LINKER_DOES_NOT_WORK_WITH_DWARF2
-#undef LINKER_DOES_NOT_WORK_WITH_DWARF2
-#endif
-
-#ifdef AS_SPARC64_FLAG
-
-#ifdef LINK_ARCH_SPEC
-#undef LINK_ARCH_SPEC
-#endif
-
-#define LINK_ARCH_SPEC "\
-%{m32:-m elf32_sparc %(link_arch32)} \
-%{m64:-m elf64_sparc %(link_arch64)} \
-%{!m32:%{!m64:%(link_arch_default)}} \
-"
-
-#endif
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+ (MASK_V9 + MASK_PTR64 + MASK_64BIT /* + MASK_HARD_QUAD */ + \
+ MASK_STACK_BIAS + MASK_FPU + MASK_LONG_DOUBLE_128)
diff --git a/contrib/gcc/config/sparc/sol2-bi.h b/contrib/gcc/config/sparc/sol2-bi.h
new file mode 100644
index 0000000..9828d63
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-bi.h
@@ -0,0 +1,279 @@
+/* Definitions of target machine for GNU compiler, for bi-arch SPARC
+ running Solaris 2 using the system assembler and linker. */
+
+/* The default code model. */
+#undef SPARC_DEFAULT_CMODEL
+#define SPARC_DEFAULT_CMODEL CM_MEDANY
+
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 128
+
+#define AS_SPARC64_FLAG "-xarch=v9"
+
+#undef ASM_CPU32_DEFAULT_SPEC
+#define ASM_CPU32_DEFAULT_SPEC ""
+#undef ASM_CPU64_DEFAULT_SPEC
+#define ASM_CPU64_DEFAULT_SPEC AS_SPARC64_FLAG
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_v9
+#undef CPP_CPU64_DEFAULT_SPEC
+#define CPP_CPU64_DEFAULT_SPEC ""
+#undef ASM_CPU32_DEFAULT_SPEC
+#define ASM_CPU32_DEFAULT_SPEC "-xarch=v8plus"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc
+#undef CPP_CPU64_DEFAULT_SPEC
+#define CPP_CPU64_DEFAULT_SPEC ""
+#undef ASM_CPU32_DEFAULT_SPEC
+#define ASM_CPU32_DEFAULT_SPEC "-xarch=v8plusa"
+#undef ASM_CPU64_DEFAULT_SPEC
+#define ASM_CPU64_DEFAULT_SPEC AS_SPARC64_FLAG "a"
+#endif
+
+/* The sun bundled assembler doesn't accept -Yd, (and neither does gas).
+ It's safe to pass -s always, even if -g is not used. */
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s \
+%{fpic:-K PIC} %{fPIC:-K PIC} \
+%(asm_cpu)\
+"
+
+#if DEFAULT_ARCH32_P
+#define DEF_ARCH32_SPEC(__str) "%{!m64:" __str "}"
+#define DEF_ARCH64_SPEC(__str) "%{m64:" __str "}"
+#else
+#define DEF_ARCH32_SPEC(__str) "%{m32:" __str "}"
+#define DEF_ARCH64_SPEC(__str) "%{!m32:" __str "}"
+#endif
+
+#undef CPP_CPU_SPEC
+#define CPP_CPU_SPEC "\
+%{mcypress:} \
+%{msparclite:-D__sparclite__} \
+%{mf930:-D__sparclite__} %{mf934:-D__sparclite__} \
+%{mv8:" DEF_ARCH32_SPEC("-D__sparcv8") "} \
+%{msupersparc:-D__supersparc__ " DEF_ARCH32_SPEC("-D__sparcv8") "} \
+%{mcpu=sparclet:-D__sparclet__} %{mcpu=tsc701:-D__sparclet__} \
+%{mcpu=sparclite:-D__sparclite__} \
+%{mcpu=f930:-D__sparclite__} %{mcpu=f934:-D__sparclite__} \
+%{mcpu=v8:" DEF_ARCH32_SPEC("-D__sparcv8") "} \
+%{mcpu=supersparc:-D__supersparc__ " DEF_ARCH32_SPEC("-D__sparcv8") "} \
+%{mcpu=v9:" DEF_ARCH32_SPEC("-D__sparcv8") "} \
+%{mcpu=ultrasparc:" DEF_ARCH32_SPEC("-D__sparcv8") "} \
+%{!mcpu*:%{!mcypress:%{!msparclite:%{!mf930:%{!mf934:%{!mv8:%{!msupersparc:%(cpp_cpu_default)}}}}}}} \
+"
+
+#undef ASM_CPU_SPEC
+#define ASM_CPU_SPEC "\
+%{mcpu=ultrasparc:" DEF_ARCH32_SPEC("-xarch=v8plusa") DEF_ARCH64_SPEC(AS_SPARC64_FLAG "a") "} \
+%{mcpu=v9:" DEF_ARCH32_SPEC("-xarch=v8plus") DEF_ARCH64_SPEC(AS_SPARC64_FLAG) "} \
+%{!mcpu=ultrasparc:%{!mcpu=v9:%{mcpu*:" DEF_ARCH32_SPEC("-xarch=v8") DEF_ARCH64_SPEC(AS_SPARC64_FLAG) "}}} \
+%{!mcpu*:%(asm_cpu_default)} \
+"
+
+#define STARTFILE_SPEC32 "\
+%{ansi:values-Xc.o%s} \
+%{!ansi: \
+ %{traditional:values-Xt.o%s} \
+ %{!traditional:values-Xa.o%s}}"
+
+#define STARTFILE_SPEC64 "\
+%{ansi:/usr/lib/sparcv9/values-Xc.o%s} \
+%{!ansi: \
+ %{traditional:/usr/lib/sparcv9/values-Xt.o%s} \
+ %{!traditional:/usr/lib/sparcv9/values-Xa.o%s}}"
+
+#if DEFAULT_ARCH32_P
+#define STARTFILE_ARCH_SPEC "\
+%{m32:" STARTFILE_SPEC32 "} \
+%{m64:" STARTFILE_SPEC64 "} \
+%{!m32:%{!m64:" STARTFILE_SPEC32 "}}"
+#else
+#define STARTFILE_ARCH_SPEC "\
+%{m32:" STARTFILE_SPEC32 "} \
+%{m64:" STARTFILE_SPEC64 "} \
+%{!m32:%{!m64:" STARTFILE_SPEC64 "}}"
+#endif
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared: \
+ %{!symbolic: \
+ %{p:mcrt1.o%s} \
+ %{!p: \
+ %{pg:gcrt1.o%s gmon.o%s} \
+ %{!pg:crt1.o%s}}}} \
+ crti.o%s " STARTFILE_ARCH_SPEC " \
+ crtbegin.o%s"
+
+#undef CPP_CPU_DEFAULT_SPEC
+#define CPP_CPU_DEFAULT_SPEC \
+(DEFAULT_ARCH32_P ? "\
+%{m64:" CPP_CPU64_DEFAULT_SPEC "} \
+%{!m64:" CPP_CPU32_DEFAULT_SPEC "} \
+" : "\
+%{m32:" CPP_CPU32_DEFAULT_SPEC "} \
+%{!m32:" CPP_CPU64_DEFAULT_SPEC "} \
+")
+
+#undef ASM_CPU_DEFAULT_SPEC
+#define ASM_CPU_DEFAULT_SPEC \
+(DEFAULT_ARCH32_P ? "\
+%{m64:" ASM_CPU64_DEFAULT_SPEC "} \
+%{!m64:" ASM_CPU32_DEFAULT_SPEC "} \
+" : "\
+%{m32:" ASM_CPU32_DEFAULT_SPEC "} \
+%{!m32:" ASM_CPU64_DEFAULT_SPEC "} \
+")
+
+/* wchar_t is called differently in <wchar.h> for 32 and 64-bit
+ compilations. This is called for by SCD 2.4.1, p. 6-83, Figure 6-65
+ (32-bit) and p. 6P-10, Figure 6.38 (64-bit). */
+#define NO_BUILTIN_WCHAR_TYPE
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE (TARGET_ARCH64 ? "int" : "long int")
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* Same for wint_t. See SCD 2.4.1, p. 6-83, Figure 6-66 (32-bit). There's
+ no corresponding 64-bit definition, but this is what Solaris 8
+ <iso/wchar_iso.h> uses. */
+#define NO_BUILTIN_WINT_TYPE
+
+#undef WINT_TYPE
+#define WINT_TYPE (TARGET_ARCH64 ? "int" : "long int")
+
+#undef WINT_TYPE_SIZE
+#define WINT_TYPE_SIZE 32
+
+#undef CPP_ARCH32_SPEC
+#define CPP_ARCH32_SPEC "-D__SIZE_TYPE__=unsigned\\ int -D__PTRDIFF_TYPE__=int \
+-D__WCHAR_TYPE__=long\\ int -D__WINT_TYPE__=long\\ int \
+-D__GCC_NEW_VARARGS__ -Acpu=sparc -Amachine=sparc"
+#undef CPP_ARCH64_SPEC
+#define CPP_ARCH64_SPEC "-D__SIZE_TYPE__=long\\ unsigned\\ int -D__PTRDIFF_TYPE__=long\\ int \
+-D__WCHAR_TYPE__=int -D__WINT_TYPE__=int \
+-D__arch64__ -Acpu=sparc64 -Amachine=sparcv9 -D__sparcv9"
+
+#undef CPP_ARCH_SPEC
+#define CPP_ARCH_SPEC "\
+%{m32:%(cpp_arch32)} \
+%{m64:%(cpp_arch64)} \
+%{!m32:%{!m64:%(cpp_arch_default)}} \
+"
+
+#undef ASM_ARCH_SPEC
+#define ASM_ARCH_SPEC ""
+
+#undef ASM_ARCH32_SPEC
+#define ASM_ARCH32_SPEC ""
+
+#undef ASM_ARCH64_SPEC
+#define ASM_ARCH64_SPEC ""
+
+#undef ASM_ARCH_DEFAULT_SPEC
+#define ASM_ARCH_DEFAULT_SPEC ""
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "link_arch32", LINK_ARCH32_SPEC }, \
+ { "link_arch64", LINK_ARCH64_SPEC }, \
+ { "link_arch_default", LINK_ARCH_DEFAULT_SPEC }, \
+ { "link_arch", LINK_ARCH_SPEC },
+
+/* This should be the same as in svr4.h, except with -R added. */
+#define LINK_ARCH32_SPEC \
+ "%{G:-G} \
+ %{YP,*} \
+ %{R*} \
+ %{compat-bsd: \
+ %{!YP,*:%{p:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{pg:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:%{!pg:-Y P,/usr/ucblib:/usr/ccs/lib:/usr/lib}}} \
+ -R /usr/ucblib} \
+ %{!compat-bsd: \
+ %{!YP,*:%{p:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{pg:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:%{!pg:-Y P,/usr/ccs/lib:/usr/lib}}}}"
+
+#define LINK_ARCH64_SPEC \
+ "%{mcmodel=medlow:-M /usr/lib/ld/sparcv9/map.below4G} \
+ %{G:-G} \
+ %{YP,*} \
+ %{R*} \
+ %{compat-bsd: \
+ %{!YP,*:%{p:-Y P,/usr/ucblib/sparcv9:/usr/lib/libp/sparcv9:/usr/lib/sparcv9} \
+ %{pg:-Y P,/usr/ucblib/sparcv9:/usr/lib/libp/sparcv9:/usr/lib/sparcv9} \
+ %{!p:%{!pg:-Y P,/usr/ucblib/sparcv9:/usr/lib/sparcv9}}} \
+ -R /usr/ucblib} \
+ %{!compat-bsd: \
+ %{!YP,*:%{p:-Y P,/usr/lib/libp/sparcv9:/usr/lib/sparcv9} \
+ %{pg:-Y P,/usr/lib/libp/sparcv9:/usr/lib/sparcv9} \
+ %{!p:%{!pg:-Y P,/usr/lib/sparcv9}}}}"
+
+#define LINK_ARCH_SPEC "\
+%{m32:%(link_arch32)} \
+%{m64:%(link_arch64)} \
+%{!m32:%{!m64:%(link_arch_default)}} \
+"
+
+#define LINK_ARCH_DEFAULT_SPEC \
+(DEFAULT_ARCH32_P ? LINK_ARCH32_SPEC : LINK_ARCH64_SPEC)
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{h*} %{v:-V} \
+ %{b} %{Wl,*:%*} \
+ %{static:-dn -Bstatic} \
+ %{shared:-G -dy %{!mimpure-text:-z text}} \
+ %{symbolic:-Bsymbolic -G -dy -z text} \
+ %(link_arch) \
+ %{Qy:} %{!Qn:-Qy}"
+
+#undef CC1_SPEC
+#if DEFAULT_ARCH32_P
+#define CC1_SPEC "\
+%{sun4:} %{target:} \
+%{mcypress:-mcpu=cypress} \
+%{msparclite:-mcpu=sparclite} %{mf930:-mcpu=f930} %{mf934:-mcpu=f934} \
+%{mv8:-mcpu=v8} %{msupersparc:-mcpu=supersparc} \
+%{m32:%{m64:%emay not use both -m32 and -m64}} \
+%{m64:-mptr64 -mstack-bias -mno-v8plus \
+ %{!mcpu*:%{!mcypress:%{!msparclite:%{!mf930:%{!mf934:%{!mv8*:%{!msupersparc:-mcpu=v9}}}}}}}} \
+"
+#else
+#define CC1_SPEC "\
+%{sun4:} %{target:} \
+%{mcypress:-mcpu=cypress} \
+%{msparclite:-mcpu=sparclite} %{mf930:-mcpu=f930} %{mf934:-mcpu=f934} \
+%{mv8:-mcpu=v8} %{msupersparc:-mcpu=supersparc} \
+%{m32:%{m64:%emay not use both -m32 and -m64}} \
+%{m32:-mptr32 -mno-stack-bias \
+ %{!mcpu*:%{!mcypress:%{!msparclite:%{!mf930:%{!mf934:%{!mv8*:%{!msupersparc:-mcpu=cypress}}}}}}}} \
+%{mv8plus:-m32 -mptr32 -mno-stack-bias \
+ %{!mcpu*:%{!mcypress:%{!msparclite:%{!mf930:%{!mf934:%{!mv8:%{!msupersparc:-mcpu=v9}}}}}}}} \
+"
+#endif
+
+#if DEFAULT_ARCH32_P
+#define MULTILIB_DEFAULTS { "m32" }
+#else
+#define MULTILIB_DEFAULTS { "m64" }
+#endif
+
+/* We use stabs-in-elf in 32-bit mode, because that is what the native
+   toolchain uses. But gdb can't handle truncated 32-bit stabs, so we
+ use dwarf2 in 64-bit mode. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE (TARGET_ARCH32 ? DBX_DEBUG : DWARF2_DEBUG)
+
+/* We can't use the above definition for the purposes of specs. */
+#if defined(HAVE_AS_GDWARF2_DEBUG_FLAG) && defined(HAVE_AS_GSTABS_DEBUG_FLAG)
+# if DEFAULT_ARCH32_P
+# define ASM_DEBUG_SPEC "%{gdwarf-2*:--gdwarf2}%{!gdwarf-2*:%{g*:--gstabs}}"
+# else
+# define ASM_DEBUG_SPEC "%{gstabs*:--gstabs}%{!gstabs*:%{g*:--gdwarf2}}"
+# endif
+#endif
diff --git a/contrib/gcc/config/sparc/sol2-gas-bi.h b/contrib/gcc/config/sparc/sol2-gas-bi.h
new file mode 100644
index 0000000..0b6cb61
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-gas-bi.h
@@ -0,0 +1,5 @@
+/* Definitions of target machine for GNU compiler, for bi-arch SPARC
+ running Solaris 2 using the GNU assembler. */
+
+#undef AS_SPARC64_FLAG
+#define AS_SPARC64_FLAG "-64 -Av9"
diff --git a/contrib/gcc/config/sparc/sol2-gld-bi.h b/contrib/gcc/config/sparc/sol2-gld-bi.h
new file mode 100644
index 0000000..81a1ff2
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-gld-bi.h
@@ -0,0 +1,9 @@
+/* Definitions of target machine for GNU compiler, for bi-arch SPARC
+ running Solaris 2 using the GNU linker. */
+
+#undef LINK_ARCH_SPEC
+#define LINK_ARCH_SPEC "\
+%{m32:-m elf32_sparc %(link_arch32)} \
+%{m64:-m elf64_sparc %(link_arch64)} \
+%{!m32:%{!m64:%(link_arch_default)}} \
+"
diff --git a/contrib/gcc/config/sparc/sol2-gld.h b/contrib/gcc/config/sparc/sol2-gld.h
new file mode 100644
index 0000000..76e034e
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-gld.h
@@ -0,0 +1,6 @@
+/* Definitions of target machine for GNU compiler, for SPARC running Solaris 2
+ using the GNU linker. */
+
+/* Undefine this so that attribute((init_priority)) works. */
+#undef CTORS_SECTION_ASM_OP
+#undef DTORS_SECTION_ASM_OP
diff --git a/contrib/gcc/config/sparc/sol2.h b/contrib/gcc/config/sparc/sol2.h
index 46e80c1..67e064d 100644
--- a/contrib/gcc/config/sparc/sol2.h
+++ b/contrib/gcc/config/sparc/sol2.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler, for SPARC running Solaris 2
- Copyright 1992, 1995, 1996, 1997, 1998, 1999, 2000,
- 2001 Free Software Foundation, Inc.
+ Copyright 1992, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002
+ Free Software Foundation, Inc.
Contributed by Ron Guilmette (rfg@netcom.com).
Additional changes by David V. Henkel-Wallace (gumby@cygnus.com).
@@ -23,17 +23,26 @@ Boston, MA 02111-1307, USA. */
/* Supposedly the same as vanilla sparc svr4, except for the stuff below: */
+/* Solaris 2 (at least as of 2.5.1) uses a 32-bit wchar_t. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
/* Solaris 2 uses a wint_t different from the default. This is required
by the SCD 2.4.1, p. 6-83, Figure 6-66. */
#undef WINT_TYPE
#define WINT_TYPE "long int"
#undef WINT_TYPE_SIZE
-#define WINT_TYPE_SIZE BITS_PER_WORD
+#define WINT_TYPE_SIZE 32
+
+#define HANDLE_PRAGMA_REDEFINE_EXTNAME 1
#undef CPP_PREDEFINES
#define CPP_PREDEFINES \
-"-Dsparc -Dsun -Dunix -D__svr4__ -D__SVR4 \
+"-Dsparc -Dsun -Dunix -D__svr4__ -D__SVR4 -D__PRAGMA_REDEFINE_EXTNAME \
-Asystem=unix -Asystem=svr4"
#undef CPP_SUBTARGET_SPEC
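With HANDLE_PRAGMA_REDEFINE_EXTNAME defined and __PRAGMA_REDEFINE_EXTNAME
predefined, Solaris-style headers can remap an external name without a
wrapper function. A minimal sketch of the usage this enables (names
illustrative):

    #pragma redefine_extname open open64
    extern int open (const char *, int, ...);
    /* References to open () now assemble as references to open64.  */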
@@ -80,12 +89,12 @@ Boston, MA 02111-1307, USA. */
/* However it appears that Solaris 2.0 uses the same reg numbering as
the old BSD-style system did. */
-#undef DBX_REGISTER_NUMBER
/* Same as sparc.h */
+#undef DBX_REGISTER_NUMBER
#define DBX_REGISTER_NUMBER(REGNO) \
- (TARGET_FLAT && REGNO == FRAME_POINTER_REGNUM ? 31 : REGNO)
+ (TARGET_FLAT && (REGNO) == HARD_FRAME_POINTER_REGNUM ? 31 : REGNO)
-/* We use stabs-in-elf for debugging, because that is what the native
+/* We use stabs-in-elf by default, because that is what the native
toolchain uses. */
#undef PREFERRED_DEBUGGING_TYPE
#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
@@ -95,6 +104,9 @@ Boston, MA 02111-1307, USA. */
#define ASM_OUTPUT_SKIP(FILE,SIZE) \
fprintf (FILE, "\t.skip %u\n", (SIZE))
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
/* This is how to output a definition of an internal numbered label where
PREFIX is the class of label and NUM is the number within the class. */
@@ -150,7 +162,9 @@ Boston, MA 02111-1307, USA. */
%{p|pg:-ldl} -lc}}"
#undef ENDFILE_SPEC
-#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+#define ENDFILE_SPEC \
+ "%{ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
+ crtend.o%s crtn.o%s"
/* This should be the same as in svr4.h, except with -R added. */
#undef LINK_SPEC
@@ -227,7 +241,7 @@ Boston, MA 02111-1307, USA. */
/* Solaris allows 64 bit out and global registers in 32 bit mode.
sparc_override_options will disable V8+ if not generating V9 code. */
#undef TARGET_DEFAULT
-#define TARGET_DEFAULT (MASK_EPILOGUE + MASK_FPU + MASK_V8PLUS + MASK_LONG_DOUBLE_128)
+#define TARGET_DEFAULT (MASK_FPU + MASK_V8PLUS + MASK_LONG_DOUBLE_128)
/*
* Attempt to turn on access permissions for the stack.
@@ -243,7 +257,9 @@ Boston, MA 02111-1307, USA. */
/* This declares mprotect (used in TRANSFER_FROM_TRAMPOLINE) for
libgcc2.c. */
-#ifdef L_trampoline
+/* We don't want to include this because sys/mman.h is not present on
+ some non-Solaris configurations that use sol2.h. */
+#if 0 /* def L_trampoline */
#include <sys/mman.h>
#endif
diff --git a/contrib/gcc/config/sparc/sol27-sld.h b/contrib/gcc/config/sparc/sol27-sld.h
new file mode 100644
index 0000000..fb47b61
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol27-sld.h
@@ -0,0 +1,8 @@
+/* Up through Solaris 2.7, the system linker does not work with DWARF
+ or DWARF2, since it does not have working support for relocations
+ to unaligned data. */
+
+#undef DWARF_DEBUGGING_INFO
+#undef DWARF2_DEBUGGING_INFO
+#undef PREFERRED_DEBUGGING_TYPE
+#undef ASM_DEBUG_SPEC
diff --git a/contrib/gcc/config/sparc/sp64-aout.h b/contrib/gcc/config/sparc/sp64-aout.h
index 669adc3..1af9dea 100644
--- a/contrib/gcc/config/sparc/sp64-aout.h
+++ b/contrib/gcc/config/sparc/sp64-aout.h
@@ -26,7 +26,7 @@ Boston, MA 02111-1307, USA. */
#undef TARGET_DEFAULT
#define TARGET_DEFAULT \
(MASK_V9 + MASK_PTR64 + MASK_64BIT + MASK_HARD_QUAD \
- + MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU + MASK_STACK_BIAS)
+ + MASK_APP_REGS + MASK_FPU + MASK_STACK_BIAS)
/* The only code model supported is Medium/Low. */
#undef SPARC_DEFAULT_CMODEL
diff --git a/contrib/gcc/config/sparc/sp64-elf.h b/contrib/gcc/config/sparc/sp64-elf.h
index 3bd2a53..caf944b 100644
--- a/contrib/gcc/config/sparc/sp64-elf.h
+++ b/contrib/gcc/config/sparc/sp64-elf.h
@@ -33,7 +33,7 @@ Boston, MA 02111-1307, USA. */
#undef TARGET_DEFAULT
#define TARGET_DEFAULT \
(MASK_V9 + MASK_PTR64 + MASK_64BIT + MASK_HARD_QUAD \
- + MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU + MASK_STACK_BIAS + MASK_LONG_DOUBLE_128)
+ + MASK_APP_REGS + MASK_FPU + MASK_STACK_BIAS + MASK_LONG_DOUBLE_128)
#undef SPARC_DEFAULT_CMODEL
#define SPARC_DEFAULT_CMODEL CM_EMBMEDANY
@@ -71,7 +71,9 @@ crtbegin.o%s \
"
#undef ENDFILE_SPEC
-#define ENDFILE_SPEC "crtend.o%s"
+#define ENDFILE_SPEC \
+ "%{ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
+ crtend.o%s"
/* Use the default (for now). */
#undef LIB_SPEC
diff --git a/contrib/gcc/config/sparc/sp86x-aout.h b/contrib/gcc/config/sparc/sp86x-aout.h
index ed31925..74607d3 100644
--- a/contrib/gcc/config/sparc/sp86x-aout.h
+++ b/contrib/gcc/config/sparc/sp86x-aout.h
@@ -28,7 +28,7 @@ Boston, MA 02111-1307, USA. */
/* Enable app-regs and epilogue options. Do not enable the fpu. */
#undef TARGET_DEFAULT
-#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE)
+#define TARGET_DEFAULT MASK_APP_REGS
#undef ASM_SPEC
#define ASM_SPEC "%{v:-v} %{mlittle-endian-data:--little-endian-data} %(asm_cpu)"
diff --git a/contrib/gcc/config/sparc/sp86x-elf.h b/contrib/gcc/config/sparc/sp86x-elf.h
index 40ecd55..42239a9 100644
--- a/contrib/gcc/config/sparc/sp86x-elf.h
+++ b/contrib/gcc/config/sparc/sp86x-elf.h
@@ -38,7 +38,7 @@ Boston, MA 02111-1307, USA. */
/* Enable app-regs and epilogue options. Do not enable the fpu. */
#undef TARGET_DEFAULT
-#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE)
+#define TARGET_DEFAULT MASK_APP_REGS
#undef ASM_SPEC
#define ASM_SPEC "%{v:-V} %{mlittle-endian-data:--little-endian-data} %(asm_cpu)"
diff --git a/contrib/gcc/config/sparc/sparc-protos.h b/contrib/gcc/config/sparc/sparc-protos.h
index c0f3edc..fd372de 100644
--- a/contrib/gcc/config/sparc/sparc-protos.h
+++ b/contrib/gcc/config/sparc/sparc-protos.h
@@ -24,6 +24,8 @@ Boston, MA 02111-1307, USA. */
#ifndef __SPARC_PROTOS_H__
#define __SPARC_PROTOS_H__
+extern bool sparc_emitting_epilogue;
+
#ifdef TREE_CODE
extern struct rtx_def *function_value PARAMS ((tree, enum machine_mode, int));
extern void function_arg_advance PARAMS ((CUMULATIVE_ARGS *,
@@ -57,10 +59,7 @@ extern int check_pic PARAMS ((int));
extern int short_branch PARAMS ((int, int));
extern int sparc_flat_epilogue_delay_slots PARAMS ((void));
extern unsigned long sparc_flat_compute_frame_size PARAMS ((int));
-extern void sparc_function_profiler PARAMS ((FILE *, int));
-extern void sparc_function_block_profiler PARAMS ((FILE *, int));
-extern void sparc_block_profiler PARAMS ((FILE *, int));
-extern void sparc_function_block_profiler_exit PARAMS ((FILE *));
+extern void sparc_profile_hook PARAMS ((int));
extern void sparc_override_options PARAMS ((void));
extern int leaf_return_peephole_ok PARAMS ((void));
extern void sparc_output_scratch_registers PARAMS ((FILE *));
@@ -74,6 +73,10 @@ extern enum machine_mode select_cc_mode PARAMS ((enum rtx_code, rtx, rtx));
/* Define the function that build the compare insn for scc and bcc. */
extern rtx gen_compare_reg PARAMS ((enum rtx_code code, rtx, rtx));
extern void sparc_emit_float_lib_cmp PARAMS ((rtx, rtx, enum rtx_code));
+extern void sparc_emit_floatunsdi PARAMS ((rtx [2]));
+extern void emit_tfmode_binop PARAMS ((enum rtx_code, rtx *));
+extern void emit_tfmode_unop PARAMS ((enum rtx_code, rtx *));
+extern void emit_tfmode_cvt PARAMS ((enum rtx_code, rtx *));
/* This function handles all v9 scc insns */
extern int gen_v9_scc PARAMS ((enum rtx_code, rtx *));
extern void sparc_initialize_trampoline PARAMS ((rtx, rtx, rtx));
@@ -85,10 +88,10 @@ extern void sparc_emit_set_const64 PARAMS ((rtx, rtx));
extern void sparc_emit_set_symbolic_const64 PARAMS ((rtx, rtx, rtx));
extern int sparc_splitdi_legitimate PARAMS ((rtx, rtx));
extern int sparc_absnegfloat_split_legitimate PARAMS ((rtx, rtx));
-extern char *output_cbranch PARAMS ((rtx, int, int, int, int, rtx));
-extern const char *output_return PARAMS ((rtx *));
+extern char *output_cbranch PARAMS ((rtx, rtx, int, int, int, int, rtx));
extern const char *output_sibcall PARAMS ((rtx, rtx));
-extern char *output_v9branch PARAMS ((rtx, int, int, int, int, int, rtx));
+extern char *output_v9branch PARAMS ((rtx, rtx, int, int, int, int, int,
+ rtx));
extern void emit_v9_brxx_insn PARAMS ((enum rtx_code, rtx, rtx));
extern void print_operand PARAMS ((FILE *, rtx, int));
extern int mems_ok_for_ldd_peep PARAMS ((rtx, rtx, rtx));
@@ -97,6 +100,7 @@ extern int arith_4096_operand PARAMS ((rtx, enum machine_mode));
extern int zero_operand PARAMS ((rtx, enum machine_mode));
extern int fp_zero_operand PARAMS ((rtx, enum machine_mode));
extern int reg_or_0_operand PARAMS ((rtx, enum machine_mode));
+extern int empty_delay_slot PARAMS ((rtx));
extern int eligible_for_epilogue_delay PARAMS ((rtx, int));
extern int eligible_for_return_delay PARAMS ((rtx));
extern int eligible_for_sibcall_delay PARAMS ((rtx));
@@ -115,8 +119,8 @@ extern char *sparc_v8plus_shift PARAMS ((rtx *, rtx, const char *));
/* Function used for V8+ code generation. Returns 1 if the high
32 bits of REG are 0 before INSN. */
extern int sparc_check_64 PARAMS ((rtx, rtx));
-extern int sparc_return_peephole_ok PARAMS ((rtx, rtx));
extern rtx gen_df_reg PARAMS ((rtx, int));
+extern int sparc_extra_constraint_check PARAMS ((rtx, int, int));
#endif /* RTX_CODE */
#endif /* __SPARC_PROTOS_H__ */
diff --git a/contrib/gcc/config/sparc/sparc.c b/contrib/gcc/config/sparc/sparc.c
index 8cc4449..28bbcec 100644
--- a/contrib/gcc/config/sparc/sparc.c
+++ b/contrib/gcc/config/sparc/sparc.c
@@ -68,26 +68,24 @@ Boston, MA 02111-1307, USA. */
static int apparent_fsize;
static int actual_fsize;
-/* Number of live general or floating point registers needed to be saved
- (as 4-byte quantities). This is only done if TARGET_EPILOGUE. */
+/* Number of live general or floating point registers that need to be
+   saved (as 4-byte quantities). */
static int num_gfregs;
/* Save the operands last given to a compare for use when we
generate a scc or bcc insn. */
-
rtx sparc_compare_op0, sparc_compare_op1;
-/* We may need an epilogue if we spill too many registers.
- If this is non-zero, then we branch here for the epilogue. */
-static rtx leaf_label;
+/* Coordinate with the md file with respect to the special insns
+   created by sparc_nonflat_function_epilogue. */
+bool sparc_emitting_epilogue;
#ifdef LEAF_REGISTERS
-/* Vector to say how input registers are mapped to output
- registers. FRAME_POINTER_REGNUM cannot be remapped by
- this function to eliminate it. You must use -fomit-frame-pointer
- to get that. */
-const char leaf_reg_remap[] =
+/* Vector to say how input registers are mapped to output registers.
+ HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
+ eliminate it. You must use -fomit-frame-pointer to get that. */
+char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
-1, -1, -1, -1, -1, -1, 14, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
@@ -180,6 +178,12 @@ static int sparc_issue_rate PARAMS ((void));
static int sparc_variable_issue PARAMS ((FILE *, int, rtx, int));
static void sparc_sched_init PARAMS ((FILE *, int, int));
static int sparc_sched_reorder PARAMS ((FILE *, int, rtx *, int *, int));
+
+static void emit_soft_tfmode_libcall PARAMS ((const char *, int, rtx *));
+static void emit_soft_tfmode_binop PARAMS ((enum rtx_code, rtx *));
+static void emit_soft_tfmode_unop PARAMS ((enum rtx_code, rtx *));
+static void emit_soft_tfmode_cvt PARAMS ((enum rtx_code, rtx *));
+static void emit_hard_tfmode_operation PARAMS ((enum rtx_code, rtx *));
/* Option handling. */
@@ -433,12 +437,6 @@ sparc_override_options ()
/* Do various machine dependent initializations. */
sparc_init_modes ();
- if ((profile_flag)
- && sparc_cmodel != CM_32 && sparc_cmodel != CM_MEDLOW)
- {
- error ("profiling does not support code models other than medlow");
- }
-
/* Register global variables with the garbage collector. */
sparc_add_gc_roots ();
}
@@ -492,6 +490,20 @@ fp_zero_operand (op, mode)
return op == CONST0_RTX (mode);
}
+/* Nonzero if OP is a register operand in a floating point register. */
+
+int
+fp_register_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (! register_operand (op, mode))
+ return 0;
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ return GET_CODE (op) == REG && SPARC_FP_REG_P (REGNO (op));
+}
+
/* Nonzero if OP is a floating point constant which can
be loaded into an integer register using a single
sethi instruction. */
@@ -609,6 +621,27 @@ fcc_reg_operand (op, mode)
#endif
}
+/* Nonzero if OP is the floating point condition code register fcc0. */
+
+int
+fcc0_reg_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ /* This can happen when recog is called from combine. Op may be a MEM.
+ Fail instead of calling abort in this case. */
+ if (GET_CODE (op) != REG)
+ return 0;
+
+ if (mode != VOIDmode && mode != GET_MODE (op))
+ return 0;
+ if (mode == VOIDmode
+ && (GET_MODE (op) != CCFPmode && GET_MODE (op) != CCFPEmode))
+ return 0;
+
+ return REGNO (op) == SPARC_FCC_REG;
+}
+
/* Nonzero if OP is an integer or floating point condition code register. */
int
@@ -879,12 +912,35 @@ noov_compare_op (op, mode)
if (GET_RTX_CLASS (code) != '<')
return 0;
- if (GET_MODE (XEXP (op, 0)) == CC_NOOVmode)
+ if (GET_MODE (XEXP (op, 0)) == CC_NOOVmode
+ || GET_MODE (XEXP (op, 0)) == CCX_NOOVmode)
/* These are the only branches which work with CC_NOOVmode. */
return (code == EQ || code == NE || code == GE || code == LT);
return 1;
}
+/* Return 1 if this is a 64-bit comparison operator. This allows the use of
+ MATCH_OPERATOR to recognize all the branch insns. */
+
+int
+noov_compare64_op (op, mode)
+ register rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (! TARGET_V9)
+ return 0;
+
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ if (GET_MODE (XEXP (op, 0)) == CCX_NOOVmode)
+ /* These are the only branches which work with CCX_NOOVmode. */
+ return (code == EQ || code == NE || code == GE || code == LT);
+ return (GET_MODE (XEXP (op, 0)) == CCXmode);
+}
+
/* Nonzero if OP is a comparison operator suitable for use in v9
conditional move or branch on register contents instructions. */
@@ -951,13 +1007,11 @@ arith_operand (op, mode)
rtx op;
enum machine_mode mode;
{
- int val;
if (register_operand (op, mode))
return 1;
if (GET_CODE (op) != CONST_INT)
return 0;
- val = INTVAL (op) & 0xffffffff;
- return SPARC_SIMM13_P (val);
+ return SMALL_INT32 (op);
}
/* Return true if OP is a constant 4096 */
@@ -967,11 +1021,10 @@ arith_4096_operand (op, mode)
rtx op;
enum machine_mode mode ATTRIBUTE_UNUSED;
{
- int val;
if (GET_CODE (op) != CONST_INT)
return 0;
- val = INTVAL (op) & 0xffffffff;
- return val == 4096;
+ else
+ return INTVAL (op) == 4096;
}
/* Return true if OP is suitable as second operand for add/sub */
@@ -999,7 +1052,7 @@ const64_operand (op, mode)
&& SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))
&& (CONST_DOUBLE_HIGH (op) ==
((CONST_DOUBLE_LOW (op) & 0x80000000) != 0 ?
- (HOST_WIDE_INT)0xffffffff : 0)))
+ (HOST_WIDE_INT)-1 : 0)))
#endif
);
}
@@ -1008,21 +1061,15 @@ const64_operand (op, mode)
int
const64_high_operand (op, mode)
rtx op;
- enum machine_mode mode ATTRIBUTE_UNUSED;
+ enum machine_mode mode;
{
return ((GET_CODE (op) == CONST_INT
- && (INTVAL (op) & 0xfffffc00) != 0
- && SPARC_SETHI_P (INTVAL (op))
-#if HOST_BITS_PER_WIDE_INT != 64
- /* Must be positive on non-64bit host else the
- optimizer is fooled into thinking that sethi
- sign extends, even though it does not. */
- && INTVAL (op) >= 0
-#endif
+ && (INTVAL (op) & ~(HOST_WIDE_INT)0x3ff) != 0
+ && SPARC_SETHI_P (INTVAL (op) & GET_MODE_MASK (mode))
)
|| (GET_CODE (op) == CONST_DOUBLE
&& CONST_DOUBLE_HIGH (op) == 0
- && (CONST_DOUBLE_LOW (op) & 0xfffffc00) != 0
+ && (CONST_DOUBLE_LOW (op) & ~(HOST_WIDE_INT)0x3ff) != 0
&& SPARC_SETHI_P (CONST_DOUBLE_LOW (op))));
}
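Here and below, masking with ~(HOST_WIDE_INT)0x3ff instead of the literal
0xfffffc00 matters once HOST_WIDE_INT is 64 bits wide: the unsuffixed
literal is a positive 64-bit value and would also clear the upper 32 bits.
A standalone illustration in plain C, assuming a 64-bit long long:

    long long x    = -1024;                   /* 0xfffffffffffffc00 */
    long long bad  = x & 0xfffffc00;          /* 0x00000000fffffc00 */
    long long good = x & ~(long long) 0x3ff;  /* 0xfffffffffffffc00 */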
@@ -1231,12 +1278,7 @@ input_operand (op, mode)
variants when we are working in DImode and !arch64. */
if (GET_MODE_CLASS (mode) == MODE_INT
&& ((GET_CODE (op) == CONST_INT
- && ((SPARC_SETHI_P (INTVAL (op))
- && (! TARGET_ARCH64
- || (INTVAL (op) >= 0)
- || mode == SImode
- || mode == HImode
- || mode == QImode))
+ && (SPARC_SETHI_P (INTVAL (op) & GET_MODE_MASK (mode))
|| SPARC_SIMM13_P (INTVAL (op))
|| (mode == DImode
&& ! TARGET_ARCH64)))
@@ -1315,7 +1357,7 @@ sparc_emit_set_const32 (op0, op1)
{
HOST_WIDE_INT value = INTVAL (op1);
- if (SPARC_SETHI_P (value)
+ if (SPARC_SETHI_P (value & GET_MODE_MASK (mode))
|| SPARC_SIMM13_P (value))
abort ();
}
@@ -1336,11 +1378,13 @@ sparc_emit_set_const32 (op0, op1)
&& (INTVAL (op1) & 0x80000000) != 0)
emit_insn (gen_rtx_SET
(VOIDmode, temp,
- gen_rtx_CONST_DOUBLE (VOIDmode, INTVAL (op1) & 0xfffffc00,
+ gen_rtx_CONST_DOUBLE (VOIDmode,
+ INTVAL (op1) & ~(HOST_WIDE_INT)0x3ff,
0)));
else
emit_insn (gen_rtx_SET (VOIDmode, temp,
- GEN_INT (INTVAL (op1) & 0xfffffc00)));
+ GEN_INT (INTVAL (op1)
+ & ~(HOST_WIDE_INT)0x3ff)));
emit_insn (gen_rtx_SET (VOIDmode,
op0,
@@ -1366,6 +1410,14 @@ sparc_emit_set_symbolic_const64 (op0, op1, temp1)
rtx op1;
rtx temp1;
{
+ rtx ti_temp1 = 0;
+
+ if (temp1 && GET_MODE (temp1) == TImode)
+ {
+ ti_temp1 = temp1;
+ temp1 = gen_rtx_REG (DImode, REGNO (temp1));
+ }
+
switch (sparc_cmodel)
{
case CM_MEDLOW:
@@ -1419,12 +1471,16 @@ sparc_emit_set_symbolic_const64 (op0, op1, temp1)
sllx %temp3, 32, %temp5
or %temp4, %temp5, %reg */
- /* Getting this right wrt. reloading is really tricky.
- We _MUST_ have a separate temporary at this point,
- if we don't barf immediately instead of generating
- incorrect code. */
+ /* It is possible that one of the registers we got for operands[2]
+ might coincide with that of operands[0] (which is why we made
+ it TImode). Pick the other one to use as our scratch. */
if (rtx_equal_p (temp1, op0))
- abort ();
+ {
+ if (ti_temp1)
+ temp1 = gen_rtx_REG (DImode, REGNO (temp1) + 1);
+ else
+	  abort ();
+ }
emit_insn (gen_sethh (op0, op1));
emit_insn (gen_setlm (temp1, op1));
@@ -1462,12 +1518,16 @@ sparc_emit_set_symbolic_const64 (op0, op1, temp1)
}
else
{
- /* Getting this right wrt. reloading is really tricky.
- We _MUST_ have a separate temporary at this point,
- so we barf immediately instead of generating
- incorrect code. */
- if (temp1 == op0)
- abort ();
+ /* It is possible that one of the registers we got for operands[2]
+ might coincide with that of operands[0] (which is why we made
+ it TImode). Pick the other one to use as our scratch. */
+ if (rtx_equal_p (temp1, op0))
+ {
+ if (ti_temp1)
+ temp1 = gen_rtx_REG (DImode, REGNO (temp1) + 1);
+ else
+	  abort ();
+ }
emit_insn (gen_embmedany_textuhi (op0, op1));
emit_insn (gen_embmedany_texthi (temp1, op1));
@@ -1494,15 +1554,15 @@ static rtx gen_safe_OR64 PARAMS ((rtx, HOST_WIDE_INT));
static rtx gen_safe_XOR64 PARAMS ((rtx, HOST_WIDE_INT));
#if HOST_BITS_PER_WIDE_INT == 64
-#define GEN_HIGHINT64(__x) GEN_INT ((__x) & 0xfffffc00)
+#define GEN_HIGHINT64(__x) GEN_INT ((__x) & ~(HOST_WIDE_INT)0x3ff)
#define GEN_INT64(__x) GEN_INT (__x)
#else
#define GEN_HIGHINT64(__x) \
- gen_rtx_CONST_DOUBLE (VOIDmode, (__x) & 0xfffffc00, 0)
+ gen_rtx_CONST_DOUBLE (VOIDmode, (__x) & ~(HOST_WIDE_INT)0x3ff, 0)
#define GEN_INT64(__x) \
gen_rtx_CONST_DOUBLE (VOIDmode, (__x) & 0xffffffff, \
((__x) & 0x80000000 \
- ? 0xffffffff : 0))
+ ? -1 : 0))
#endif
/* The optimizer is not to assume anything about exactly
@@ -1587,7 +1647,8 @@ sparc_emit_set_const64_quick1 (op0, temp, low_bits, is_neg)
{
emit_insn (gen_rtx_SET (VOIDmode, op0,
gen_safe_XOR64 (temp,
- (-0x400 | (low_bits & 0x3ff)))));
+ (-(HOST_WIDE_INT)0x400
+ | (low_bits & 0x3ff)))));
}
}
}
@@ -2295,16 +2356,7 @@ gen_v9_scc (compare_code, operands)
|| GET_MODE (operands[0]) == DImode))
return 0;
- /* Handle the case where operands[0] == sparc_compare_op0.
- We "early clobber" the result. */
- if (REGNO (operands[0]) == REGNO (sparc_compare_op0))
- {
- op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
- emit_move_insn (op0, sparc_compare_op0);
- }
- else
- op0 = sparc_compare_op0;
- /* For consistency in the following. */
+ op0 = sparc_compare_op0;
op1 = sparc_compare_op1;
/* Try to use the movrCC insns. */
@@ -2314,14 +2366,12 @@ gen_v9_scc (compare_code, operands)
&& v9_regcmp_p (compare_code))
{
/* Special case for op0 != 0. This can be done with one instruction if
- operands[0] == sparc_compare_op0. We don't assume they are equal
- now though. */
+ operands[0] == sparc_compare_op0. */
if (compare_code == NE
&& GET_MODE (operands[0]) == DImode
- && GET_MODE (op0) == DImode)
+ && rtx_equal_p (op0, operands[0]))
{
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], op0));
emit_insn (gen_rtx_SET (VOIDmode, operands[0],
gen_rtx_IF_THEN_ELSE (DImode,
gen_rtx_fmt_ee (compare_code, DImode,
@@ -2331,6 +2381,14 @@ gen_v9_scc (compare_code, operands)
return 1;
}
+ if (reg_overlap_mentioned_p (operands[0], op0))
+ {
+ /* Handle the case where operands[0] == sparc_compare_op0.
+ We "early clobber" the result. */
+ op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
+ emit_move_insn (op0, sparc_compare_op0);
+ }
+
emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
if (GET_MODE (op0) != DImode)
{
@@ -2406,6 +2464,304 @@ gen_df_reg (reg, low)
return gen_rtx_REG (DFmode, regno);
}
+/* Generate a call to FUNC_NAME with OPERANDS. Operand 0 is the return value.
+ Unlike normal calls, TFmode operands are passed by reference. It is
+ assumed that no more than 3 operands are required. */
+
+static void
+emit_soft_tfmode_libcall (func_name, nargs, operands)
+ const char *func_name;
+ int nargs;
+ rtx *operands;
+{
+ rtx ret_slot = NULL, arg[3], func_sym;
+ int i;
+
+ /* We only expect to be called for conversions, unary, and binary ops. */
+ if (nargs < 2 || nargs > 3)
+ abort ();
+
+ for (i = 0; i < nargs; ++i)
+ {
+ rtx this_arg = operands[i];
+ rtx this_slot;
+
+ /* TFmode arguments and return values are passed by reference. */
+ if (GET_MODE (this_arg) == TFmode)
+ {
+ if (GET_CODE (this_arg) == MEM)
+ this_arg = XEXP (this_arg, 0);
+ else if (CONSTANT_P (this_arg))
+ {
+ this_slot = force_const_mem (TFmode, this_arg);
+ this_arg = XEXP (this_slot, 0);
+ }
+ else
+ {
+ this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
+
+ /* Operand 0 is the return value. We'll copy it out later. */
+ if (i > 0)
+ emit_move_insn (this_slot, this_arg);
+ else
+ ret_slot = this_slot;
+
+ this_arg = XEXP (this_slot, 0);
+ }
+ }
+
+ arg[i] = this_arg;
+ }
+
+ func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
+
+ if (GET_MODE (operands[0]) == TFmode)
+ {
+ if (nargs == 2)
+ emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
+ arg[0], GET_MODE (arg[0]),
+ arg[1], GET_MODE (arg[1]));
+ else
+ emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
+ arg[0], GET_MODE (arg[0]),
+ arg[1], GET_MODE (arg[1]),
+ arg[2], GET_MODE (arg[2]));
+
+ if (ret_slot)
+ emit_move_insn (operands[0], ret_slot);
+ }
+ else
+ {
+ rtx ret;
+
+ if (nargs != 2)
+ abort ();
+
+ ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
+ GET_MODE (operands[0]), 1,
+ arg[1], GET_MODE (arg[1]));
+
+ if (ret != operands[0])
+ emit_move_insn (operands[0], ret);
+ }
+}
+
+/* Expand soft-float TFmode calls to the SPARC ABI routines. */
+
+static void
+emit_soft_tfmode_binop (code, operands)
+ enum rtx_code code;
+ rtx *operands;
+{
+ const char *func;
+
+ switch (code)
+ {
+ case PLUS:
+ func = "_Qp_add";
+ break;
+ case MINUS:
+ func = "_Qp_sub";
+ break;
+ case MULT:
+ func = "_Qp_mul";
+ break;
+ case DIV:
+ func = "_Qp_div";
+ break;
+ default:
+ abort ();
+ }
+
+ emit_soft_tfmode_libcall (func, 3, operands);
+}
+
+static void
+emit_soft_tfmode_unop (code, operands)
+ enum rtx_code code;
+ rtx *operands;
+{
+ const char *func;
+
+ switch (code)
+ {
+ case SQRT:
+ func = "_Qp_sqrt";
+ break;
+ default:
+ abort ();
+ }
+
+ emit_soft_tfmode_libcall (func, 2, operands);
+}
+
+static void
+emit_soft_tfmode_cvt (code, operands)
+ enum rtx_code code;
+ rtx *operands;
+{
+ const char *func;
+
+ switch (code)
+ {
+ case FLOAT_EXTEND:
+ switch (GET_MODE (operands[1]))
+ {
+ case SFmode:
+ func = "_Qp_stoq";
+ break;
+ case DFmode:
+ func = "_Qp_dtoq";
+ break;
+ default:
+ abort ();
+ }
+ break;
+
+ case FLOAT_TRUNCATE:
+ switch (GET_MODE (operands[0]))
+ {
+ case SFmode:
+ func = "_Qp_qtos";
+ break;
+ case DFmode:
+ func = "_Qp_qtod";
+ break;
+ default:
+ abort ();
+ }
+ break;
+
+ case FLOAT:
+ switch (GET_MODE (operands[1]))
+ {
+ case SImode:
+ func = "_Qp_itoq";
+ break;
+ case DImode:
+ func = "_Qp_xtoq";
+ break;
+ default:
+ abort ();
+ }
+ break;
+
+ case UNSIGNED_FLOAT:
+ switch (GET_MODE (operands[1]))
+ {
+ case SImode:
+ func = "_Qp_uitoq";
+ break;
+ case DImode:
+ func = "_Qp_uxtoq";
+ break;
+ default:
+ abort ();
+ }
+ break;
+
+ case FIX:
+ switch (GET_MODE (operands[0]))
+ {
+ case SImode:
+ func = "_Qp_qtoi";
+ break;
+ case DImode:
+ func = "_Qp_qtox";
+ break;
+ default:
+ abort ();
+ }
+ break;
+
+ case UNSIGNED_FIX:
+ switch (GET_MODE (operands[0]))
+ {
+ case SImode:
+ func = "_Qp_qtoui";
+ break;
+ case DImode:
+ func = "_Qp_qtoux";
+ break;
+ default:
+ abort ();
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ emit_soft_tfmode_libcall (func, 2, operands);
+}
+
+/* Expand a hard-float TFmode operation. All arguments must be in
+   registers. */
+
+static void
+emit_hard_tfmode_operation (code, operands)
+ enum rtx_code code;
+ rtx *operands;
+{
+ rtx op, dest;
+
+ if (GET_RTX_CLASS (code) == '1')
+ {
+ operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
+ op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
+ }
+ else
+ {
+ operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
+ operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
+ op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
+ operands[1], operands[2]);
+ }
+
+ if (register_operand (operands[0], VOIDmode))
+ dest = operands[0];
+ else
+ dest = gen_reg_rtx (GET_MODE (operands[0]));
+
+ emit_insn (gen_rtx_SET (VOIDmode, dest, op));
+
+ if (dest != operands[0])
+ emit_move_insn (operands[0], dest);
+}
+
+void
+emit_tfmode_binop (code, operands)
+ enum rtx_code code;
+ rtx *operands;
+{
+ if (TARGET_HARD_QUAD)
+ emit_hard_tfmode_operation (code, operands);
+ else
+ emit_soft_tfmode_binop (code, operands);
+}
+
+void
+emit_tfmode_unop (code, operands)
+ enum rtx_code code;
+ rtx *operands;
+{
+ if (TARGET_HARD_QUAD)
+ emit_hard_tfmode_operation (code, operands);
+ else
+ emit_soft_tfmode_unop (code, operands);
+}
+
+void
+emit_tfmode_cvt (code, operands)
+ enum rtx_code code;
+ rtx *operands;
+{
+ if (TARGET_HARD_QUAD)
+ emit_hard_tfmode_operation (code, operands);
+ else
+ emit_soft_tfmode_cvt (code, operands);
+}
+
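A sketch of how these three entry points are meant to be driven from the
TFmode expanders (the exact patterns live in sparc.md and may differ):

    /* e.g. in the C body of an addtf3 expander:  */
    emit_tfmode_binop (PLUS, operands);
    DONE;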
/* Return nonzero if a return peephole merging return with
setting of output register is ok. */
int
@@ -2414,6 +2770,26 @@ leaf_return_peephole_ok ()
return (actual_fsize == 0);
}
+/* Return nonzero if a branch/jump/call instruction will have a nop
+   emitted into its delay slot. */
+
+int
+empty_delay_slot (insn)
+ rtx insn;
+{
+ rtx seq;
+
+  /* If there is no previous instruction (should not happen), return
+     true. */
+ if (PREV_INSN (insn) == NULL)
+ return 1;
+
+ seq = NEXT_INSN (PREV_INSN (insn));
+ if (GET_CODE (PATTERN (seq)) == SEQUENCE)
+ return 0;
+
+ return 1;
+}
+
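The intent is for the machine description to consult this from an insn
attribute, so that a branch whose delay slot will hold a nop is counted
one word longer by the far-branch logic below. A sketch of the hookup,
which may not match the actual md wiring:

    /* In sparc.md:
         (define_attr "empty_delay_slot" "false,true"
           (symbol_ref "empty_delay_slot (insn)"))  */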
/* Return nonzero if TRIAL can go into the function epilogue's
delay slot. SLOT is the slot we are trying to fill. */
@@ -2931,7 +3307,7 @@ load_pic_register ()
}
/* Return 1 if RTX is a MEM which is known to be aligned to at
- least an 8 byte boundary. */
+ least a DESIRED byte boundary. */
int
mem_min_alignment (mem, desired)
@@ -2972,8 +3348,7 @@ mem_min_alignment (mem, desired)
{
int regno = REGNO (base);
- if (regno != FRAME_POINTER_REGNUM
- && regno != STACK_POINTER_REGNUM)
+ if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
{
/* Check if the compiler has recorded some information
about the alignment of the base REG. If reload has
@@ -3190,7 +3565,7 @@ sparc_init_modes ()
{
if (i < 16 && TARGET_V8PLUS)
sparc_regno_reg_class[i] = I64_REGS;
- else if (i < 32)
+ else if (i < 32 || i == FRAME_POINTER_REGNUM)
sparc_regno_reg_class[i] = GENERAL_REGS;
else if (i < 64)
sparc_regno_reg_class[i] = FP_REGS;
@@ -3334,31 +3709,28 @@ compute_frame_size (size, leaf_function)
int outgoing_args_size = (current_function_outgoing_args_size
+ REG_PARM_STACK_SPACE (current_function_decl));
- if (TARGET_EPILOGUE)
- {
- /* N_REGS is the number of 4-byte regs saved thus far. This applies
- even to v9 int regs to be consistent with save_regs/restore_regs. */
+ /* N_REGS is the number of 4-byte regs saved thus far. This applies
+ even to v9 int regs to be consistent with save_regs/restore_regs. */
- if (TARGET_ARCH64)
- {
- for (i = 0; i < 8; i++)
- if (regs_ever_live[i] && ! call_used_regs[i])
- n_regs += 2;
- }
- else
- {
- for (i = 0; i < 8; i += 2)
- if ((regs_ever_live[i] && ! call_used_regs[i])
- || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
- n_regs += 2;
- }
-
- for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
+ if (TARGET_ARCH64)
+ {
+ for (i = 0; i < 8; i++)
+ if (regs_ever_live[i] && ! call_used_regs[i])
+ n_regs += 2;
+ }
+ else
+ {
+ for (i = 0; i < 8; i += 2)
if ((regs_ever_live[i] && ! call_used_regs[i])
|| (regs_ever_live[i+1] && ! call_used_regs[i+1]))
n_regs += 2;
}
+ for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
+ if ((regs_ever_live[i] && ! call_used_regs[i])
+ || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
+ n_regs += 2;
+
/* Set up values for use in `function_epilogue'. */
num_gfregs = n_regs;
@@ -3369,9 +3741,8 @@ compute_frame_size (size, leaf_function)
}
else
{
- /* We subtract STARTING_FRAME_OFFSET, remember it's negative.
- The stack bias (if any) is taken out to undo its effects. */
- apparent_fsize = (size - STARTING_FRAME_OFFSET + SPARC_STACK_BIAS + 7) & -8;
+ /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
+ apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
apparent_fsize += n_regs * 4;
actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
}
@@ -3538,7 +3909,7 @@ sparc_nonflat_function_prologue (file, size, leaf_function)
/* The canonical frame address refers to the top of the frame. */
dwarf2out_def_cfa (label, (leaf_function ? STACK_POINTER_REGNUM
- : FRAME_POINTER_REGNUM),
+ : HARD_FRAME_POINTER_REGNUM),
frame_base_offset);
if (! leaf_function)
@@ -3583,24 +3954,9 @@ sparc_nonflat_function_prologue (file, size, leaf_function)
base = frame_base_name;
}
- n_regs = 0;
- if (TARGET_EPILOGUE && ! leaf_function)
- /* ??? Originally saved regs 0-15 here. */
- n_regs = save_regs (file, 0, 8, base, offset, 0, real_offset);
- else if (leaf_function)
- /* ??? Originally saved regs 0-31 here. */
- n_regs = save_regs (file, 0, 8, base, offset, 0, real_offset);
- if (TARGET_EPILOGUE)
- save_regs (file, 32, TARGET_V9 ? 96 : 64, base, offset, n_regs,
- real_offset);
- }
-
- leaf_label = 0;
- if (leaf_function && actual_fsize != 0)
- {
- /* warning ("leaf procedure with frame size %d", actual_fsize); */
- if (! TARGET_EPILOGUE)
- leaf_label = gen_label_rtx ();
+ n_regs = save_regs (file, 0, 8, base, offset, 0, real_offset);
+ save_regs (file, 32, TARGET_V9 ? 96 : 64, base, offset, n_regs,
+ real_offset);
}
}
@@ -3609,7 +3965,7 @@ sparc_nonflat_function_prologue (file, size, leaf_function)
static void
output_restore_regs (file, leaf_function)
FILE *file;
- int leaf_function;
+ int leaf_function ATTRIBUTE_UNUSED;
{
int offset, n_regs;
const char *base;
@@ -3627,15 +3983,8 @@ output_restore_regs (file, leaf_function)
base = frame_base_name;
}
- n_regs = 0;
- if (TARGET_EPILOGUE && ! leaf_function)
- /* ??? Originally saved regs 0-15 here. */
- n_regs = restore_regs (file, 0, 8, base, offset, 0);
- else if (leaf_function)
- /* ??? Originally saved regs 0-31 here. */
- n_regs = restore_regs (file, 0, 8, base, offset, 0);
- if (TARGET_EPILOGUE)
- restore_regs (file, 32, TARGET_V9 ? 96 : 64, base, offset, n_regs);
+ n_regs = restore_regs (file, 0, 8, base, offset, 0);
+ restore_regs (file, 32, TARGET_V9 ? 96 : 64, base, offset, n_regs);
}
/* This function generates the assembly code for function exit,
@@ -3668,21 +4017,25 @@ sparc_nonflat_function_epilogue (file, size, leaf_function)
{
const char *ret;
- if (leaf_label)
- {
- emit_label_after (leaf_label, get_last_insn ());
- final_scan_insn (get_last_insn (), file, 0, 0, 1);
- }
-
if (current_function_epilogue_delay_list == 0)
{
/* If code does not drop into the epilogue, we need
- do nothing except output pending case vectors. */
- rtx insn = get_last_insn ();
- if (GET_CODE (insn) == NOTE)
- insn = prev_nonnote_insn (insn);
- if (insn && GET_CODE (insn) == BARRIER)
- goto output_vectors;
+ do nothing except output pending case vectors.
+
+	 We still have to output a dummy nop for the sake of sane
+	 backtraces. Otherwise, if the last two instructions of a
+	 function were call foo; dslot; the return PC of foo (i.e. the
+	 address of the call instruction plus 8) could point to the
+	 first instruction in the next function. */
+ rtx insn;
+
+ fputs("\tnop\n", file);
+
+ insn = get_last_insn ();
+ if (GET_CODE (insn) == NOTE)
+ insn = prev_nonnote_insn (insn);
+ if (insn && GET_CODE (insn) == BARRIER)
+ goto output_vectors;
}
if (num_gfregs)
@@ -3694,95 +4047,93 @@ sparc_nonflat_function_epilogue (file, size, leaf_function)
else
ret = (SKIP_CALLERS_UNIMP_P ? "jmp\t%i7+12" : "ret");
- if (TARGET_EPILOGUE || leaf_label)
+ if (! leaf_function)
{
- int old_target_epilogue = TARGET_EPILOGUE;
- target_flags &= ~old_target_epilogue;
+ if (current_function_calls_eh_return)
+ {
+ if (current_function_epilogue_delay_list)
+ abort ();
+ if (SKIP_CALLERS_UNIMP_P)
+ abort ();
- if (! leaf_function)
+ fputs ("\trestore\n\tretl\n\tadd\t%sp, %g1, %sp\n", file);
+ }
+ /* If we wound up with things in our delay slot, flush them here. */
+ else if (current_function_epilogue_delay_list)
{
- if (current_function_calls_eh_return)
- {
- if (current_function_epilogue_delay_list)
- abort ();
- if (SKIP_CALLERS_UNIMP_P)
- abort ();
+ rtx delay = PATTERN (XEXP (current_function_epilogue_delay_list, 0));
- fputs ("\trestore\n\tretl\n\tadd\t%sp, %g1, %sp\n", file);
+ if (TARGET_V9 && ! epilogue_renumber (&delay, 1))
+ {
+ epilogue_renumber (&delay, 0);
+ fputs (SKIP_CALLERS_UNIMP_P
+ ? "\treturn\t%i7+12\n"
+ : "\treturn\t%i7+8\n", file);
+ final_scan_insn (XEXP (current_function_epilogue_delay_list, 0),
+ file, 1, 0, 0);
}
- /* If we wound up with things in our delay slot, flush them here. */
- else if (current_function_epilogue_delay_list)
+ else
{
- rtx delay = PATTERN (XEXP (current_function_epilogue_delay_list, 0));
+ rtx insn, src;
- if (TARGET_V9 && ! epilogue_renumber (&delay, 1))
- {
- epilogue_renumber (&delay, 0);
- fputs (SKIP_CALLERS_UNIMP_P
- ? "\treturn\t%i7+12\n"
- : "\treturn\t%i7+8\n", file);
- final_scan_insn (XEXP (current_function_epilogue_delay_list, 0), file, 1, 0, 0);
- }
- else
- {
- rtx insn = emit_jump_insn_after (gen_rtx_RETURN (VOIDmode),
- get_last_insn ());
- rtx src;
+ if (GET_CODE (delay) != SET)
+	    abort ();
- if (GET_CODE (delay) != SET)
+ src = SET_SRC (delay);
+ if (GET_CODE (src) == ASHIFT)
+ {
+ if (XEXP (src, 1) != const1_rtx)
abort();
+ SET_SRC (delay)
+ = gen_rtx_PLUS (GET_MODE (src), XEXP (src, 0),
+ XEXP (src, 0));
+ }
- src = SET_SRC (delay);
- if (GET_CODE (src) == ASHIFT)
- {
- if (XEXP (src, 1) != const1_rtx)
- abort();
- SET_SRC (delay) = gen_rtx_PLUS (GET_MODE (src), XEXP (src, 0),
- XEXP (src, 0));
- }
+ insn = gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2, delay,
+ gen_rtx_RETURN (VOIDmode)));
+ insn = emit_jump_insn (insn);
- PATTERN (insn) = gen_rtx_PARALLEL (VOIDmode,
- gen_rtvec (2, delay, PATTERN (insn)));
- final_scan_insn (insn, file, 1, 0, 1);
- }
+ sparc_emitting_epilogue = true;
+ final_scan_insn (insn, file, 1, 0, 1);
+ sparc_emitting_epilogue = false;
}
- else if (TARGET_V9 && ! SKIP_CALLERS_UNIMP_P)
- fputs ("\treturn\t%i7+8\n\tnop\n", file);
- else
- fprintf (file, "\t%s\n\trestore\n", ret);
}
- else if (current_function_calls_eh_return)
+ else if (TARGET_V9 && ! SKIP_CALLERS_UNIMP_P)
+ fputs ("\treturn\t%i7+8\n\tnop\n", file);
+ else
+ fprintf (file, "\t%s\n\trestore\n", ret);
+ }
+ /* All of the following cases are for leaf functions. */
+ else if (current_function_calls_eh_return)
+ abort ();
+ else if (current_function_epilogue_delay_list)
+ {
+      /* eligible_for_epilogue_delay_slot ensures that if this is a
+	 leaf function, then we will only have an insn in the delay
+	 slot if the frame size is zero, so no stack adjustment is
+	 needed here. */
+ if (actual_fsize != 0)
abort ();
- /* All of the following cases are for leaf functions. */
- else if (current_function_epilogue_delay_list)
- {
- /* eligible_for_epilogue_delay_slot ensures that if this is a
- leaf function, then we will only have insn in the delay slot
- if the frame size is zero, thus no adjust for the stack is
- needed here. */
- if (actual_fsize != 0)
- abort ();
- fprintf (file, "\t%s\n", ret);
- final_scan_insn (XEXP (current_function_epilogue_delay_list, 0),
- file, 1, 0, 1);
- }
- /* Output 'nop' instead of 'sub %sp,-0,%sp' when no frame, so as to
- avoid generating confusing assembly language output. */
- else if (actual_fsize == 0)
- fprintf (file, "\t%s\n\tnop\n", ret);
- else if (actual_fsize <= 4096)
- fprintf (file, "\t%s\n\tsub\t%%sp, -%d, %%sp\n", ret, actual_fsize);
- else if (actual_fsize <= 8192)
- fprintf (file, "\tsub\t%%sp, -4096, %%sp\n\t%s\n\tsub\t%%sp, -%d, %%sp\n",
- ret, actual_fsize - 4096);
- else if ((actual_fsize & 0x3ff) == 0)
- fprintf (file, "\tsethi\t%%hi(%d), %%g1\n\t%s\n\tadd\t%%sp, %%g1, %%sp\n",
- actual_fsize, ret);
- else
- fprintf (file, "\tsethi\t%%hi(%d), %%g1\n\tor\t%%g1, %%lo(%d), %%g1\n\t%s\n\tadd\t%%sp, %%g1, %%sp\n",
- actual_fsize, actual_fsize, ret);
- target_flags |= old_target_epilogue;
+ fprintf (file, "\t%s\n", ret);
+ final_scan_insn (XEXP (current_function_epilogue_delay_list, 0),
+ file, 1, 0, 1);
}
+ /* Output 'nop' instead of 'sub %sp,-0,%sp' when no frame, so as to
+ avoid generating confusing assembly language output. */
+ else if (actual_fsize == 0)
+ fprintf (file, "\t%s\n\tnop\n", ret);
+ else if (actual_fsize <= 4096)
+ fprintf (file, "\t%s\n\tsub\t%%sp, -%d, %%sp\n", ret, actual_fsize);
+ else if (actual_fsize <= 8192)
+ fprintf (file, "\tsub\t%%sp, -4096, %%sp\n\t%s\n\tsub\t%%sp, -%d, %%sp\n",
+ ret, actual_fsize - 4096);
+ else if ((actual_fsize & 0x3ff) == 0)
+ fprintf (file, "\tsethi\t%%hi(%d), %%g1\n\t%s\n\tadd\t%%sp, %%g1, %%sp\n",
+ actual_fsize, ret);
+ else
+ fprintf (file, "\tsethi\t%%hi(%d), %%g1\n\tor\t%%g1, %%lo(%d), %%g1\n\t%s\n\tadd\t%%sp, %%g1, %%sp\n",
+ actual_fsize, actual_fsize, ret);
output_vectors:
sparc_output_deferred_case_vectors ();
@@ -4222,7 +4573,10 @@ function_arg_record_value_1 (type, startbitpos, parms)
if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
function_arg_record_value_1 (TREE_TYPE (field), bitpos, parms);
- else if (TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
+ else if ((TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
+ || (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE
+ && (TREE_CODE (TREE_TYPE (TREE_TYPE (field)))
+ == REAL_TYPE)))
&& TARGET_FPU
&& ! packed_p
&& parms->named)
@@ -4245,6 +4599,8 @@ function_arg_record_value_1 (type, startbitpos, parms)
/* There's no need to check this_slotno < SPARC_FP_ARG MAX.
If it wasn't true we wouldn't be here. */
parms->nregs += 1;
+ if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
+ parms->nregs += 1;
}
else
{
@@ -4348,24 +4704,45 @@ function_arg_record_value_2 (type, startbitpos, parms)
if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
function_arg_record_value_2 (TREE_TYPE (field), bitpos, parms);
- else if (TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
+ else if ((TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
+ || (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE
+ && (TREE_CODE (TREE_TYPE (TREE_TYPE (field)))
+ == REAL_TYPE)))
&& TARGET_FPU
&& ! packed_p
&& parms->named)
{
int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
+ int regno;
+ enum machine_mode mode = DECL_MODE (field);
rtx reg;
function_arg_record_value_3 (bitpos, parms);
-
- reg = gen_rtx_REG (DECL_MODE (field),
- (SPARC_FP_ARG_FIRST + this_slotno * 2
- + (DECL_MODE (field) == SFmode
- && (bitpos & 32) != 0)));
+ regno = SPARC_FP_ARG_FIRST + this_slotno * 2
+ + ((mode == SFmode || mode == SCmode)
+ && (bitpos & 32) != 0);
+ switch (mode)
+ {
+ case SCmode: mode = SFmode; break;
+ case DCmode: mode = DFmode; break;
+ case TCmode: mode = TFmode; break;
+ default: break;
+ }
+ reg = gen_rtx_REG (mode, regno);
XVECEXP (parms->ret, 0, parms->nregs)
= gen_rtx_EXPR_LIST (VOIDmode, reg,
GEN_INT (bitpos / BITS_PER_UNIT));
parms->nregs += 1;
+ if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
+ {
+ regno += GET_MODE_SIZE (mode) / 4;
+ reg = gen_rtx_REG (mode, regno);
+ XVECEXP (parms->ret, 0, parms->nregs)
+ = gen_rtx_EXPR_LIST (VOIDmode, reg,
+ GEN_INT ((bitpos + GET_MODE_BITSIZE (mode))
+ / BITS_PER_UNIT));
+ parms->nregs += 1;
+ }
}
else
{
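A concrete case of the COMPLEX_TYPE handling above (register numbers
illustrative):

    /* A named argument such as
         struct pt { _Complex float c; };
       has its SCmode field split into two SFmode registers, e.g.
       %f0 and %f1, one per 4-byte half, instead of being passed
       as a single register.  */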
@@ -4693,8 +5070,9 @@ function_arg_pass_by_reference (cum, mode, type, named)
return ((type && TREE_CODE (type) == ARRAY_TYPE)
/* Consider complex values as aggregates, so care for TCmode. */
|| GET_MODE_SIZE (mode) > 16
- || (type && AGGREGATE_TYPE_P (type)
- && int_size_in_bytes (type) > 16));
+ || (type
+ && AGGREGATE_TYPE_P (type)
+ && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16));
}
}
@@ -4846,7 +5224,7 @@ sparc_builtin_saveregs ()
emit_move_insn (gen_rtx_MEM (word_mode,
gen_rtx_PLUS (Pmode,
frame_pointer_rtx,
- GEN_INT (STACK_POINTER_OFFSET
+ GEN_INT (FIRST_PARM_OFFSET (0)
+ (UNITS_PER_WORD
* regno)))),
gen_rtx_REG (word_mode,
@@ -4854,7 +5232,7 @@ sparc_builtin_saveregs ()
address = gen_rtx_PLUS (Pmode,
frame_pointer_rtx,
- GEN_INT (STACK_POINTER_OFFSET
+ GEN_INT (FIRST_PARM_OFFSET (0)
+ UNITS_PER_WORD * first_reg));
return address;
@@ -4895,11 +5273,15 @@ sparc_va_arg (valist, type)
if (AGGREGATE_TYPE_P (type))
{
- if (size > 16)
+ if ((unsigned HOST_WIDE_INT) size > 16)
{
indirect = 1;
size = rsize = UNITS_PER_WORD;
}
+      /* The SPARC V9 ABI states that structures up to 8 bytes in size
+	 are given one 8-byte slot; an empty structure thus still
+	 consumes a full slot. */
+ else if (size == 0)
+ size = rsize = UNITS_PER_WORD;
else
size = rsize;
}
@@ -4994,25 +5376,43 @@ sparc_va_arg (valist, type)
INSN, if set, is the insn. */
char *
-output_cbranch (op, label, reversed, annul, noop, insn)
- rtx op;
+output_cbranch (op, dest, label, reversed, annul, noop, insn)
+ rtx op, dest;
int label;
int reversed, annul, noop;
rtx insn;
{
- static char string[32];
+ static char string[50];
enum rtx_code code = GET_CODE (op);
rtx cc_reg = XEXP (op, 0);
enum machine_mode mode = GET_MODE (cc_reg);
- static char v8_labelno[] = "%lX";
- static char v9_icc_labelno[] = "%%icc, %lX";
- static char v9_xcc_labelno[] = "%%xcc, %lX";
- static char v9_fcc_labelno[] = "%%fccX, %lY";
- char *labelno;
- const char *branch;
- int labeloff, spaces = 8;
+ const char *labelno, *branch;
+ int spaces = 8, far;
+ char *p;
+
+  /* V9 branches are limited to +-1MB. If the target is too far away,
+     change
+
+ bne,pt %xcc, .LC30
+
+ to
+
+ be,pn %xcc, .+12
+ nop
+ ba .LC30
+
+ and
+
+ fbne,a,pn %fcc2, .LC29
+
+ to
- if (reversed)
+ fbe,pt %fcc2, .+16
+ nop
+ ba .LC29 */
+
+ far = get_attr_length (insn) >= 3;
+ if (reversed ^ far)
{
/* Reversal of FP compares takes care -- an ordered compare
becomes an unordered compare and vice versa. */
@@ -5095,7 +5495,7 @@ output_cbranch (op, label, reversed, annul, noop, insn)
branch = "be";
break;
case GE:
- if (mode == CC_NOOVmode)
+ if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
branch = "bpos";
else
branch = "bge";
@@ -5107,7 +5507,7 @@ output_cbranch (op, label, reversed, annul, noop, insn)
branch = "ble";
break;
case LT:
- if (mode == CC_NOOVmode)
+ if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
branch = "bneg";
else
branch = "bl";
@@ -5131,54 +5531,89 @@ output_cbranch (op, label, reversed, annul, noop, insn)
strcpy (string, branch);
}
spaces -= strlen (branch);
+ p = strchr (string, '\0');
/* Now add the annulling, the label, and a possible noop. */
- if (annul)
+ if (annul && ! far)
{
- strcat (string, ",a");
+ strcpy (p, ",a");
+ p += 2;
spaces -= 2;
}
if (! TARGET_V9)
- {
- labeloff = 2;
- labelno = v8_labelno;
- }
+ labelno = "";
else
{
rtx note;
+ int v8 = 0;
- if (insn && (note = find_reg_note (insn, REG_BR_PRED, NULL_RTX)))
+ if (! far && insn && INSN_ADDRESSES_SET_P ())
{
- strcat (string,
- INTVAL (XEXP (note, 0)) & ATTR_FLAG_likely ? ",pt" : ",pn");
- spaces -= 3;
+ int delta = (INSN_ADDRESSES (INSN_UID (dest))
+ - INSN_ADDRESSES (INSN_UID (insn)));
+ /* Leave some instructions for "slop". */
+ if (delta < -260000 || delta >= 260000)
+ v8 = 1;
}
- labeloff = 9;
if (mode == CCFPmode || mode == CCFPEmode)
{
- labeloff = 10;
- labelno = v9_fcc_labelno;
+ static char v9_fcc_labelno[] = "%%fccX, ";
/* Set the char indicating the number of the fcc reg to use. */
- labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
+ v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
+ labelno = v9_fcc_labelno;
+ if (v8)
+ {
+ if (REGNO (cc_reg) == SPARC_FCC_REG)
+ labelno = "";
+ else
+ abort ();
+ }
}
else if (mode == CCXmode || mode == CCX_NOOVmode)
- labelno = v9_xcc_labelno;
+ {
+ labelno = "%%xcc, ";
+ if (v8)
+ abort ();
+ }
else
- labelno = v9_icc_labelno;
+ {
+ labelno = "%%icc, ";
+ if (v8)
+ labelno = "";
+ }
+
+ if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
+ {
+ strcpy (p,
+ ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
+ ? ",pt" : ",pn");
+ p += 3;
+ spaces -= 3;
+ }
}
- /* Set the char indicating the number of the operand containing the
- label_ref. */
- labelno[labeloff] = label + '0';
if (spaces > 0)
- strcat (string, "\t");
+ *p++ = '\t';
else
- strcat (string, " ");
- strcat (string, labelno);
-
+ *p++ = ' ';
+ strcpy (p, labelno);
+ p = strchr (p, '\0');
+ if (far)
+ {
+ strcpy (p, ".+12\n\tnop\n\tb\t");
+ if (annul || noop)
+ p[3] = '6';
+ p += 13;
+ }
+ *p++ = '%';
+ *p++ = 'l';
+ /* Set the char indicating the number of the operand containing the
+ label_ref. */
+ *p++ = label + '0';
+ *p = '\0';
if (noop)
- strcat (string, "\n\tnop");
+ strcpy (p, "\n\tnop");
return string;
}
@@ -5257,7 +5692,7 @@ sparc_emit_float_lib_cmp (x, y, comparison)
else
slot1 = y;
- emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), 1,
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
DImode, 2,
XEXP (slot0, 0), Pmode,
XEXP (slot1, 0), Pmode);
@@ -5266,7 +5701,7 @@ sparc_emit_float_lib_cmp (x, y, comparison)
}
else
{
- emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), 1,
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
SImode, 2,
x, TFmode, y, TFmode);
@@ -5324,6 +5759,42 @@ sparc_emit_float_lib_cmp (x, y, comparison)
}
}
+/* Generate an unsigned DImode to FP conversion. This is the same code
+ optabs would emit if we didn't have TFmode patterns. */
+
+void
+sparc_emit_floatunsdi (operands)
+ rtx operands[2];
+{
+ rtx neglab, donelab, i0, i1, f0, in, out;
+ enum machine_mode mode;
+
+ out = operands[0];
+ in = force_reg (DImode, operands[1]);
+ mode = GET_MODE (out);
+ neglab = gen_label_rtx ();
+ donelab = gen_label_rtx ();
+ i0 = gen_reg_rtx (DImode);
+ i1 = gen_reg_rtx (DImode);
+ f0 = gen_reg_rtx (mode);
+
+ emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
+
+ emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
+ emit_jump_insn (gen_jump (donelab));
+ emit_barrier ();
+
+ emit_label (neglab);
+
+ emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
+ emit_insn (gen_anddi3 (i1, in, const1_rtx));
+ emit_insn (gen_iordi3 (i0, i0, i1));
+ emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
+ emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
+
+ emit_label (donelab);
+}
+
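The negative branch is the usual halve-and-double trick: shift right by
one, OR the lost low bit back in as a sticky bit so rounding is
unaffected, convert, then double. The same computation in freestanding C,
as a reference sketch assuming a 64-bit long long:

    double
    u64_to_fp (unsigned long long x)
    {
      if ((long long) x >= 0)
        return (double) (long long) x;  /* fits the signed range */
      /* Halve with a sticky low bit, convert, then double.  */
      unsigned long long half = (x >> 1) | (x & 1);
      return (double) (long long) half * 2.0;
    }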
/* Return the string to output a conditional branch to LABEL, testing
register REG. LABEL is the operand number of the label; REG is the
operand number of the reg. OP is the conditional expression. The mode
@@ -5336,22 +5807,45 @@ sparc_emit_float_lib_cmp (x, y, comparison)
NOOP is non-zero if we have to follow this branch by a noop. */
char *
-output_v9branch (op, reg, label, reversed, annul, noop, insn)
- rtx op;
+output_v9branch (op, dest, reg, label, reversed, annul, noop, insn)
+ rtx op, dest;
int reg, label;
int reversed, annul, noop;
rtx insn;
{
- static char string[20];
+ static char string[50];
enum rtx_code code = GET_CODE (op);
enum machine_mode mode = GET_MODE (XEXP (op, 0));
- static char labelno[] = "%X, %lX";
rtx note;
- int spaces = 8;
+ int far;
+ char *p;
+
+  /* Branches on registers are limited to +-128KB. If the target is
+     too far away, change
+
+ brnz,pt %g1, .LC30
+
+ to
+
+ brz,pn %g1, .+12
+ nop
+ ba,pt %xcc, .LC30
+
+ and
+
+ brgez,a,pn %o1, .LC29
+
+ to
+
+ brlz,pt %o1, .+16
+ nop
+ ba,pt %xcc, .LC29 */
+
+ far = get_attr_length (insn) >= 3;
/* If not floating-point or if EQ or NE, we can just reverse the code. */
- if (reversed)
- code = reverse_condition (code), reversed = 0;
+ if (reversed ^ far)
+ code = reverse_condition (code);
/* Only 64 bit versions of these instructions exist. */
if (mode != DImode)
@@ -5363,62 +5857,90 @@ output_v9branch (op, reg, label, reversed, annul, noop, insn)
{
case NE:
strcpy (string, "brnz");
- spaces -= 4;
break;
case EQ:
strcpy (string, "brz");
- spaces -= 3;
break;
case GE:
strcpy (string, "brgez");
- spaces -= 5;
break;
case LT:
strcpy (string, "brlz");
- spaces -= 4;
break;
case LE:
strcpy (string, "brlez");
- spaces -= 5;
break;
case GT:
strcpy (string, "brgz");
- spaces -= 4;
break;
default:
abort ();
}
+ p = strchr (string, '\0');
+
/* Now add the annulling, reg, label, and nop. */
- if (annul)
+ if (annul && ! far)
{
- strcat (string, ",a");
- spaces -= 2;
+ strcpy (p, ",a");
+ p += 2;
}
- if (insn && (note = find_reg_note (insn, REG_BR_PRED, NULL_RTX)))
+ if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
{
- strcat (string,
- INTVAL (XEXP (note, 0)) & ATTR_FLAG_likely ? ",pt" : ",pn");
- spaces -= 3;
+ strcpy (p,
+ ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
+ ? ",pt" : ",pn");
+ p += 3;
}
- labelno[1] = reg + '0';
- labelno[6] = label + '0';
- if (spaces > 0)
- strcat (string, "\t");
- else
- strcat (string, " ");
- strcat (string, labelno);
+ *p = p < string + 8 ? '\t' : ' ';
+ p++;
+ *p++ = '%';
+ *p++ = '0' + reg;
+ *p++ = ',';
+ *p++ = ' ';
+ if (far)
+ {
+ int veryfar = 1, delta;
+
+ if (INSN_ADDRESSES_SET_P ())
+ {
+ delta = (INSN_ADDRESSES (INSN_UID (dest))
+ - INSN_ADDRESSES (INSN_UID (insn)));
+ /* Leave some instructions for "slop". */
+ if (delta >= -260000 && delta < 260000)
+ veryfar = 0;
+ }
+
+ strcpy (p, ".+12\n\tnop\n\t");
+ if (annul || noop)
+ p[3] = '6';
+ p += 11;
+ if (veryfar)
+ {
+ strcpy (p, "b\t");
+ p += 2;
+ }
+ else
+ {
+ strcpy (p, "ba,pt\t%%xcc, ");
+ p += 13;
+ }
+ }
+ *p++ = '%';
+ *p++ = 'l';
+ *p++ = '0' + label;
+ *p = '\0';
if (noop)
- strcat (string, "\n\tnop");
+ strcpy (p, "\n\tnop");
return string;
}
@@ -5467,7 +5989,7 @@ epilogue_renumber (where, test)
are in the return delayed slot. */
case PLUS:
if (GET_CODE (XEXP (*where, 0)) == REG
- && REGNO (XEXP (*where, 0)) == FRAME_POINTER_REGNUM
+ && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
&& (GET_CODE (XEXP (*where, 1)) != CONST_INT
|| INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
return 1;
@@ -5476,7 +5998,7 @@ epilogue_renumber (where, test)
case MEM:
if (SPARC_STACK_BIAS
&& GET_CODE (XEXP (*where, 0)) == REG
- && REGNO (XEXP (*where, 0)) == FRAME_POINTER_REGNUM)
+ && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
return 1;
break;
@@ -5501,87 +6023,6 @@ epilogue_renumber (where, test)
}
return 0;
}
-
-/* Output assembler code to return from a function. */
-
-const char *
-output_return (operands)
- rtx *operands;
-{
- rtx delay = final_sequence ? XVECEXP (final_sequence, 0, 1) : 0;
-
- if (leaf_label)
- {
- operands[0] = leaf_label;
- return "b%* %l0%(";
- }
- else if (current_function_uses_only_leaf_regs)
- {
- /* No delay slot in a leaf function. */
- if (delay)
- abort ();
-
- /* If we didn't allocate a frame pointer for the current function,
- the stack pointer might have been adjusted. Output code to
- restore it now. */
-
- operands[0] = GEN_INT (actual_fsize);
-
- /* Use sub of negated value in first two cases instead of add to
- allow actual_fsize == 4096. */
-
- if (actual_fsize <= 4096)
- {
- if (SKIP_CALLERS_UNIMP_P)
- return "jmp\t%%o7+12\n\tsub\t%%sp, -%0, %%sp";
- else
- return "retl\n\tsub\t%%sp, -%0, %%sp";
- }
- else if (actual_fsize <= 8192)
- {
- operands[0] = GEN_INT (actual_fsize - 4096);
- if (SKIP_CALLERS_UNIMP_P)
- return "sub\t%%sp, -4096, %%sp\n\tjmp\t%%o7+12\n\tsub\t%%sp, -%0, %%sp";
- else
- return "sub\t%%sp, -4096, %%sp\n\tretl\n\tsub\t%%sp, -%0, %%sp";
- }
- else if (SKIP_CALLERS_UNIMP_P)
- {
- if ((actual_fsize & 0x3ff) != 0)
- return "sethi\t%%hi(%a0), %%g1\n\tor\t%%g1, %%lo(%a0), %%g1\n\tjmp\t%%o7+12\n\tadd\t%%sp, %%g1, %%sp";
- else
- return "sethi\t%%hi(%a0), %%g1\n\tjmp\t%%o7+12\n\tadd\t%%sp, %%g1, %%sp";
- }
- else
- {
- if ((actual_fsize & 0x3ff) != 0)
- return "sethi\t%%hi(%a0), %%g1\n\tor\t%%g1, %%lo(%a0), %%g1\n\tretl\n\tadd\t%%sp, %%g1, %%sp";
- else
- return "sethi\t%%hi(%a0), %%g1\n\tretl\n\tadd\t%%sp, %%g1, %%sp";
- }
- }
- else if (TARGET_V9)
- {
- if (delay)
- {
- epilogue_renumber (&SET_DEST (PATTERN (delay)), 0);
- epilogue_renumber (&SET_SRC (PATTERN (delay)), 0);
- }
- if (SKIP_CALLERS_UNIMP_P)
- return "return\t%%i7+12%#";
- else
- return "return\t%%i7+8%#";
- }
- else
- {
- if (delay)
- abort ();
- if (SKIP_CALLERS_UNIMP_P)
- return "jmp\t%%i7+12\n\trestore";
- else
- return "ret\n\trestore";
- }
-}
/* Leaf functions and non-leaf functions have different needs. */
@@ -5689,6 +6130,20 @@ registers_ok_for_ldd_peep (reg1, reg2)
ld [%o0 + 4], %o1
to
ldd [%o0], %o0
+ nor:
+ ld [%g3 + 4], %g3
+ ld [%g3], %g2
+ to
+ ldd [%g3], %g2
+
+ But, note that the transformation from:
+ ld [%g2 + 4], %g3
+ ld [%g2], %g2
+ to
+ ldd [%g2], %g2
+ is perfectly fine. Thus, the peephole2 patterns always pass us
+ the destination register of the first load, never the second one.
+
For stores we don't have a similar problem, so dependent_reg_rtx is
NULL_RTX. */
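/* Illustration, not part of the patch: the rule above, reduced to its
   essence.  ldd reads the base register once and then writes an even/odd
   register pair, so pairing is only safe when the destination of the first
   load in the stream is not also the shared base register -- otherwise the
   original second load would have used the overwritten value.  */

static int
ldd_dependency_ok (unsigned int first_dest_regno, unsigned int base_regno)
{
  return first_dest_regno != base_regno;
}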
@@ -5987,9 +6442,7 @@ print_operand (file, x, code)
case 'b':
{
/* Print a sign-extended character. */
- int i = INTVAL (x) & 0xff;
- if (i & 0x80)
- i |= 0xffffff00;
+ int i = trunc_int_for_mode (INTVAL (x), QImode);
fprintf (file, "%d", i);
return;
}
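/* Illustration, not part of the patch: trunc_int_for_mode (v, QImode)
   computes the same sign-extended low byte the old mask-and-or did.  */

static long
sign_extend_qi (long v)
{
  v &= 0xff;           /* keep the QImode bits */
  if (v & 0x80)        /* QImode sign bit set? */
    v |= ~0xffL;       /* extend it through the upper bits */
  return v;
}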
@@ -6260,38 +6713,42 @@ sparc_initialize_trampoline (tramp, fnaddr, cxt)
*/
#ifdef TRANSFER_FROM_TRAMPOLINE
emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "__enable_execute_stack"),
- 0, VOIDmode, 1, tramp, Pmode);
+ LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
#endif
- emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
- expand_binop (SImode, ior_optab,
- expand_shift (RSHIFT_EXPR, SImode, fnaddr,
- size_int (10), 0, 1),
- GEN_INT (0x03000000),
- NULL_RTX, 1, OPTAB_DIRECT));
-
- emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
- expand_binop (SImode, ior_optab,
- expand_shift (RSHIFT_EXPR, SImode, cxt,
- size_int (10), 0, 1),
- GEN_INT (0x05000000),
- NULL_RTX, 1, OPTAB_DIRECT));
-
- emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
- expand_binop (SImode, ior_optab,
- expand_and (fnaddr, GEN_INT (0x3ff), NULL_RTX),
- GEN_INT (0x81c06000),
- NULL_RTX, 1, OPTAB_DIRECT));
-
- emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
- expand_binop (SImode, ior_optab,
- expand_and (cxt, GEN_INT (0x3ff), NULL_RTX),
- GEN_INT (0x8410a000),
- NULL_RTX, 1, OPTAB_DIRECT));
+ emit_move_insn
+ (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
+ expand_binop (SImode, ior_optab,
+ expand_shift (RSHIFT_EXPR, SImode, fnaddr,
+ size_int (10), 0, 1),
+ GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
+ NULL_RTX, 1, OPTAB_DIRECT));
+
+ emit_move_insn
+ (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
+ expand_binop (SImode, ior_optab,
+ expand_shift (RSHIFT_EXPR, SImode, cxt,
+ size_int (10), 0, 1),
+ GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
+ NULL_RTX, 1, OPTAB_DIRECT));
+
+ emit_move_insn
+ (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
+ expand_binop (SImode, ior_optab,
+ expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
+ GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
+ NULL_RTX, 1, OPTAB_DIRECT));
+
+ emit_move_insn
+ (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
+ expand_binop (SImode, ior_optab,
+ expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
+ GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
+ NULL_RTX, 1, OPTAB_DIRECT));
- emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
/* On UltraSPARC a flush flushes an entire cache line. The trampoline is
aligned on a 16 byte boundary so one flush clears it all. */
+ emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
if (sparc_cpu != PROCESSOR_ULTRASPARC)
emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
plus_constant (tramp, 8)))));
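/* Illustration, not part of the patch: how the constants above assemble
   SPARC instructions.  sethi loads bits 31..10 of an immediate, so the
   first trampoline word is built as

       0x03000000 | (fnaddr >> 10)        sethi  %hi(fnaddr), %g1

   and the third word supplies the low 10 bits through the jmp immediate:

       0x81c06000 | (fnaddr & 0x3ff)      jmp    %g1 + %lo(fnaddr)

   (0x05000000 and 0x8410a000 do the same for cxt into %g2.)  The added
   trunc_int_for_mode calls only canonicalize these 32-bit constants on
   hosts where HOST_WIDE_INT is 64 bits; they don't change the encoding.  */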
@@ -6307,7 +6764,7 @@ sparc64_initialize_trampoline (tramp, fnaddr, cxt)
{
#ifdef TRANSFER_FROM_TRAMPOLINE
emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "__enable_execute_stack"),
- 0, VOIDmode, 1, tramp, Pmode);
+ LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
#endif
/*
@@ -6319,13 +6776,13 @@ sparc64_initialize_trampoline (tramp, fnaddr, cxt)
*/
emit_move_insn (gen_rtx_MEM (SImode, tramp),
- GEN_INT (0x83414000));
+ GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
- GEN_INT (0xca586018));
+ GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
- GEN_INT (0x81c14000));
+ GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
- GEN_INT (0xca586010));
+ GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
@@ -6421,12 +6878,12 @@ struct sparc_frame_info zero_frame_info;
/* Tell prologue and epilogue if register REGNO should be saved / restored. */
#define RETURN_ADDR_REGNUM 15
-#define FRAME_POINTER_MASK (1 << (FRAME_POINTER_REGNUM))
+#define HARD_FRAME_POINTER_MASK (1 << (HARD_FRAME_POINTER_REGNUM))
#define RETURN_ADDR_MASK (1 << (RETURN_ADDR_REGNUM))
#define MUST_SAVE_REGISTER(regno) \
- ((regs_ever_live[regno] && !call_used_regs[regno]) \
- || (regno == FRAME_POINTER_REGNUM && frame_pointer_needed) \
+ ((regs_ever_live[regno] && !call_used_regs[regno]) \
+ || (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed) \
|| (regno == RETURN_ADDR_REGNUM && regs_ever_live[RETURN_ADDR_REGNUM]))
/* Return the bytes needed to compute the frame pointer from the current
@@ -6699,7 +7156,7 @@ sparc_flat_function_prologue (file, size)
if (size > 0)
{
unsigned int reg_offset = current_frame_info.reg_offset;
- const char *const fp_str = reg_names[FRAME_POINTER_REGNUM];
+ const char *const fp_str = reg_names[HARD_FRAME_POINTER_REGNUM];
static const char *const t1_str = "%g1";
/* Things get a little tricky if local variables take up more than ~4096
@@ -6720,7 +7177,7 @@ sparc_flat_function_prologue (file, size)
{
fprintf (file, "\tadd\t%s, %d, %s\n",
sp_str, (int) -size, sp_str);
- if (gmask & FRAME_POINTER_MASK)
+ if (gmask & HARD_FRAME_POINTER_MASK)
{
fprintf (file, "\tst\t%s, [%s+%d]\n",
fp_str, sp_str, reg_offset);
@@ -6735,7 +7192,7 @@ sparc_flat_function_prologue (file, size)
fprintf (file, HOST_WIDE_INT_PRINT_DEC, size);
fprintf (file, ", %s\n\tsub\t%s, %s, %s\n",
t1_str, sp_str, t1_str, sp_str);
- if (gmask & FRAME_POINTER_MASK)
+ if (gmask & HARD_FRAME_POINTER_MASK)
{
fprintf (file, "\tst\t%s, [%s+%d]\n",
fp_str, sp_str, reg_offset);
@@ -6747,11 +7204,11 @@ sparc_flat_function_prologue (file, size)
if (dwarf2out_do_frame ())
{
char *l = dwarf2out_cfi_label ();
- if (gmask & FRAME_POINTER_MASK)
+ if (gmask & HARD_FRAME_POINTER_MASK)
{
- dwarf2out_reg_save (l, FRAME_POINTER_REGNUM,
+ dwarf2out_reg_save (l, HARD_FRAME_POINTER_REGNUM,
reg_offset - 4 - size);
- dwarf2out_def_cfa (l, FRAME_POINTER_REGNUM, 0);
+ dwarf2out_def_cfa (l, HARD_FRAME_POINTER_REGNUM, 0);
}
else
dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, size);
@@ -6765,7 +7222,7 @@ sparc_flat_function_prologue (file, size)
reg_offset += 4;
}
sparc_flat_save_restore (file, sp_str, reg_offset,
- gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
+ gmask & ~(HARD_FRAME_POINTER_MASK | RETURN_ADDR_MASK),
current_frame_info.fmask,
"st", "std", -size);
}
@@ -6782,7 +7239,7 @@ sparc_flat_function_prologue (file, size)
{
fprintf (file, "\tadd\t%s, %d, %s\n",
sp_str, (int) -size1, sp_str);
- if (gmask & FRAME_POINTER_MASK)
+ if (gmask & HARD_FRAME_POINTER_MASK)
{
fprintf (file, "\tst\t%s, [%s+%d]\n\tsub\t%s, %d, %s\t%s# set up frame pointer\n",
fp_str, sp_str, (int) offset, sp_str, (int) -size1,
@@ -6796,7 +7253,7 @@ sparc_flat_function_prologue (file, size)
fprintf (file, HOST_WIDE_INT_PRINT_DEC, size1);
fprintf (file, ", %s\n\tsub\t%s, %s, %s\n",
t1_str, sp_str, t1_str, sp_str);
- if (gmask & FRAME_POINTER_MASK)
+ if (gmask & HARD_FRAME_POINTER_MASK)
{
fprintf (file, "\tst\t%s, [%s+%d]\n\tadd\t%s, %s, %s\t%s# set up frame pointer\n",
fp_str, sp_str, (int) offset, sp_str, t1_str,
@@ -6807,11 +7264,11 @@ sparc_flat_function_prologue (file, size)
if (dwarf2out_do_frame ())
{
char *l = dwarf2out_cfi_label ();
- if (gmask & FRAME_POINTER_MASK)
+ if (gmask & HARD_FRAME_POINTER_MASK)
{
- dwarf2out_reg_save (l, FRAME_POINTER_REGNUM,
+ dwarf2out_reg_save (l, HARD_FRAME_POINTER_REGNUM,
offset - 4 - size1);
- dwarf2out_def_cfa (l, FRAME_POINTER_REGNUM, 0);
+ dwarf2out_def_cfa (l, HARD_FRAME_POINTER_REGNUM, 0);
}
else
dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, size1);
@@ -6827,7 +7284,7 @@ sparc_flat_function_prologue (file, size)
offset += 4;
}
sparc_flat_save_restore (file, sp_str, offset,
- gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
+ gmask & ~(HARD_FRAME_POINTER_MASK | RETURN_ADDR_MASK),
current_frame_info.fmask,
"st", "std", -size1);
fprintf (file, "\tset\t");
@@ -6835,7 +7292,7 @@ sparc_flat_function_prologue (file, size)
fprintf (file, ", %s\n\tsub\t%s, %s, %s\n",
t1_str, sp_str, t1_str, sp_str);
if (dwarf2out_do_frame ())
- if (! (gmask & FRAME_POINTER_MASK))
+ if (! (gmask & HARD_FRAME_POINTER_MASK))
dwarf2out_def_cfa ("", STACK_POINTER_REGNUM, size);
}
}
@@ -6884,7 +7341,7 @@ sparc_flat_function_epilogue (file, size)
unsigned HOST_WIDE_INT reg_offset = current_frame_info.reg_offset;
unsigned HOST_WIDE_INT size1;
const char *const sp_str = reg_names[STACK_POINTER_REGNUM];
- const char *const fp_str = reg_names[FRAME_POINTER_REGNUM];
+ const char *const fp_str = reg_names[HARD_FRAME_POINTER_REGNUM];
static const char *const t1_str = "%g1";
/* In the reload sequence, we don't need to fill the load delay
@@ -6930,7 +7387,7 @@ sparc_flat_function_epilogue (file, size)
/* We must restore the frame pointer and return address reg first
because they are treated specially by the prologue output code. */
- if (current_frame_info.gmask & FRAME_POINTER_MASK)
+ if (current_frame_info.gmask & HARD_FRAME_POINTER_MASK)
{
fprintf (file, "\tld\t[%s+%d], %s\n",
sp_str, (int) reg_offset, fp_str);
@@ -6945,7 +7402,7 @@ sparc_flat_function_epilogue (file, size)
/* Restore any remaining saved registers. */
sparc_flat_save_restore (file, sp_str, reg_offset,
- current_frame_info.gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
+ current_frame_info.gmask & ~(HARD_FRAME_POINTER_MASK | RETURN_ADDR_MASK),
current_frame_info.fmask,
"ld", "ldd", 0);
@@ -8397,60 +8854,24 @@ sparc_v8plus_shift (operands, insn, opcode)
else
return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
}
-
-
-/* Return 1 if DEST and SRC reference only global and in registers. */
-
-int
-sparc_return_peephole_ok (dest, src)
- rtx dest, src;
-{
- if (! TARGET_V9)
- return 0;
- if (current_function_uses_only_leaf_regs)
- return 0;
- if (GET_CODE (src) != CONST_INT
- && (GET_CODE (src) != REG || ! IN_OR_GLOBAL_P (src)))
- return 0;
- return IN_OR_GLOBAL_P (dest);
-}
-/* Output assembler code to FILE to increment profiler label # LABELNO
- for profiling a function entry.
-
- 32 bit sparc uses %g2 as the STATIC_CHAIN_REGNUM which gets clobbered
- during profiling so we need to save/restore it around the call to mcount.
- We're guaranteed that a save has just been done, and we use the space
- allocated for intreg/fpreg value passing. */
+/* Output rtl to increment the profiler label LABELNO
+ for profiling a function entry. */
void
-sparc_function_profiler (file, labelno)
- FILE *file;
+sparc_profile_hook (labelno)
int labelno;
{
char buf[32];
- ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
-
- if (! TARGET_ARCH64)
- fputs ("\tst\t%g2, [%fp-4]\n", file);
-
- fputs ("\tsethi\t%hi(", file);
- assemble_name (file, buf);
- fputs ("), %o0\n", file);
+ rtx lab, fun;
- fputs ("\tcall\t", file);
- assemble_name (file, MCOUNT_FUNCTION);
- putc ('\n', file);
-
- fputs ("\t or\t%o0, %lo(", file);
- assemble_name (file, buf);
- fputs ("), %o0\n", file);
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
+ lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+ fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
- if (! TARGET_ARCH64)
- fputs ("\tld\t[%fp-4], %g2\n", file);
+ emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
}
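/* Illustration, not part of the patch: with MCOUNT_FUNCTION "*mcount",
   the hook now behaves like the C call below, emitted as rtl at function
   entry.  Going through emit_library_call means the PIC register and call
   clobbers are handled by the normal call machinery, which is what lets
   profiling coexist with -fpic (the old OVERRIDE_OPTIONS conflict is
   removed in sparc.h further down).  Names here are made up.  */

extern void mcount (void *counter);
extern char LP7[];              /* the LP<labelno> counter label, as a symbol */

static void
profiled_entry_equivalent (void)
{
  mcount (LP7);
}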
-
-
+
/* Mark ARG, which is really a struct ultrasparc_pipeline_state *, for
GC. */
@@ -8474,7 +8895,6 @@ sparc_add_gc_roots ()
{
ggc_add_rtx_root (&sparc_compare_op0, 1);
ggc_add_rtx_root (&sparc_compare_op1, 1);
- ggc_add_rtx_root (&leaf_label, 1);
ggc_add_rtx_root (&global_offset_table, 1);
ggc_add_rtx_root (&get_pc_symbol, 1);
ggc_add_rtx_root (&sparc_addr_diff_list, 1);
@@ -8511,3 +8931,65 @@ sparc_elf_asm_named_section (name, flags)
fputc ('\n', asm_out_file);
}
#endif /* OBJECT_FORMAT_ELF */
+
+int
+sparc_extra_constraint_check (op, c, strict)
+ rtx op;
+ int c;
+ int strict;
+{
+ int reload_ok_mem;
+
+ if (TARGET_ARCH64
+ && (c == 'T' || c == 'U'))
+ return 0;
+
+ switch (c)
+ {
+ case 'Q':
+ return fp_sethi_p (op);
+
+ case 'R':
+ return fp_mov_p (op);
+
+ case 'S':
+ return fp_high_losum_p (op);
+
+ case 'U':
+ if (! strict
+ || (GET_CODE (op) == REG
+ && (REGNO (op) < FIRST_PSEUDO_REGISTER
+ || reg_renumber[REGNO (op)] >= 0)))
+ return register_ok_for_ldd (op);
+
+ return 0;
+
+ case 'W':
+ case 'T':
+ break;
+
+ default:
+ return 0;
+ }
+
+ /* Our memory extra constraints have to emulate the
+ behavior of 'm' and 'o' in order for reload to work
+ correctly. */
+ if (GET_CODE (op) == MEM)
+ {
+ reload_ok_mem = 0;
+ if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
+ && (! strict
+ || strict_memory_address_p (Pmode, XEXP (op, 0))))
+ reload_ok_mem = 1;
+ }
+ else
+ {
+ reload_ok_mem = (reload_in_progress
+ && GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER
+ && reg_renumber [REGNO (op)] < 0);
+ }
+
+ return reload_ok_mem;
+}
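/* Usage note, not part of the patch: sparc.h (below in this diff) points
   both EXTRA_CONSTRAINT variants at this one checker,

       #define EXTRA_CONSTRAINT(OP, C)  sparc_extra_constraint_check (OP, C, 0)

   with 1 for the REG_OK_STRICT build.  'T'/'W' (aligned memory) and 'U'
   (even register pair for ldd/std) thus share the reload-friendly logic
   here, including accepting not-yet-allocated pseudos while
   reload_in_progress so the memory constraints emulate 'm' and 'o'.  */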
diff --git a/contrib/gcc/config/sparc/sparc.h b/contrib/gcc/config/sparc/sparc.h
index 608b393..3f21578 100644
--- a/contrib/gcc/config/sparc/sparc.h
+++ b/contrib/gcc/config/sparc/sparc.h
@@ -358,6 +358,11 @@ Unrecognized value in TARGET_CPU_DEFAULT.
SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS
+
+/* Because libgcc can generate references back to libc (via .umul etc.) we have
+ to list libc again after the second libgcc. */
+#define LINK_GCC_C_SEQUENCE_SPEC "%G %L %G %L"
+
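/* Illustration, not part of the patch: %G and %L normally expand to the
   libgcc and libc specs (roughly -lgcc and -lc), so the spec above links

       ... <objects> -lgcc -lc -lgcc -lc ...

   instead of the default -lgcc -lc -lgcc.  The trailing -lc resolves libc
   symbols pulled in by the second libgcc pass (.umul and friends), which
   matters with static archives, where link order is significant.  */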
#ifdef SPARC_BI_ARCH
#define NO_BUILTIN_PTRDIFF_TYPE
@@ -369,34 +374,11 @@ Unrecognized value in TARGET_CPU_DEFAULT.
/* ??? This should be 32 bits for v9 but what can we do? */
#define WCHAR_TYPE "short unsigned int"
#define WCHAR_TYPE_SIZE 16
-#define MAX_WCHAR_TYPE_SIZE 16
/* Show we can debug even without a frame pointer. */
#define CAN_DEBUG_WITHOUT_FP
-/* To make profiling work with -f{pic,PIC}, we need to emit the profiling
- code into the rtl. Also, if we are profiling, we cannot eliminate
- the frame pointer (because the return address will get smashed). */
-
-#define OVERRIDE_OPTIONS \
- do { \
- if (profile_flag || profile_arc_flag) \
- { \
- if (flag_pic) \
- { \
- const char *const pic_string = (flag_pic == 1) ? "-fpic" : "-fPIC";\
- warning ("%s and profiling conflict: disabling %s", \
- pic_string, pic_string); \
- flag_pic = 0; \
- } \
- flag_omit_frame_pointer = 0; \
- } \
- sparc_override_options (); \
- SUBTARGET_OVERRIDE_OPTIONS; \
- } while (0)
-
-/* This is meant to be redefined in the host dependent files. */
-#define SUBTARGET_OVERRIDE_OPTIONS
+#define OVERRIDE_OPTIONS sparc_override_options ()
/* Generate DBX debugging information. */
@@ -410,11 +392,6 @@ extern int target_flags;
#define MASK_FPU 1
#define TARGET_FPU (target_flags & MASK_FPU)
-/* Nonzero if we should use function_epilogue(). Otherwise, we
- use fast return insns, but lose some generality. */
-#define MASK_EPILOGUE 2
-#define TARGET_EPILOGUE (target_flags & MASK_EPILOGUE)
-
/* Nonzero if we should assume that double pointers might be unaligned.
This can happen when linking gcc compiled code with other compilers,
because the ABI only guarantees 4 byte alignment. */
@@ -558,10 +535,6 @@ extern int target_flags;
{"soft-float", -MASK_FPU, \
N_("Do not use hardware fp") }, \
{"soft-float", MASK_FPU_SET, NULL }, \
- {"epilogue", MASK_EPILOGUE, \
- N_("Use function_epilogue()") }, \
- {"no-epilogue", -MASK_EPILOGUE, \
- N_("Do not use function_epilogue()") }, \
{"unaligned-doubles", MASK_UNALIGNED_DOUBLES, \
N_("Assume possible double misalignment") }, \
{"no-unaligned-doubles", -MASK_UNALIGNED_DOUBLES, \
@@ -630,7 +603,7 @@ extern int target_flags;
/* MASK_APP_REGS must always be the default because that's what
FIXED_REGISTERS is set to and -ffixed- is processed before
CONDITIONAL_REGISTER_USAGE is called (where we process -mno-app-regs). */
-#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU)
+#define TARGET_DEFAULT (MASK_APP_REGS + MASK_FPU)
/* This is meant to be redefined in target specific files. */
#define SUBTARGET_SWITCHES
@@ -722,7 +695,11 @@ extern struct sparc_cpu_select sparc_select[];
/* Width of a word, in units (bytes). */
#define UNITS_PER_WORD (TARGET_ARCH64 ? 8 : 4)
+#ifdef IN_LIBGCC2
+#define MIN_UNITS_PER_WORD UNITS_PER_WORD
+#else
#define MIN_UNITS_PER_WORD 4
+#endif
/* Now define the sizes of the C data types. */
@@ -894,9 +871,10 @@ if (TARGET_ARCH64 \
accessible. We still account for them to simplify register computations
(eg: in CLASS_MAX_NREGS). There are also 4 fp condition code registers, so
32+32+32+4 == 100.
- Register 100 is used as the integer condition code register. */
+ Register 100 is used as the integer condition code register.
+ Register 101 is used as the soft frame pointer register. */
-#define FIRST_PSEUDO_REGISTER 101
+#define FIRST_PSEUDO_REGISTER 102
#define SPARC_FIRST_FP_REG 32
/* Additional V9 fp regs. */
@@ -962,7 +940,7 @@ if (TARGET_ARCH64 \
0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, \
\
- 0, 0, 0, 0, 0}
+ 0, 0, 0, 0, 0, 1}
/* 1 for registers not available across function calls.
These must include the FIXED_REGISTERS and also any
@@ -987,7 +965,7 @@ if (TARGET_ARCH64 \
1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1, 1, 1, 1, \
\
- 1, 1, 1, 1, 1}
+ 1, 1, 1, 1, 1, 1}
/* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
they won't be allocated. */
@@ -995,7 +973,7 @@ if (TARGET_ARCH64 \
#define CONDITIONAL_REGISTER_USAGE \
do \
{ \
- if (flag_pic) \
+ if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) \
{ \
fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
@@ -1039,12 +1017,16 @@ do \
fixed_regs[4] = 0; \
if (TARGET_FLAT) \
{ \
+ int regno; \
/* Let the compiler believe the frame pointer is still \
%fp, but output it as %i7. */ \
fixed_regs[31] = 1; \
- reg_names[FRAME_POINTER_REGNUM] = "%i7"; \
+ reg_names[HARD_FRAME_POINTER_REGNUM] = "%i7"; \
/* Disable leaf functions */ \
memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER); \
+ /* Make LEAF_REG_REMAP a noop. */ \
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) \
+ leaf_reg_remap [regno] = regno; \
} \
} \
while (0)
@@ -1062,9 +1044,9 @@ while (0)
#define HARD_REGNO_NREGS(REGNO, MODE) \
(TARGET_ARCH64 \
- ? ((REGNO) < 32 \
- ? (GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD \
- : (GET_MODE_SIZE (MODE) + 3) / 4) \
+ ? ((REGNO) < 32 || (REGNO) == FRAME_POINTER_REGNUM \
+ ? (GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD \
+ : (GET_MODE_SIZE (MODE) + 3) / 4) \
: ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
/* Due to the ARCH64 discrepancy above we must override this next
@@ -1107,27 +1089,32 @@ extern int sparc_mode_class[];
/* Register to use for pushing function arguments. */
#define STACK_POINTER_REGNUM 14
+/* The stack bias (the amount by which the hardware register is offset). */
+#define SPARC_STACK_BIAS ((TARGET_ARCH64 && TARGET_STACK_BIAS) ? 2047 : 0)
+
/* Actual top-of-stack address is 92/176 greater than the contents of the
stack pointer register for !v9/v9. That is:
- !v9: 64 bytes for the in and local registers, 4 bytes for structure return
address, and 6*4 bytes for the 6 register parameters.
- v9: 128 bytes for the in and local registers + 6*8 bytes for the integer
parameter regs. */
-#define STACK_POINTER_OFFSET FIRST_PARM_OFFSET(0)
-
-/* The stack bias (amount by which the hardware register is offset by). */
-#define SPARC_STACK_BIAS ((TARGET_ARCH64 && TARGET_STACK_BIAS) ? 2047 : 0)
-
-/* Is stack biased? */
-#define STACK_BIAS SPARC_STACK_BIAS
+#define STACK_POINTER_OFFSET (FIRST_PARM_OFFSET(0) + SPARC_STACK_BIAS)
/* Base register for access to local variables of the function. */
-#define FRAME_POINTER_REGNUM 30
-
-#if 0
-/* Register that is used for the return address for the flat model. */
-#define RETURN_ADDR_REGNUM 15
-#endif
+#define HARD_FRAME_POINTER_REGNUM 30
+
+/* The soft frame pointer does not have the stack bias applied. */
+#define FRAME_POINTER_REGNUM 101
+
+/* Given the stack bias, the stack pointer isn't actually aligned. */
+#define INIT_EXPANDERS \
+ do { \
+ if (cfun && cfun->emit->regno_pointer_align && SPARC_STACK_BIAS) \
+ { \
+ REGNO_POINTER_ALIGN (STACK_POINTER_REGNUM) = BITS_PER_UNIT; \
+ REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = BITS_PER_UNIT; \
+ } \
+ } while (0)
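/* Worked example, not part of the patch: with TARGET_ARCH64 &&
   TARGET_STACK_BIAS the registers hold "address - 2047", so

       STACK_POINTER_OFFSET = FIRST_PARM_OFFSET (0) + SPARC_STACK_BIAS
                            = 16 * 8 + 2047 = 2175

   i.e. the real top of stack is %sp + 2175.  And since an aligned address
   minus 2047 is odd, INIT_EXPANDERS above can only promise BITS_PER_UNIT
   alignment for %sp and the hard frame pointer themselves; the soft frame
   pointer (reg 101) stays unbiased and fully aligned.  */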
/* Value should be nonzero if functions must have frame pointers.
Zero means the frame pointer need not be set up (and parms
@@ -1138,21 +1125,13 @@ extern int sparc_mode_class[];
Being a non-leaf function does not mean a frame pointer is needed in the
flat window model. However, the debugger won't be able to backtrace through
us with out it. */
-#define FRAME_POINTER_REQUIRED \
- (TARGET_FLAT ? (current_function_calls_alloca || current_function_varargs \
- || !leaf_function_p ()) \
+#define FRAME_POINTER_REQUIRED \
+ (TARGET_FLAT \
+ ? (current_function_calls_alloca \
+ || current_function_varargs \
+ || !leaf_function_p ()) \
: ! (leaf_function_p () && only_leaf_regs_used ()))
-/* C statement to store the difference between the frame pointer
- and the stack pointer values immediately after the function prologue.
-
- Note, we always pretend that this is a leaf function because if
- it's not, there's no point in trying to eliminate the
- frame pointer. If it is a leaf function, we guessed right! */
-#define INITIAL_FRAME_POINTER_OFFSET(VAR) \
- ((VAR) = (TARGET_FLAT ? sparc_flat_compute_frame_size (get_frame_size ()) \
- : compute_frame_size (get_frame_size (), 1)))
-
/* Base register for access to arguments of the function. */
#define ARG_POINTER_REGNUM FRAME_POINTER_REGNUM
@@ -1163,7 +1142,7 @@ extern int sparc_mode_class[];
/* Register which holds offset table for position-independent
data references. */
-#define PIC_OFFSET_TABLE_REGNUM 23
+#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? 23 : INVALID_REGNUM)
/* Pick a default value we can notice from override_options:
!v9: Default is on.
@@ -1275,10 +1254,16 @@ enum reg_class { NO_REGS, FPCC_REGS, I64_REGS, GENERAL_REGS, FP_REGS,
This is an initializer for a vector of HARD_REG_SET
of length N_REG_CLASSES. */
-#define REG_CLASS_CONTENTS \
- {{0, 0, 0, 0}, {0, 0, 0, 0xf}, {0xffff, 0, 0, 0}, \
- {-1, 0, 0, 0}, {0, -1, 0, 0}, {0, -1, -1, 0}, \
- {-1, -1, 0, 0}, {-1, -1, -1, 0}, {-1, -1, -1, 0x1f}}
+#define REG_CLASS_CONTENTS \
+ {{0, 0, 0, 0}, /* NO_REGS */ \
+ {0, 0, 0, 0xf}, /* FPCC_REGS */ \
+ {0xffff, 0, 0, 0}, /* I64_REGS */ \
+ {-1, 0, 0, 0x20}, /* GENERAL_REGS */ \
+ {0, -1, 0, 0}, /* FP_REGS */ \
+ {0, -1, -1, 0}, /* EXTRA_FP_REGS */ \
+ {-1, -1, 0, 0x20}, /* GENERAL_OR_FP_REGS */ \
+ {-1, -1, -1, 0x20}, /* GENERAL_OR_EXTRA_FP_REGS */ \
+ {-1, -1, -1, 0x3f}} /* ALL_REGS */
/* The same information, inverted:
Return the class number of the smallest class containing
@@ -1310,7 +1295,7 @@ extern enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
88, 89, 90, 91, 92, 93, 94, 95, /* %f56-%f63 */ \
32, 33, /* %f0,%f1 */ \
96, 97, 98, 99, 100, /* %fcc0-3, %icc */ \
- 1, 4, 5, 6, 7, 0, 14, 30}
+ 1, 4, 5, 6, 7, 0, 14, 30, 101}
/* This is the order in which to allocate registers for
leaf functions. If all registers can fit in the "gi" registers,
@@ -1331,14 +1316,14 @@ extern enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
88, 89, 90, 91, 92, 93, 94, 95, \
32, 33, \
96, 97, 98, 99, 100, \
- 0, 14, 30, 31}
+ 0, 14, 30, 31, 101}
#define ORDER_REGS_FOR_LOCAL_ALLOC order_regs_for_local_alloc ()
extern char sparc_leaf_regs[];
#define LEAF_REGISTERS sparc_leaf_regs
-extern const char leaf_reg_remap[];
+extern char leaf_reg_remap[];
#define LEAF_REG_REMAP(REGNO) (leaf_reg_remap[REGNO])
/* The class value for index registers, and the one for base regs. */
@@ -1379,7 +1364,8 @@ extern const char leaf_reg_remap[];
`J' is used for the range which is just zero (since that is R0).
`K' is used for constants which can be loaded with a single sethi insn.
`L' is used for the range of constants supported by the movcc insns.
- `M' is used for the range of constants supported by the movrcc insns. */
+ `M' is used for the range of constants supported by the movrcc insns.
+ `N' is like K, but for constants wider than 32 bits. */
#define SPARC_SIMM10_P(X) ((unsigned HOST_WIDE_INT) (X) + 0x200 < 0x400)
#define SPARC_SIMM11_P(X) ((unsigned HOST_WIDE_INT) (X) + 0x400 < 0x800)
@@ -1388,17 +1374,21 @@ extern const char leaf_reg_remap[];
SMALL_INT is used throughout the port so we continue to use it. */
#define SMALL_INT(X) (SPARC_SIMM13_P (INTVAL (X)))
/* 13 bit immediate, considering only the low 32 bits */
-#define SMALL_INT32(X) (SPARC_SIMM13_P ((int)INTVAL (X) & 0xffffffff))
+#define SMALL_INT32(X) (SPARC_SIMM13_P (trunc_int_for_mode \
+ (INTVAL (X), SImode)))
#define SPARC_SETHI_P(X) \
-(((unsigned HOST_WIDE_INT) (X) & \
- (TARGET_ARCH64 ? ~(unsigned HOST_WIDE_INT) 0xfffffc00 : 0x3ff)) == 0)
+ (((unsigned HOST_WIDE_INT) (X) \
+ & ((unsigned HOST_WIDE_INT) 0x3ff - GET_MODE_MASK (SImode) - 1)) == 0)
+#define SPARC_SETHI32_P(X) \
+ (SPARC_SETHI_P ((unsigned HOST_WIDE_INT) (X) & GET_MODE_MASK (SImode)))
#define CONST_OK_FOR_LETTER_P(VALUE, C) \
((C) == 'I' ? SPARC_SIMM13_P (VALUE) \
: (C) == 'J' ? (VALUE) == 0 \
- : (C) == 'K' ? SPARC_SETHI_P (VALUE) \
+ : (C) == 'K' ? SPARC_SETHI32_P (VALUE) \
: (C) == 'L' ? SPARC_SIMM11_P (VALUE) \
: (C) == 'M' ? SPARC_SIMM10_P (VALUE) \
+ : (C) == 'N' ? SPARC_SETHI_P (VALUE) \
: 0)
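/* Worked examples, not part of the patch, for the 'K'/'N' split above:

       0x0000000012345000   'K' and 'N': low 10 bits clear, fits in 32 bits
       0xfffffffff8000000   'K' only: its low 32 bits form a sethi pattern,
                            but the full 64-bit constant needs more
       0x0000000000000123   neither: low 10 bits are set

   'K' keeps matching what a 32-bit sethi can produce once the value is
   truncated to SImode; 'N' is for arch64, where the whole 64-bit value
   must itself be a sethi immediate.  */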
/* Similar, but for floating constants, and defining letters G and H.
@@ -1440,7 +1430,10 @@ extern const char leaf_reg_remap[];
We need a temporary when loading/storing a HImode/QImode value
between memory and the FPU registers. This can happen when combine puts
- a paradoxical subreg in a float/fix conversion insn. */
+ a paradoxical subreg in a float/fix conversion insn.
+
+ We need a temporary when loading/storing a DFmode value between
+ unaligned memory and the upper FPU registers. */
#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, IN) \
((FP_REG_CLASS_P (CLASS) \
@@ -1449,28 +1442,36 @@ extern const char leaf_reg_remap[];
|| ((GET_CODE (IN) == REG || GET_CODE (IN) == SUBREG) \
&& true_regnum (IN) == -1))) \
? GENERAL_REGS \
- : (((TARGET_CM_MEDANY \
- && symbolic_operand ((IN), (MODE))) \
- || (TARGET_CM_EMBMEDANY \
- && text_segment_operand ((IN), (MODE)))) \
- && !flag_pic) \
- ? GENERAL_REGS \
- : NO_REGS)
+ : ((CLASS) == EXTRA_FP_REGS && (MODE) == DFmode \
+ && GET_CODE (IN) == MEM && TARGET_ARCH32 \
+ && ! mem_min_alignment ((IN), 8)) \
+ ? FP_REGS \
+ : (((TARGET_CM_MEDANY \
+ && symbolic_operand ((IN), (MODE))) \
+ || (TARGET_CM_EMBMEDANY \
+ && text_segment_operand ((IN), (MODE)))) \
+ && !flag_pic) \
+ ? GENERAL_REGS \
+ : NO_REGS)
#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, IN) \
- ((FP_REG_CLASS_P (CLASS) \
+ ((FP_REG_CLASS_P (CLASS) \
&& ((MODE) == HImode || (MODE) == QImode) \
&& (GET_CODE (IN) == MEM \
|| ((GET_CODE (IN) == REG || GET_CODE (IN) == SUBREG) \
&& true_regnum (IN) == -1))) \
- ? GENERAL_REGS \
- : (((TARGET_CM_MEDANY \
- && symbolic_operand ((IN), (MODE))) \
- || (TARGET_CM_EMBMEDANY \
- && text_segment_operand ((IN), (MODE)))) \
- && !flag_pic) \
- ? GENERAL_REGS \
- : NO_REGS)
+ ? GENERAL_REGS \
+ : ((CLASS) == EXTRA_FP_REGS && (MODE) == DFmode \
+ && GET_CODE (IN) == MEM && TARGET_ARCH32 \
+ && ! mem_min_alignment ((IN), 8)) \
+ ? FP_REGS \
+ : (((TARGET_CM_MEDANY \
+ && symbolic_operand ((IN), (MODE))) \
+ || (TARGET_CM_EMBMEDANY \
+ && text_segment_operand ((IN), (MODE)))) \
+ && !flag_pic) \
+ ? GENERAL_REGS \
+ : NO_REGS)
/* On SPARC it is not possible to directly move data between
GENERAL_REGS and FP_REGS. */
@@ -1535,7 +1536,7 @@ extern const char leaf_reg_remap[];
of the first local allocated. */
/* This allows space for one TFmode floating point value. */
#define STARTING_FRAME_OFFSET \
- (TARGET_ARCH64 ? (SPARC_STACK_BIAS - 16) \
+ (TARGET_ARCH64 ? -16 \
: (-SPARC_STACK_ALIGN (LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT)))
/* If we generate an insn to push BYTES bytes,
@@ -1548,14 +1549,12 @@ extern const char leaf_reg_remap[];
even if this function isn't going to use it.
v9: This is 128 for the ins and locals. */
#define FIRST_PARM_OFFSET(FNDECL) \
- (TARGET_ARCH64 ? (SPARC_STACK_BIAS + 16 * UNITS_PER_WORD) \
- : (STRUCT_VALUE_OFFSET + UNITS_PER_WORD))
+ (TARGET_ARCH64 ? 16 * UNITS_PER_WORD : STRUCT_VALUE_OFFSET + UNITS_PER_WORD)
/* Offset from the argument pointer register value to the CFA.
This is different from FIRST_PARM_OFFSET because the register window
comes between the CFA and the arguments. */
-
-#define ARG_POINTER_CFA_OFFSET(FNDECL) SPARC_STACK_BIAS
+#define ARG_POINTER_CFA_OFFSET(FNDECL) 0
/* When a parameter is passed in a register, stack space is still
allocated for it.
@@ -1568,6 +1567,38 @@ extern const char leaf_reg_remap[];
all 6 slots even for v9. */
#define REG_PARM_STACK_SPACE(DECL) (6 * UNITS_PER_WORD)
+/* Definitions for register elimination. */
+/* ??? In TARGET_FLAT mode we needn't have a hard frame pointer. */
+
+#define ELIMINABLE_REGS \
+ {{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM} }
+
+/* The way this is structured, we can't eliminate SFP in favor of SP
+   if the frame pointer is required: we want to use the SFP->HFP elimination
+   in that case.  But the test in update_eliminables doesn't know that we
+   assume below that only the former elimination is ever performed.  */
+#define CAN_ELIMINATE(FROM, TO) \
+ ((TO) == HARD_FRAME_POINTER_REGNUM || !FRAME_POINTER_REQUIRED)
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ do { \
+ (OFFSET) = 0; \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ { \
+ /* Note, we always pretend that this is a leaf function \
+ because if it's not, there's no point in trying to \
+ eliminate the frame pointer. If it is a leaf \
+ function, we guessed right! */ \
+ if (TARGET_FLAT) \
+ (OFFSET) = \
+ sparc_flat_compute_frame_size (get_frame_size ()); \
+ else \
+ (OFFSET) = compute_frame_size (get_frame_size (), 1); \
+ } \
+ (OFFSET) += SPARC_STACK_BIAS; \
+ } while (0)
+
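/* Illustration, not part of the patch: the soft frame pointer (reg 101)
   is always eliminated, one of two ways:

       SFP -> SP    offset = frame size + SPARC_STACK_BIAS
                    (no hard frame pointer needed)
       SFP -> HFP   offset = SPARC_STACK_BIAS
                    (FRAME_POINTER_REQUIRED holds)

   which is why the macro above adds the stack bias unconditionally and
   the computed frame size only when eliminating to the stack pointer.  */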
/* Keep the stack pointer constant throughout the function.
This is both an optimization and a necessity: longjmp
doesn't behave itself when the stack pointer moves within
@@ -1828,14 +1859,13 @@ do { \
#endif
-/* Output assembler code to FILE to increment profiler label # LABELNO
- for profiling a function entry. */
+/* Emit rtl for profiling. */
+#define PROFILE_HOOK(LABEL) sparc_profile_hook (LABEL)
-#define FUNCTION_PROFILER(FILE, LABELNO) \
- sparc_function_profiler(FILE, LABELNO)
+/* All the work done in PROFILE_HOOK, but still required. */
+#define FUNCTION_PROFILER(FILE, LABELNO) do { } while (0)
/* Set the name of the mcount function for the system. */
-
#define MCOUNT_FUNCTION "*mcount"
/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
@@ -1899,8 +1929,25 @@ do { \
#define STRICT_ARGUMENT_NAMING TARGET_V9
/* We do not allow sibling calls if -mflat, nor
- we do not allow indirect calls to be optimized into sibling calls. */
-#define FUNCTION_OK_FOR_SIBCALL(DECL) (DECL && ! TARGET_FLAT)
+ do we allow indirect calls to be optimized into sibling calls.
+
+ Also, on sparc 32-bit we cannot emit a sibling call when the
+ current function returns a structure. This is because the "unimp
+ after call" convention would cause the callee to return to the
+ wrong place. The generic code already disallows cases where the
+ function being called returns a structure.
+
+ It may seem strange how this last case could occur. Usually there
+ is code after the call which jumps to epilogue code which dumps the
+ return value into the struct return area. That ought to invalidate
+ the sibling call, right? Well, in the C++ case we can end up passing
+ the pointer to the struct return area to a constructor (which returns
+ void) and then nothing else happens. Such a sibling call would look
+ valid without the added check here. */
+#define FUNCTION_OK_FOR_SIBCALL(DECL) \
+ (DECL \
+ && ! TARGET_FLAT \
+ && (TARGET_ARCH64 || ! current_function_returns_struct))
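/* Illustration, not part of the patch: the C++ failure mode above,
   sketched in C.  init () stands in for a constructor returning void.  */

struct S { int x[4]; };
extern void init (struct S *);

struct S
make (void)
{
  struct S s;
  init (&s);   /* If the front end constructs the result directly in the
                  caller's return slot, this call is the last thing make()
                  does and looks like a sibcall candidate; but with the
                  32-bit "unimp after call" struct-return convention the
                  callee would then return past the wrong unimp word.  */
  return s;
}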
/* Generate RTL to flush the register windows so as to make arbitrary frames
available. */
@@ -1933,7 +1980,8 @@ do { \
? gen_rtx_REG (Pmode, 31) \
: gen_rtx_MEM (Pmode, \
memory_address (Pmode, plus_constant (frame, \
- 15 * UNITS_PER_WORD))))
+ 15 * UNITS_PER_WORD \
+ + SPARC_STACK_BIAS))))
/* Before the prologue, the return address is %o7 + 8. OK, sometimes it's
+12, but always using +8 is close enough for frame unwind purposes.
@@ -1960,14 +2008,27 @@ do { \
If assembler and linker properly support .uaword %r_disp32(foo),
then use PC relative 32-bit relocations instead of absolute relocs
for shared libraries. On sparc64, use pc relative 32-bit relocs even
- for binaries, to save memory. */
+ for binaries, to save memory.
+
+ binutils 2.12 would emit an R_SPARC_DISP32 dynamic relocation if the
+ symbol that %r_disp32() references was not local, but .hidden. In that
+ case, we have to use DW_EH_PE_absptr for the pic personality. */
#ifdef HAVE_AS_SPARC_UA_PCREL
+#ifdef HAVE_AS_SPARC_UA_PCREL_HIDDEN
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
(flag_pic \
? (GLOBAL ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4\
: ((TARGET_ARCH64 && ! GLOBAL) \
? (DW_EH_PE_pcrel | DW_EH_PE_sdata4) \
: DW_EH_PE_absptr))
+#else
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
+ (flag_pic \
+ ? (GLOBAL ? DW_EH_PE_absptr : (DW_EH_PE_pcrel | DW_EH_PE_sdata4)) \
+ : ((TARGET_ARCH64 && ! GLOBAL) \
+ ? (DW_EH_PE_pcrel | DW_EH_PE_sdata4) \
+ : DW_EH_PE_absptr))
+#endif
/* Emit a PC-relative relocation. */
#define ASM_OUTPUT_DWARF_PCREL(FILE, SIZE, LABEL) \
@@ -1996,9 +2057,12 @@ do { \
has been allocated, which happens in local-alloc.c. */
#define REGNO_OK_FOR_INDEX_P(REGNO) \
-((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < (unsigned)32)
-#define REGNO_OK_FOR_BASE_P(REGNO) \
-((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < (unsigned)32)
+((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < (unsigned)32 \
+ || (REGNO) == FRAME_POINTER_REGNUM \
+ || reg_renumber[REGNO] == FRAME_POINTER_REGNUM)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) REGNO_OK_FOR_INDEX_P (REGNO)
+
#define REGNO_OK_FOR_FP_P(REGNO) \
(((unsigned) (REGNO) - 32 < (TARGET_V9 ? (unsigned)64 : (unsigned)32)) \
|| ((unsigned) reg_renumber[REGNO] - 32 < (TARGET_V9 ? (unsigned)64 : (unsigned)32)))
@@ -2079,34 +2143,32 @@ do { \
be at least 8 bytes.
`U' handles all pseudo registers or a hard even numbered
- integer register, needed for ldd/std instructions. */
+ integer register, needed for ldd/std instructions.
-#define EXTRA_CONSTRAINT_BASE(OP, C) \
- ((C) == 'Q' ? fp_sethi_p(OP) \
- : (C) == 'R' ? fp_mov_p(OP) \
- : (C) == 'S' ? fp_high_losum_p(OP) \
- : 0)
+ 'W' handles the memory operand when moving operands in/out
+ of 'e' constraint floating point registers. */
#ifndef REG_OK_STRICT
/* Nonzero if X is a hard reg that can be used as an index
or if it is a pseudo reg. */
#define REG_OK_FOR_INDEX_P(X) \
- (((unsigned) REGNO (X)) - 32 >= (FIRST_PSEUDO_REGISTER - 32))
+ (REGNO (X) < 32 \
+ || REGNO (X) == FRAME_POINTER_REGNUM \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
/* Nonzero if X is a hard reg that can be used as a base reg
or if it is a pseudo reg. */
-#define REG_OK_FOR_BASE_P(X) \
- (((unsigned) REGNO (X)) - 32 >= (FIRST_PSEUDO_REGISTER - 32))
+#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_INDEX_P (X)
+
+/* 'T', 'U' are for aligned memory loads which aren't needed for arch64.
+ 'W' is like 'T' but is assumed true on arch64.
-/* 'T', 'U' are for aligned memory loads which aren't needed for arch64. */
+ Remember to accept pseudo-registers for memory constraints if reload is
+ in progress. */
-#define EXTRA_CONSTRAINT(OP, C) \
- (EXTRA_CONSTRAINT_BASE(OP, C) \
- || ((! TARGET_ARCH64 && (C) == 'T') \
- ? (mem_min_alignment (OP, 8)) \
- : ((! TARGET_ARCH64 && (C) == 'U') \
- ? (register_ok_for_ldd (OP)) \
- : 0)))
+#define EXTRA_CONSTRAINT(OP, C) \
+ sparc_extra_constraint_check(OP, C, 0)
#else
@@ -2115,16 +2177,8 @@ do { \
/* Nonzero if X is a hard reg that can be used as a base reg. */
#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
-#define EXTRA_CONSTRAINT(OP, C) \
- (EXTRA_CONSTRAINT_BASE(OP, C) \
- || ((! TARGET_ARCH64 && (C) == 'T') \
- ? mem_min_alignment (OP, 8) && strict_memory_address_p (Pmode, XEXP (OP, 0)) \
- : ((! TARGET_ARCH64 && (C) == 'U') \
- ? (GET_CODE (OP) == REG \
- && (REGNO (OP) < FIRST_PSEUDO_REGISTER \
- || reg_renumber[REGNO (OP)] >= 0) \
- && register_ok_for_ldd (OP)) \
- : 0)))
+#define EXTRA_CONSTRAINT(OP, C) \
+ sparc_extra_constraint_check(OP, C, 1)
#endif
@@ -2326,12 +2380,13 @@ do { \
/* Decompose SImode constants into hi+lo_sum. We do have to \
rerecognize what we produce, so be careful. */ \
if (CONSTANT_P (X) \
- && (MODE != TFmode || TARGET_V9) \
+ && (MODE != TFmode || TARGET_ARCH64) \
&& GET_MODE (X) == SImode \
&& GET_CODE (X) != LO_SUM && GET_CODE (X) != HIGH \
&& ! (flag_pic \
&& (symbolic_operand (X, Pmode) \
- || pic_address_needs_scratch (X)))) \
+ || pic_address_needs_scratch (X))) \
+ && sparc_cmodel <= CM_MEDLOW) \
{ \
X = gen_rtx_LO_SUM (GET_MODE (X), \
gen_rtx_HIGH (GET_MODE (X), X), X); \
@@ -2596,6 +2651,23 @@ do { \
|| (CLASS1) == FPCC_REGS || (CLASS2) == FPCC_REGS) \
? (sparc_cpu == PROCESSOR_ULTRASPARC ? 12 : 6) : 2)
+/* Provide the cost of a branch. For pre-v9 processors we use
+ a value of 3 to take into account the potential annulling of
+ the delay slot (which ends up being a bubble in the pipeline slot)
+ plus a cycle to take into consideration the instruction cache
+ effects.
+
+ On v9 and later, which have branch prediction facilities, we set
+ it to the depth of the pipeline as that is the cost of a
+ mispredicted branch.
+
+ ??? Set to 9 when PROCESSOR_ULTRASPARC3 is added */
+
+#define BRANCH_COST \
+ ((sparc_cpu == PROCESSOR_V9 \
+ || sparc_cpu == PROCESSOR_ULTRASPARC) \
+ ? 7 : 3)
+
/* Provide the costs of a rtl expression. This is in the body of a
switch on CODE. The purpose for the cost of MULT is to encourage
`synth_mult' to find a synthetic multiply when reasonable.
@@ -2623,13 +2695,12 @@ do { \
case FIX: \
return 19;
-/* Conditional branches with empty delay slots have a length of two. */
-#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
-do { \
- if (GET_CODE (INSN) == CALL_INSN \
- || (GET_CODE (INSN) == JUMP_INSN && ! simplejump_p (insn))) \
- LENGTH += 1; \
-} while (0)
+#define PREFETCH_BLOCK \
+ ((sparc_cpu == PROCESSOR_ULTRASPARC) ? 64 : 32)
+
+/* ??? UltraSPARC-III note: Can set this to 8 for ultra3. */
+#define SIMULTANEOUS_PREFETCHES \
+ ((sparc_cpu == PROCESSOR_ULTRASPARC) ? 2 : 3)
/* Control the assembler format that we output. */
@@ -2675,7 +2746,7 @@ do { \
"%f40", "%f41", "%f42", "%f43", "%f44", "%f45", "%f46", "%f47", \
"%f48", "%f49", "%f50", "%f51", "%f52", "%f53", "%f54", "%f55", \
"%f56", "%f57", "%f58", "%f59", "%f60", "%f61", "%f62", "%f63", \
- "%fcc0", "%fcc1", "%fcc2", "%fcc3", "%icc"}
+ "%fcc0", "%fcc1", "%fcc2", "%fcc3", "%icc", "%sfp" }
/* Define additional names for use in asm clobbers and asm declarations. */
@@ -2944,8 +3015,10 @@ do { \
#define PREDICATE_CODES \
{"reg_or_0_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE}}, \
{"fp_zero_operand", {CONST_DOUBLE}}, \
+{"fp_register_operand", {SUBREG, REG}}, \
{"intreg_operand", {SUBREG, REG}}, \
{"fcc_reg_operand", {REG}}, \
+{"fcc0_reg_operand", {REG}}, \
{"icc_or_fcc_reg_operand", {REG}}, \
{"restore_operand", {REG}}, \
{"call_operand", {MEM}}, \
@@ -2963,6 +3036,7 @@ do { \
{"eq_or_neq", {EQ, NE}}, \
{"normal_comp_operator", {GE, GT, LE, LT, GTU, LEU}}, \
{"noov_compare_op", {NE, EQ, GE, GT, LE, LT, GEU, GTU, LEU, LTU}}, \
+{"noov_compare64_op", {NE, EQ, GE, GT, LE, LT, GEU, GTU, LEU, LTU}}, \
{"v9_regcmp_op", {EQ, NE, GE, LT, LE, GT}}, \
{"extend_op", {SIGN_EXTEND, ZERO_EXTEND}}, \
{"cc_arithop", {AND, IOR, XOR}}, \
diff --git a/contrib/gcc/config/sparc/sparc.md b/contrib/gcc/config/sparc/sparc.md
index c0b8f60..ebb6768 100644
--- a/contrib/gcc/config/sparc/sparc.md
+++ b/contrib/gcc/config/sparc/sparc.md
@@ -1,4 +1,4 @@
-;- Machine description for SPARC chip for GNU C compiler
+;; Machine description for SPARC chip for GNU C compiler
;; Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
;; 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
;; Contributed by Michael Tiemann (tiemann@cygnus.com)
@@ -87,8 +87,78 @@
"ialu,compare,shift,load,sload,store,uncond_branch,branch,call,sibcall,call_no_delay_slot,return,imul,idiv,fpload,fpstore,fp,fpmove,fpcmove,fpcmp,fpmul,fpdivs,fpdivd,fpsqrts,fpsqrtd,cmove,multi,misc"
(const_string "ialu"))
+;; true if branch/call has empty delay slot and will emit a nop in it
+(define_attr "empty_delay_slot" "false,true"
+ (symbol_ref "empty_delay_slot (insn)"))
+
+(define_attr "branch_type" "none,icc,fcc,reg" (const_string "none"))
+
+(define_attr "pic" "false,true"
+ (symbol_ref "flag_pic != 0"))
+
;; Length (in # of insns).
-(define_attr "length" "" (const_int 1))
+(define_attr "length" ""
+ (cond [(eq_attr "type" "uncond_branch,call,sibcall")
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (eq_attr "branch_type" "icc")
+ (if_then_else (match_operand 0 "noov_compare64_op" "")
+ (if_then_else (lt (pc) (match_dup 1))
+ (if_then_else (lt (minus (match_dup 1) (pc)) (const_int 260000))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 4)
+ (const_int 3)))
+ (if_then_else (lt (minus (pc) (match_dup 1)) (const_int 260000))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 4)
+ (const_int 3))))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1)))
+ (eq_attr "branch_type" "fcc")
+ (if_then_else (match_operand 0 "fcc0_reg_operand" "")
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (lt (pc) (match_dup 2))
+ (if_then_else (lt (minus (match_dup 2) (pc)) (const_int 260000))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 4)
+ (const_int 3)))
+ (if_then_else (lt (minus (pc) (match_dup 2)) (const_int 260000))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 4)
+ (const_int 3)))))
+ (eq_attr "branch_type" "reg")
+ (if_then_else (lt (pc) (match_dup 2))
+ (if_then_else (lt (minus (match_dup 2) (pc)) (const_int 32000))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 4)
+ (const_int 3)))
+ (if_then_else (lt (minus (pc) (match_dup 2)) (const_int 32000))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 2)
+ (const_int 1))
+ (if_then_else (eq_attr "empty_delay_slot" "true")
+ (const_int 4)
+ (const_int 3))))
+ ] (const_int 1)))
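;; Illustration, not part of the patch: the cond above encodes
;;
;;   length 1   branch reaches its target directly
;;   length 3   out-of-range branch; output_cbranch/output_v9branch rewrite
;;              it as a reversed branch over "ba,pt %xcc, <label>" (or "b")
;;              plus a nop
;;   +1 extra   when empty_delay_slot is true and a trailing nop is needed
;;
;; with +/-260000 bytes as the conservative reach for icc/fcc branches and
;; +/-32000 for the v9 register branches, matching the "slop" windows in
;; sparc.c.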
;; FP precision.
(define_attr "fptype" "single,double" (const_string "single"))
@@ -1893,11 +1963,12 @@
""
"*
{
- return output_cbranch (operands[0], 1, 0,
+ return output_cbranch (operands[0], operands[1], 1, 0,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
! final_sequence, insn);
}"
- [(set_attr "type" "branch")])
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "icc")])
;; XXX fpcmp nop braindamage
(define_insn "*inverted_branch"
@@ -1909,11 +1980,12 @@
""
"*
{
- return output_cbranch (operands[0], 1, 1,
+ return output_cbranch (operands[0], operands[1], 1, 1,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
! final_sequence, insn);
}"
- [(set_attr "type" "branch")])
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "icc")])
;; XXX fpcmp nop braindamage
(define_insn "*normal_fp_branch"
@@ -1926,11 +1998,12 @@
""
"*
{
- return output_cbranch (operands[1], 2, 0,
+ return output_cbranch (operands[1], operands[2], 2, 0,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
! final_sequence, insn);
}"
- [(set_attr "type" "branch")])
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "fcc")])
;; XXX fpcmp nop braindamage
(define_insn "*inverted_fp_branch"
@@ -1943,11 +2016,12 @@
""
"*
{
- return output_cbranch (operands[1], 2, 1,
+ return output_cbranch (operands[1], operands[2], 2, 1,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
! final_sequence, insn);
}"
- [(set_attr "type" "branch")])
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "fcc")])
;; XXX fpcmp nop braindamage
(define_insn "*normal_fpe_branch"
@@ -1960,11 +2034,12 @@
""
"*
{
- return output_cbranch (operands[1], 2, 0,
+ return output_cbranch (operands[1], operands[2], 2, 0,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
! final_sequence, insn);
}"
- [(set_attr "type" "branch")])
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "fcc")])
;; XXX fpcmp nop braindamage
(define_insn "*inverted_fpe_branch"
@@ -1977,11 +2052,12 @@
""
"*
{
- return output_cbranch (operands[1], 2, 1,
+ return output_cbranch (operands[1], operands[2], 2, 1,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
! final_sequence, insn);
}"
- [(set_attr "type" "branch")])
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "fcc")])
;; Sparc V9-specific jump insns. None of these are guaranteed to be
;; in the architecture.
@@ -1999,11 +2075,12 @@
"TARGET_ARCH64"
"*
{
- return output_v9branch (operands[0], 1, 2, 0,
+ return output_v9branch (operands[0], operands[2], 1, 2, 0,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
! final_sequence, insn);
}"
- [(set_attr "type" "branch")])
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "reg")])
;; XXX
(define_insn "*inverted_int_branch_sp64"
@@ -2016,11 +2093,12 @@
"TARGET_ARCH64"
"*
{
- return output_v9branch (operands[0], 1, 2, 1,
+ return output_v9branch (operands[0], operands[2], 1, 2, 1,
final_sequence && INSN_ANNULLED_BRANCH_P (insn),
! final_sequence, insn);
}"
- [(set_attr "type" "branch")])
+ [(set_attr "type" "branch")
+ (set_attr "branch_type" "reg")])
;; Load program counter insns.
@@ -2053,14 +2131,8 @@
a double if needed. */
if (GET_CODE (operands[1]) == CONST_DOUBLE)
{
- operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]) & 0xff);
- }
- else if (GET_CODE (operands[1]) == CONST_INT)
- {
- /* And further, we know for all QI cases that only the
- low byte is significant, which we can always process
- in a single insn. So mask it now. */
- operands[1] = GEN_INT (INTVAL (operands[1]) & 0xff);
+ operands[1] = GEN_INT (trunc_int_for_mode
+ (CONST_DOUBLE_LOW (operands[1]), QImode));
}
/* Handle sets of MEM first. */
@@ -2459,13 +2531,14 @@
(define_insn "*movdi_insn_sp32_v9"
[(set (match_operand:DI 0 "nonimmediate_operand"
- "=m,T,U,o,r,r,r,?T,?f,?f,?o,?f")
+ "=T,o,T,U,o,r,r,r,?T,?f,?f,?o,?f")
(match_operand:DI 1 "input_operand"
- " J,U,T,r,o,i,r, f, T, o, f, f"))]
+ " J,J,U,T,r,o,i,r, f, T, o, f, f"))]
"! TARGET_ARCH64 && TARGET_V9
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
"@
stx\\t%%g0, %0
+ #
std\\t%1, %0
ldd\\t%1, %0
#
@@ -2477,18 +2550,19 @@
#
#
#"
- [(set_attr "type" "store,store,load,*,*,*,*,fpstore,fpload,*,*,*")
- (set_attr "length" "*,*,*,2,2,2,2,*,*,2,2,2")])
+ [(set_attr "type" "store,store,store,load,*,*,*,*,fpstore,fpload,*,*,*")
+ (set_attr "length" "*,2,*,*,2,2,2,2,*,*,2,2,2")])
(define_insn "*movdi_insn_sp32"
[(set (match_operand:DI 0 "nonimmediate_operand"
- "=T,U,o,r,r,r,?T,?f,?f,?o,?f")
+ "=o,T,U,o,r,r,r,?T,?f,?f,?o,?f")
(match_operand:DI 1 "input_operand"
- " U,T,r,o,i,r, f, T, o, f, f"))]
+ " J,U,T,r,o,i,r, f, T, o, f, f"))]
"! TARGET_ARCH64
&& (register_operand (operands[0], DImode)
|| register_operand (operands[1], DImode))"
"@
+ #
std\\t%1, %0
ldd\\t%1, %0
#
@@ -2500,8 +2574,8 @@
#
#
#"
- [(set_attr "type" "store,load,*,*,*,*,fpstore,fpload,*,*,*")
- (set_attr "length" "*,*,2,2,2,2,*,*,2,2,2")])
+ [(set_attr "type" "store,store,load,*,*,*,*,fpstore,fpload,*,*,*")
+ (set_attr "length" "2,*,*,2,2,2,2,*,*,2,2,2")])
;; The following are generated by sparc_emit_set_const64
(define_insn "*movdi_sp64_dbl"
@@ -2520,8 +2594,8 @@
"sethi\\t%%hi(%a1), %0")
(define_insn "*movdi_insn_sp64_novis"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,m,?e,?e,?m")
- (match_operand:DI 1 "input_operand" "rI,K,J,m,rJ,e,m,e"))]
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,m,?e,?e,?W")
+ (match_operand:DI 1 "input_operand" "rI,N,J,m,rJ,e,W,e"))]
"TARGET_ARCH64 && ! TARGET_VIS
&& (register_operand (operands[0], DImode)
|| reg_or_0_operand (operands[1], DImode))"
@@ -2538,8 +2612,8 @@
(set_attr "fptype" "*,*,*,*,*,double,*,*")])
(define_insn "*movdi_insn_sp64_vis"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,m,?e,?e,?m,b")
- (match_operand:DI 1 "input_operand" "rI,K,J,m,rJ,e,m,e,J"))]
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,m,?e,?e,?W,b")
+ (match_operand:DI 1 "input_operand" "rI,N,J,m,rJ,e,W,e,J"))]
"TARGET_ARCH64 && TARGET_VIS &&
(register_operand (operands[0], DImode)
|| reg_or_0_operand (operands[1], DImode))"
@@ -2734,8 +2808,7 @@
&& ! flag_pic"
"
{
- sparc_emit_set_symbolic_const64 (operands[0], operands[1],
- gen_rtx_REG (DImode, REGNO (operands[2])));
+ sparc_emit_set_symbolic_const64 (operands[0], operands[1], operands[2]);
DONE;
}")
@@ -2748,8 +2821,7 @@
&& ! flag_pic"
"
{
- sparc_emit_set_symbolic_const64 (operands[0], operands[1],
- gen_rtx_REG (DImode, REGNO (operands[2])));
+ sparc_emit_set_symbolic_const64 (operands[0], operands[1], operands[2]);
DONE;
}")
@@ -2771,8 +2843,8 @@
#else
unsigned int low, high;
- low = INTVAL (operands[1]) & 0xffffffff;
- high = (INTVAL (operands[1]) >> 32) & 0xffffffff;
+ low = trunc_int_for_mode (INTVAL (operands[1]), SImode);
+ high = trunc_int_for_mode (INTVAL (operands[1]) >> 32, SImode);
emit_insn (gen_movsi (gen_highpart (SImode, operands[0]), GEN_INT (high)));
/* Slick... but this trick loses if this subreg constant part
@@ -2799,7 +2871,7 @@
/* Slick... but this trick loses if this subreg constant part
can be done in one insn. */
if (CONST_DOUBLE_LOW (operands[1]) == CONST_DOUBLE_HIGH (operands[1])
- && !(SPARC_SETHI_P (CONST_DOUBLE_HIGH (operands[1]))
+ && !(SPARC_SETHI32_P (CONST_DOUBLE_HIGH (operands[1]))
|| SPARC_SIMM13_P (CONST_DOUBLE_HIGH (operands[1]))))
{
emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
@@ -2890,6 +2962,21 @@
DONE;
}")
+(define_split
+ [(set (match_operand:DI 0 "memory_operand" "")
+ (const_int 0))]
+ "reload_completed
+ && (! TARGET_V9
+ || (! TARGET_ARCH64
+ && ! mem_min_alignment (operands[0], 8)))
+ && offsettable_memref_p (operands[0])"
+ [(clobber (const_int 0))]
+ "
+{
+ emit_insn (gen_movsi (adjust_address (operands[0], SImode, 0), const0_rtx));
+ emit_insn (gen_movsi (adjust_address (operands[0], SImode, 4), const0_rtx));
+ DONE;
+}")
;; Floating point move insns
@@ -3202,8 +3289,8 @@
;; Be careful, fmovd does not exist when !v9.
(define_insn "*movdf_insn_sp32"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=e,T,U,T,o,e,*r,o,e,o")
- (match_operand:DF 1 "input_operand" "T#F,e,T,U,G,e,*rFo,*r,o#F,e"))]
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,W,U,T,o,e,*r,o,e,o")
+ (match_operand:DF 1 "input_operand" "W#F,e,T,U,G,e,*rFo,*r,o#F,e"))]
"TARGET_FPU
&& ! TARGET_V9
&& (register_operand (operands[0], DFmode)
@@ -3262,8 +3349,8 @@
;; We have available v9 double floats but not 64-bit
;; integer registers and no VIS.
(define_insn "*movdf_insn_v9only_novis"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,T,T,U,T,e,*r,o")
- (match_operand:DF 1 "input_operand" "e,T#F,G,e,T,U,o#F,*roF,*rGe"))]
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,T,W,U,T,f,*r,o")
+ (match_operand:DF 1 "input_operand" "e,W#F,G,e,T,U,o#F,*roF,*rGf"))]
"TARGET_FPU
&& TARGET_V9
&& ! TARGET_VIS
@@ -3288,8 +3375,8 @@
;; We have available v9 double floats but not 64-bit
;; integer registers but we have VIS.
(define_insn "*movdf_insn_v9only_vis"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,e,T,T,U,T,e,*r,o")
- (match_operand:DF 1 "input_operand" "G,e,T#F,G,e,T,U,o#F,*roGF,*rGe"))]
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,e,T,W,U,T,f,*r,o")
+ (match_operand:DF 1 "input_operand" "G,e,W#F,G,e,T,U,o#F,*roGF,*rGf"))]
"TARGET_FPU
&& TARGET_VIS
&& ! TARGET_ARCH64
@@ -3314,8 +3401,8 @@
;; We have available both v9 double floats and 64-bit
;; integer registers. No VIS though.
(define_insn "*movdf_insn_sp64_novis"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,m,*r,*r,m,*r")
- (match_operand:DF 1 "input_operand" "e,m#F,e,*rG,m,*rG,F"))]
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,W,*r,*r,m,*r")
+ (match_operand:DF 1 "input_operand" "e,W#F,e,*rG,m,*rG,F"))]
"TARGET_FPU
&& ! TARGET_VIS
&& TARGET_ARCH64
@@ -3337,8 +3424,8 @@
;; We have available both v9 double floats and 64-bit
;; integer registers. And we have VIS.
(define_insn "*movdf_insn_sp64_vis"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,e,m,*r,*r,m,*r")
- (match_operand:DF 1 "input_operand" "G,e,m#F,e,*rG,m,*rG,F"))]
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=e,e,e,W,*r,*r,m,*r")
+ (match_operand:DF 1 "input_operand" "G,e,W#F,e,*rG,m,*rG,F"))]
"TARGET_FPU
&& TARGET_VIS
&& TARGET_ARCH64
@@ -3411,7 +3498,7 @@
/* Slick... but this trick loses if this subreg constant part
can be done in one insn. */
if (l[1] == l[0]
- && !(SPARC_SETHI_P (l[0])
+ && !(SPARC_SETHI32_P (l[0])
|| SPARC_SIMM13_P (l[0])))
{
emit_insn (gen_movsi (gen_lowpart (SImode, operands[0]),
@@ -3753,7 +3840,8 @@
"reload_completed
&& (! TARGET_ARCH64
|| (TARGET_FPU
- && ! TARGET_HARD_QUAD))"
+ && ! TARGET_HARD_QUAD)
+ || ! fp_register_operand (operands[0], TFmode))"
[(clobber (const_int 0))]
"
{
@@ -3815,7 +3903,10 @@
[(set (match_operand:TF 0 "register_operand" "")
(match_operand:TF 1 "memory_operand" ""))]
"(reload_completed
- && offsettable_memref_p (operands[1]))"
+ && offsettable_memref_p (operands[1])
+ && (! TARGET_ARCH64
+ || ! TARGET_HARD_QUAD
+ || ! fp_register_operand (operands[0], TFmode)))"
[(clobber (const_int 0))]
"
{
@@ -3848,7 +3939,10 @@
[(set (match_operand:TF 0 "memory_operand" "")
(match_operand:TF 1 "register_operand" ""))]
"(reload_completed
- && offsettable_memref_p (operands[0]))"
+ && offsettable_memref_p (operands[0])
+ && (! TARGET_ARCH64
+ || ! TARGET_HARD_QUAD
+ || ! fp_register_operand (operands[1], TFmode)))"
[(clobber (const_int 0))]
"
{
@@ -4929,31 +5023,11 @@
(set_attr "fptype" "double")])
(define_expand "extendsftf2"
- [(set (match_operand:TF 0 "register_operand" "=e")
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
(float_extend:TF
- (match_operand:SF 1 "register_operand" "f")))]
+ (match_operand:SF 1 "register_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0;
-
- if (GET_CODE (operands[0]) != MEM)
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- else
- slot0 = operands[0];
-
- emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_stoq\"), 0,
- VOIDmode, 2,
- XEXP (slot0, 0), Pmode,
- operands[1], SFmode);
-
- if (GET_CODE (operands[0]) != MEM)
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
- DONE;
- }
-}")
+ "emit_tfmode_cvt (FLOAT_EXTEND, operands); DONE;")
(define_insn "*extendsftf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
@@ -4964,31 +5038,11 @@
[(set_attr "type" "fp")])
(define_expand "extenddftf2"
- [(set (match_operand:TF 0 "register_operand" "=e")
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
(float_extend:TF
- (match_operand:DF 1 "register_operand" "e")))]
+ (match_operand:DF 1 "register_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0;
-
- if (GET_CODE (operands[0]) != MEM)
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- else
- slot0 = operands[0];
-
- emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_dtoq\"), 0,
- VOIDmode, 2,
- XEXP (slot0, 0), Pmode,
- operands[1], DFmode);
-
- if (GET_CODE (operands[0]) != MEM)
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
- DONE;
- }
-}")
+ "emit_tfmode_cvt (FLOAT_EXTEND, operands); DONE;")
(define_insn "*extenddftf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
@@ -5008,30 +5062,11 @@
(set_attr "fptype" "double")])
(define_expand "trunctfsf2"
- [(set (match_operand:SF 0 "register_operand" "=f")
+ [(set (match_operand:SF 0 "register_operand" "")
(float_truncate:SF
- (match_operand:TF 1 "register_operand" "e")))]
+ (match_operand:TF 1 "general_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0;
-
- if (GET_CODE (operands[1]) != MEM)
- {
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
- }
- else
- slot0 = operands[1];
-
- emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtos\"),
- operands[0], 0, SFmode, 1,
- XEXP (slot0, 0), Pmode);
- DONE;
- }
-}")
+ "emit_tfmode_cvt (FLOAT_TRUNCATE, operands); DONE;")
(define_insn "*trunctfsf2_hq"
[(set (match_operand:SF 0 "register_operand" "=f")
@@ -5042,30 +5077,11 @@
[(set_attr "type" "fp")])
(define_expand "trunctfdf2"
- [(set (match_operand:DF 0 "register_operand" "=f")
+ [(set (match_operand:DF 0 "register_operand" "")
(float_truncate:DF
- (match_operand:TF 1 "register_operand" "e")))]
+ (match_operand:TF 1 "general_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0;
-
- if (GET_CODE (operands[1]) != MEM)
- {
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
- }
- else
- slot0 = operands[1];
-
- emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtod\"),
- operands[0], 0, DFmode, 1,
- XEXP (slot0, 0), Pmode);
- DONE;
- }
-}")
+ "emit_tfmode_cvt (FLOAT_TRUNCATE, operands); DONE;")
(define_insn "*trunctfdf2_hq"
[(set (match_operand:DF 0 "register_operand" "=e")
@@ -5094,30 +5110,10 @@
(set_attr "fptype" "double")])
(define_expand "floatsitf2"
- [(set (match_operand:TF 0 "register_operand" "=e")
- (float:TF (match_operand:SI 1 "register_operand" "f")))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (float:TF (match_operand:SI 1 "register_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0;
-
- if (GET_CODE (operands[1]) != MEM)
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- else
- slot0 = operands[1];
-
- emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_itoq\"), 0,
- VOIDmode, 2,
- XEXP (slot0, 0), Pmode,
- operands[1], SImode);
-
- if (GET_CODE (operands[0]) != MEM)
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
- DONE;
- }
-}")
+ "emit_tfmode_cvt (FLOAT, operands); DONE;")
(define_insn "*floatsitf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
@@ -5127,27 +5123,10 @@
[(set_attr "type" "fp")])
(define_expand "floatunssitf2"
- [(set (match_operand:TF 0 "register_operand" "=e")
- (unsigned_float:TF (match_operand:SI 1 "register_operand" "e")))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (unsigned_float:TF (match_operand:SI 1 "register_operand" "")))]
"TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
- "
-{
- rtx slot0;
-
- if (GET_CODE (operands[1]) != MEM)
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- else
- slot0 = operands[1];
-
- emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_uitoq\"), 0,
- VOIDmode, 2,
- XEXP (slot0, 0), Pmode,
- operands[1], SImode);
-
- if (GET_CODE (operands[0]) != MEM)
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
- DONE;
-}")
+ "emit_tfmode_cvt (UNSIGNED_FLOAT, operands); DONE;")
;; Now the same for 64-bit sources.
@@ -5159,6 +5138,12 @@
[(set_attr "type" "fp")
(set_attr "fptype" "double")])
+(define_expand "floatunsdisf2"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:DI 1 "register_operand" ""))]
+ "TARGET_ARCH64 && TARGET_FPU"
+ "sparc_emit_floatunsdi (operands); DONE;")
+
(define_insn "floatdidf2"
[(set (match_operand:DF 0 "register_operand" "=e")
(float:DF (match_operand:DI 1 "register_operand" "e")))]
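The new floatunsdisf2 expander above and its DFmode twin in the next hunk
route unsigned 64-bit integer to float conversion through
sparc_emit_floatunsdi; V9 hardware only provides the signed fxtos/fxtod
forms, so the helper presumably synthesizes the unsigned case from those.
C that exercises both expanders on sparc64, where unsigned long is DImode:

    float  to_sf (unsigned long x) { return (float) x;  }   /* floatunsdisf2 */
    double to_df (unsigned long x) { return (double) x; }   /* floatunsdidf2 */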
@@ -5167,31 +5152,17 @@
[(set_attr "type" "fp")
(set_attr "fptype" "double")])
+(define_expand "floatunsdidf2"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:DI 1 "register_operand" ""))]
+ "TARGET_ARCH64 && TARGET_FPU"
+ "sparc_emit_floatunsdi (operands); DONE;")
+
(define_expand "floatditf2"
- [(set (match_operand:TF 0 "register_operand" "=e")
- (float:TF (match_operand:DI 1 "register_operand" "e")))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (float:TF (match_operand:DI 1 "register_operand" "")))]
"TARGET_FPU && TARGET_V9 && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0;
-
- if (GET_CODE (operands[1]) != MEM)
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- else
- slot0 = operands[1];
-
- emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_xtoq\"), 0,
- VOIDmode, 2,
- XEXP (slot0, 0), Pmode,
- operands[1], DImode);
-
- if (GET_CODE (operands[0]) != MEM)
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
- DONE;
- }
-}")
+ "emit_tfmode_cvt (FLOAT, operands); DONE;")
(define_insn "*floatditf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
@@ -5201,27 +5172,10 @@
[(set_attr "type" "fp")])
(define_expand "floatunsditf2"
- [(set (match_operand:TF 0 "register_operand" "=e")
- (unsigned_float:TF (match_operand:DI 1 "register_operand" "e")))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (unsigned_float:TF (match_operand:DI 1 "register_operand" "")))]
"TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
- "
-{
- rtx slot0;
-
- if (GET_CODE (operands[1]) != MEM)
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- else
- slot0 = operands[1];
-
- emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_uxtoq\"), 0,
- VOIDmode, 2,
- XEXP (slot0, 0), Pmode,
- operands[1], DImode);
-
- if (GET_CODE (operands[0]) != MEM)
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
- DONE;
-}")
+ "emit_tfmode_cvt (UNSIGNED_FLOAT, operands); DONE;")
;; Convert a float to an actual integer.
;; Truncation is performed as part of the conversion.
@@ -5243,58 +5197,23 @@
(set_attr "fptype" "double")])
(define_expand "fix_trunctfsi2"
- [(set (match_operand:SI 0 "register_operand" "=f")
- (fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ [(set (match_operand:SI 0 "register_operand" "")
+ (fix:SI (match_operand:TF 1 "general_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0;
-
- if (GET_CODE (operands[1]) != MEM)
- {
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
- }
- else
- slot0 = operands[1];
-
- emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoi\"),
- operands[0], 0, SImode, 1,
- XEXP (slot0, 0), Pmode);
- DONE;
- }
-}")
+ "emit_tfmode_cvt (FIX, operands); DONE;")
(define_insn "*fix_trunctfsi2_hq"
[(set (match_operand:SI 0 "register_operand" "=f")
- (fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ (fix:SI (match_operand:TF 1 "register_operand" "e")))]
"TARGET_FPU && TARGET_HARD_QUAD"
"fqtoi\\t%1, %0"
[(set_attr "type" "fp")])
(define_expand "fixuns_trunctfsi2"
- [(set (match_operand:SI 0 "register_operand" "=f")
- (unsigned_fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ [(set (match_operand:SI 0 "register_operand" "")
+ (unsigned_fix:SI (match_operand:TF 1 "general_operand" "")))]
"TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
- "
-{
- rtx slot0;
-
- if (GET_CODE (operands[1]) != MEM)
- {
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
- }
- else
- slot0 = operands[1];
-
- emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoui\"),
- operands[0], 0, SImode, 1,
- XEXP (slot0, 0), Pmode);
- DONE;
-}")
+ "emit_tfmode_cvt (UNSIGNED_FIX, operands); DONE;")
;; Now the same, for V9 targets
@@ -5315,59 +5234,23 @@
(set_attr "fptype" "double")])
(define_expand "fix_trunctfdi2"
- [(set (match_operand:DI 0 "register_operand" "=e")
- (fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ [(set (match_operand:DI 0 "register_operand" "")
+ (fix:DI (match_operand:TF 1 "general_operand" "")))]
"TARGET_V9 && TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0;
-
- if (GET_CODE (operands[1]) != MEM)
- {
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
- }
- else
- slot0 = operands[1];
-
- emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtox\"),
- operands[0], 0, DImode, 1,
- XEXP (slot0, 0), Pmode);
- DONE;
- }
-}")
+ "emit_tfmode_cvt (FIX, operands); DONE;")
(define_insn "*fix_trunctfdi2_hq"
[(set (match_operand:DI 0 "register_operand" "=e")
- (fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ (fix:DI (match_operand:TF 1 "register_operand" "e")))]
"TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
"fqtox\\t%1, %0"
[(set_attr "type" "fp")])
(define_expand "fixuns_trunctfdi2"
- [(set (match_operand:DI 0 "register_operand" "=f")
- (unsigned_fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ [(set (match_operand:DI 0 "register_operand" "")
+ (unsigned_fix:DI (match_operand:TF 1 "general_operand" "")))]
"TARGET_FPU && TARGET_ARCH64 && ! TARGET_HARD_QUAD"
- "
-{
- rtx slot0;
-
- if (GET_CODE (operands[1]) != MEM)
- {
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot0, operands[1]));
- }
- else
- slot0 = operands[1];
-
- emit_library_call_value (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_qtoux\"),
- operands[0], 0, DImode, 1,
- XEXP (slot0, 0), Pmode);
- DONE;
-}")
-
+ "emit_tfmode_cvt (UNSIGNED_FIX, operands); DONE;")
;;- arithmetic instructions
@@ -5920,6 +5803,13 @@
else
return \"sllx\\t%H1, 32, %3\\n\\tor\\t%L1, %3, %3\\n\\tmulx\\t%3, %2, %3\\n\\tsrlx\\t%3, 32, %H0\\n\\tmov\\t%3, %L0\";
}
+ else if (rtx_equal_p (operands[1], operands[2]))
+ {
+ if (which_alternative == 1)
+ return \"or\\t%L1, %H1, %H1\\n\\tmulx\\t%H1, %H1, %L0\;srlx\\t%L0, 32, %H0\";
+ else
+ return \"sllx\\t%H1, 32, %3\\n\\tor\\t%L1, %3, %3\\n\\tmulx\\t%3, %3, %3\\n\\tsrlx\\t%3, 32, %H0\\n\\tmov\\t%3, %L0\";
+ }
if (sparc_check_64 (operands[2], insn) <= 0)
output_asm_insn (\"srl\\t%L2, 0, %L2\", operands);
if (which_alternative == 1)
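The new rtx_equal_p arm above handles a v8plus DImode multiply whose two
source operands are the same register: the 32-bit halves are merged once and
the merged value is used as both multiplier inputs.  A hypothetical source
shape that could reach it (exact pattern selection depends on target flags
and register allocation):

    /* 32-bit SPARC with v8plus codegen: a DImode square can match the
       operands[1] == operands[2] case added above.  */
    long long square (long long x) { return x * x; }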
@@ -6377,7 +6267,7 @@
[(set_attr "type" "multi")
(set (attr "length")
(if_then_else (eq_attr "isa" "v9")
- (const_int 4) (const_int 7)))])
+ (const_int 4) (const_int 6)))])
(define_insn "divsi3_sp64"
[(set (match_operand:SI 0 "register_operand" "=r")
@@ -6576,7 +6466,7 @@
(set (match_dup 0) (and:SI (not:SI (match_dup 3)) (match_dup 1)))]
"
{
- operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+ operands[4] = GEN_INT (~INTVAL (operands[2]));
}")
;; Split DImode logical operations requiring two instructions.
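The same one-line fix recurs in the splits below.  GCC keeps CONST_INTs
sign-extended in a HOST_WIDE_INT, so on a 64-bit host the extra
"& 0xffffffff" produced a zero-extended value that is not a canonical SImode
constant.  A worked example, assuming a 64-bit HOST_WIDE_INT:

    #include <stdio.h>

    int main (void)
    {
      long long v = 0x0000ffff;   /* stands in for INTVAL (operands[2]) */
      /* Canonical SImode -65536, as GEN_INT now receives it: */
      printf ("%016llx\n", (unsigned long long) ~v);
      /* What the old masking produced (zero-extended, non-canonical): */
      printf ("%016llx\n", (unsigned long long) (~v & 0xffffffffLL));
      return 0;
    }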
@@ -6719,7 +6609,7 @@
(set (match_dup 0) (ior:SI (not:SI (match_dup 3)) (match_dup 1)))]
"
{
- operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+ operands[4] = GEN_INT (~INTVAL (operands[2]));
}")
(define_insn "*or_not_di_sp32"
@@ -6835,7 +6725,7 @@
(set (match_dup 0) (not:SI (xor:SI (match_dup 3) (match_dup 1))))]
"
{
- operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+ operands[4] = GEN_INT (~INTVAL (operands[2]));
}")
(define_split
@@ -6850,7 +6740,7 @@
(set (match_dup 0) (xor:SI (match_dup 3) (match_dup 1)))]
"
{
- operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+ operands[4] = GEN_INT (~INTVAL (operands[2]));
}")
;; xnor patterns. Note that (a ^ ~b) == (~a ^ b) == ~(a ^ b).
@@ -7267,42 +7157,7 @@
(plus:TF (match_operand:TF 1 "general_operand" "")
(match_operand:TF 2 "general_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0, slot1, slot2;
-
- if (GET_CODE (operands[0]) != MEM)
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- else
- slot0 = operands[0];
- if (GET_CODE (operands[1]) != MEM)
- {
- slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
- }
- else
- slot1 = operands[1];
- if (GET_CODE (operands[2]) != MEM)
- {
- slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
- }
- else
- slot2 = operands[2];
-
- emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_add\"), 0,
- VOIDmode, 3,
- XEXP (slot0, 0), Pmode,
- XEXP (slot1, 0), Pmode,
- XEXP (slot2, 0), Pmode);
-
- if (GET_CODE (operands[0]) != MEM)
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
- DONE;
- }
-}")
+ "emit_tfmode_binop (PLUS, operands); DONE;")
(define_insn "*addtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
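The TFmode binary operations follow the conversions: each open-coded
expansion collapses into emit_tfmode_binop, and the deleted body records the
exact soft-quad calling convention (three TFmode pointers, result first).
The same sequence written out in C, with the prototype taken from the
deleted emit_library_call:

    extern void _Qp_add (long double *result,
                         const long double *a, const long double *b);

    long double addtf_example (long double a, long double b)
    {
      long double r;
      /* What emit_tfmode_binop (PLUS, ...) expands to without hard quad.  */
      _Qp_add (&r, &a, &b);
      return r;
    }

The MINUS, MULT and DIV hunks below, and the SQRT unop further down, have
the same shape with _Qp_sub, _Qp_mul, _Qp_div and _Qp_sqrt.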
@@ -7334,42 +7189,7 @@
(minus:TF (match_operand:TF 1 "general_operand" "")
(match_operand:TF 2 "general_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0, slot1, slot2;
-
- if (GET_CODE (operands[0]) != MEM)
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- else
- slot0 = operands[0];
- if (GET_CODE (operands[1]) != MEM)
- {
- slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
- }
- else
- slot1 = operands[1];
- if (GET_CODE (operands[2]) != MEM)
- {
- slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
- }
- else
- slot2 = operands[2];
-
- emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_sub\"), 0,
- VOIDmode, 3,
- XEXP (slot0, 0), Pmode,
- XEXP (slot1, 0), Pmode,
- XEXP (slot2, 0), Pmode);
-
- if (GET_CODE (operands[0]) != MEM)
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
- DONE;
- }
-}")
+ "emit_tfmode_binop (MINUS, operands); DONE;")
(define_insn "*subtf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
@@ -7401,42 +7221,7 @@
(mult:TF (match_operand:TF 1 "general_operand" "")
(match_operand:TF 2 "general_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0, slot1, slot2;
-
- if (GET_CODE (operands[0]) != MEM)
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- else
- slot0 = operands[0];
- if (GET_CODE (operands[1]) != MEM)
- {
- slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
- }
- else
- slot1 = operands[1];
- if (GET_CODE (operands[2]) != MEM)
- {
- slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
- }
- else
- slot2 = operands[2];
-
- emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_mul\"), 0,
- VOIDmode, 3,
- XEXP (slot0, 0), Pmode,
- XEXP (slot1, 0), Pmode,
- XEXP (slot2, 0), Pmode);
-
- if (GET_CODE (operands[0]) != MEM)
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
- DONE;
- }
-}")
+ "emit_tfmode_binop (MULT, operands); DONE;")
(define_insn "*multf3_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
@@ -7485,42 +7270,7 @@
(div:TF (match_operand:TF 1 "general_operand" "")
(match_operand:TF 2 "general_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0, slot1, slot2;
-
- if (GET_CODE (operands[0]) != MEM)
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- else
- slot0 = operands[0];
- if (GET_CODE (operands[1]) != MEM)
- {
- slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
- }
- else
- slot1 = operands[1];
- if (GET_CODE (operands[2]) != MEM)
- {
- slot2 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot2, operands[2]));
- }
- else
- slot2 = operands[2];
-
- emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_div\"), 0,
- VOIDmode, 3,
- XEXP (slot0, 0), Pmode,
- XEXP (slot1, 0), Pmode,
- XEXP (slot2, 0), Pmode);
-
- if (GET_CODE (operands[0]) != MEM)
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
- DONE;
- }
-}")
+ "emit_tfmode_binop (DIV, operands); DONE;")
;; We don't have timing for the quad-precision divide.
(define_insn "*divtf3_hq"
@@ -7769,37 +7519,10 @@
[(set_attr "type" "fpmove")])
(define_expand "sqrttf2"
- [(set (match_operand:TF 0 "register_operand" "=e")
- (sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (sqrt:TF (match_operand:TF 1 "general_operand" "")))]
"TARGET_FPU && (TARGET_HARD_QUAD || TARGET_ARCH64)"
- "
-{
- if (! TARGET_HARD_QUAD)
- {
- rtx slot0, slot1;
-
- if (GET_CODE (operands[0]) != MEM)
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- else
- slot0 = operands[0];
- if (GET_CODE (operands[1]) != MEM)
- {
- slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
- emit_insn (gen_rtx_SET (VOIDmode, slot1, operands[1]));
- }
- else
- slot1 = operands[1];
-
- emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \"_Qp_sqrt\"), 0,
- VOIDmode, 2,
- XEXP (slot0, 0), Pmode,
- XEXP (slot1, 0), Pmode);
-
- if (GET_CODE (operands[0]) != MEM)
- emit_insn (gen_rtx_SET (VOIDmode, operands[0], slot0));
- DONE;
- }
-}")
+ "emit_tfmode_unop (SQRT, operands); DONE;")
(define_insn "*sqrttf2_hq"
[(set (match_operand:TF 0 "register_operand" "=e")
@@ -8403,7 +8126,7 @@
"! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0"
"call\\t%a0, %1\\n\\tnop\\n\\tunimp\\t%2"
[(set_attr "type" "call_no_delay_slot")
- (set_attr "length" "2")])
+ (set_attr "length" "3")])
;; This is a call that wants a structure value.
;; There is no such critter for v9 (??? we may need one anyway).
@@ -8416,7 +8139,7 @@
"! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0"
"call\\t%a0, %1\\n\\tnop\\n\\tunimp\\t%2"
[(set_attr "type" "call_no_delay_slot")
- (set_attr "length" "2")])
+ (set_attr "length" "3")])
;; This is a call that may want a structure value. This is used for
;; untyped_calls.
@@ -8429,7 +8152,7 @@
"! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0"
"call\\t%a0, %1\\n\\tnop\\n\\tnop"
[(set_attr "type" "call_no_delay_slot")
- (set_attr "length" "2")])
+ (set_attr "length" "3")])
;; This is a call that wants a structure value.
(define_insn "*call_symbolic_untyped_struct_value_sp32"
@@ -8441,7 +8164,7 @@
"! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0"
"call\\t%a0, %1\\n\\tnop\\n\\tnop"
[(set_attr "type" "call_no_delay_slot")
- (set_attr "length" "2")])
+ (set_attr "length" "3")])
(define_expand "call_value"
;; Note that this expression is not used for generating RTL.
@@ -8665,21 +8388,6 @@
[(set_attr "type" "multi")
(set_attr "length" "3")])
-(define_insn "return"
- [(return)
- (use (reg:SI 31))]
- "! TARGET_EPILOGUE"
- "* return output_return (operands);"
- [(set_attr "type" "return")])
-
-(define_peephole
- [(set (match_operand:SI 0 "register_operand" "=r")
- (match_operand:SI 1 "arith_operand" "rI"))
- (parallel [(return)
- (use (reg:SI 31))])]
- "sparc_return_peephole_ok (operands[0], operands[1])"
- "return\\t%%i7+8\\n\\tmov\\t%Y1, %Y0")
-
(define_insn "nop"
[(const_int 0)]
""
@@ -8737,7 +8445,7 @@
/* Restore %fp from stack pointer value for containing function.
The restore insn that follows will move this to %sp,
and reload the appropriate value into %fp. */
- emit_move_insn (frame_pointer_rtx, stack);
+ emit_move_insn (hard_frame_pointer_rtx, stack);
/* USE of frame_pointer_rtx added for consistency; not clear if
really needed. */
@@ -8813,22 +8521,41 @@
DONE;
}")
-;; ??? Should set length to zero when !current_function_calls_alloca,
-;; ??? but there is no easy way to get at that definition. It would
-;; ??? require including function.h into sparc-protos.h and that is
-;; ??? likely not a good idea. -DaveM
(define_insn "do_builtin_setjmp_setup"
[(unspec_volatile [(const_int 0)] 5)]
""
"*
{
- if (!current_function_calls_alloca)
- return \"\";
- if (TARGET_V9)
- return \"flushw\";
- return \"ta\\t3\";
+ if (! current_function_calls_alloca || ! TARGET_V9 || TARGET_FLAT)
+ return \"#\";
+ fputs (\"\tflushw\n\", asm_out_file);
+ if (flag_pic)
+ fprintf (asm_out_file, \"\tst%c\t%%l7, [%%sp+%d]\n\",
+ TARGET_ARCH64 ? 'x' : 'w',
+ SPARC_STACK_BIAS + 7 * UNITS_PER_WORD);
+ fprintf (asm_out_file, \"\tst%c\t%%fp, [%%sp+%d]\n\",
+ TARGET_ARCH64 ? 'x' : 'w',
+ SPARC_STACK_BIAS + 14 * UNITS_PER_WORD);
+ fprintf (asm_out_file, \"\tst%c\t%%i7, [%%sp+%d]\n\",
+ TARGET_ARCH64 ? 'x' : 'w',
+ SPARC_STACK_BIAS + 15 * UNITS_PER_WORD);
+ return \"\";
}"
- [(set_attr "type" "misc")])
+ [(set_attr "type" "misc")
+ (set (attr "length") (if_then_else (eq_attr "pic" "true")
+ (const_int 4)
+ (const_int 3)))])
+
+(define_split
+ [(unspec_volatile [(const_int 0)] 5)]
+ "! current_function_calls_alloca || ! TARGET_V9 || TARGET_FLAT"
+ [(const_int 0)]
+ "
+{
+ if (current_function_calls_alloca)
+ emit_insn (gen_flush_register_windows ());
+ DONE;
+}")
;; Pattern for use after a setjmp to store FP and the return register
;; into the stack area.
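The rewritten do_builtin_setjmp_setup now emits "#" and splits to a plain
window flush when the stores are not needed; otherwise it spills %l7 (when
PIC), %fp and %i7 into the register-window save area, presumably so that
__builtin_longjmp can find them after the jump.  A small program that
exercises the pattern (GCC's builtin setjmp buffer is five words):

    #include <stdio.h>

    static void *env[5];                /* __builtin_setjmp buffer: 5 words */

    static void bounce (void) { __builtin_longjmp (env, 1); }

    int main (void)
    {
      if (__builtin_setjmp (env) == 0)  /* emits do_builtin_setjmp_setup */
        {
          puts ("direct path");
          bounce ();
        }
      puts ("resumed after __builtin_longjmp");
      return 0;
    }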
@@ -8998,7 +8725,7 @@
(set (match_operand:SI 2 "register_operand" "")
(match_operand:SI 3 "memory_operand" ""))]
"registers_ok_for_ldd_peep (operands[2], operands[0])
- && mems_ok_for_ldd_peep (operands[3], operands[1], operands[2])"
+ && mems_ok_for_ldd_peep (operands[3], operands[1], operands[0])"
[(set (match_dup 2)
(match_dup 3))]
"operands[3] = change_address (operands[3], DImode, NULL);
@@ -9023,7 +8750,7 @@
(set (match_operand:SF 2 "register_operand" "")
(match_operand:SF 3 "memory_operand" ""))]
"registers_ok_for_ldd_peep (operands[2], operands[0])
- && mems_ok_for_ldd_peep (operands[3], operands[1], operands[2])"
+ && mems_ok_for_ldd_peep (operands[3], operands[1], operands[0])"
[(set (match_dup 2)
(match_dup 3))]
"operands[3] = change_address (operands[3], DFmode, NULL);
@@ -9073,17 +8800,17 @@
&& ! SPARC_FP_REG_P (REGNO (operands[1]))"
[(parallel [(set (match_dup 0) (match_dup 1))
(set (reg:CCX 100)
- (compare:CC (match_dup 1) (const_int 0)))])]
+ (compare:CCX (match_dup 1) (const_int 0)))])]
"")
-;; Return peepholes. First the "normal" ones.
-;; These are necessary to catch insns ending up in the epilogue delay list.
+;; Return peepholes. These are generated by sparc_nonflat_function_epilogue,
+;; which then immediately calls final_scan_insn.
(define_insn "*return_qi"
[(set (match_operand:QI 0 "restore_operand" "")
(match_operand:QI 1 "arith_operand" "rI"))
(return)]
- "! TARGET_EPILOGUE"
+ "sparc_emitting_epilogue"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
@@ -9101,7 +8828,7 @@
[(set (match_operand:HI 0 "restore_operand" "")
(match_operand:HI 1 "arith_operand" "rI"))
(return)]
- "! TARGET_EPILOGUE"
+ "sparc_emitting_epilogue"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
@@ -9119,7 +8846,7 @@
[(set (match_operand:SI 0 "restore_operand" "")
(match_operand:SI 1 "arith_operand" "rI"))
(return)]
- "! TARGET_EPILOGUE"
+ "sparc_emitting_epilogue"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
@@ -9133,15 +8860,11 @@
[(set_attr "type" "multi")
(set_attr "length" "2")])
-;; The following pattern is only generated by delayed-branch scheduling,
-;; when the insn winds up in the epilogue. This can happen not only when
-;; ! TARGET_FPU because we move complex types around by parts using
-;; SF mode SUBREGs.
(define_insn "*return_sf_no_fpu"
[(set (match_operand:SF 0 "restore_operand" "=r")
(match_operand:SF 1 "register_operand" "r"))
(return)]
- "! TARGET_EPILOGUE"
+ "sparc_emitting_epilogue"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
@@ -9158,7 +8881,7 @@
[(set (match_operand:DF 0 "restore_operand" "=r")
(match_operand:DF 1 "register_operand" "r"))
(return)]
- "! TARGET_EPILOGUE && TARGET_ARCH64"
+ "sparc_emitting_epilogue && TARGET_ARCH64"
"*
{
if (IN_OR_GLOBAL_P (operands[1]))
@@ -9174,7 +8897,7 @@
(plus:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "arith_operand" "rI")))
(return)]
- "! TARGET_EPILOGUE"
+ "sparc_emitting_epilogue"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
@@ -9195,7 +8918,7 @@
(lo_sum:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "immediate_operand" "in")))
(return)]
- "! TARGET_EPILOGUE && ! TARGET_CM_MEDMID"
+ "sparc_emitting_epilogue && ! TARGET_CM_MEDMID"
"*
{
if (! TARGET_ARCH64 && current_function_returns_struct)
@@ -9213,7 +8936,7 @@
[(set (match_operand:DI 0 "restore_operand" "")
(match_operand:DI 1 "arith_double_operand" "rHI"))
(return)]
- "TARGET_ARCH64 && ! TARGET_EPILOGUE"
+ "sparc_emitting_epilogue && TARGET_ARCH64"
"ret\;restore %%g0, %1, %Y0"
[(set_attr "type" "multi")
(set_attr "length" "2")])
@@ -9223,7 +8946,7 @@
(plus:DI (match_operand:DI 1 "arith_operand" "%r")
(match_operand:DI 2 "arith_double_operand" "rHI")))
(return)]
- "TARGET_ARCH64 && ! TARGET_EPILOGUE"
+ "sparc_emitting_epilogue && TARGET_ARCH64"
"ret\;restore %r1, %2, %Y0"
[(set_attr "type" "multi")
(set_attr "length" "2")])
@@ -9233,23 +8956,24 @@
(lo_sum:DI (match_operand:DI 1 "arith_operand" "%r")
(match_operand:DI 2 "immediate_operand" "in")))
(return)]
- "TARGET_ARCH64 && ! TARGET_EPILOGUE && ! TARGET_CM_MEDMID"
+ "sparc_emitting_epilogue && TARGET_ARCH64 && ! TARGET_CM_MEDMID"
"ret\;restore %r1, %%lo(%a2), %Y0"
[(set_attr "type" "multi")
(set_attr "length" "2")])
-;; The following pattern is only generated by delayed-branch scheduling,
-;; when the insn winds up in the epilogue.
(define_insn "*return_sf"
[(set (reg:SF 32)
(match_operand:SF 0 "register_operand" "f"))
(return)]
- "! TARGET_EPILOGUE"
+ "sparc_emitting_epilogue"
"ret\;fmovs\\t%0, %%f0"
[(set_attr "type" "multi")
(set_attr "length" "2")])
;; Now peepholes to do a call followed by a jump.
+;; Do not match this on V9 and later processors, which have a call-return
+;; stack; this peephole corrupts it and makes the code run slower, not faster.
+;; There are no TARGET_ARCH64 patterns because TARGET_ARCH64 implies TARGET_V9.
(define_peephole
[(parallel [(set (match_operand 0 "" "")
@@ -9257,7 +8981,8 @@
(match_operand 2 "" "")))
(clobber (reg:SI 15))])
(set (pc) (label_ref (match_operand 3 "" "")))]
- "short_branch (INSN_UID (insn), INSN_UID (operands[3]))
+ "! TARGET_V9
+ && short_branch (INSN_UID (insn), INSN_UID (operands[3]))
&& (USING_SJLJ_EXCEPTIONS || ! can_throw_internal (ins1))"
"call\\t%a1, %2\\n\\tadd\\t%%o7, (%l3-.-4), %%o7")
@@ -9266,49 +8991,70 @@
(match_operand 1 "" ""))
(clobber (reg:SI 15))])
(set (pc) (label_ref (match_operand 2 "" "")))]
- "short_branch (INSN_UID (insn), INSN_UID (operands[2]))
- && (USING_SJLJ_EXCEPTIONS || ! can_throw_internal (ins1))"
- "call\\t%a0, %1\\n\\tadd\\t%%o7, (%l2-.-4), %%o7")
-
-(define_peephole
- [(parallel [(set (match_operand 0 "" "")
- (call (mem:SI (match_operand:DI 1 "call_operand_address" "ps"))
- (match_operand 2 "" "")))
- (clobber (reg:DI 15))])
- (set (pc) (label_ref (match_operand 3 "" "")))]
- "TARGET_ARCH64
- && short_branch (INSN_UID (insn), INSN_UID (operands[3]))
- && (USING_SJLJ_EXCEPTIONS || ! can_throw_internal (ins1))"
- "call\\t%a1, %2\\n\\tadd\\t%%o7, (%l3-.-4), %%o7")
-
-(define_peephole
- [(parallel [(call (mem:SI (match_operand:DI 0 "call_operand_address" "ps"))
- (match_operand 1 "" ""))
- (clobber (reg:DI 15))])
- (set (pc) (label_ref (match_operand 2 "" "")))]
- "TARGET_ARCH64
+ "! TARGET_V9
&& short_branch (INSN_UID (insn), INSN_UID (operands[2]))
&& (USING_SJLJ_EXCEPTIONS || ! can_throw_internal (ins1))"
"call\\t%a0, %1\\n\\tadd\\t%%o7, (%l2-.-4), %%o7")
-(define_insn "prefetch"
+;; ??? UltraSPARC-III note: A memory operation that loads into the floating-point
+;; ??? register file can, if it hits the prefetch cache, dual-issue with other
+;; ??? memory operations.  With DFA scheduling we might be able to model this,
+;; ??? but it would require a lot of state.
+(define_expand "prefetch"
+ [(match_operand 0 "address_operand" "")
+ (match_operand 1 "const_int_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_V9"
+ "
+{
+ if (TARGET_ARCH64)
+ emit_insn (gen_prefetch_64 (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_prefetch_32 (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_insn "prefetch_64"
[(prefetch (match_operand:DI 0 "address_operand" "p")
(match_operand:DI 1 "const_int_operand" "n")
(match_operand:DI 2 "const_int_operand" "n"))]
- "TARGET_V9"
+ ""
+{
+ static const char * const prefetch_instr[2][2] = {
+ {
+ "prefetch\\t[%a0], 1", /* no locality: prefetch for one read */
+ "prefetch\\t[%a0], 0", /* medium to high locality: prefetch for several reads */
+ },
+ {
+ "prefetch\\t[%a0], 3", /* no locality: prefetch for one write */
+ "prefetch\\t[%a0], 2", /* medium to high locality: prefetch for several writes */
+ }
+ };
+ int read_or_write = INTVAL (operands[1]);
+ int locality = INTVAL (operands[2]);
+
+ if (read_or_write != 0 && read_or_write != 1)
+ abort ();
+ if (locality < 0 || locality > 3)
+ abort ();
+ return prefetch_instr [read_or_write][locality == 0 ? 0 : 1];
+}
+ [(set_attr "type" "load")])
+
+(define_insn "prefetch_32"
+ [(prefetch (match_operand:SI 0 "address_operand" "p")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n"))]
+ ""
{
- static const char * const prefetch_instr[2][4] = {
+ static const char * const prefetch_instr[2][2] = {
{
"prefetch\\t[%a0], 1", /* no locality: prefetch for one read */
- "prefetch\\t[%a0], 0", /* medium locality: prefetch for several reads */
- "prefetch\\t[%a0], 0", /* medium locality: prefetch for several reads */
- "prefetch\\t[%a0], 4", /* high locality: prefetch page */
+ "prefetch\\t[%a0], 0", /* medium to high locality: prefetch for several reads */
},
{
"prefetch\\t[%a0], 3", /* no locality: prefetch for one write */
- "prefetch\\t[%a0], 2", /* medium locality: prefetch for several writes */
- "prefetch\\t[%a0], 2", /* medium locality: prefetch for several writes */
- "prefetch\\t[%a0], 4", /* high locality: prefetch page */
+ "prefetch\\t[%a0], 2", /* medium to high locality: prefetch for several writes */
}
};
int read_or_write = INTVAL (operands[1]);
@@ -9318,7 +9064,7 @@
abort ();
if (locality < 0 || locality > 3)
abort ();
- return prefetch_instr [read_or_write][locality];
+ return prefetch_instr [read_or_write][locality == 0 ? 0 : 1];
}
[(set_attr "type" "load")])
diff --git a/contrib/gcc/config/sparc/splet.h b/contrib/gcc/config/sparc/splet.h
index d2ef4fb..0d3f054 100644
--- a/contrib/gcc/config/sparc/splet.h
+++ b/contrib/gcc/config/sparc/splet.h
@@ -20,7 +20,7 @@ the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#undef TARGET_DEFAULT
-#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE)
+#define TARGET_DEFAULT MASK_APP_REGS
#define CPP_PREDEFINES "-Dsparc -Acpu=sparc -Amachine=sparc"
diff --git a/contrib/gcc/config/sparc/t-crtfm b/contrib/gcc/config/sparc/t-crtfm
new file mode 100644
index 0000000..744537d
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-crtfm
@@ -0,0 +1,4 @@
+EXTRA_PARTS += crtfastmath.o
+
+crtfastmath.o: $(srcdir)/config/sparc/crtfastmath.c $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -c -o crtfastmath.o $(srcdir)/config/sparc/crtfastmath.c
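The new t-crtfm fragment builds crtfastmath.o, which -ffast-math links in
before main.  The crtfastmath.c source is not part of this diff; a hedged
sketch of what a SPARC version plausibly does (the FSR "nonstandard" bit
enables flush-to-zero for subnormals; the bit position and asm constraints
here are assumptions, not quoted from the new file):

    /* Hypothetical sketch of a SPARC crtfastmath constructor.  */
    #define FSR_NS (1 << 22)   /* assumed: nonstandard-FP bit of %fsr */

    static void __attribute__ ((constructor))
    set_fast_math (void)
    {
      unsigned int fsr;

      __asm__ ("st %%fsr, %0" : "=m" (fsr));   /* read %fsr (STFSR) */
      fsr |= FSR_NS;
      __asm__ ("ld %0, %%fsr" : : "m" (fsr));  /* write %fsr (LDFSR) */
    }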
diff --git a/contrib/gcc/config/sparc/t-elf b/contrib/gcc/config/sparc/t-elf
index ead6e25..027940b 100644
--- a/contrib/gcc/config/sparc/t-elf
+++ b/contrib/gcc/config/sparc/t-elf
@@ -1,3 +1,6 @@
+LIB1ASMSRC = sparc/lb1spc.asm
+LIB1ASMFUNCS = _mulsi3 _divsi3 _modsi3
+
# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
@@ -12,12 +15,9 @@ fp-bit.c: $(srcdir)/config/fp-bit.c
# MULTILIB_OPTIONS should have msparclite too, but we'd have to make
# gas build...
-#MULTILIB_OPTIONS = msoft-float mcpu=v8
-MULTILIB_OPTIONS = msoft-float
-#MULTILIB_DIRNAMES = soft v8
-MULTILIB_DIRNAMES = soft
-#MULTILIB_MATCHES = msoft-float=mno-fpu mcpu?v8=mv8
-MULTILIB_MATCHES = msoft-float=mno-fpu
+MULTILIB_OPTIONS = msoft-float mcpu=v8
+MULTILIB_DIRNAMES = soft v8
+MULTILIB_MATCHES = msoft-float=mno-fpu mcpu?v8=mv8
LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/sparc/t-linux64 b/contrib/gcc/config/sparc/t-linux64
index 4f552e0..c93ff25 100644
--- a/contrib/gcc/config/sparc/t-linux64
+++ b/contrib/gcc/config/sparc/t-linux64
@@ -8,3 +8,11 @@ LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o
+
+SHLIB_SLIBDIR_SUFFIXES = 64:64 32:
+
+# Override t-slibgcc-elf-ver to export some libgcc symbols with
+# the symbol versions that glibc used.
+# Avoid the t-linux version file.
+SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver \
+ $(srcdir)/config/sparc/libgcc-sparc-glibc.ver
diff --git a/contrib/gcc/config/sparc/t-netbsd64 b/contrib/gcc/config/sparc/t-netbsd64
new file mode 100644
index 0000000..1292b86
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-netbsd64
@@ -0,0 +1,6 @@
+MULTILIB_OPTIONS = m32/m64
+MULTILIB_DIRNAMES = 32 64
+MULTILIB_MATCHES =
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/sparc/t-sol2-64 b/contrib/gcc/config/sparc/t-sol2-64
index 8d42c44..ef7dee7 100644
--- a/contrib/gcc/config/sparc/t-sol2-64
+++ b/contrib/gcc/config/sparc/t-sol2-64
@@ -6,3 +6,5 @@ LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o gmon.o crt1.o crti.o crtn.o gcrt1.o
+
+SHLIB_SLIBDIR_SUFFIXES = sparcv9:/sparcv9 sparcv7:
diff --git a/contrib/gcc/config/sparc/vxsim.h b/contrib/gcc/config/sparc/vxsim.h
index 94cfb92..c821e82 100644
--- a/contrib/gcc/config/sparc/vxsim.h
+++ b/contrib/gcc/config/sparc/vxsim.h
@@ -71,6 +71,9 @@ do { \
#undef COMMON_ASM_OP
#define COMMON_ASM_OP "\t.common\t"
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
/* This is how to output a definition of an internal numbered label where
PREFIX is the class of label and NUM is the number within the class. */
diff --git a/contrib/gcc/config/sparc/vxsparc64.h b/contrib/gcc/config/sparc/vxsparc64.h
index ecfe25b..358f2c0 100644
--- a/contrib/gcc/config/sparc/vxsparc64.h
+++ b/contrib/gcc/config/sparc/vxsparc64.h
@@ -71,7 +71,7 @@ Boston, MA 02111-1307, USA. */
#define CPP_SPEC "%(cpp_cpu) %(cpp_arch) -DCPU=ULTRASPARC -D__CPU__=CPU"
#undef TARGET_DEFAULT
-#define TARGET_DEFAULT (MASK_APP_REGS | MASK_EPILOGUE | MASK_FPU \
+#define TARGET_DEFAULT (MASK_APP_REGS | MASK_FPU \
| MASK_LONG_DOUBLE_128 | MASK_64BIT)
#undef SPARC_DEFAULT_CMODEL
diff --git a/contrib/gcc/config/t-slibgcc-elf-ver b/contrib/gcc/config/t-slibgcc-elf-ver
index 2912e0a..c02ff9d 100644
--- a/contrib/gcc/config/t-slibgcc-elf-ver
+++ b/contrib/gcc/config/t-slibgcc-elf-ver
@@ -2,21 +2,28 @@
# with the GNU linker.
SHLIB_EXT = .so
-SHLIB_NAME = @shlib_base_name@.so
-SHLIB_SONAME = @shlib_base_name@.so.1
+SHLIB_SOLINK = @shlib_base_name@.so
+SHLIB_SONAME = @shlib_so_name@.so.1
+SHLIB_NAME = @shlib_dir@@shlib_so_name@.so.1
SHLIB_MAP = @shlib_map_file@
SHLIB_OBJS = @shlib_objs@
+SHLIB_SLIBDIR_QUAL = @shlib_slibdir_qual@
+SHLIB_LC = -lc
SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
-Wl,--soname=$(SHLIB_SONAME) \
-Wl,--version-script=$(SHLIB_MAP) \
- -o $(SHLIB_NAME) @multilib_flags@ $(SHLIB_OBJS) -lc && \
- rm -f $(SHLIB_SONAME) && \
- $(LN_S) $(SHLIB_NAME) $(SHLIB_SONAME)
+ -o $(SHLIB_NAME) @multilib_flags@ $(SHLIB_OBJS) $(SHLIB_LC) && \
+ rm -f $(SHLIB_SOLINK) && \
+ $(LN_S) $(SHLIB_NAME) $(SHLIB_SOLINK)
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
-SHLIB_INSTALL = $(INSTALL_DATA) $(SHLIB_NAME) $$(slibdir)/$(SHLIB_SONAME); \
- rm -f $$(slibdir)/$(SHLIB_NAME); \
- $(LN_S) $(SHLIB_SONAME) $$(slibdir)/$(SHLIB_NAME)
+SHLIB_INSTALL = \
+ $$(SHELL) $$(srcdir)/mkinstalldirs $$(slibdir)$(SHLIB_SLIBDIR_QUAL); \
+ $(INSTALL_DATA) $(SHLIB_NAME) \
+ $$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SONAME); \
+ rm -f $$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK); \
+ $(LN_S) $(SHLIB_SONAME) \
+ $$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK)
SHLIB_MKMAP = $(srcdir)/mkmap-symver.awk
SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver
diff --git a/contrib/gcc/config/t-slibgcc-nolc-override b/contrib/gcc/config/t-slibgcc-nolc-override
new file mode 100644
index 0000000..959d2cc
--- /dev/null
+++ b/contrib/gcc/config/t-slibgcc-nolc-override
@@ -0,0 +1 @@
+SHLIB_LC =
diff --git a/contrib/gcc/config/t-slibgcc-sld b/contrib/gcc/config/t-slibgcc-sld
index 35f1bc1..c11a572 100644
--- a/contrib/gcc/config/t-slibgcc-sld
+++ b/contrib/gcc/config/t-slibgcc-sld
@@ -1,21 +1,27 @@
# Build a shared libgcc library with the Solaris linker.
SHLIB_EXT = .so
-SHLIB_NAME = @shlib_base_name@.so
-SHLIB_SONAME = @shlib_base_name@.so.1
+SHLIB_SOLINK = @shlib_base_name@.so
+SHLIB_SONAME = @shlib_so_name@.so.1
+SHLIB_NAME = @shlib_dir@@shlib_so_name@.so.1
SHLIB_MAP = @shlib_map_file@
SHLIB_OBJS = @shlib_objs@
+SHLIB_SLIBDIR_QUAL = @shlib_slibdir_qual@
SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
-Wl,-h,$(SHLIB_SONAME) -Wl,-z,text -Wl,-z,defs \
-Wl,-M,$(SHLIB_MAP) -o $(SHLIB_NAME) \
@multilib_flags@ $(SHLIB_OBJS) -lc && \
- rm -f $(SHLIB_SONAME) && \
- $(LN_S) $(SHLIB_NAME) $(SHLIB_SONAME)
+ rm -f $(SHLIB_SOLINK) && \
+ $(LN_S) $(SHLIB_NAME) $(SHLIB_SOLINK)
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
-SHLIB_INSTALL = $(INSTALL_DATA) $(SHLIB_NAME) $$(slibdir)/$(SHLIB_SONAME); \
- rm -f $$(slibdir)/$(SHLIB_NAME); \
- $(LN_S) $(SHLIB_SONAME) $$(slibdir)/$(SHLIB_NAME)
+SHLIB_INSTALL = \
+ $$(SHELL) $$(srcdir)/mkinstalldirs $$(slibdir)$(SHLIB_SLIBDIR_QUAL); \
+ $(INSTALL_DATA) $(SHLIB_NAME) \
+ $$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SONAME); \
+ rm -f $$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK); \
+ $(LN_S) $(SHLIB_SONAME) \
+ $$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK)
SHLIB_MKMAP = $(srcdir)/mkmap-symver.awk
SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver